summaryrefslogtreecommitdiffstats
path: root/meta-moblin
diff options
context:
space:
mode:
authorRichard Purdie <rpurdie@linux.intel.com>2009-04-21 17:33:19 +0100
committerRichard Purdie <rpurdie@linux.intel.com>2009-04-21 17:33:19 +0100
commitf114fd24924540dd5dfbd7483824d6b30c246bc6 (patch)
tree42936b142dd203c63f5133c95163400641a7ffdf /meta-moblin
parente23c356916550cafbe3a93b28b76e762cb5043b7 (diff)
downloadpoky-f114fd24924540dd5dfbd7483824d6b30c246bc6.tar.gz
linux-moblin: Switch to 2.6.29.1
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
Diffstat (limited to 'meta-moblin')
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0001-drm-remove-define-for-non-linux-systems.patch48
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0002-i915-remove-settable-use_mi_batchbuffer_start.patch60
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0003-i915-Ignore-X-server-provided-mmio-address.patch41
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0004-i915-Use-more-consistent-names-for-regs-and-store.patch2746
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch424
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch46
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0007-i915-Initialize-hardware-status-page-at-device-load.patch137
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch572
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0009-drm-fix-sysfs-error-path.patch23
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0010-i915-separate-suspend-resume-functions.patch1079
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0011-drm-vblank-rework.patch1534
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0012-Export-shmem_file_setup-for-DRM-GEM.patch25
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch24
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch5483
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0015-i915-Add-chip-set-ID-param.patch35
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch205
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0017-i915-Make-use-of-sarea_priv-conditional.patch147
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch44
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch32
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch23
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch23
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch58
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0023-drm-clean-up-many-sparse-warnings-in-i915.patch192
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0024-fastboot-create-a-asynchronous-initlevel.patch133
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch59
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch51
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch37
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0028-fastboot-sync-the-async-execution-before-late_initc.patch92
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0029-fastboot-make-fastboot-a-config-option.patch53
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch64
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch41
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch41
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch32
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0034-fastboot-fix-typo-in-init-Kconfig-text.patch26
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch161
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0036-warning-fix-init-do_mounts_md-c.patch82
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0037-init-initramfs.c-unused-function-when-compiling-wit.patch37
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch38
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch177
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch91
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0041-r8169-8101e.patch940
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0042-intelfb-945gme.patch154
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0043-superreadahead-patch.patch65
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow3137
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch33991
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/i915_split.patch1627
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/psb-driver.patch21566
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch486
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch191
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch58
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch53
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-netbook)1165
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi127
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow8
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst2316
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook52
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-menlow)543
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook (renamed from meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-netbook)2
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch128
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch11
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0002-fastboot-remove-wait-for-all-devices-before-mounti.patch)15
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch56
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0006-Revert-drm-i915-GEM-on-PAE-has-problems-disable.patch)10
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch208
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0005-fastboot-async-enable-default.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch20
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0003-fastboot-remove-duplicate-unpack_to_rootfs.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch285
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch40
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch92
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch28
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch32
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch57
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch83
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch336
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch21
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch38
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch28
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch37524
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0001-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0007-acer-error-msg.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0004-superreadahead-patch.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch6095
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch130
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch69
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch139
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.27.bb59
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.28+2.6.29-rc2.bb24
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb46
89 files changed, 49952 insertions, 76319 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0001-drm-remove-define-for-non-linux-systems.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0001-drm-remove-define-for-non-linux-systems.patch
deleted file mode 100644
index 588c1af70b..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0001-drm-remove-define-for-non-linux-systems.patch
+++ /dev/null
@@ -1,48 +0,0 @@
1commit 2e6ec7cdc09f36be1cbe9aeaccfc45f307fc0060
2Author: Carlos R. Mafra <crmafra2@gmail.com>
3Date: Wed Jul 30 12:29:37 2008 -0700
4
5 drm: remove #define's for non-linux systems
6
7 There is no point in considering FreeBSD et al. in the linux kernel
8 source code.
9
10 Signed-off-by: Carlos R. Mafra <crmafra@gmail.com>
11 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
12 Signed-off-by: Dave Airlie <airlied@redhat.com>
13
14diff --git a/include/drm/drm.h b/include/drm/drm.h
15index 38d3c6b..0864c69 100644
16--- a/include/drm/drm.h
17+++ b/include/drm/drm.h
18@@ -36,7 +36,6 @@
19 #ifndef _DRM_H_
20 #define _DRM_H_
21
22-#if defined(__linux__)
23 #if defined(__KERNEL__)
24 #endif
25 #include <asm/ioctl.h> /* For _IO* macros */
26@@ -46,22 +45,6 @@
27 #define DRM_IOC_WRITE _IOC_WRITE
28 #define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
29 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
30-#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
31-#if defined(__FreeBSD__) && defined(IN_MODULE)
32-/* Prevent name collision when including sys/ioccom.h */
33-#undef ioctl
34-#include <sys/ioccom.h>
35-#define ioctl(a,b,c) xf86ioctl(a,b,c)
36-#else
37-#include <sys/ioccom.h>
38-#endif /* __FreeBSD__ && xf86ioctl */
39-#define DRM_IOCTL_NR(n) ((n) & 0xff)
40-#define DRM_IOC_VOID IOC_VOID
41-#define DRM_IOC_READ IOC_OUT
42-#define DRM_IOC_WRITE IOC_IN
43-#define DRM_IOC_READWRITE IOC_INOUT
44-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
45-#endif
46
47 #define DRM_MAJOR 226
48 #define DRM_MAX_MINOR 15
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0002-i915-remove-settable-use_mi_batchbuffer_start.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0002-i915-remove-settable-use_mi_batchbuffer_start.patch
deleted file mode 100644
index f3c41f7cbd..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0002-i915-remove-settable-use_mi_batchbuffer_start.patch
+++ /dev/null
@@ -1,60 +0,0 @@
1commit 91019197abbfde388d0b71b0fc8979a936c23fe3
2Author: Keith Packard <keithp@keithp.com>
3Date: Wed Jul 30 12:28:47 2008 -0700
4
5 i915: remove settable use_mi_batchbuffer_start
6
7 The driver can know what hardware requires MI_BATCH_BUFFER vs
8 MI_BATCH_BUFFER_START; there's no reason to let user mode configure this.
9
10 Signed-off-by: Eric Anholt <eric@anholt.net>
11 Signed-off-by: Dave Airlie <airlied@redhat.com>
12
13diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
14index 8897434..24adbde 100644
15--- a/drivers/gpu/drm/i915/i915_dma.c
16+++ b/drivers/gpu/drm/i915/i915_dma.c
17@@ -159,13 +159,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
18 dev_priv->current_page = 0;
19 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
20
21- /* We are using separate values as placeholders for mechanisms for
22- * private backbuffer/depthbuffer usage.
23- */
24- dev_priv->use_mi_batchbuffer_start = 0;
25- if (IS_I965G(dev)) /* 965 doesn't support older method */
26- dev_priv->use_mi_batchbuffer_start = 1;
27-
28 /* Allow hardware batchbuffers unless told otherwise.
29 */
30 dev_priv->allow_batchbuffer = 1;
31@@ -486,7 +479,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
32 return ret;
33 }
34
35- if (dev_priv->use_mi_batchbuffer_start) {
36+ if (!IS_I830(dev) && !IS_845G(dev)) {
37 BEGIN_LP_RING(2);
38 if (IS_I965G(dev)) {
39 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
40@@ -697,8 +690,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
41
42 switch (param->param) {
43 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
44- if (!IS_I965G(dev))
45- dev_priv->use_mi_batchbuffer_start = param->value;
46 break;
47 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
48 dev_priv->tex_lru_log_granularity = param->value;
49diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
50index d7326d9..2d441d3 100644
51--- a/drivers/gpu/drm/i915/i915_drv.h
52+++ b/drivers/gpu/drm/i915/i915_drv.h
53@@ -99,7 +99,6 @@ typedef struct drm_i915_private {
54 int front_offset;
55 int current_page;
56 int page_flipping;
57- int use_mi_batchbuffer_start;
58
59 wait_queue_head_t irq_queue;
60 atomic_t irq_received;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0003-i915-Ignore-X-server-provided-mmio-address.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0003-i915-Ignore-X-server-provided-mmio-address.patch
deleted file mode 100644
index 9f7e0b4bcd..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0003-i915-Ignore-X-server-provided-mmio-address.patch
+++ /dev/null
@@ -1,41 +0,0 @@
1commit 20ae3cf7d4a9ae8d23bcffa67c9a34fc2640d217
2Author: Keith Packard <keithp@keithp.com>
3Date: Wed Jul 30 12:36:08 2008 -0700
4
5 i915: Ignore X server provided mmio address
6
7 It is already correctly detected by the kernel for use in suspend/resume.
8
9 Signed-off-by: Eric Anholt <eric@anholt.net>
10 Signed-off-by: Dave Airlie <airlied@redhat.com>
11
12diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
13index 24adbde..01a869b 100644
14--- a/drivers/gpu/drm/i915/i915_dma.c
15+++ b/drivers/gpu/drm/i915/i915_dma.c
16@@ -121,13 +121,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
17 return -EINVAL;
18 }
19
20- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
21- if (!dev_priv->mmio_map) {
22- i915_dma_cleanup(dev);
23- DRM_ERROR("can not find mmio map!\n");
24- return -EINVAL;
25- }
26-
27 dev_priv->sarea_priv = (drm_i915_sarea_t *)
28 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
29
30@@ -194,11 +187,6 @@ static int i915_dma_resume(struct drm_device * dev)
31 return -EINVAL;
32 }
33
34- if (!dev_priv->mmio_map) {
35- DRM_ERROR("can not find mmio map!\n");
36- return -EINVAL;
37- }
38-
39 if (dev_priv->ring.map.handle == NULL) {
40 DRM_ERROR("can not ioremap virtual address for"
41 " ring buffer\n");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0004-i915-Use-more-consistent-names-for-regs-and-store.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0004-i915-Use-more-consistent-names-for-regs-and-store.patch
deleted file mode 100644
index f7a310ea60..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0004-i915-Use-more-consistent-names-for-regs-and-store.patch
+++ /dev/null
@@ -1,2746 +0,0 @@
1commit 573e91575687018b4307f53a50f4da0084dbdf3d
2Author: Jesse Barnes <jbarnes@virtuousgeek.org>
3Date: Tue Jul 29 11:54:06 2008 -0700
4
5 i915: Use more consistent names for regs, and store them in a separate file.
6
7 Signed-off-by: Eric Anholt <eric@anholt.net>
8 Signed-off-by: Dave Airlie <airlied@redhat.com>
9
10diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
11index 01a869b..7be580b 100644
12--- a/drivers/gpu/drm/i915/i915_dma.c
13+++ b/drivers/gpu/drm/i915/i915_dma.c
14@@ -40,11 +40,11 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
15 {
16 drm_i915_private_t *dev_priv = dev->dev_private;
17 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
18- u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
19+ u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
20 int i;
21
22 for (i = 0; i < 10000; i++) {
23- ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
24+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
25 ring->space = ring->head - (ring->tail + 8);
26 if (ring->space < 0)
27 ring->space += ring->Size;
28@@ -67,8 +67,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
29 drm_i915_private_t *dev_priv = dev->dev_private;
30 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
31
32- ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
33- ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
34+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
35+ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
36 ring->space = ring->head - (ring->tail + 8);
37 if (ring->space < 0)
38 ring->space += ring->Size;
39@@ -98,13 +98,13 @@ static int i915_dma_cleanup(struct drm_device * dev)
40 drm_pci_free(dev, dev_priv->status_page_dmah);
41 dev_priv->status_page_dmah = NULL;
42 /* Need to rewrite hardware status page */
43- I915_WRITE(0x02080, 0x1ffff000);
44+ I915_WRITE(HWS_PGA, 0x1ffff000);
45 }
46
47 if (dev_priv->status_gfx_addr) {
48 dev_priv->status_gfx_addr = 0;
49 drm_core_ioremapfree(&dev_priv->hws_map, dev);
50- I915_WRITE(0x2080, 0x1ffff000);
51+ I915_WRITE(HWS_PGA, 0x1ffff000);
52 }
53
54 return 0;
55@@ -170,7 +170,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
56 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
57
58 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
59- I915_WRITE(0x02080, dev_priv->dma_status_page);
60+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
61 }
62 DRM_DEBUG("Enabled hardware status page\n");
63 return 0;
64@@ -201,9 +201,9 @@ static int i915_dma_resume(struct drm_device * dev)
65 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
66
67 if (dev_priv->status_gfx_addr != 0)
68- I915_WRITE(0x02080, dev_priv->status_gfx_addr);
69+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
70 else
71- I915_WRITE(0x02080, dev_priv->dma_status_page);
72+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
73 DRM_DEBUG("Enabled hardware status page\n");
74
75 return 0;
76@@ -402,8 +402,8 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
77 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
78
79 BEGIN_LP_RING(4);
80- OUT_RING(CMD_STORE_DWORD_IDX);
81- OUT_RING(20);
82+ OUT_RING(MI_STORE_DWORD_INDEX);
83+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
84 OUT_RING(dev_priv->counter);
85 OUT_RING(0);
86 ADVANCE_LP_RING();
87@@ -505,7 +505,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
88 i915_kernel_lost_context(dev);
89
90 BEGIN_LP_RING(2);
91- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
92+ OUT_RING(MI_FLUSH | MI_READ_FLUSH);
93 OUT_RING(0);
94 ADVANCE_LP_RING();
95
96@@ -530,8 +530,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
97 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
98
99 BEGIN_LP_RING(4);
100- OUT_RING(CMD_STORE_DWORD_IDX);
101- OUT_RING(20);
102+ OUT_RING(MI_STORE_DWORD_INDEX);
103+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
104 OUT_RING(dev_priv->counter);
105 OUT_RING(0);
106 ADVANCE_LP_RING();
107@@ -728,8 +728,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
108 dev_priv->hw_status_page = dev_priv->hws_map.handle;
109
110 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
111- I915_WRITE(0x02080, dev_priv->status_gfx_addr);
112- DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
113+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
114+ DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
115 dev_priv->status_gfx_addr);
116 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
117 return 0;
118diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
119index 93aed1c..6c99aab 100644
120--- a/drivers/gpu/drm/i915/i915_drv.c
121+++ b/drivers/gpu/drm/i915/i915_drv.c
122@@ -279,13 +279,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
123 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
124 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
125 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
126- dev_priv->saveDSPABASE = I915_READ(DSPABASE);
127+ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
128 if (IS_I965G(dev)) {
129 dev_priv->saveDSPASURF = I915_READ(DSPASURF);
130 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
131 }
132 i915_save_palette(dev, PIPE_A);
133- dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
134+ dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
135
136 /* Pipe & plane B info */
137 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
138@@ -307,13 +307,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
139 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
140 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
141 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
142- dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
143+ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
144 if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
145 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
146 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
147 }
148 i915_save_palette(dev, PIPE_B);
149- dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
150+ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
151
152 /* CRT state */
153 dev_priv->saveADPA = I915_READ(ADPA);
154@@ -328,9 +328,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
155 dev_priv->saveLVDS = I915_READ(LVDS);
156 if (!IS_I830(dev) && !IS_845G(dev))
157 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
158- dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
159- dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
160- dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
161+ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
162+ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
163+ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
164
165 /* FIXME: save TV & SDVO state */
166
167@@ -341,19 +341,19 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
168 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
169
170 /* Interrupt state */
171- dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
172- dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
173- dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
174+ dev_priv->saveIIR = I915_READ(IIR);
175+ dev_priv->saveIER = I915_READ(IER);
176+ dev_priv->saveIMR = I915_READ(IMR);
177
178 /* VGA state */
179- dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
180- dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
181- dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
182+ dev_priv->saveVGA0 = I915_READ(VGA0);
183+ dev_priv->saveVGA1 = I915_READ(VGA1);
184+ dev_priv->saveVGA_PD = I915_READ(VGA_PD);
185 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
186
187 /* Clock gating state */
188 dev_priv->saveD_STATE = I915_READ(D_STATE);
189- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
190+ dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
191
192 /* Cache mode state */
193 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
194@@ -363,7 +363,7 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
195
196 /* Scratch space */
197 for (i = 0; i < 16; i++) {
198- dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
199+ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
200 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
201 }
202 for (i = 0; i < 3; i++)
203@@ -424,7 +424,7 @@ static int i915_resume(struct drm_device *dev)
204 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
205 I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
206 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
207- I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
208+ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
209 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
210 if (IS_I965G(dev)) {
211 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
212@@ -436,7 +436,7 @@ static int i915_resume(struct drm_device *dev)
213 i915_restore_palette(dev, PIPE_A);
214 /* Enable the plane */
215 I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
216- I915_WRITE(DSPABASE, I915_READ(DSPABASE));
217+ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
218
219 /* Pipe & plane B info */
220 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
221@@ -466,7 +466,7 @@ static int i915_resume(struct drm_device *dev)
222 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
223 I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
224 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
225- I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
226+ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
227 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
228 if (IS_I965G(dev)) {
229 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
230@@ -478,7 +478,7 @@ static int i915_resume(struct drm_device *dev)
231 i915_restore_palette(dev, PIPE_B);
232 /* Enable the plane */
233 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
234- I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
235+ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
236
237 /* CRT state */
238 I915_WRITE(ADPA, dev_priv->saveADPA);
239@@ -493,9 +493,9 @@ static int i915_resume(struct drm_device *dev)
240
241 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
242 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
243- I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
244- I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
245- I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
246+ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
247+ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
248+ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
249 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
250
251 /* FIXME: restore TV & SDVO state */
252@@ -508,14 +508,14 @@ static int i915_resume(struct drm_device *dev)
253
254 /* VGA state */
255 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
256- I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
257- I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
258- I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
259+ I915_WRITE(VGA0, dev_priv->saveVGA0);
260+ I915_WRITE(VGA1, dev_priv->saveVGA1);
261+ I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
262 udelay(150);
263
264 /* Clock gating state */
265 I915_WRITE (D_STATE, dev_priv->saveD_STATE);
266- I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
267+ I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
268
269 /* Cache mode state */
270 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
271@@ -524,7 +524,7 @@ static int i915_resume(struct drm_device *dev)
272 I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
273
274 for (i = 0; i < 16; i++) {
275- I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
276+ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
277 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
278 }
279 for (i = 0; i < 3; i++)
280diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
281index 2d441d3..afb51a3 100644
282--- a/drivers/gpu/drm/i915/i915_drv.h
283+++ b/drivers/gpu/drm/i915/i915_drv.h
284@@ -30,6 +30,8 @@
285 #ifndef _I915_DRV_H_
286 #define _I915_DRV_H_
287
288+#include "i915_reg.h"
289+
290 /* General customization:
291 */
292
293@@ -138,7 +140,7 @@ typedef struct drm_i915_private {
294 u32 saveDSPASTRIDE;
295 u32 saveDSPASIZE;
296 u32 saveDSPAPOS;
297- u32 saveDSPABASE;
298+ u32 saveDSPAADDR;
299 u32 saveDSPASURF;
300 u32 saveDSPATILEOFF;
301 u32 savePFIT_PGM_RATIOS;
302@@ -159,24 +161,24 @@ typedef struct drm_i915_private {
303 u32 saveDSPBSTRIDE;
304 u32 saveDSPBSIZE;
305 u32 saveDSPBPOS;
306- u32 saveDSPBBASE;
307+ u32 saveDSPBADDR;
308 u32 saveDSPBSURF;
309 u32 saveDSPBTILEOFF;
310- u32 saveVCLK_DIVISOR_VGA0;
311- u32 saveVCLK_DIVISOR_VGA1;
312- u32 saveVCLK_POST_DIV;
313+ u32 saveVGA0;
314+ u32 saveVGA1;
315+ u32 saveVGA_PD;
316 u32 saveVGACNTRL;
317 u32 saveADPA;
318 u32 saveLVDS;
319- u32 saveLVDSPP_ON;
320- u32 saveLVDSPP_OFF;
321+ u32 savePP_ON_DELAYS;
322+ u32 savePP_OFF_DELAYS;
323 u32 saveDVOA;
324 u32 saveDVOB;
325 u32 saveDVOC;
326 u32 savePP_ON;
327 u32 savePP_OFF;
328 u32 savePP_CONTROL;
329- u32 savePP_CYCLE;
330+ u32 savePP_DIVISOR;
331 u32 savePFIT_CONTROL;
332 u32 save_palette_a[256];
333 u32 save_palette_b[256];
334@@ -189,7 +191,7 @@ typedef struct drm_i915_private {
335 u32 saveIMR;
336 u32 saveCACHE_MODE_0;
337 u32 saveD_STATE;
338- u32 saveDSPCLK_GATE_D;
339+ u32 saveCG_2D_DIS;
340 u32 saveMI_ARB_STATE;
341 u32 saveSWF0[16];
342 u32 saveSWF1[16];
343@@ -283,816 +285,26 @@ extern void i915_mem_release(struct drm_device * dev,
344 if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
345 dev_priv->ring.tail = outring; \
346 dev_priv->ring.space -= outcount * 4; \
347- I915_WRITE(LP_RING + RING_TAIL, outring); \
348+ I915_WRITE(PRB0_TAIL, outring); \
349 } while(0)
350
351-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
352-
353-/* Extended config space */
354-#define LBB 0xf4
355-
356-/* VGA stuff */
357-
358-#define VGA_ST01_MDA 0x3ba
359-#define VGA_ST01_CGA 0x3da
360-
361-#define VGA_MSR_WRITE 0x3c2
362-#define VGA_MSR_READ 0x3cc
363-#define VGA_MSR_MEM_EN (1<<1)
364-#define VGA_MSR_CGA_MODE (1<<0)
365-
366-#define VGA_SR_INDEX 0x3c4
367-#define VGA_SR_DATA 0x3c5
368-
369-#define VGA_AR_INDEX 0x3c0
370-#define VGA_AR_VID_EN (1<<5)
371-#define VGA_AR_DATA_WRITE 0x3c0
372-#define VGA_AR_DATA_READ 0x3c1
373-
374-#define VGA_GR_INDEX 0x3ce
375-#define VGA_GR_DATA 0x3cf
376-/* GR05 */
377-#define VGA_GR_MEM_READ_MODE_SHIFT 3
378-#define VGA_GR_MEM_READ_MODE_PLANE 1
379-/* GR06 */
380-#define VGA_GR_MEM_MODE_MASK 0xc
381-#define VGA_GR_MEM_MODE_SHIFT 2
382-#define VGA_GR_MEM_A0000_AFFFF 0
383-#define VGA_GR_MEM_A0000_BFFFF 1
384-#define VGA_GR_MEM_B0000_B7FFF 2
385-#define VGA_GR_MEM_B0000_BFFFF 3
386-
387-#define VGA_DACMASK 0x3c6
388-#define VGA_DACRX 0x3c7
389-#define VGA_DACWX 0x3c8
390-#define VGA_DACDATA 0x3c9
391-
392-#define VGA_CR_INDEX_MDA 0x3b4
393-#define VGA_CR_DATA_MDA 0x3b5
394-#define VGA_CR_INDEX_CGA 0x3d4
395-#define VGA_CR_DATA_CGA 0x3d5
396-
397-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
398-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
399-#define CMD_REPORT_HEAD (7<<23)
400-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
401-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
402-
403-#define INST_PARSER_CLIENT 0x00000000
404-#define INST_OP_FLUSH 0x02000000
405-#define INST_FLUSH_MAP_CACHE 0x00000001
406-
407-#define BB1_START_ADDR_MASK (~0x7)
408-#define BB1_PROTECTED (1<<0)
409-#define BB1_UNPROTECTED (0<<0)
410-#define BB2_END_ADDR_MASK (~0x7)
411-
412-/* Framebuffer compression */
413-#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
414-#define FBC_LL_BASE 0x03204 /* 4k page aligned */
415-#define FBC_CONTROL 0x03208
416-#define FBC_CTL_EN (1<<31)
417-#define FBC_CTL_PERIODIC (1<<30)
418-#define FBC_CTL_INTERVAL_SHIFT (16)
419-#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
420-#define FBC_CTL_STRIDE_SHIFT (5)
421-#define FBC_CTL_FENCENO (1<<0)
422-#define FBC_COMMAND 0x0320c
423-#define FBC_CMD_COMPRESS (1<<0)
424-#define FBC_STATUS 0x03210
425-#define FBC_STAT_COMPRESSING (1<<31)
426-#define FBC_STAT_COMPRESSED (1<<30)
427-#define FBC_STAT_MODIFIED (1<<29)
428-#define FBC_STAT_CURRENT_LINE (1<<0)
429-#define FBC_CONTROL2 0x03214
430-#define FBC_CTL_FENCE_DBL (0<<4)
431-#define FBC_CTL_IDLE_IMM (0<<2)
432-#define FBC_CTL_IDLE_FULL (1<<2)
433-#define FBC_CTL_IDLE_LINE (2<<2)
434-#define FBC_CTL_IDLE_DEBUG (3<<2)
435-#define FBC_CTL_CPU_FENCE (1<<1)
436-#define FBC_CTL_PLANEA (0<<0)
437-#define FBC_CTL_PLANEB (1<<0)
438-#define FBC_FENCE_OFF 0x0321b
439-
440-#define FBC_LL_SIZE (1536)
441-#define FBC_LL_PAD (32)
442-
443-/* Interrupt bits:
444- */
445-#define USER_INT_FLAG (1<<1)
446-#define VSYNC_PIPEB_FLAG (1<<5)
447-#define VSYNC_PIPEA_FLAG (1<<7)
448-#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
449-
450-#define I915REG_HWSTAM 0x02098
451-#define I915REG_INT_IDENTITY_R 0x020a4
452-#define I915REG_INT_MASK_R 0x020a8
453-#define I915REG_INT_ENABLE_R 0x020a0
454-
455-#define I915REG_PIPEASTAT 0x70024
456-#define I915REG_PIPEBSTAT 0x71024
457-
458-#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
459-#define I915_VBLANK_CLEAR (1UL<<1)
460-
461-#define SRX_INDEX 0x3c4
462-#define SRX_DATA 0x3c5
463-#define SR01 1
464-#define SR01_SCREEN_OFF (1<<5)
465-
466-#define PPCR 0x61204
467-#define PPCR_ON (1<<0)
468-
469-#define DVOB 0x61140
470-#define DVOB_ON (1<<31)
471-#define DVOC 0x61160
472-#define DVOC_ON (1<<31)
473-#define LVDS 0x61180
474-#define LVDS_ON (1<<31)
475-
476-#define ADPA 0x61100
477-#define ADPA_DPMS_MASK (~(3<<10))
478-#define ADPA_DPMS_ON (0<<10)
479-#define ADPA_DPMS_SUSPEND (1<<10)
480-#define ADPA_DPMS_STANDBY (2<<10)
481-#define ADPA_DPMS_OFF (3<<10)
482-
483-#define NOPID 0x2094
484-#define LP_RING 0x2030
485-#define HP_RING 0x2040
486-/* The binner has its own ring buffer:
487- */
488-#define HWB_RING 0x2400
489-
490-#define RING_TAIL 0x00
491-#define TAIL_ADDR 0x001FFFF8
492-#define RING_HEAD 0x04
493-#define HEAD_WRAP_COUNT 0xFFE00000
494-#define HEAD_WRAP_ONE 0x00200000
495-#define HEAD_ADDR 0x001FFFFC
496-#define RING_START 0x08
497-#define START_ADDR 0x0xFFFFF000
498-#define RING_LEN 0x0C
499-#define RING_NR_PAGES 0x001FF000
500-#define RING_REPORT_MASK 0x00000006
501-#define RING_REPORT_64K 0x00000002
502-#define RING_REPORT_128K 0x00000004
503-#define RING_NO_REPORT 0x00000000
504-#define RING_VALID_MASK 0x00000001
505-#define RING_VALID 0x00000001
506-#define RING_INVALID 0x00000000
507-
508-/* Instruction parser error reg:
509- */
510-#define IPEIR 0x2088
511-
512-/* Scratch pad debug 0 reg:
513- */
514-#define SCPD0 0x209c
515-
516-/* Error status reg:
517- */
518-#define ESR 0x20b8
519-
520-/* Secondary DMA fetch address debug reg:
521- */
522-#define DMA_FADD_S 0x20d4
523-
524-/* Memory Interface Arbitration State
525- */
526-#define MI_ARB_STATE 0x20e4
527-
528-/* Cache mode 0 reg.
529- * - Manipulating render cache behaviour is central
530- * to the concept of zone rendering, tuning this reg can help avoid
531- * unnecessary render cache reads and even writes (for z/stencil)
532- * at beginning and end of scene.
533- *
534- * - To change a bit, write to this reg with a mask bit set and the
535- * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
536- */
537-#define Cache_Mode_0 0x2120
538-#define CACHE_MODE_0 0x2120
539-#define CM0_MASK_SHIFT 16
540-#define CM0_IZ_OPT_DISABLE (1<<6)
541-#define CM0_ZR_OPT_DISABLE (1<<5)
542-#define CM0_DEPTH_EVICT_DISABLE (1<<4)
543-#define CM0_COLOR_EVICT_DISABLE (1<<3)
544-#define CM0_DEPTH_WRITE_DISABLE (1<<1)
545-#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
546-
547-
548-/* Graphics flush control. A CPU write flushes the GWB of all writes.
549- * The data is discarded.
550- */
551-#define GFX_FLSH_CNTL 0x2170
552-
553-/* Binner control. Defines the location of the bin pointer list:
554- */
555-#define BINCTL 0x2420
556-#define BC_MASK (1 << 9)
557-
558-/* Binned scene info.
559- */
560-#define BINSCENE 0x2428
561-#define BS_OP_LOAD (1 << 8)
562-#define BS_MASK (1 << 22)
563-
564-/* Bin command parser debug reg:
565- */
566-#define BCPD 0x2480
567-
568-/* Bin memory control debug reg:
569- */
570-#define BMCD 0x2484
571-
572-/* Bin data cache debug reg:
573- */
574-#define BDCD 0x2488
575-
576-/* Binner pointer cache debug reg:
577- */
578-#define BPCD 0x248c
579-
580-/* Binner scratch pad debug reg:
581- */
582-#define BINSKPD 0x24f0
583-
584-/* HWB scratch pad debug reg:
585- */
586-#define HWBSKPD 0x24f4
587-
588-/* Binner memory pool reg:
589- */
590-#define BMP_BUFFER 0x2430
591-#define BMP_PAGE_SIZE_4K (0 << 10)
592-#define BMP_BUFFER_SIZE_SHIFT 1
593-#define BMP_ENABLE (1 << 0)
594-
595-/* Get/put memory from the binner memory pool:
596- */
597-#define BMP_GET 0x2438
598-#define BMP_PUT 0x2440
599-#define BMP_OFFSET_SHIFT 5
600-
601-/* 3D state packets:
602- */
603-#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
604-
605-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
606-#define SC_UPDATE_SCISSOR (0x1<<1)
607-#define SC_ENABLE_MASK (0x1<<0)
608-#define SC_ENABLE (0x1<<0)
609-
610-#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
611-
612-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
613-#define SCI_YMIN_MASK (0xffff<<16)
614-#define SCI_XMIN_MASK (0xffff<<0)
615-#define SCI_YMAX_MASK (0xffff<<16)
616-#define SCI_XMAX_MASK (0xffff<<0)
617-
618-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
619-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
620-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
621-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
622-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
623-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
624-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
625-
626-#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
627-
628-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
629-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
630-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
631-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
632-#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
633-#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
634-
635-#define MI_BATCH_BUFFER ((0x30<<23)|1)
636-#define MI_BATCH_BUFFER_START (0x31<<23)
637-#define MI_BATCH_BUFFER_END (0xA<<23)
638-#define MI_BATCH_NON_SECURE (1)
639-#define MI_BATCH_NON_SECURE_I965 (1<<8)
640-
641-#define MI_WAIT_FOR_EVENT ((0x3<<23))
642-#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
643-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
644-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
645-
646-#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
647-
648-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
649-#define ASYNC_FLIP (1<<22)
650-#define DISPLAY_PLANE_A (0<<20)
651-#define DISPLAY_PLANE_B (1<<20)
652-
653-/* Display regs */
654-#define DSPACNTR 0x70180
655-#define DSPBCNTR 0x71180
656-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
657-
658-/* Define the region of interest for the binner:
659- */
660-#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
661-
662-#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
663-
664-#define CMD_MI_FLUSH (0x04 << 23)
665-#define MI_NO_WRITE_FLUSH (1 << 2)
666-#define MI_READ_FLUSH (1 << 0)
667-#define MI_EXE_FLUSH (1 << 1)
668-#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
669-#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
670-
671-#define BREADCRUMB_BITS 31
672-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
673-
674-#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
675-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
676-
677-#define BLC_PWM_CTL 0x61254
678-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
679-
680-#define BLC_PWM_CTL2 0x61250
681 /**
682- * This is the most significant 15 bits of the number of backlight cycles in a
683- * complete cycle of the modulated backlight control.
684+ * Reads a dword out of the status page, which is written to from the command
685+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
686+ * MI_STORE_DATA_IMM.
687 *
688- * The actual value is this field multiplied by two.
689- */
690-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
691-#define BLM_LEGACY_MODE (1 << 16)
692-/**
693- * This is the number of cycles out of the backlight modulation cycle for which
694- * the backlight is on.
695+ * The following dwords have a reserved meaning:
696+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
697+ * 4: ring 0 head pointer
698+ * 5: ring 1 head pointer (915-class)
699+ * 6: ring 2 head pointer (915-class)
700 *
701- * This field must be no greater than the number of cycles in the complete
702- * backlight modulation cycle.
703- */
704-#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
705-#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
706-
707-#define I915_GCFGC 0xf0
708-#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
709-#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
710-#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
711-#define I915_DISPLAY_CLOCK_MASK (7 << 4)
712-
713-#define I855_HPLLCC 0xc0
714-#define I855_CLOCK_CONTROL_MASK (3 << 0)
715-#define I855_CLOCK_133_200 (0 << 0)
716-#define I855_CLOCK_100_200 (1 << 0)
717-#define I855_CLOCK_100_133 (2 << 0)
718-#define I855_CLOCK_166_250 (3 << 0)
719-
720-/* p317, 319
721+ * The area from dword 0x10 to 0x3ff is available for driver usage.
722 */
723-#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
724-#define VCLK2_VCO_N 0x600a
725-#define VCLK2_VCO_DIV_SEL 0x6012
726-
727-#define VCLK_DIVISOR_VGA0 0x6000
728-#define VCLK_DIVISOR_VGA1 0x6004
729-#define VCLK_POST_DIV 0x6010
730-/** Selects a post divisor of 4 instead of 2. */
731-# define VGA1_PD_P2_DIV_4 (1 << 15)
732-/** Overrides the p2 post divisor field */
733-# define VGA1_PD_P1_DIV_2 (1 << 13)
734-# define VGA1_PD_P1_SHIFT 8
735-/** P1 value is 2 greater than this field */
736-# define VGA1_PD_P1_MASK (0x1f << 8)
737-/** Selects a post divisor of 4 instead of 2. */
738-# define VGA0_PD_P2_DIV_4 (1 << 7)
739-/** Overrides the p2 post divisor field */
740-# define VGA0_PD_P1_DIV_2 (1 << 5)
741-# define VGA0_PD_P1_SHIFT 0
742-/** P1 value is 2 greater than this field */
743-# define VGA0_PD_P1_MASK (0x1f << 0)
744-
745-/* PCI D state control register */
746-#define D_STATE 0x6104
747-#define DSPCLK_GATE_D 0x6200
748-
749-/* I830 CRTC registers */
750-#define HTOTAL_A 0x60000
751-#define HBLANK_A 0x60004
752-#define HSYNC_A 0x60008
753-#define VTOTAL_A 0x6000c
754-#define VBLANK_A 0x60010
755-#define VSYNC_A 0x60014
756-#define PIPEASRC 0x6001c
757-#define BCLRPAT_A 0x60020
758-#define VSYNCSHIFT_A 0x60028
759-
760-#define HTOTAL_B 0x61000
761-#define HBLANK_B 0x61004
762-#define HSYNC_B 0x61008
763-#define VTOTAL_B 0x6100c
764-#define VBLANK_B 0x61010
765-#define VSYNC_B 0x61014
766-#define PIPEBSRC 0x6101c
767-#define BCLRPAT_B 0x61020
768-#define VSYNCSHIFT_B 0x61028
769-
770-#define PP_STATUS 0x61200
771-# define PP_ON (1 << 31)
772-/**
773- * Indicates that all dependencies of the panel are on:
774- *
775- * - PLL enabled
776- * - pipe enabled
777- * - LVDS/DVOB/DVOC on
778- */
779-# define PP_READY (1 << 30)
780-# define PP_SEQUENCE_NONE (0 << 28)
781-# define PP_SEQUENCE_ON (1 << 28)
782-# define PP_SEQUENCE_OFF (2 << 28)
783-# define PP_SEQUENCE_MASK 0x30000000
784-#define PP_CONTROL 0x61204
785-# define POWER_TARGET_ON (1 << 0)
786-
787-#define LVDSPP_ON 0x61208
788-#define LVDSPP_OFF 0x6120c
789-#define PP_CYCLE 0x61210
790-
791-#define PFIT_CONTROL 0x61230
792-# define PFIT_ENABLE (1 << 31)
793-# define PFIT_PIPE_MASK (3 << 29)
794-# define PFIT_PIPE_SHIFT 29
795-# define VERT_INTERP_DISABLE (0 << 10)
796-# define VERT_INTERP_BILINEAR (1 << 10)
797-# define VERT_INTERP_MASK (3 << 10)
798-# define VERT_AUTO_SCALE (1 << 9)
799-# define HORIZ_INTERP_DISABLE (0 << 6)
800-# define HORIZ_INTERP_BILINEAR (1 << 6)
801-# define HORIZ_INTERP_MASK (3 << 6)
802-# define HORIZ_AUTO_SCALE (1 << 5)
803-# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
804-
805-#define PFIT_PGM_RATIOS 0x61234
806-# define PFIT_VERT_SCALE_MASK 0xfff00000
807-# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
808-
809-#define PFIT_AUTO_RATIOS 0x61238
810-
811-
812-#define DPLL_A 0x06014
813-#define DPLL_B 0x06018
814-# define DPLL_VCO_ENABLE (1 << 31)
815-# define DPLL_DVO_HIGH_SPEED (1 << 30)
816-# define DPLL_SYNCLOCK_ENABLE (1 << 29)
817-# define DPLL_VGA_MODE_DIS (1 << 28)
818-# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
819-# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
820-# define DPLL_MODE_MASK (3 << 26)
821-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
822-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
823-# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
824-# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
825-# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
826-# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
827-/**
828- * The i830 generation, in DAC/serial mode, defines p1 as two plus this
829- * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
830- */
831-# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
832-/**
833- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
834- * this field (only one bit may be set).
835- */
836-# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
837-# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
838-# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
839-# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
840-# define PLL_REF_INPUT_DREFCLK (0 << 13)
841-# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
842-# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
843-# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
844-# define PLL_REF_INPUT_MASK (3 << 13)
845-# define PLL_LOAD_PULSE_PHASE_SHIFT 9
846-/*
847- * Parallel to Serial Load Pulse phase selection.
848- * Selects the phase for the 10X DPLL clock for the PCIe
849- * digital display port. The range is 4 to 13; 10 or more
850- * is just a flip delay. The default is 6
851- */
852-# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
853-# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
854-
855-/**
856- * SDVO multiplier for 945G/GM. Not used on 965.
857- *
858- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
859- */
860-# define SDVO_MULTIPLIER_MASK 0x000000ff
861-# define SDVO_MULTIPLIER_SHIFT_HIRES 4
862-# define SDVO_MULTIPLIER_SHIFT_VGA 0
863-
864-/** @defgroup DPLL_MD
865- * @{
866- */
867-/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
868-#define DPLL_A_MD 0x0601c
869-/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
870-#define DPLL_B_MD 0x06020
871-/**
872- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
873- *
874- * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
875- */
876-# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
877-# define DPLL_MD_UDI_DIVIDER_SHIFT 24
878-/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
879-# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
880-# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
881-/**
882- * SDVO/UDI pixel multiplier.
883- *
884- * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
885- * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
886- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
887- * dummy bytes in the datastream at an increased clock rate, with both sides of
888- * the link knowing how many bytes are fill.
889- *
890- * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
891- * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
892- * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
893- * through an SDVO command.
894- *
895- * This register field has values of multiplication factor minus 1, with
896- * a maximum multiplier of 5 for SDVO.
897- */
898-# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
899-# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
900-/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
901- * This best be set to the default value (3) or the CRT won't work. No,
902- * I don't entirely understand what this does...
903- */
904-# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
905-# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
906-/** @} */
907-
908-#define DPLL_TEST 0x606c
909-# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
910-# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
911-# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
912-# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
913-# define DPLLB_TEST_N_BYPASS (1 << 19)
914-# define DPLLB_TEST_M_BYPASS (1 << 18)
915-# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
916-# define DPLLA_TEST_N_BYPASS (1 << 3)
917-# define DPLLA_TEST_M_BYPASS (1 << 2)
918-# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
919-
920-#define ADPA 0x61100
921-#define ADPA_DAC_ENABLE (1<<31)
922-#define ADPA_DAC_DISABLE 0
923-#define ADPA_PIPE_SELECT_MASK (1<<30)
924-#define ADPA_PIPE_A_SELECT 0
925-#define ADPA_PIPE_B_SELECT (1<<30)
926-#define ADPA_USE_VGA_HVPOLARITY (1<<15)
927-#define ADPA_SETS_HVPOLARITY 0
928-#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
929-#define ADPA_VSYNC_CNTL_ENABLE 0
930-#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
931-#define ADPA_HSYNC_CNTL_ENABLE 0
932-#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
933-#define ADPA_VSYNC_ACTIVE_LOW 0
934-#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
935-#define ADPA_HSYNC_ACTIVE_LOW 0
936-
937-#define FPA0 0x06040
938-#define FPA1 0x06044
939-#define FPB0 0x06048
940-#define FPB1 0x0604c
941-# define FP_N_DIV_MASK 0x003f0000
942-# define FP_N_DIV_SHIFT 16
943-# define FP_M1_DIV_MASK 0x00003f00
944-# define FP_M1_DIV_SHIFT 8
945-# define FP_M2_DIV_MASK 0x0000003f
946-# define FP_M2_DIV_SHIFT 0
947-
948-
949-#define PORT_HOTPLUG_EN 0x61110
950-# define SDVOB_HOTPLUG_INT_EN (1 << 26)
951-# define SDVOC_HOTPLUG_INT_EN (1 << 25)
952-# define TV_HOTPLUG_INT_EN (1 << 18)
953-# define CRT_HOTPLUG_INT_EN (1 << 9)
954-# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
955-
956-#define PORT_HOTPLUG_STAT 0x61114
957-# define CRT_HOTPLUG_INT_STATUS (1 << 11)
958-# define TV_HOTPLUG_INT_STATUS (1 << 10)
959-# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
960-# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
961-# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
962-# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
963-# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
964-# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
965-
966-#define SDVOB 0x61140
967-#define SDVOC 0x61160
968-#define SDVO_ENABLE (1 << 31)
969-#define SDVO_PIPE_B_SELECT (1 << 30)
970-#define SDVO_STALL_SELECT (1 << 29)
971-#define SDVO_INTERRUPT_ENABLE (1 << 26)
972-/**
973- * 915G/GM SDVO pixel multiplier.
974- *
975- * Programmed value is multiplier - 1, up to 5x.
976- *
977- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
978- */
979-#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
980-#define SDVO_PORT_MULTIPLY_SHIFT 23
981-#define SDVO_PHASE_SELECT_MASK (15 << 19)
982-#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
983-#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
984-#define SDVOC_GANG_MODE (1 << 16)
985-#define SDVO_BORDER_ENABLE (1 << 7)
986-#define SDVOB_PCIE_CONCURRENCY (1 << 3)
987-#define SDVO_DETECTED (1 << 2)
988-/* Bits to be preserved when writing */
989-#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
990-#define SDVOC_PRESERVE_MASK (1 << 17)
991-
992-/** @defgroup LVDS
993- * @{
994- */
995-/**
996- * This register controls the LVDS output enable, pipe selection, and data
997- * format selection.
998- *
999- * All of the clock/data pairs are force powered down by power sequencing.
1000- */
1001-#define LVDS 0x61180
1002-/**
1003- * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
1004- * the DPLL semantics change when the LVDS is assigned to that pipe.
1005- */
1006-# define LVDS_PORT_EN (1 << 31)
1007-/** Selects pipe B for LVDS data. Must be set on pre-965. */
1008-# define LVDS_PIPEB_SELECT (1 << 30)
1009-
1010-/**
1011- * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
1012- * pixel.
1013- */
1014-# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
1015-# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
1016-# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
1017-/**
1018- * Controls the A3 data pair, which contains the additional LSBs for 24 bit
1019- * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
1020- * on.
1021- */
1022-# define LVDS_A3_POWER_MASK (3 << 6)
1023-# define LVDS_A3_POWER_DOWN (0 << 6)
1024-# define LVDS_A3_POWER_UP (3 << 6)
1025-/**
1026- * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
1027- * is set.
1028- */
1029-# define LVDS_CLKB_POWER_MASK (3 << 4)
1030-# define LVDS_CLKB_POWER_DOWN (0 << 4)
1031-# define LVDS_CLKB_POWER_UP (3 << 4)
1032-
1033-/**
1034- * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
1035- * setting for whether we are in dual-channel mode. The B3 pair will
1036- * additionally only be powered up when LVDS_A3_POWER_UP is set.
1037- */
1038-# define LVDS_B0B3_POWER_MASK (3 << 2)
1039-# define LVDS_B0B3_POWER_DOWN (0 << 2)
1040-# define LVDS_B0B3_POWER_UP (3 << 2)
1041-
1042-#define PIPEACONF 0x70008
1043-#define PIPEACONF_ENABLE (1<<31)
1044-#define PIPEACONF_DISABLE 0
1045-#define PIPEACONF_DOUBLE_WIDE (1<<30)
1046-#define I965_PIPECONF_ACTIVE (1<<30)
1047-#define PIPEACONF_SINGLE_WIDE 0
1048-#define PIPEACONF_PIPE_UNLOCKED 0
1049-#define PIPEACONF_PIPE_LOCKED (1<<25)
1050-#define PIPEACONF_PALETTE 0
1051-#define PIPEACONF_GAMMA (1<<24)
1052-#define PIPECONF_FORCE_BORDER (1<<25)
1053-#define PIPECONF_PROGRESSIVE (0 << 21)
1054-#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
1055-#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
1056-
1057-#define DSPARB 0x70030
1058-#define DSPARB_CSTART_MASK (0x7f << 7)
1059-#define DSPARB_CSTART_SHIFT 7
1060-#define DSPARB_BSTART_MASK (0x7f)
1061-#define DSPARB_BSTART_SHIFT 0
1062-
1063-#define PIPEBCONF 0x71008
1064-#define PIPEBCONF_ENABLE (1<<31)
1065-#define PIPEBCONF_DISABLE 0
1066-#define PIPEBCONF_DOUBLE_WIDE (1<<30)
1067-#define PIPEBCONF_DISABLE 0
1068-#define PIPEBCONF_GAMMA (1<<24)
1069-#define PIPEBCONF_PALETTE 0
1070-
1071-#define PIPEBGCMAXRED 0x71010
1072-#define PIPEBGCMAXGREEN 0x71014
1073-#define PIPEBGCMAXBLUE 0x71018
1074-#define PIPEBSTAT 0x71024
1075-#define PIPEBFRAMEHIGH 0x71040
1076-#define PIPEBFRAMEPIXEL 0x71044
1077-
1078-#define DSPACNTR 0x70180
1079-#define DSPBCNTR 0x71180
1080-#define DISPLAY_PLANE_ENABLE (1<<31)
1081-#define DISPLAY_PLANE_DISABLE 0
1082-#define DISPPLANE_GAMMA_ENABLE (1<<30)
1083-#define DISPPLANE_GAMMA_DISABLE 0
1084-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
1085-#define DISPPLANE_8BPP (0x2<<26)
1086-#define DISPPLANE_15_16BPP (0x4<<26)
1087-#define DISPPLANE_16BPP (0x5<<26)
1088-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
1089-#define DISPPLANE_32BPP (0x7<<26)
1090-#define DISPPLANE_STEREO_ENABLE (1<<25)
1091-#define DISPPLANE_STEREO_DISABLE 0
1092-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
1093-#define DISPPLANE_SEL_PIPE_A 0
1094-#define DISPPLANE_SEL_PIPE_B (1<<24)
1095-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
1096-#define DISPPLANE_SRC_KEY_DISABLE 0
1097-#define DISPPLANE_LINE_DOUBLE (1<<20)
1098-#define DISPPLANE_NO_LINE_DOUBLE 0
1099-#define DISPPLANE_STEREO_POLARITY_FIRST 0
1100-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
1101-/* plane B only */
1102-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
1103-#define DISPPLANE_ALPHA_TRANS_DISABLE 0
1104-#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
1105-#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
1106-
1107-#define DSPABASE 0x70184
1108-#define DSPASTRIDE 0x70188
1109-
1110-#define DSPBBASE 0x71184
1111-#define DSPBADDR DSPBBASE
1112-#define DSPBSTRIDE 0x71188
1113-
1114-#define DSPAKEYVAL 0x70194
1115-#define DSPAKEYMASK 0x70198
1116-
1117-#define DSPAPOS 0x7018C /* reserved */
1118-#define DSPASIZE 0x70190
1119-#define DSPBPOS 0x7118C
1120-#define DSPBSIZE 0x71190
1121-
1122-#define DSPASURF 0x7019C
1123-#define DSPATILEOFF 0x701A4
1124-
1125-#define DSPBSURF 0x7119C
1126-#define DSPBTILEOFF 0x711A4
1127-
1128-#define VGACNTRL 0x71400
1129-# define VGA_DISP_DISABLE (1 << 31)
1130-# define VGA_2X_MODE (1 << 30)
1131-# define VGA_PIPE_B_SELECT (1 << 29)
1132-
1133-/*
1134- * Some BIOS scratch area registers. The 845 (and 830?) store the amount
1135- * of video memory available to the BIOS in SWF1.
1136- */
1137-
1138-#define SWF0 0x71410
1139-
1140-/*
1141- * 855 scratch registers.
1142- */
1143-#define SWF10 0x70410
1144-
1145-#define SWF30 0x72414
1146-
1147-/*
1148- * Overlay registers. These are overlay registers accessed via MMIO.
1149- * Those loaded via the overlay register page are defined in i830_video.c.
1150- */
1151-#define OVADD 0x30000
1152-
1153-#define DOVSTA 0x30008
1154-#define OC_BUF (0x3<<20)
1155+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
1156+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
1157
1158-#define OGAMC5 0x30010
1159-#define OGAMC4 0x30014
1160-#define OGAMC3 0x30018
1161-#define OGAMC2 0x3001c
1162-#define OGAMC1 0x30020
1163-#define OGAMC0 0x30024
1164-/*
1165- * Palette registers
1166- */
1167-#define PALETTE_A 0x0a000
1168-#define PALETTE_B 0x0a800
1169+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1170
1171 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
1172 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
1173diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1174index df03611..4a2de78 100644
1175--- a/drivers/gpu/drm/i915/i915_irq.c
1176+++ b/drivers/gpu/drm/i915/i915_irq.c
1177@@ -31,10 +31,6 @@
1178 #include "i915_drm.h"
1179 #include "i915_drv.h"
1180
1181-#define USER_INT_FLAG (1<<1)
1182-#define VSYNC_PIPEB_FLAG (1<<5)
1183-#define VSYNC_PIPEA_FLAG (1<<7)
1184-
1185 #define MAX_NOPID ((u32)~0)
1186
1187 /**
1188@@ -236,40 +232,43 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1189 u16 temp;
1190 u32 pipea_stats, pipeb_stats;
1191
1192- pipea_stats = I915_READ(I915REG_PIPEASTAT);
1193- pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
1194+ pipea_stats = I915_READ(PIPEASTAT);
1195+ pipeb_stats = I915_READ(PIPEBSTAT);
1196
1197- temp = I915_READ16(I915REG_INT_IDENTITY_R);
1198+ temp = I915_READ16(IIR);
1199
1200- temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
1201+ temp &= (I915_USER_INTERRUPT |
1202+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1203+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT);
1204
1205 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
1206
1207 if (temp == 0)
1208 return IRQ_NONE;
1209
1210- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
1211- (void) I915_READ16(I915REG_INT_IDENTITY_R);
1212+ I915_WRITE16(IIR, temp);
1213+ (void) I915_READ16(IIR);
1214 DRM_READMEMORYBARRIER();
1215
1216 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1217
1218- if (temp & USER_INT_FLAG)
1219+ if (temp & I915_USER_INTERRUPT)
1220 DRM_WAKEUP(&dev_priv->irq_queue);
1221
1222- if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
1223+ if (temp & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1224+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
1225 int vblank_pipe = dev_priv->vblank_pipe;
1226
1227 if ((vblank_pipe &
1228 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
1229 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
1230- if (temp & VSYNC_PIPEA_FLAG)
1231+ if (temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
1232 atomic_inc(&dev->vbl_received);
1233- if (temp & VSYNC_PIPEB_FLAG)
1234+ if (temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
1235 atomic_inc(&dev->vbl_received2);
1236- } else if (((temp & VSYNC_PIPEA_FLAG) &&
1237+ } else if (((temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
1238 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
1239- ((temp & VSYNC_PIPEB_FLAG) &&
1240+ ((temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
1241 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
1242 atomic_inc(&dev->vbl_received);
1243
1244@@ -278,12 +277,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1245
1246 if (dev_priv->swaps_pending > 0)
1247 drm_locked_tasklet(dev, i915_vblank_tasklet);
1248- I915_WRITE(I915REG_PIPEASTAT,
1249+ I915_WRITE(PIPEASTAT,
1250 pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
1251- I915_VBLANK_CLEAR);
1252- I915_WRITE(I915REG_PIPEBSTAT,
1253+ PIPE_VBLANK_INTERRUPT_STATUS);
1254+ I915_WRITE(PIPEBSTAT,
1255 pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
1256- I915_VBLANK_CLEAR);
1257+ PIPE_VBLANK_INTERRUPT_STATUS);
1258 }
1259
1260 return IRQ_HANDLED;
1261@@ -304,12 +303,12 @@ static int i915_emit_irq(struct drm_device * dev)
1262 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
1263
1264 BEGIN_LP_RING(6);
1265- OUT_RING(CMD_STORE_DWORD_IDX);
1266- OUT_RING(20);
1267+ OUT_RING(MI_STORE_DWORD_INDEX);
1268+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
1269 OUT_RING(dev_priv->counter);
1270 OUT_RING(0);
1271 OUT_RING(0);
1272- OUT_RING(GFX_OP_USER_INTERRUPT);
1273+ OUT_RING(MI_USER_INTERRUPT);
1274 ADVANCE_LP_RING();
1275
1276 return dev_priv->counter;
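
The emit/read pair above is the whole breadcrumb scheme: MI_STORE_DWORD_INDEX lands the counter in dword 5 of the hardware status page (5 << MI_STORE_DWORD_INDEX_SHIFT is the byte offset 20 that the old code hard-coded), and READ_BREADCRUMB() from i915_drv.h reads the same slot back without an MMIO round trip. A minimal sketch of a consumer, assuming only the macros this patch introduces (the helper name is illustrative):

    /* Sketch: has the GPU retired breadcrumb `seq' yet?  READ_BREADCRUMB()
     * reads dword 5 of the hardware status page, the slot i915_emit_irq()
     * writes via MI_STORE_DWORD_INDEX above. */
    static int i915_breadcrumb_passed(drm_i915_private_t *dev_priv, int seq)
    {
            return READ_BREADCRUMB(dev_priv) >= seq;
    }
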
1277@@ -421,11 +420,11 @@ static void i915_enable_interrupt (struct drm_device *dev)
1278
1279 flag = 0;
1280 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
1281- flag |= VSYNC_PIPEA_FLAG;
1282+ flag |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1283 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
1284- flag |= VSYNC_PIPEB_FLAG;
1285+ flag |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1286
1287- I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
1288+ I915_WRITE16(IER, I915_USER_INTERRUPT | flag);
1289 }
1290
1291 /* Set the vblank monitor pipe
1292@@ -465,11 +464,11 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1293 return -EINVAL;
1294 }
1295
1296- flag = I915_READ(I915REG_INT_ENABLE_R);
1297+ flag = I915_READ(IER);
1298 pipe->pipe = 0;
1299- if (flag & VSYNC_PIPEA_FLAG)
1300+ if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
1301 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
1302- if (flag & VSYNC_PIPEB_FLAG)
1303+ if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
1304 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
1305
1306 return 0;
1307@@ -587,9 +586,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1308 {
1309 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1310
1311- I915_WRITE16(I915REG_HWSTAM, 0xfffe);
1312- I915_WRITE16(I915REG_INT_MASK_R, 0x0);
1313- I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
1314+ I915_WRITE16(HWSTAM, 0xfffe);
1315+ I915_WRITE16(IMR, 0x0);
1316+ I915_WRITE16(IER, 0x0);
1317 }
1318
1319 void i915_driver_irq_postinstall(struct drm_device * dev)
1320@@ -614,10 +613,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1321 if (!dev_priv)
1322 return;
1323
1324- I915_WRITE16(I915REG_HWSTAM, 0xffff);
1325- I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
1326- I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
1327+ I915_WRITE16(HWSTAM, 0xffff);
1328+ I915_WRITE16(IMR, 0xffff);
1329+ I915_WRITE16(IER, 0x0);
1330
1331- temp = I915_READ16(I915REG_INT_IDENTITY_R);
1332- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
1333+ temp = I915_READ16(IIR);
1334+ I915_WRITE16(IIR, temp);
1335 }
1336diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1337new file mode 100644
1338index 0000000..477c64e
1339--- /dev/null
1340+++ b/drivers/gpu/drm/i915/i915_reg.h
1341@@ -0,0 +1,1405 @@
1342+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
1343+ * All Rights Reserved.
1344+ *
1345+ * Permission is hereby granted, free of charge, to any person obtaining a
1346+ * copy of this software and associated documentation files (the
1347+ * "Software"), to deal in the Software without restriction, including
1348+ * without limitation the rights to use, copy, modify, merge, publish,
1349+ * distribute, sub license, and/or sell copies of the Software, and to
1350+ * permit persons to whom the Software is furnished to do so, subject to
1351+ * the following conditions:
1352+ *
1353+ * The above copyright notice and this permission notice (including the
1354+ * next paragraph) shall be included in all copies or substantial portions
1355+ * of the Software.
1356+ *
1357+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1358+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1359+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
1360+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
1361+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
1362+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
1363+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1364+ */
1365+
1366+#ifndef _I915_REG_H_
1367+#define _I915_REG_H_
1368+
1369+/* MCH MMIO space */
1370+/** 915-945 and GM965 MCH register controlling DRAM channel access */
1371+#define DCC 0x200
1372+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
1373+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
1374+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
1375+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
1376+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
1377+
1378+/** 965 MCH register controlling DRAM channel configuration */
1379+#define CHDECMISC 0x111
1380+#define CHDECMISC_FLEXMEMORY (1 << 1)
1381+
1382+/*
1383+ * The Bridge device's PCI config space has information about the
1384+ * fb aperture size and the amount of pre-reserved memory.
1385+ */
1386+#define INTEL_GMCH_CTRL 0x52
1387+#define INTEL_GMCH_ENABLED 0x4
1388+#define INTEL_GMCH_MEM_MASK 0x1
1389+#define INTEL_GMCH_MEM_64M 0x1
1390+#define INTEL_GMCH_MEM_128M 0
1391+
1392+#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
1393+#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
1394+#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
1395+#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
1396+#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
1397+#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
1398+#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
1399+
1400+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
1401+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
1402+
1403+/* PCI config space */
1404+
1405+#define HPLLCC 0xc0 /* 855 only */
1406+#define GC_CLOCK_CONTROL_MASK (3 << 0)
1407+#define GC_CLOCK_133_200 (0 << 0)
1408+#define GC_CLOCK_100_200 (1 << 0)
1409+#define GC_CLOCK_100_133 (2 << 0)
1410+#define GC_CLOCK_166_250 (3 << 0)
1411+#define GCFGC 0xf0 /* 915+ only */
1412+#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
1413+#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
1414+#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
1415+#define GC_DISPLAY_CLOCK_MASK (7 << 4)
1416+#define LBB 0xf4
1417+
1418+/* VGA stuff */
1419+
1420+#define VGA_ST01_MDA 0x3ba
1421+#define VGA_ST01_CGA 0x3da
1422+
1423+#define VGA_MSR_WRITE 0x3c2
1424+#define VGA_MSR_READ 0x3cc
1425+#define VGA_MSR_MEM_EN (1<<1)
1426+#define VGA_MSR_CGA_MODE (1<<0)
1427+
1428+#define VGA_SR_INDEX 0x3c4
1429+#define VGA_SR_DATA 0x3c5
1430+
1431+#define VGA_AR_INDEX 0x3c0
1432+#define VGA_AR_VID_EN (1<<5)
1433+#define VGA_AR_DATA_WRITE 0x3c0
1434+#define VGA_AR_DATA_READ 0x3c1
1435+
1436+#define VGA_GR_INDEX 0x3ce
1437+#define VGA_GR_DATA 0x3cf
1438+/* GR05 */
1439+#define VGA_GR_MEM_READ_MODE_SHIFT 3
1440+#define VGA_GR_MEM_READ_MODE_PLANE 1
1441+/* GR06 */
1442+#define VGA_GR_MEM_MODE_MASK 0xc
1443+#define VGA_GR_MEM_MODE_SHIFT 2
1444+#define VGA_GR_MEM_A0000_AFFFF 0
1445+#define VGA_GR_MEM_A0000_BFFFF 1
1446+#define VGA_GR_MEM_B0000_B7FFF 2
1447+#define VGA_GR_MEM_B0000_BFFFF 3
1448+
1449+#define VGA_DACMASK 0x3c6
1450+#define VGA_DACRX 0x3c7
1451+#define VGA_DACWX 0x3c8
1452+#define VGA_DACDATA 0x3c9
1453+
1454+#define VGA_CR_INDEX_MDA 0x3b4
1455+#define VGA_CR_DATA_MDA 0x3b5
1456+#define VGA_CR_INDEX_CGA 0x3d4
1457+#define VGA_CR_DATA_CGA 0x3d5
1458+
1459+/*
1460+ * Memory interface instructions used by the kernel
1461+ */
1462+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
1463+
1464+#define MI_NOOP MI_INSTR(0, 0)
1465+#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
1466+#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
1467+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
1468+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
1469+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
1470+#define MI_FLUSH MI_INSTR(0x04, 0)
1471+#define MI_READ_FLUSH (1 << 0)
1472+#define MI_EXE_FLUSH (1 << 1)
1473+#define MI_NO_WRITE_FLUSH (1 << 2)
1474+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
1475+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
1476+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
1477+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
1478+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
1479+#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
1480+#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
1481+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
1482+#define MI_STORE_DWORD_INDEX_SHIFT 2
1483+#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
1484+#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
1485+#define MI_BATCH_NON_SECURE (1)
1486+#define MI_BATCH_NON_SECURE_I965 (1<<8)
1487+#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
1488+
1489+/*
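
MI_INSTR() packs the command opcode into bits 28:23 of the header dword, with flag and length bits below it, so each of the MI_* names above is a complete 32-bit command header. A standalone, illustrative check of the encoding (not driver code):

    /* Illustrative: MI_STORE_DWORD_INDEX is opcode 0x21 with length 1,
     * i.e. (0x21 << 23) | 1 == 0x10800001. */
    #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
    #define MI_STORE_DWORD_INDEX    MI_INSTR(0x21, 1)

    int main(void)
    {
            return MI_STORE_DWORD_INDEX == 0x10800001 ? 0 : 1;
    }
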
1490+ * 3D instructions used by the kernel
1491+ */
1492+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
1493+
1494+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
1495+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
1496+#define SC_UPDATE_SCISSOR (0x1<<1)
1497+#define SC_ENABLE_MASK (0x1<<0)
1498+#define SC_ENABLE (0x1<<0)
1499+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
1500+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
1501+#define SCI_YMIN_MASK (0xffff<<16)
1502+#define SCI_XMIN_MASK (0xffff<<0)
1503+#define SCI_YMAX_MASK (0xffff<<16)
1504+#define SCI_XMAX_MASK (0xffff<<0)
1505+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
1506+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
1507+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
1508+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
1509+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
1510+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
1511+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
1512+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
1513+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
1514+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
1515+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
1516+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
1517+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
1518+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
1519+#define BLT_DEPTH_8 (0<<24)
1520+#define BLT_DEPTH_16_565 (1<<24)
1521+#define BLT_DEPTH_16_1555 (2<<24)
1522+#define BLT_DEPTH_32 (3<<24)
1523+#define BLT_ROP_GXCOPY (0xcc<<16)
1524+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
1525+#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
1526+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
1527+#define ASYNC_FLIP (1<<22)
1528+#define DISPLAY_PLANE_A (0<<20)
1529+#define DISPLAY_PLANE_B (1<<20)
1530+
1531+/*
1532+ * Instruction and interrupt control regs
1533+ */
1534+
1535+#define PRB0_TAIL 0x02030
1536+#define PRB0_HEAD 0x02034
1537+#define PRB0_START 0x02038
1538+#define PRB0_CTL 0x0203c
1539+#define TAIL_ADDR 0x001FFFF8
1540+#define HEAD_WRAP_COUNT 0xFFE00000
1541+#define HEAD_WRAP_ONE 0x00200000
1542+#define HEAD_ADDR 0x001FFFFC
1543+#define RING_NR_PAGES 0x001FF000
1544+#define RING_REPORT_MASK 0x00000006
1545+#define RING_REPORT_64K 0x00000002
1546+#define RING_REPORT_128K 0x00000004
1547+#define RING_NO_REPORT 0x00000000
1548+#define RING_VALID_MASK 0x00000001
1549+#define RING_VALID 0x00000001
1550+#define RING_INVALID 0x00000000
1551+#define PRB1_TAIL 0x02040 /* 915+ only */
1552+#define PRB1_HEAD 0x02044 /* 915+ only */
1553+#define PRB1_START 0x02048 /* 915+ only */
1554+#define PRB1_CTL 0x0204c /* 915+ only */
1555+#define ACTHD_I965 0x02074
1556+#define HWS_PGA 0x02080
1557+#define HWS_ADDRESS_MASK 0xfffff000
1558+#define HWS_START_ADDRESS_SHIFT 4
1559+#define IPEIR 0x02088
1560+#define NOPID 0x02094
1561+#define HWSTAM 0x02098
1562+#define SCPD0 0x0209c /* 915+ only */
1563+#define IER 0x020a0
1564+#define IIR 0x020a4
1565+#define IMR 0x020a8
1566+#define ISR 0x020ac
1567+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
1568+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
1569+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
1570+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
1571+#define I915_HWB_OOM_INTERRUPT (1<<13)
1572+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
1573+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
1574+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
1575+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
1576+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
1577+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
1578+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
1579+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
1580+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
1581+#define I915_DEBUG_INTERRUPT (1<<2)
1582+#define I915_USER_INTERRUPT (1<<1)
1583+#define I915_ASLE_INTERRUPT (1<<0)
1584+#define EIR 0x020b0
1585+#define EMR 0x020b4
1586+#define ESR 0x020b8
1587+#define INSTPM 0x020c0
1588+#define ACTHD 0x020c8
1589+#define FW_BLC 0x020d8
1590+#define FW_BLC_SELF 0x020e0 /* 915+ only */
1591+#define MI_ARB_STATE 0x020e4 /* 915+ only */
1592+#define CACHE_MODE_0 0x02120 /* 915+ only */
1593+#define CM0_MASK_SHIFT 16
1594+#define CM0_IZ_OPT_DISABLE (1<<6)
1595+#define CM0_ZR_OPT_DISABLE (1<<5)
1596+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
1597+#define CM0_COLOR_EVICT_DISABLE (1<<3)
1598+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
1599+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
1600+#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
1601+
1602+/*
1603+ * Framebuffer compression (915+ only)
1604+ */
1605+
1606+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
1607+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
1608+#define FBC_CONTROL 0x03208
1609+#define FBC_CTL_EN (1<<31)
1610+#define FBC_CTL_PERIODIC (1<<30)
1611+#define FBC_CTL_INTERVAL_SHIFT (16)
1612+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
1613+#define FBC_CTL_STRIDE_SHIFT (5)
1614+#define FBC_CTL_FENCENO (1<<0)
1615+#define FBC_COMMAND 0x0320c
1616+#define FBC_CMD_COMPRESS (1<<0)
1617+#define FBC_STATUS 0x03210
1618+#define FBC_STAT_COMPRESSING (1<<31)
1619+#define FBC_STAT_COMPRESSED (1<<30)
1620+#define FBC_STAT_MODIFIED (1<<29)
1621+#define FBC_STAT_CURRENT_LINE (1<<0)
1622+#define FBC_CONTROL2 0x03214
1623+#define FBC_CTL_FENCE_DBL (0<<4)
1624+#define FBC_CTL_IDLE_IMM (0<<2)
1625+#define FBC_CTL_IDLE_FULL (1<<2)
1626+#define FBC_CTL_IDLE_LINE (2<<2)
1627+#define FBC_CTL_IDLE_DEBUG (3<<2)
1628+#define FBC_CTL_CPU_FENCE (1<<1)
1629+#define FBC_CTL_PLANEA (0<<0)
1630+#define FBC_CTL_PLANEB (1<<0)
1631+#define FBC_FENCE_OFF 0x0321b
1632+
1633+#define FBC_LL_SIZE (1536)
1634+
1635+/*
1636+ * GPIO regs
1637+ */
1638+#define GPIOA 0x5010
1639+#define GPIOB 0x5014
1640+#define GPIOC 0x5018
1641+#define GPIOD 0x501c
1642+#define GPIOE 0x5020
1643+#define GPIOF 0x5024
1644+#define GPIOG 0x5028
1645+#define GPIOH 0x502c
1646+# define GPIO_CLOCK_DIR_MASK (1 << 0)
1647+# define GPIO_CLOCK_DIR_IN (0 << 1)
1648+# define GPIO_CLOCK_DIR_OUT (1 << 1)
1649+# define GPIO_CLOCK_VAL_MASK (1 << 2)
1650+# define GPIO_CLOCK_VAL_OUT (1 << 3)
1651+# define GPIO_CLOCK_VAL_IN (1 << 4)
1652+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
1653+# define GPIO_DATA_DIR_MASK (1 << 8)
1654+# define GPIO_DATA_DIR_IN (0 << 9)
1655+# define GPIO_DATA_DIR_OUT (1 << 9)
1656+# define GPIO_DATA_VAL_MASK (1 << 10)
1657+# define GPIO_DATA_VAL_OUT (1 << 11)
1658+# define GPIO_DATA_VAL_IN (1 << 12)
1659+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
1660+
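
These GPIO fields come in write-enable pairs: a *_MASK bit must be set in the same write for the neighbouring direction/value bit to latch, so one line can be updated without a read-modify-write race. A hedged sketch of driving a data line low, in the style the i2c bit-bang code uses (the helper name is illustrative; I915_WRITE()/I915_READ() are the accessors seen elsewhere in this patch):

    /* Sketch: pull the GPIOA data line low.  GPIO_DATA_DIR_MASK and
     * GPIO_DATA_VAL_MASK arm their fields for this write; DIR_OUT selects
     * output and the value bit stays clear, so the pin drives low. */
    static void gpioa_data_low(drm_i915_private_t *dev_priv)
    {
            I915_WRITE(GPIOA, GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
                              GPIO_DATA_VAL_MASK);
            (void) I915_READ(GPIOA);        /* posting read */
    }
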
1661+/*
1662+ * Clock control & power management
1663+ */
1664+
1665+#define VGA0 0x6000
1666+#define VGA1 0x6004
1667+#define VGA_PD 0x6010
1668+#define VGA0_PD_P2_DIV_4 (1 << 7)
1669+#define VGA0_PD_P1_DIV_2 (1 << 5)
1670+#define VGA0_PD_P1_SHIFT 0
1671+#define VGA0_PD_P1_MASK (0x1f << 0)
1672+#define VGA1_PD_P2_DIV_4 (1 << 15)
1673+#define VGA1_PD_P1_DIV_2 (1 << 13)
1674+#define VGA1_PD_P1_SHIFT 8
1675+#define VGA1_PD_P1_MASK (0x1f << 8)
1676+#define DPLL_A 0x06014
1677+#define DPLL_B 0x06018
1678+#define DPLL_VCO_ENABLE (1 << 31)
1679+#define DPLL_DVO_HIGH_SPEED (1 << 30)
1680+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
1681+#define DPLL_VGA_MODE_DIS (1 << 28)
1682+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
1683+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
1684+#define DPLL_MODE_MASK (3 << 26)
1685+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
1686+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
1687+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
1688+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
1689+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
1690+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
1691+
1692+#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
1693+#define I915_CRC_ERROR_ENABLE (1UL<<29)
1694+#define I915_CRC_DONE_ENABLE (1UL<<28)
1695+#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
1696+#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
1697+#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
1698+#define I915_DPST_EVENT_ENABLE (1UL<<23)
1699+#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
1700+#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
1701+#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
1702+#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
1703+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
1704+#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
1705+#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
1706+#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
1707+#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
1708+#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
1709+#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
1710+#define I915_DPST_EVENT_STATUS (1UL<<7)
1711+#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
1712+#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
1713+#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
1714+#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
1715+#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
1716+#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
1717+
1718+#define SRX_INDEX 0x3c4
1719+#define SRX_DATA 0x3c5
1720+#define SR01 1
1721+#define SR01_SCREEN_OFF (1<<5)
1722+
1723+#define PPCR 0x61204
1724+#define PPCR_ON (1<<0)
1725+
1726+#define DVOB 0x61140
1727+#define DVOB_ON (1<<31)
1728+#define DVOC 0x61160
1729+#define DVOC_ON (1<<31)
1730+#define LVDS 0x61180
1731+#define LVDS_ON (1<<31)
1732+
1733+#define ADPA 0x61100
1734+#define ADPA_DPMS_MASK (~(3<<10))
1735+#define ADPA_DPMS_ON (0<<10)
1736+#define ADPA_DPMS_SUSPEND (1<<10)
1737+#define ADPA_DPMS_STANDBY (2<<10)
1738+#define ADPA_DPMS_OFF (3<<10)
1739+
1740+#define RING_TAIL 0x00
1741+#define TAIL_ADDR 0x001FFFF8
1742+#define RING_HEAD 0x04
1743+#define HEAD_WRAP_COUNT 0xFFE00000
1744+#define HEAD_WRAP_ONE 0x00200000
1745+#define HEAD_ADDR 0x001FFFFC
1746+#define RING_START 0x08
1747+#define START_ADDR 0xFFFFF000
1748+#define RING_LEN 0x0C
1749+#define RING_NR_PAGES 0x001FF000
1750+#define RING_REPORT_MASK 0x00000006
1751+#define RING_REPORT_64K 0x00000002
1752+#define RING_REPORT_128K 0x00000004
1753+#define RING_NO_REPORT 0x00000000
1754+#define RING_VALID_MASK 0x00000001
1755+#define RING_VALID 0x00000001
1756+#define RING_INVALID 0x00000000
1757+
1758+/* Scratch pad debug 0 reg:
1759+ */
1760+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
1761+/*
1762+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
1763+ * this field (only one bit may be set).
1764+ */
1765+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
1766+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
1767+/* i830, required in DVO non-gang */
1768+#define PLL_P2_DIVIDE_BY_4 (1 << 23)
1769+#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
1770+#define PLL_REF_INPUT_DREFCLK (0 << 13)
1771+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
1772+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
1773+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
1774+#define PLL_REF_INPUT_MASK (3 << 13)
1775+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
1776+/*
1777+ * Parallel to Serial Load Pulse phase selection.
1778+ * Selects the phase for the 10X DPLL clock for the PCIe
1779+ * digital display port. The range is 4 to 13; 10 or more
1780+ * is just a flip delay. The default is 6.
1781+ */
1782+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
1783+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
1784+/*
1785+ * SDVO multiplier for 945G/GM. Not used on 965.
1786+ */
1787+#define SDVO_MULTIPLIER_MASK 0x000000ff
1788+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
1789+#define SDVO_MULTIPLIER_SHIFT_VGA 0
1790+#define DPLL_A_MD 0x0601c /* 965+ only */
1791+/*
1792+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
1793+ *
1794+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
1795+ */
1796+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
1797+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
1798+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
1799+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
1800+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
1801+/*
1802+ * SDVO/UDI pixel multiplier.
1803+ *
1804+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
1805+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
1806+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
1807+ * dummy bytes in the datastream at an increased clock rate, with both sides of
1808+ * the link knowing how many bytes are filler.
1809+ *
1810+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
1811+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
1812+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
1813+ * through an SDVO command.
1814+ *
1815+ * This register field has values of multiplication factor minus 1, with
1816+ * a maximum multiplier of 5 for SDVO.
1817+ */
1818+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
1819+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
1820+/*
1821+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
1822+ * This is best set to the default value (3) or the CRT won't work. No,
1823+ * I don't entirely understand what this does...
1824+ */
1825+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
1826+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
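
In numbers, the multiplier comment above means: pick the smallest multiplier that lifts the 10x bus clock to at least 1 GHz, and program multiplier minus one into the field. A minimal sketch of that selection (function name and the kHz units are illustrative):

    /* Sketch: choose the SDVO pixel multiplier for a dotclock in kHz so
     * that dot * mult * 10 lands in the 1-2 GHz bus window; e.g. 65000 kHz
     * -> 2x -> 1.30 GHz.  The register field stores mult - 1. */
    static int sdvo_pixel_multiplier(int dotclock_khz)
    {
            int mult;

            for (mult = 1; mult <= 5; mult++)
                    if (dotclock_khz * 10 * mult >= 1000000)
                            return mult;
            return 5;       /* real code would reject the mode instead */
    }
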
1827+#define DPLL_B_MD 0x06020 /* 965+ only */
1828+#define FPA0 0x06040
1829+#define FPA1 0x06044
1830+#define FPB0 0x06048
1831+#define FPB1 0x0604c
1832+#define FP_N_DIV_MASK 0x003f0000
1833+#define FP_N_DIV_SHIFT 16
1834+#define FP_M1_DIV_MASK 0x00003f00
1835+#define FP_M1_DIV_SHIFT 8
1836+#define FP_M2_DIV_MASK 0x0000003f
1837+#define FP_M2_DIV_SHIFT 0
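
For orientation, the FP fields feed the usual i9xx DPLL arithmetic: vco = ref * m / (n + 2) and dot = vco / (p1 * p2), with m = 5 * (m1 + 2) + (m2 + 2). That formula lives in the modesetting code rather than in this patch, so treat the sketch below as a hedged illustration:

    /* Sketch: decode an FPx register and compute the resulting dotclock,
     * assuming the i9xx m/n/p relationship described above. */
    static int i9xx_dpll_dotclock(int refclk_khz, u32 fp, int p1, int p2)
    {
            int n  = (fp & FP_N_DIV_MASK)  >> FP_N_DIV_SHIFT;
            int m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
            int m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
            int m  = 5 * (m1 + 2) + (m2 + 2);

            return refclk_khz * m / (n + 2) / (p1 * p2);
    }
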
1838+#define DPLL_TEST 0x606c
1839+#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
1840+#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
1841+#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
1842+#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
1843+#define DPLLB_TEST_N_BYPASS (1 << 19)
1844+#define DPLLB_TEST_M_BYPASS (1 << 18)
1845+#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
1846+#define DPLLA_TEST_N_BYPASS (1 << 3)
1847+#define DPLLA_TEST_M_BYPASS (1 << 2)
1848+#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
1849+#define D_STATE 0x6104
1850+#define CG_2D_DIS 0x6200
1851+#define CG_3D_DIS 0x6204
1852+
1853+/*
1854+ * Palette regs
1855+ */
1856+
1857+#define PALETTE_A 0x0a000
1858+#define PALETTE_B 0x0a800
1859+
1860+/*
1861+ * Overlay regs
1862+ */
1863+
1864+#define OVADD 0x30000
1865+#define DOVSTA 0x30008
1866+#define OC_BUF (0x3<<20)
1867+#define OGAMC5 0x30010
1868+#define OGAMC4 0x30014
1869+#define OGAMC3 0x30018
1870+#define OGAMC2 0x3001c
1871+#define OGAMC1 0x30020
1872+#define OGAMC0 0x30024
1873+
1874+/*
1875+ * Display engine regs
1876+ */
1877+
1878+/* Pipe A timing regs */
1879+#define HTOTAL_A 0x60000
1880+#define HBLANK_A 0x60004
1881+#define HSYNC_A 0x60008
1882+#define VTOTAL_A 0x6000c
1883+#define VBLANK_A 0x60010
1884+#define VSYNC_A 0x60014
1885+#define PIPEASRC 0x6001c
1886+#define BCLRPAT_A 0x60020
1887+
1888+/* Pipe B timing regs */
1889+#define HTOTAL_B 0x61000
1890+#define HBLANK_B 0x61004
1891+#define HSYNC_B 0x61008
1892+#define VTOTAL_B 0x6100c
1893+#define VBLANK_B 0x61010
1894+#define VSYNC_B 0x61014
1895+#define PIPEBSRC 0x6101c
1896+#define BCLRPAT_B 0x61020
1897+
1898+/* VGA port control */
1899+#define ADPA 0x61100
1900+#define ADPA_DAC_ENABLE (1<<31)
1901+#define ADPA_DAC_DISABLE 0
1902+#define ADPA_PIPE_SELECT_MASK (1<<30)
1903+#define ADPA_PIPE_A_SELECT 0
1904+#define ADPA_PIPE_B_SELECT (1<<30)
1905+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1906+#define ADPA_SETS_HVPOLARITY 0
1907+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
1908+#define ADPA_VSYNC_CNTL_ENABLE 0
1909+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
1910+#define ADPA_HSYNC_CNTL_ENABLE 0
1911+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
1912+#define ADPA_VSYNC_ACTIVE_LOW 0
1913+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
1914+#define ADPA_HSYNC_ACTIVE_LOW 0
1915+#define ADPA_DPMS_MASK (~(3<<10))
1916+#define ADPA_DPMS_ON (0<<10)
1917+#define ADPA_DPMS_SUSPEND (1<<10)
1918+#define ADPA_DPMS_STANDBY (2<<10)
1919+#define ADPA_DPMS_OFF (3<<10)
1920+
1921+/* Hotplug control (945+ only) */
1922+#define PORT_HOTPLUG_EN 0x61110
1923+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
1924+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
1925+#define TV_HOTPLUG_INT_EN (1 << 18)
1926+#define CRT_HOTPLUG_INT_EN (1 << 9)
1927+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
1928+
1929+#define PORT_HOTPLUG_STAT 0x61114
1930+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
1931+#define TV_HOTPLUG_INT_STATUS (1 << 10)
1932+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
1933+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
1934+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
1935+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
1936+#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
1937+#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
1938+
1939+/* SDVO port control */
1940+#define SDVOB 0x61140
1941+#define SDVOC 0x61160
1942+#define SDVO_ENABLE (1 << 31)
1943+#define SDVO_PIPE_B_SELECT (1 << 30)
1944+#define SDVO_STALL_SELECT (1 << 29)
1945+#define SDVO_INTERRUPT_ENABLE (1 << 26)
1946+/**
1947+ * 915G/GM SDVO pixel multiplier.
1948+ *
1949+ * Programmed value is multiplier - 1, up to 5x.
1950+ *
1951+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
1952+ */
1953+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
1954+#define SDVO_PORT_MULTIPLY_SHIFT 23
1955+#define SDVO_PHASE_SELECT_MASK (15 << 19)
1956+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
1957+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
1958+#define SDVOC_GANG_MODE (1 << 16)
1959+#define SDVO_BORDER_ENABLE (1 << 7)
1960+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
1961+#define SDVO_DETECTED (1 << 2)
1962+/* Bits to be preserved when writing */
1963+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
1964+#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
1965+
1966+/* DVO port control */
1967+#define DVOA 0x61120
1968+#define DVOB 0x61140
1969+#define DVOC 0x61160
1970+#define DVO_ENABLE (1 << 31)
1971+#define DVO_PIPE_B_SELECT (1 << 30)
1972+#define DVO_PIPE_STALL_UNUSED (0 << 28)
1973+#define DVO_PIPE_STALL (1 << 28)
1974+#define DVO_PIPE_STALL_TV (2 << 28)
1975+#define DVO_PIPE_STALL_MASK (3 << 28)
1976+#define DVO_USE_VGA_SYNC (1 << 15)
1977+#define DVO_DATA_ORDER_I740 (0 << 14)
1978+#define DVO_DATA_ORDER_FP (1 << 14)
1979+#define DVO_VSYNC_DISABLE (1 << 11)
1980+#define DVO_HSYNC_DISABLE (1 << 10)
1981+#define DVO_VSYNC_TRISTATE (1 << 9)
1982+#define DVO_HSYNC_TRISTATE (1 << 8)
1983+#define DVO_BORDER_ENABLE (1 << 7)
1984+#define DVO_DATA_ORDER_GBRG (1 << 6)
1985+#define DVO_DATA_ORDER_RGGB (0 << 6)
1986+#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6)
1987+#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6)
1988+#define DVO_VSYNC_ACTIVE_HIGH (1 << 4)
1989+#define DVO_HSYNC_ACTIVE_HIGH (1 << 3)
1990+#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
1991+#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
1992+#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
1993+#define DVO_PRESERVE_MASK (0x7<<24)
1994+#define DVOA_SRCDIM 0x61124
1995+#define DVOB_SRCDIM 0x61144
1996+#define DVOC_SRCDIM 0x61164
1997+#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
1998+#define DVO_SRCDIM_VERTICAL_SHIFT 0
1999+
2000+/* LVDS port control */
2001+#define LVDS 0x61180
2002+/*
2003+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
2004+ * the DPLL semantics change when the LVDS is assigned to that pipe.
2005+ */
2006+#define LVDS_PORT_EN (1 << 31)
2007+/* Selects pipe B for LVDS data. Must be set on pre-965. */
2008+#define LVDS_PIPEB_SELECT (1 << 30)
2009+/*
2010+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
2011+ * pixel.
2012+ */
2013+#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
2014+#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
2015+#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
2016+/*
2017+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
2018+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
2019+ * on.
2020+ */
2021+#define LVDS_A3_POWER_MASK (3 << 6)
2022+#define LVDS_A3_POWER_DOWN (0 << 6)
2023+#define LVDS_A3_POWER_UP (3 << 6)
2024+/*
2025+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
2026+ * is set.
2027+ */
2028+#define LVDS_CLKB_POWER_MASK (3 << 4)
2029+#define LVDS_CLKB_POWER_DOWN (0 << 4)
2030+#define LVDS_CLKB_POWER_UP (3 << 4)
2031+/*
2032+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
2033+ * setting for whether we are in dual-channel mode. The B3 pair will
2034+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
2035+ */
2036+#define LVDS_B0B3_POWER_MASK (3 << 2)
2037+#define LVDS_B0B3_POWER_DOWN (0 << 2)
2038+#define LVDS_B0B3_POWER_UP (3 << 2)
2039+
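
The two constraints in the LVDS comments above (port bit before DPLL, B0-B3 matching the p2 divider) suggest a bring-up order like the hedged sketch below; the register values are illustrative, with DIV_7 paired with dual channel because powering the B pairs halves the per-channel rate:

    /* Sketch: enable dual-channel LVDS on pipe B.  LVDS_PORT_EN is written
     * before the DPLL, per the comment above; DPLLB_LVDS_P2_CLOCK_DIV_7
     * matches powering up both the A and B data pairs. */
    static void lvds_enable_dual_channel(drm_i915_private_t *dev_priv, u32 dpll)
    {
            I915_WRITE(LVDS, LVDS_PORT_EN | LVDS_PIPEB_SELECT |
                             LVDS_A0A2_CLKA_POWER_UP | LVDS_CLKB_POWER_UP |
                             LVDS_B0B3_POWER_UP);
            (void) I915_READ(LVDS);
            I915_WRITE(DPLL_B, dpll | DPLL_VCO_ENABLE | DPLLB_MODE_LVDS |
                               DPLLB_LVDS_P2_CLOCK_DIV_7);
    }
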
2040+/* Panel power sequencing */
2041+#define PP_STATUS 0x61200
2042+#define PP_ON (1 << 31)
2043+/*
2044+ * Indicates that all dependencies of the panel are on:
2045+ *
2046+ * - PLL enabled
2047+ * - pipe enabled
2048+ * - LVDS/DVOB/DVOC on
2049+ */
2050+#define PP_READY (1 << 30)
2051+#define PP_SEQUENCE_NONE (0 << 28)
2052+#define PP_SEQUENCE_ON (1 << 28)
2053+#define PP_SEQUENCE_OFF (2 << 28)
2054+#define PP_SEQUENCE_MASK 0x30000000
2055+#define PP_CONTROL 0x61204
2056+#define POWER_TARGET_ON (1 << 0)
2057+#define PP_ON_DELAYS 0x61208
2058+#define PP_OFF_DELAYS 0x6120c
2059+#define PP_DIVISOR 0x61210
2060+
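
Since PP_READY only summarizes state, panel power-on is a request-then-poll affair: set POWER_TARGET_ON and wait for the sequencer. A minimal sketch, with an illustrative timeout policy (msleep() is the stock kernel delay helper):

    /* Sketch: request panel power and wait for the sequencer to report on. */
    static int panel_power_on(drm_i915_private_t *dev_priv)
    {
            int tries;

            I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | POWER_TARGET_ON);
            for (tries = 0; tries < 100; tries++) {
                    if (I915_READ(PP_STATUS) & PP_ON)
                            return 0;
                    msleep(10);
            }
            return -ETIMEDOUT;
    }
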
2061+/* Panel fitting */
2062+#define PFIT_CONTROL 0x61230
2063+#define PFIT_ENABLE (1 << 31)
2064+#define PFIT_PIPE_MASK (3 << 29)
2065+#define PFIT_PIPE_SHIFT 29
2066+#define VERT_INTERP_DISABLE (0 << 10)
2067+#define VERT_INTERP_BILINEAR (1 << 10)
2068+#define VERT_INTERP_MASK (3 << 10)
2069+#define VERT_AUTO_SCALE (1 << 9)
2070+#define HORIZ_INTERP_DISABLE (0 << 6)
2071+#define HORIZ_INTERP_BILINEAR (1 << 6)
2072+#define HORIZ_INTERP_MASK (3 << 6)
2073+#define HORIZ_AUTO_SCALE (1 << 5)
2074+#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
2075+#define PFIT_PGM_RATIOS 0x61234
2076+#define PFIT_VERT_SCALE_MASK 0xfff00000
2077+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
2078+#define PFIT_AUTO_RATIOS 0x61238
2079+
2080+/* Backlight control */
2081+#define BLC_PWM_CTL 0x61254
2082+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
2083+#define BLC_PWM_CTL2 0x61250 /* 965+ only */
2084+/*
2085+ * This is the most significant 15 bits of the number of backlight cycles in a
2086+ * complete cycle of the modulated backlight control.
2087+ *
2088+ * The actual value is this field multiplied by two.
2089+ */
2090+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
2091+#define BLM_LEGACY_MODE (1 << 16)
2092+/*
2093+ * This is the number of cycles out of the backlight modulation cycle for which
2094+ * the backlight is on.
2095+ *
2096+ * This field must be no greater than the number of cycles in the complete
2097+ * backlight modulation cycle.
2098+ */
2099+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
2100+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
2101+
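
Putting those two fields together: the total PWM cycle is twice the frequency field, and the duty-cycle field must stay at or below that total, so a brightness setting scales into duty = level * total / max. A minimal sketch (helper name illustrative):

    /* Sketch: program the backlight to `level' out of `max'.  The
     * frequency field holds half the full cycle count, so the duty-cycle
     * ceiling is that field times two. */
    static void set_backlight(drm_i915_private_t *dev_priv, u32 level, u32 max)
    {
            u32 ctl = I915_READ(BLC_PWM_CTL);
            u32 total = ((ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
                         BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
            u32 duty = level * total / max;

            I915_WRITE(BLC_PWM_CTL, (ctl & ~BACKLIGHT_DUTY_CYCLE_MASK) |
                       (duty & BACKLIGHT_DUTY_CYCLE_MASK));
    }
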
2102+/* TV port control */
2103+#define TV_CTL 0x68000
2104+/** Enables the TV encoder */
2105+# define TV_ENC_ENABLE (1 << 31)
2106+/** Sources the TV encoder input from pipe B instead of A. */
2107+# define TV_ENC_PIPEB_SELECT (1 << 30)
2108+/** Outputs composite video (DAC A only) */
2109+# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
2110+/** Outputs SVideo video (DAC B/C) */
2111+# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
2112+/** Outputs Component video (DAC A/B/C) */
2113+# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
2114+/** Outputs Composite and SVideo (DAC A/B/C) */
2115+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
2116+# define TV_TRILEVEL_SYNC (1 << 21)
2117+/** Enables slow sync generation (945GM only) */
2118+# define TV_SLOW_SYNC (1 << 20)
2119+/** Selects 4x oversampling for 480i and 576p */
2120+# define TV_OVERSAMPLE_4X (0 << 18)
2121+/** Selects 2x oversampling for 720p and 1080i */
2122+# define TV_OVERSAMPLE_2X (1 << 18)
2123+/** Selects no oversampling for 1080p */
2124+# define TV_OVERSAMPLE_NONE (2 << 18)
2125+/** Selects 8x oversampling */
2126+# define TV_OVERSAMPLE_8X (3 << 18)
2127+/** Selects progressive mode rather than interlaced */
2128+# define TV_PROGRESSIVE (1 << 17)
2129+/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
2130+# define TV_PAL_BURST (1 << 16)
2131+/** Field for setting delay of Y compared to C */
2132+# define TV_YC_SKEW_MASK (7 << 12)
2133+/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
2134+# define TV_ENC_SDP_FIX (1 << 11)
2135+/**
2136+ * Enables a fix for the 915GM only.
2137+ *
2138+ * Not sure what it does.
2139+ */
2140+# define TV_ENC_C0_FIX (1 << 10)
2141+/** Bits that must be preserved by software */
2142+# define TV_CTL_SAVE ((3 << 8) | (3 << 6))
2143+# define TV_FUSE_STATE_MASK (3 << 4)
2144+/** Read-only state that reports all features enabled */
2145+# define TV_FUSE_STATE_ENABLED (0 << 4)
2146+/** Read-only state that reports that Macrovision is disabled in hardware. */
2147+# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
2148+/** Read-only state that reports that TV-out is disabled in hardware. */
2149+# define TV_FUSE_STATE_DISABLED (2 << 4)
2150+/** Normal operation */
2151+# define TV_TEST_MODE_NORMAL (0 << 0)
2152+/** Encoder test pattern 1 - combo pattern */
2153+# define TV_TEST_MODE_PATTERN_1 (1 << 0)
2154+/** Encoder test pattern 2 - full screen vertical 75% color bars */
2155+# define TV_TEST_MODE_PATTERN_2 (2 << 0)
2156+/** Encoder test pattern 3 - full screen horizontal 75% color bars */
2157+# define TV_TEST_MODE_PATTERN_3 (3 << 0)
2158+/** Encoder test pattern 4 - random noise */
2159+# define TV_TEST_MODE_PATTERN_4 (4 << 0)
2160+/** Encoder test pattern 5 - linear color ramps */
2161+# define TV_TEST_MODE_PATTERN_5 (5 << 0)
2162+/**
2163+ * This test mode forces the DACs to 50% of full output.
2164+ *
2165+ * This is used for load detection in combination with TVDAC_SENSE_MASK.
2166+ */
2167+# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
2168+# define TV_TEST_MODE_MASK (7 << 0)
2169+
2170+#define TV_DAC 0x68004
2171+/**
2172+ * Reports that DAC state change logic has reported change (RO).
2173+ *
2174+ * This gets cleared when TVDAC_STATE_CHG_EN is cleared.
2175+ */
2176+# define TVDAC_STATE_CHG (1 << 31)
2177+# define TVDAC_SENSE_MASK (7 << 28)
2178+/** Reports that DAC A voltage is above the detect threshold */
2179+# define TVDAC_A_SENSE (1 << 30)
2180+/** Reports that DAC B voltage is above the detect threshold */
2181+# define TVDAC_B_SENSE (1 << 29)
2182+/** Reports that DAC C voltage is above the detect threshold */
2183+# define TVDAC_C_SENSE (1 << 28)
2184+/**
2185+ * Enables DAC state detection logic, for load-based TV detection.
2186+ *
2187+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
2188+ * to off, for load detection to work.
2189+ */
2190+# define TVDAC_STATE_CHG_EN (1 << 27)
2191+/** Sets the DAC A sense value to high */
2192+# define TVDAC_A_SENSE_CTL (1 << 26)
2193+/** Sets the DAC B sense value to high */
2194+# define TVDAC_B_SENSE_CTL (1 << 25)
2195+/** Sets the DAC C sense value to high */
2196+# define TVDAC_C_SENSE_CTL (1 << 24)
2197+/** Overrides the ENC_ENABLE and DAC voltage levels */
2198+# define DAC_CTL_OVERRIDE (1 << 7)
2199+/** Sets the slew rate. Must be preserved in software */
2200+# define ENC_TVDAC_SLEW_FAST (1 << 6)
2201+# define DAC_A_1_3_V (0 << 4)
2202+# define DAC_A_1_1_V (1 << 4)
2203+# define DAC_A_0_7_V (2 << 4)
2204+# define DAC_A_OFF (3 << 4)
2205+# define DAC_B_1_3_V (0 << 2)
2206+# define DAC_B_1_1_V (1 << 2)
2207+# define DAC_B_0_7_V (2 << 2)
2208+# define DAC_B_OFF (3 << 2)
2209+# define DAC_C_1_3_V (0 << 0)
2210+# define DAC_C_1_1_V (1 << 0)
2211+# define DAC_C_0_7_V (2 << 0)
2212+# define DAC_C_OFF (3 << 0)
2213+
2214+/**
2215+ * CSC coefficients are stored in a floating point format with 9 bits of
2216+ * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
2217+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
2218+ * -1 (0x3) being the only legal negative value.
2219+ */
2220+#define TV_CSC_Y 0x68010
2221+# define TV_RY_MASK 0x07ff0000
2222+# define TV_RY_SHIFT 16
2223+# define TV_GY_MASK 0x00000fff
2224+# define TV_GY_SHIFT 0
2225+
2226+#define TV_CSC_Y2 0x68014
2227+# define TV_BY_MASK 0x07ff0000
2228+# define TV_BY_SHIFT 16
2229+/**
2230+ * Y attenuation for component video.
2231+ *
2232+ * Stored in 1.9 fixed point.
2233+ */
2234+# define TV_AY_MASK 0x000003ff
2235+# define TV_AY_SHIFT 0
2236+
2237+#define TV_CSC_U 0x68018
2238+# define TV_RU_MASK 0x07ff0000
2239+# define TV_RU_SHIFT 16
2240+# define TV_GU_MASK 0x000007ff
2241+# define TV_GU_SHIFT 0
2242+
2243+#define TV_CSC_U2 0x6801c
2244+# define TV_BU_MASK 0x07ff0000
2245+# define TV_BU_SHIFT 16
2246+/**
2247+ * U attenuation for component video.
2248+ *
2249+ * Stored in 1.9 fixed point.
2250+ */
2251+# define TV_AU_MASK 0x000003ff
2252+# define TV_AU_SHIFT 0
2253+
2254+#define TV_CSC_V 0x68020
2255+# define TV_RV_MASK 0x0fff0000
2256+# define TV_RV_SHIFT 16
2257+# define TV_GV_MASK 0x000007ff
2258+# define TV_GV_SHIFT 0
2259+
2260+#define TV_CSC_V2 0x68024
2261+# define TV_BV_MASK 0x07ff0000
2262+# define TV_BV_SHIFT 16
2263+/**
2264+ * V attenuation for component video.
2265+ *
2266+ * Stored in 1.9 fixed point.
2267+ */
2268+# define TV_AV_MASK 0x000007ff
2269+# define TV_AV_SHIFT 0
2270+
2271+#define TV_CLR_KNOBS 0x68028
2272+/** 2s-complement brightness adjustment */
2273+# define TV_BRIGHTNESS_MASK 0xff000000
2274+# define TV_BRIGHTNESS_SHIFT 24
2275+/** Contrast adjustment, as a 2.6 unsigned floating point number */
2276+# define TV_CONTRAST_MASK 0x00ff0000
2277+# define TV_CONTRAST_SHIFT 16
2278+/** Saturation adjustment, as a 2.6 unsigned floating point number */
2279+# define TV_SATURATION_MASK 0x0000ff00
2280+# define TV_SATURATION_SHIFT 8
2281+/** Hue adjustment, as an integer phase angle in degrees */
2282+# define TV_HUE_MASK 0x000000ff
2283+# define TV_HUE_SHIFT 0
2284+
2285+#define TV_CLR_LEVEL 0x6802c
2286+/** Controls the DAC level for black */
2287+# define TV_BLACK_LEVEL_MASK 0x01ff0000
2288+# define TV_BLACK_LEVEL_SHIFT 16
2289+/** Controls the DAC level for blanking */
2290+# define TV_BLANK_LEVEL_MASK 0x000001ff
2291+# define TV_BLANK_LEVEL_SHIFT 0
2292+
2293+#define TV_H_CTL_1 0x68030
2294+/** Number of pixels in the hsync. */
2295+# define TV_HSYNC_END_MASK 0x1fff0000
2296+# define TV_HSYNC_END_SHIFT 16
2297+/** Total number of pixels minus one in the line (display and blanking). */
2298+# define TV_HTOTAL_MASK 0x00001fff
2299+# define TV_HTOTAL_SHIFT 0
2300+
2301+#define TV_H_CTL_2 0x68034
2302+/** Enables the colorburst (needed for non-component color) */
2303+# define TV_BURST_ENA (1 << 31)
2304+/** Offset of the colorburst from the start of hsync, in pixels minus one. */
2305+# define TV_HBURST_START_SHIFT 16
2306+# define TV_HBURST_START_MASK 0x1fff0000
2307+/** Length of the colorburst */
2308+# define TV_HBURST_LEN_SHIFT 0
2309+# define TV_HBURST_LEN_MASK 0x0001fff
2310+
2311+#define TV_H_CTL_3 0x68038
2312+/** End of hblank, measured in pixels minus one from start of hsync */
2313+# define TV_HBLANK_END_SHIFT 16
2314+# define TV_HBLANK_END_MASK 0x1fff0000
2315+/** Start of hblank, measured in pixels minus one from start of hsync */
2316+# define TV_HBLANK_START_SHIFT 0
2317+# define TV_HBLANK_START_MASK 0x0001fff
2318+
2319+#define TV_V_CTL_1 0x6803c
2320+/** XXX */
2321+# define TV_NBR_END_SHIFT 16
2322+# define TV_NBR_END_MASK 0x07ff0000
2323+/** XXX */
2324+# define TV_VI_END_F1_SHIFT 8
2325+# define TV_VI_END_F1_MASK 0x00003f00
2326+/** XXX */
2327+# define TV_VI_END_F2_SHIFT 0
2328+# define TV_VI_END_F2_MASK 0x0000003f
2329+
2330+#define TV_V_CTL_2 0x68040
2331+/** Length of vsync, in half lines */
2332+# define TV_VSYNC_LEN_MASK 0x07ff0000
2333+# define TV_VSYNC_LEN_SHIFT 16
2334+/** Offset of the start of vsync in field 1, measured in one less than the
2335+ * number of half lines.
2336+ */
2337+# define TV_VSYNC_START_F1_MASK 0x00007f00
2338+# define TV_VSYNC_START_F1_SHIFT 8
2339+/**
2340+ * Offset of the start of vsync in field 2, measured in one less than the
2341+ * number of half lines.
2342+ */
2343+# define TV_VSYNC_START_F2_MASK 0x0000007f
2344+# define TV_VSYNC_START_F2_SHIFT 0
2345+
2346+#define TV_V_CTL_3 0x68044
2347+/** Enables generation of the equalization signal */
2348+# define TV_EQUAL_ENA (1 << 31)
2349+/** Length of the equalization pulses, in half lines */
2350+# define TV_VEQ_LEN_MASK 0x007f0000
2351+# define TV_VEQ_LEN_SHIFT 16
2352+/** Offset of the start of equalization in field 1, measured in one less than
2353+ * the number of half lines.
2354+ */
2355+# define TV_VEQ_START_F1_MASK 0x0007f00
2356+# define TV_VEQ_START_F1_SHIFT 8
2357+/**
2358+ * Offset of the start of equalization in field 2, measured in one less than
2359+ * the number of half lines.
2360+ */
2361+# define TV_VEQ_START_F2_MASK 0x000007f
2362+# define TV_VEQ_START_F2_SHIFT 0
2363+
2364+#define TV_V_CTL_4 0x68048
2365+/**
2366+ * Offset to start of vertical colorburst, measured in one less than the
2367+ * number of lines from vertical start.
2368+ */
2369+# define TV_VBURST_START_F1_MASK 0x003f0000
2370+# define TV_VBURST_START_F1_SHIFT 16
2371+/**
2372+ * Offset to the end of vertical colorburst, measured in one less than the
2373+ * number of lines from the start of NBR.
2374+ */
2375+# define TV_VBURST_END_F1_MASK 0x000000ff
2376+# define TV_VBURST_END_F1_SHIFT 0
2377+
2378+#define TV_V_CTL_5 0x6804c
2379+/**
2380+ * Offset to start of vertical colorburst, measured in one less than the
2381+ * number of lines from vertical start.
2382+ */
2383+# define TV_VBURST_START_F2_MASK 0x003f0000
2384+# define TV_VBURST_START_F2_SHIFT 16
2385+/**
2386+ * Offset to the end of vertical colorburst, measured in one less than the
2387+ * number of lines from the start of NBR.
2388+ */
2389+# define TV_VBURST_END_F2_MASK 0x000000ff
2390+# define TV_VBURST_END_F2_SHIFT 0
2391+
2392+#define TV_V_CTL_6 0x68050
2393+/**
2394+ * Offset to start of vertical colorburst, measured in one less than the
2395+ * number of lines from vertical start.
2396+ */
2397+# define TV_VBURST_START_F3_MASK 0x003f0000
2398+# define TV_VBURST_START_F3_SHIFT 16
2399+/**
2400+ * Offset to the end of vertical colorburst, measured in one less than the
2401+ * number of lines from the start of NBR.
2402+ */
2403+# define TV_VBURST_END_F3_MASK 0x000000ff
2404+# define TV_VBURST_END_F3_SHIFT 0
2405+
2406+#define TV_V_CTL_7 0x68054
2407+/**
2408+ * Offset to start of vertical colorburst, measured in one less than the
2409+ * number of lines from vertical start.
2410+ */
2411+# define TV_VBURST_START_F4_MASK 0x003f0000
2412+# define TV_VBURST_START_F4_SHIFT 16
2413+/**
2414+ * Offset to the end of vertical colorburst, measured in one less than the
2415+ * number of lines from the start of NBR.
2416+ */
2417+# define TV_VBURST_END_F4_MASK 0x000000ff
2418+# define TV_VBURST_END_F4_SHIFT 0
2419+
2420+#define TV_SC_CTL_1 0x68060
2421+/** Turns on the first subcarrier phase generation DDA */
2422+# define TV_SC_DDA1_EN (1 << 31)
2423+/** Turns on the second subcarrier phase generation DDA */
2424+# define TV_SC_DDA2_EN (1 << 30)
2425+/** Turns on the third subcarrier phase generation DDA */
2426+# define TV_SC_DDA3_EN (1 << 29)
2427+/** Sets the subcarrier DDA to reset frequency every other field */
2428+# define TV_SC_RESET_EVERY_2 (0 << 24)
2429+/** Sets the subcarrier DDA to reset frequency every fourth field */
2430+# define TV_SC_RESET_EVERY_4 (1 << 24)
2431+/** Sets the subcarrier DDA to reset frequency every eighth field */
2432+# define TV_SC_RESET_EVERY_8 (2 << 24)
2433+/** Sets the subcarrier DDA to never reset the frequency */
2434+# define TV_SC_RESET_NEVER (3 << 24)
2435+/** Sets the peak amplitude of the colorburst. */
2436+# define TV_BURST_LEVEL_MASK 0x00ff0000
2437+# define TV_BURST_LEVEL_SHIFT 16
2438+/** Sets the increment of the first subcarrier phase generation DDA */
2439+# define TV_SCDDA1_INC_MASK 0x00000fff
2440+# define TV_SCDDA1_INC_SHIFT 0
2441+
2442+#define TV_SC_CTL_2 0x68064
2443+/** Sets the rollover for the second subcarrier phase generation DDA */
2444+# define TV_SCDDA2_SIZE_MASK 0x7fff0000
2445+# define TV_SCDDA2_SIZE_SHIFT 16
2446+/** Sets the increment of the second subcarrier phase generation DDA */
2447+# define TV_SCDDA2_INC_MASK 0x00007fff
2448+# define TV_SCDDA2_INC_SHIFT 0
2449+
2450+#define TV_SC_CTL_3 0x68068
2451+/** Sets the rollover for the third subcarrier phase generation DDA */
2452+# define TV_SCDDA3_SIZE_MASK 0x7fff0000
2453+# define TV_SCDDA3_SIZE_SHIFT 16
2454+/** Sets the increment of the third subcarrier phase generation DDA */
2455+# define TV_SCDDA3_INC_MASK 0x00007fff
2456+# define TV_SCDDA3_INC_SHIFT 0
2457+
2458+#define TV_WIN_POS 0x68070
2459+/** X coordinate of the display from the start of horizontal active */
2460+# define TV_XPOS_MASK 0x1fff0000
2461+# define TV_XPOS_SHIFT 16
2462+/** Y coordinate of the display from the start of vertical active (NBR) */
2463+# define TV_YPOS_MASK 0x00000fff
2464+# define TV_YPOS_SHIFT 0
2465+
2466+#define TV_WIN_SIZE 0x68074
2467+/** Horizontal size of the display window, measured in pixels */
2468+# define TV_XSIZE_MASK 0x1fff0000
2469+# define TV_XSIZE_SHIFT 16
2470+/**
2471+ * Vertical size of the display window, measured in pixels.
2472+ *
2473+ * Must be even for interlaced modes.
2474+ */
2475+# define TV_YSIZE_MASK 0x00000fff
2476+# define TV_YSIZE_SHIFT 0
2477+
2478+#define TV_FILTER_CTL_1 0x68080
2479+/**
2480+ * Enables automatic scaling calculation.
2481+ *
2482+ * If set, the rest of the registers are ignored, and the calculated values can
2483+ * be read back from the register.
2484+ */
2485+# define TV_AUTO_SCALE (1 << 31)
2486+/**
2487+ * Disables the vertical filter.
2488+ *
2489+ * This is required for modes more than 1024 pixels wide. */
2490+# define TV_V_FILTER_BYPASS (1 << 29)
2491+/** Enables adaptive vertical filtering */
2492+# define TV_VADAPT (1 << 28)
2493+# define TV_VADAPT_MODE_MASK (3 << 26)
2494+/** Selects the least adaptive vertical filtering mode */
2495+# define TV_VADAPT_MODE_LEAST (0 << 26)
2496+/** Selects the moderately adaptive vertical filtering mode */
2497+# define TV_VADAPT_MODE_MODERATE (1 << 26)
2498+/** Selects the most adaptive vertical filtering mode */
2499+# define TV_VADAPT_MODE_MOST (3 << 26)
2500+/**
2501+ * Sets the horizontal scaling factor.
2502+ *
2503+ * This should be the fractional part of the horizontal scaling factor divided
2504+ * by the oversampling rate. TV_HSCALE should be less than 1, and set to:
2505+ *
2506+ * (src width - 1) / ((oversample * dest width) - 1)
2507+ */
2508+# define TV_HSCALE_FRAC_MASK 0x00003fff
2509+# define TV_HSCALE_FRAC_SHIFT 0
2510+
2511+#define TV_FILTER_CTL_2 0x68084
2512+/**
2513+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
2514+ *
2515+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
2516+ */
2517+# define TV_VSCALE_INT_MASK 0x00038000
2518+# define TV_VSCALE_INT_SHIFT 15
2519+/**
2520+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
2521+ *
2522+ * \sa TV_VSCALE_INT_MASK
2523+ */
2524+# define TV_VSCALE_FRAC_MASK 0x00007fff
2525+# define TV_VSCALE_FRAC_SHIFT 0
2526+
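
Both scale factors use the same counts-minus-one convention: hscale = (src_w - 1) / (oversample * dest_w - 1) as a 14-bit fraction, and vscale = (src_h - 1) / (interlace * dest_h - 1) in 3.15 fixed point. A hedged sketch of the packing; the bit widths follow the masks above, while the rounding (plain truncation) is an assumption:

    /* Sketch: pack the TV scaling factors per the formulas above. */
    static u32 tv_hscale(int src_w, int oversample, int dest_w)
    {
            return (((src_w - 1) << 14) / (oversample * dest_w - 1)) &
                    TV_HSCALE_FRAC_MASK;
    }

    static u32 tv_vscale(int src_h, int interlace, int dest_h)
    {
            u32 v = ((src_h - 1) << 15) / (interlace * dest_h - 1);

            return ((v >> 15) << TV_VSCALE_INT_SHIFT) |
                   ((v & TV_VSCALE_FRAC_MASK) << TV_VSCALE_FRAC_SHIFT);
    }
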
2527+#define TV_FILTER_CTL_3 0x68088
2528+/**
2529+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
2530+ *
2531+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
2532+ *
2533+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
2534+ */
2535+# define TV_VSCALE_IP_INT_MASK 0x00038000
2536+# define TV_VSCALE_IP_INT_SHIFT 15
2537+/**
2538+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
2539+ *
2540+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
2541+ *
2542+ * \sa TV_VSCALE_IP_INT_MASK
2543+ */
2544+# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
2545+# define TV_VSCALE_IP_FRAC_SHIFT 0
2546+
2547+#define TV_CC_CONTROL 0x68090
2548+# define TV_CC_ENABLE (1 << 31)
2549+/**
2550+ * Specifies which field to send the CC data in.
2551+ *
2552+ * CC data is usually sent in field 0.
2553+ */
2554+# define TV_CC_FID_MASK (1 << 27)
2555+# define TV_CC_FID_SHIFT 27
2556+/** Sets the horizontal position of the CC data. Usually 135. */
2557+# define TV_CC_HOFF_MASK 0x03ff0000
2558+# define TV_CC_HOFF_SHIFT 16
2559+/** Sets the vertical position of the CC data. Usually 21 */
2560+# define TV_CC_LINE_MASK 0x0000003f
2561+# define TV_CC_LINE_SHIFT 0
2562+
2563+#define TV_CC_DATA 0x68094
2564+# define TV_CC_RDY (1 << 31)
2565+/** Second word of CC data to be transmitted. */
2566+# define TV_CC_DATA_2_MASK 0x007f0000
2567+# define TV_CC_DATA_2_SHIFT 16
2568+/** First word of CC data to be transmitted. */
2569+# define TV_CC_DATA_1_MASK 0x0000007f
2570+# define TV_CC_DATA_1_SHIFT 0
2571+
2572+#define TV_H_LUMA_0 0x68100
2573+#define TV_H_LUMA_59 0x681ec
2574+#define TV_H_CHROMA_0 0x68200
2575+#define TV_H_CHROMA_59 0x682ec
2576+#define TV_V_LUMA_0 0x68300
2577+#define TV_V_LUMA_42 0x683a8
2578+#define TV_V_CHROMA_0 0x68400
2579+#define TV_V_CHROMA_42 0x684a8
2580+
2581+/* Display & cursor control */
2582+
2583+/* Pipe A */
2584+#define PIPEADSL 0x70000
2585+#define PIPEACONF 0x70008
2586+#define PIPEACONF_ENABLE (1<<31)
2587+#define PIPEACONF_DISABLE 0
2588+#define PIPEACONF_DOUBLE_WIDE (1<<30)
2589+#define I965_PIPECONF_ACTIVE (1<<30)
2590+#define PIPEACONF_SINGLE_WIDE 0
2591+#define PIPEACONF_PIPE_UNLOCKED 0
2592+#define PIPEACONF_PIPE_LOCKED (1<<25)
2593+#define PIPEACONF_PALETTE 0
2594+#define PIPEACONF_GAMMA (1<<24)
2595+#define PIPECONF_FORCE_BORDER (1<<25)
2596+#define PIPECONF_PROGRESSIVE (0 << 21)
2597+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
2598+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
2599+#define PIPEASTAT 0x70024
2600+#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
2601+#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
2602+#define PIPE_CRC_DONE_ENABLE (1UL<<28)
2603+#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
2604+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
2605+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
2606+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
2607+#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
2608+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
2609+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
2610+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
2611+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
2612+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
2613+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
2614+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
2615+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
2616+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
2617+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
2618+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
2619+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
2620+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
2621+#define PIPE_DPST_EVENT_STATUS (1UL<<7)
2622+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
2623+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
2624+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
2625+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
2626+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
2627+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
2628+#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
2629+
2630+#define DSPARB 0x70030
2631+#define DSPARB_CSTART_MASK (0x7f << 7)
2632+#define DSPARB_CSTART_SHIFT 7
2633+#define DSPARB_BSTART_MASK (0x7f)
2634+#define DSPARB_BSTART_SHIFT 0
2635+/*
2636+ * The two pipe frame counter registers are not synchronized, so
2637+ * reading a stable value is somewhat tricky. The following code
2638+ * should work:
2639+ *
2640+ * do {
2641+ * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
2642+ * PIPE_FRAME_HIGH_SHIFT);
2643+ * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
2644+ * PIPE_FRAME_LOW_SHIFT);
2645+ * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
2646+ * PIPE_FRAME_HIGH_SHIFT);
2647+ * } while (high1 != high2);
2648+ * frame = (high1 << 8) | low1;
2649+ */
2650+#define PIPEAFRAMEHIGH 0x70040
2651+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
2652+#define PIPE_FRAME_HIGH_SHIFT 0
2653+#define PIPEAFRAMEPIXEL 0x70044
2654+#define PIPE_FRAME_LOW_MASK 0xff000000
2655+#define PIPE_FRAME_LOW_SHIFT 24
2656+#define PIPE_PIXEL_MASK 0x00ffffff
2657+#define PIPE_PIXEL_SHIFT 0
2658+
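The frame-counter comment above is easiest to check as a compilable helper. A minimal sketch of the same read sequence, assuming a hypothetical read_reg() MMIO accessor in place of INREG (not part of the header itself):

    static u32 pipe_a_frame_count(void)
    {
            u32 high1, high2, low;

            /* Re-read the high word until it is stable across the low read. */
            do {
                    high1 = (read_reg(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
                            PIPE_FRAME_HIGH_SHIFT;
                    low = (read_reg(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
                          PIPE_FRAME_LOW_SHIFT;
                    high2 = (read_reg(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
                            PIPE_FRAME_HIGH_SHIFT;
            } while (high1 != high2);

            /* 16 high bits in one register, low 8 bits in the other. */
            return (high1 << 8) | low;
    }
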
2659+/* Cursor A & B regs */
2660+#define CURACNTR 0x70080
2661+#define CURSOR_MODE_DISABLE 0x00
2662+#define CURSOR_MODE_64_32B_AX 0x07
2663+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
2664+#define MCURSOR_GAMMA_ENABLE (1 << 26)
2665+#define CURABASE 0x70084
2666+#define CURAPOS 0x70088
2667+#define CURSOR_POS_MASK 0x007FF
2668+#define CURSOR_POS_SIGN 0x8000
2669+#define CURSOR_X_SHIFT 0
2670+#define CURSOR_Y_SHIFT 16
2671+#define CURBCNTR 0x700c0
2672+#define CURBBASE 0x700c4
2673+#define CURBPOS 0x700c8
2674+
2675+/* Display A control */
2676+#define DSPACNTR 0x70180
2677+#define DISPLAY_PLANE_ENABLE (1<<31)
2678+#define DISPLAY_PLANE_DISABLE 0
2679+#define DISPPLANE_GAMMA_ENABLE (1<<30)
2680+#define DISPPLANE_GAMMA_DISABLE 0
2681+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
2682+#define DISPPLANE_8BPP (0x2<<26)
2683+#define DISPPLANE_15_16BPP (0x4<<26)
2684+#define DISPPLANE_16BPP (0x5<<26)
2685+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
2686+#define DISPPLANE_32BPP (0x7<<26)
2687+#define DISPPLANE_STEREO_ENABLE (1<<25)
2688+#define DISPPLANE_STEREO_DISABLE 0
2689+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
2690+#define DISPPLANE_SEL_PIPE_A 0
2691+#define DISPPLANE_SEL_PIPE_B (1<<24)
2692+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
2693+#define DISPPLANE_SRC_KEY_DISABLE 0
2694+#define DISPPLANE_LINE_DOUBLE (1<<20)
2695+#define DISPPLANE_NO_LINE_DOUBLE 0
2696+#define DISPPLANE_STEREO_POLARITY_FIRST 0
2697+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
2698+#define DSPAADDR 0x70184
2699+#define DSPASTRIDE 0x70188
2700+#define DSPAPOS 0x7018C /* reserved */
2701+#define DSPASIZE 0x70190
2702+#define DSPASURF 0x7019C /* 965+ only */
2703+#define DSPATILEOFF 0x701A4 /* 965+ only */
2704+
2705+/* VBIOS flags */
2706+#define SWF00 0x71410
2707+#define SWF01 0x71414
2708+#define SWF02 0x71418
2709+#define SWF03 0x7141c
2710+#define SWF04 0x71420
2711+#define SWF05 0x71424
2712+#define SWF06 0x71428
2713+#define SWF10 0x70410
2714+#define SWF11 0x70414
2715+#define SWF14 0x71420
2716+#define SWF30 0x72414
2717+#define SWF31 0x72418
2718+#define SWF32 0x7241c
2719+
2720+/* Pipe B */
2721+#define PIPEBDSL 0x71000
2722+#define PIPEBCONF 0x71008
2723+#define PIPEBSTAT 0x71024
2724+#define PIPEBFRAMEHIGH 0x71040
2725+#define PIPEBFRAMEPIXEL 0x71044
2726+
2727+/* Display B control */
2728+#define DSPBCNTR 0x71180
2729+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
2730+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
2731+#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
2732+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
2733+#define DSPBADDR 0x71184
2734+#define DSPBSTRIDE 0x71188
2735+#define DSPBPOS 0x7118C
2736+#define DSPBSIZE 0x71190
2737+#define DSPBSURF 0x7119C
2738+#define DSPBTILEOFF 0x711A4
2739+
2740+/* VBIOS regs */
2741+#define VGACNTRL 0x71400
2742+# define VGA_DISP_DISABLE (1 << 31)
2743+# define VGA_2X_MODE (1 << 30)
2744+# define VGA_PIPE_B_SELECT (1 << 29)
2745+
2746+#endif /* _I915_REG_H_ */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch
deleted file mode 100644
index 9337475c31..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch
+++ /dev/null
@@ -1,424 +0,0 @@
1commit 4f99970852559935b27bc634318f34c18c5fd143
2Author: Eric Anholt <eric@anholt.net>
3Date: Tue Jul 29 12:10:39 2008 -0700
4
5 i915: Add support for MSI and interrupt mitigation.
6
7 Previous attempts at interrupt mitigation had been foiled by i915_wait_irq's
8 failure to update the sarea seqno value when the status page indicated that
9 the seqno had already been passed. MSI support has been seen to cut CPU
10 costs by up to 40% in some workloads by avoiding other expensive interrupt
11 handlers for frequent graphics interrupts.
12
13 Signed-off-by: Eric Anholt <eric@anholt.net>
14 Signed-off-by: Dave Airlie <airlied@redhat.com>
15
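The mitigation below hinges on caching the interrupt mask register (IMR) in software, so the enable/disable paths touch the hardware only when a bit actually changes, and on flushing posted writes with a readback. A minimal sketch of that pattern (hypothetical names; the patch's real versions are i915_enable_irq()/i915_disable_irq() further down):

    static u32 cached_imr = ~0u;        /* a set bit means the interrupt is masked off */

    static void unmask_irq(u32 mask)
    {
            if (cached_imr & mask) {            /* write only on a real change */
                    cached_imr &= ~mask;
                    write_reg(IMR, cached_imr);
                    (void)read_reg(IMR);        /* flush the posted write */
            }
    }
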
16diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
17index 53f0e5a..61ed515 100644
18--- a/drivers/gpu/drm/drm_irq.c
19+++ b/drivers/gpu/drm/drm_irq.c
20@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
21 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
22 return -EINVAL;
23
24- p->irq = dev->irq;
25+ p->irq = dev->pdev->irq;
26
27 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
28 p->irq);
29@@ -89,7 +89,7 @@ static int drm_irq_install(struct drm_device * dev)
30 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
31 return -EINVAL;
32
33- if (dev->irq == 0)
34+ if (dev->pdev->irq == 0)
35 return -EINVAL;
36
37 mutex_lock(&dev->struct_mutex);
38@@ -107,7 +107,7 @@ static int drm_irq_install(struct drm_device * dev)
39 dev->irq_enabled = 1;
40 mutex_unlock(&dev->struct_mutex);
41
42- DRM_DEBUG("irq=%d\n", dev->irq);
43+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
44
45 if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
46 init_waitqueue_head(&dev->vbl_queue);
47@@ -127,8 +127,12 @@ static int drm_irq_install(struct drm_device * dev)
48 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
49 sh_flags = IRQF_SHARED;
50
51- ret = request_irq(dev->irq, dev->driver->irq_handler,
52+ ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
53 sh_flags, dev->devname, dev);
54+ /* Expose the device irq number to drivers that want to export it for
55+ * whatever reason.
56+ */
57+ dev->irq = dev->pdev->irq;
58 if (ret < 0) {
59 mutex_lock(&dev->struct_mutex);
60 dev->irq_enabled = 0;
61@@ -164,11 +168,11 @@ int drm_irq_uninstall(struct drm_device * dev)
62 if (!irq_enabled)
63 return -EINVAL;
64
65- DRM_DEBUG("irq=%d\n", dev->irq);
66+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
67
68 dev->driver->irq_uninstall(dev);
69
70- free_irq(dev->irq, dev);
71+ free_irq(dev->pdev->irq, dev);
72
73 dev->locked_tasklet_func = NULL;
74
75@@ -201,7 +205,7 @@ int drm_control(struct drm_device *dev, void *data,
76 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
77 return 0;
78 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
79- ctl->irq != dev->irq)
80+ ctl->irq != dev->pdev->irq)
81 return -EINVAL;
82 return drm_irq_install(dev);
83 case DRM_UNINST_HANDLER:
84@@ -239,7 +243,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
85 int ret = 0;
86 unsigned int flags, seq;
87
88- if ((!dev->irq) || (!dev->irq_enabled))
89+ if ((!dev->pdev->irq) || (!dev->irq_enabled))
90 return -EINVAL;
91
92 if (vblwait->request.type &
93diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
94index 7be580b..10bfb0c 100644
95--- a/drivers/gpu/drm/i915/i915_dma.c
96+++ b/drivers/gpu/drm/i915/i915_dma.c
97@@ -84,7 +84,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
98 * may not have been called from userspace and after dev_private
99 * is freed, it's too late.
100 */
101- if (dev->irq)
102+ if (dev->irq_enabled)
103 drm_irq_uninstall(dev);
104
105 if (dev_priv->ring.virtual_start) {
106@@ -644,7 +644,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
107
108 switch (param->param) {
109 case I915_PARAM_IRQ_ACTIVE:
110- value = dev->irq ? 1 : 0;
111+ value = dev->irq_enabled;
112 break;
113 case I915_PARAM_ALLOW_BATCHBUFFER:
114 value = dev_priv->allow_batchbuffer ? 1 : 0;
115@@ -763,6 +763,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
116 ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
117 _DRM_KERNEL | _DRM_DRIVER,
118 &dev_priv->mmio_map);
119+
120+
121+ /* On the 945G/GM, the chipset reports the MSI capability on the
122+ * integrated graphics even though the support isn't actually there
123+ * according to the published specs. It doesn't appear to function
124+ * correctly in testing on 945G.
125+ * This may be a side effect of MSI having been made available for PEG
126+ * and the registers being closely associated.
127+ */
128+ if (!IS_I945G(dev) && !IS_I945GM(dev))
129+ pci_enable_msi(dev->pdev);
130+
131+ spin_lock_init(&dev_priv->user_irq_lock);
132+
133 return ret;
134 }
135
136@@ -770,6 +784,9 @@ int i915_driver_unload(struct drm_device *dev)
137 {
138 struct drm_i915_private *dev_priv = dev->dev_private;
139
140+ if (dev->pdev->msi_enabled)
141+ pci_disable_msi(dev->pdev);
142+
143 if (dev_priv->mmio_map)
144 drm_rmmap(dev, dev_priv->mmio_map);
145
146diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
147index afb51a3..8daf0d8 100644
148--- a/drivers/gpu/drm/i915/i915_drv.h
149+++ b/drivers/gpu/drm/i915/i915_drv.h
150@@ -105,6 +105,12 @@ typedef struct drm_i915_private {
151 wait_queue_head_t irq_queue;
152 atomic_t irq_received;
153 atomic_t irq_emitted;
154+ /** Protects user_irq_refcount and irq_mask_reg */
155+ spinlock_t user_irq_lock;
156+ /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
157+ int user_irq_refcount;
158+ /** Cached value of IMR to avoid reads in updating the bitfield */
159+ u32 irq_mask_reg;
160
161 int tex_lru_log_granularity;
162 int allow_batchbuffer;
163diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
164index 4a2de78..24d11ed 100644
165--- a/drivers/gpu/drm/i915/i915_irq.c
166+++ b/drivers/gpu/drm/i915/i915_irq.c
167@@ -33,6 +33,31 @@
168
169 #define MAX_NOPID ((u32)~0)
170
171+/** These are the interrupts used by the driver */
172+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
173+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
174+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
175+
176+static inline void
177+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
178+{
179+ if ((dev_priv->irq_mask_reg & mask) != 0) {
180+ dev_priv->irq_mask_reg &= ~mask;
181+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
182+ (void) I915_READ(IMR);
183+ }
184+}
185+
186+static inline void
187+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
188+{
189+ if ((dev_priv->irq_mask_reg & mask) != mask) {
190+ dev_priv->irq_mask_reg |= mask;
191+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
192+ (void) I915_READ(IMR);
193+ }
194+}
195+
196 /**
197 * Emit blits for scheduled buffer swaps.
198 *
199@@ -229,46 +254,50 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
200 {
201 struct drm_device *dev = (struct drm_device *) arg;
202 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
203- u16 temp;
204 u32 pipea_stats, pipeb_stats;
205+ u32 iir;
206
207 pipea_stats = I915_READ(PIPEASTAT);
208 pipeb_stats = I915_READ(PIPEBSTAT);
209
210- temp = I915_READ16(IIR);
211-
212- temp &= (I915_USER_INTERRUPT |
213- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
214- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT);
215+ if (dev->pdev->msi_enabled)
216+ I915_WRITE(IMR, ~0);
217+ iir = I915_READ(IIR);
218
219- DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
220+ DRM_DEBUG("iir=%08x\n", iir);
221
222- if (temp == 0)
223+ if (iir == 0) {
224+ if (dev->pdev->msi_enabled) {
225+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
226+ (void) I915_READ(IMR);
227+ }
228 return IRQ_NONE;
229+ }
230
231- I915_WRITE16(IIR, temp);
232- (void) I915_READ16(IIR);
233- DRM_READMEMORYBARRIER();
234+ I915_WRITE(IIR, iir);
235+ if (dev->pdev->msi_enabled)
236+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
237+ (void) I915_READ(IIR); /* Flush posted writes */
238
239 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
240
241- if (temp & I915_USER_INTERRUPT)
242+ if (iir & I915_USER_INTERRUPT)
243 DRM_WAKEUP(&dev_priv->irq_queue);
244
245- if (temp & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
246- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
247+ if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
248+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
249 int vblank_pipe = dev_priv->vblank_pipe;
250
251 if ((vblank_pipe &
252 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
253 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
254- if (temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
255+ if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
256 atomic_inc(&dev->vbl_received);
257- if (temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
258+ if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
259 atomic_inc(&dev->vbl_received2);
260- } else if (((temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
261+ } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
262 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
263- ((temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
264+ ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
265 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
266 atomic_inc(&dev->vbl_received);
267
268@@ -314,6 +343,27 @@ static int i915_emit_irq(struct drm_device * dev)
269 return dev_priv->counter;
270 }
271
272+static void i915_user_irq_get(struct drm_device *dev)
273+{
274+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
275+
276+ spin_lock(&dev_priv->user_irq_lock);
277+ if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
278+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
279+ spin_unlock(&dev_priv->user_irq_lock);
280+}
281+
282+static void i915_user_irq_put(struct drm_device *dev)
283+{
284+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
285+
286+ spin_lock(&dev_priv->user_irq_lock);
287+ BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
288+ if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
289+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
290+ spin_unlock(&dev_priv->user_irq_lock);
291+}
292+
293 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
294 {
295 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
296@@ -322,13 +372,17 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
297 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
298 READ_BREADCRUMB(dev_priv));
299
300- if (READ_BREADCRUMB(dev_priv) >= irq_nr)
301+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
302+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
303 return 0;
304+ }
305
306 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
307
308+ i915_user_irq_get(dev);
309 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
310 READ_BREADCRUMB(dev_priv) >= irq_nr);
311+ i915_user_irq_put(dev);
312
313 if (ret == -EBUSY) {
314 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
315@@ -413,20 +467,6 @@ int i915_irq_wait(struct drm_device *dev, void *data,
316 return i915_wait_irq(dev, irqwait->irq_seq);
317 }
318
319-static void i915_enable_interrupt (struct drm_device *dev)
320-{
321- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
322- u16 flag;
323-
324- flag = 0;
325- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
326- flag |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
327- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
328- flag |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
329-
330- I915_WRITE16(IER, I915_USER_INTERRUPT | flag);
331-}
332-
333 /* Set the vblank monitor pipe
334 */
335 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
336@@ -434,6 +474,7 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
337 {
338 drm_i915_private_t *dev_priv = dev->dev_private;
339 drm_i915_vblank_pipe_t *pipe = data;
340+ u32 enable_mask = 0, disable_mask = 0;
341
342 if (!dev_priv) {
343 DRM_ERROR("called with no initialization\n");
344@@ -445,9 +486,20 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
345 return -EINVAL;
346 }
347
348- dev_priv->vblank_pipe = pipe->pipe;
349+ if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
350+ enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
351+ else
352+ disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
353+
354+ if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
355+ enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
356+ else
357+ disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
358
359- i915_enable_interrupt (dev);
360+ i915_enable_irq(dev_priv, enable_mask);
361+ i915_disable_irq(dev_priv, disable_mask);
362+
363+ dev_priv->vblank_pipe = pipe->pipe;
364
365 return 0;
366 }
367@@ -464,7 +516,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
368 return -EINVAL;
369 }
370
371- flag = I915_READ(IER);
372+ flag = I915_READ(IMR);
373 pipe->pipe = 0;
374 if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
375 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
376@@ -586,9 +638,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
377 {
378 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
379
380- I915_WRITE16(HWSTAM, 0xfffe);
381- I915_WRITE16(IMR, 0x0);
382- I915_WRITE16(IER, 0x0);
383+ I915_WRITE(HWSTAM, 0xfffe);
384+ I915_WRITE(IMR, 0x0);
385+ I915_WRITE(IER, 0x0);
386 }
387
388 void i915_driver_irq_postinstall(struct drm_device * dev)
389@@ -601,7 +653,18 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
390
391 if (!dev_priv->vblank_pipe)
392 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
393- i915_enable_interrupt(dev);
394+
395+ /* Set initial unmasked IRQs to just the selected vblank pipes. */
396+ dev_priv->irq_mask_reg = ~0;
397+ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
398+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
399+ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
400+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
401+
402+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
403+ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
404+ (void) I915_READ(IER);
405+
406 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
407 }
408
409@@ -613,10 +676,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
410 if (!dev_priv)
411 return;
412
413- I915_WRITE16(HWSTAM, 0xffff);
414- I915_WRITE16(IMR, 0xffff);
415- I915_WRITE16(IER, 0x0);
416+ I915_WRITE(HWSTAM, 0xffff);
417+ I915_WRITE(IMR, 0xffff);
418+ I915_WRITE(IER, 0x0);
419
420- temp = I915_READ16(IIR);
421- I915_WRITE16(IIR, temp);
422+ temp = I915_READ(IIR);
423+ I915_WRITE(IIR, temp);
424 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch
deleted file mode 100644
index 8736250f00..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch
+++ /dev/null
@@ -1,46 +0,0 @@
1commit 1236e8610ab9c6f9f8297e60530bedb2640c7224
2Author: Keith Packard <keithp@keithp.com>
3Date: Wed Jul 30 12:21:20 2008 -0700
4
5 i915: Track progress inside of batchbuffers for determining wedgedness.
6
7 This avoids early termination for long-running commands.
8
9 Signed-off-by: Eric Anholt <eric@anholt.net>
10 Signed-off-by: Dave Airlie <airlied@redhat.com>
11
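The shape of the change: instead of giving up after a fixed number of polls, the wait loop restarts its timeout whenever the ring head or the active-head address (ACTHD) moves, so only a truly stalled GPU is declared wedged. Roughly (a sketch with hypothetical read_reg()/have_space() helpers, not the patch's exact code):

    int wait_for_ring(int needed)
    {
            u32 last_head = read_reg(PRB0_HEAD) & HEAD_ADDR;
            u32 last_acthd = read_reg(ACTHD);
            int i;

            for (i = 0; i < 100000; i++) {
                    u32 head = read_reg(PRB0_HEAD) & HEAD_ADDR;
                    u32 acthd = read_reg(ACTHD);

                    if (have_space(head, needed))
                            return 0;
                    if (head != last_head || acthd != last_acthd)
                            i = 0;      /* progress seen: restart the timeout */
                    last_head = head;
                    last_acthd = acthd;
                    msleep_interruptible(10);
            }
            return -EBUSY;
    }
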
12diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
13index 10bfb0c..4c72a01 100644
14--- a/drivers/gpu/drm/i915/i915_dma.c
15+++ b/drivers/gpu/drm/i915/i915_dma.c
16@@ -40,11 +40,15 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
17 {
18 drm_i915_private_t *dev_priv = dev->dev_private;
19 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
20+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
21+ u32 last_acthd = I915_READ(acthd_reg);
22+ u32 acthd;
23 u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
24 int i;
25
26- for (i = 0; i < 10000; i++) {
27+ for (i = 0; i < 100000; i++) {
28 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
29+ acthd = I915_READ(acthd_reg);
30 ring->space = ring->head - (ring->tail + 8);
31 if (ring->space < 0)
32 ring->space += ring->Size;
33@@ -55,8 +59,13 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
34
35 if (ring->head != last_head)
36 i = 0;
37+ if (acthd != last_acthd)
38+ i = 0;
39
40 last_head = ring->head;
41+ last_acthd = acthd;
42+ msleep_interruptible(10);
43+
44 }
45
46 return -EBUSY;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0007-i915-Initialize-hardware-status-page-at-device-load.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0007-i915-Initialize-hardware-status-page-at-device-load.patch
deleted file mode 100644
index 79f068f422..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0007-i915-Initialize-hardware-status-page-at-device-load.patch
+++ /dev/null
@@ -1,137 +0,0 @@
1commit 75fed4ae8454aa975c274b2585ec2287dd15773d
2Author: Keith Packard <keithp@keithp.com>
3Date: Wed Jul 30 13:03:43 2008 -0700
4
5 i915: Initialize hardware status page at device load when possible.
6
7 Some chips were unstable with repeated setup/teardown of the hardware status
8 page.
9
10 Signed-off-by: Eric Anholt <eric@anholt.net>
11 Signed-off-by: Dave Airlie <airlied@redhat.com>
12
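For chips that take a physical hardware-status-page address, the patch reduces the HWS lifecycle to: allocate one DMA-coherent page at device load, point HWS_PGA at its bus address, and free it only at unload. In outline (a sketch under those assumptions, with a hypothetical dma page type; not the patch itself):

    /* once, at driver load */
    page = alloc_dma_page();            /* PAGE_SIZE, 32-bit addressable */
    if (!page)
            return -ENOMEM;
    memset(page->vaddr, 0, PAGE_SIZE);
    write_reg(HWS_PGA, page->busaddr);

    /* once, at driver unload */
    free_dma_page(page);
    write_reg(HWS_PGA, 0x1ffff000);     /* restore the default address */
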
13diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
14index 4c72a01..b3c4ac9 100644
15--- a/drivers/gpu/drm/i915/i915_dma.c
16+++ b/drivers/gpu/drm/i915/i915_dma.c
17@@ -71,6 +71,52 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
18 return -EBUSY;
19 }
20
21+/**
22+ * Sets up the hardware status page for devices that need a physical address
23+ * in the register.
24+ */
25+int i915_init_phys_hws(struct drm_device *dev)
26+{
27+ drm_i915_private_t *dev_priv = dev->dev_private;
28+ /* Program Hardware Status Page */
29+ dev_priv->status_page_dmah =
30+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
31+
32+ if (!dev_priv->status_page_dmah) {
33+ DRM_ERROR("Can not allocate hardware status page\n");
34+ return -ENOMEM;
35+ }
36+ dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
37+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
38+
39+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
40+
41+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
42+ DRM_DEBUG("Enabled hardware status page\n");
43+ return 0;
44+}
45+
46+/**
47+ * Frees the hardware status page, whether it's a physical address or a virtual
48+ * address set up by the X Server.
49+ */
50+void i915_free_hws(struct drm_device *dev)
51+{
52+ drm_i915_private_t *dev_priv = dev->dev_private;
53+ if (dev_priv->status_page_dmah) {
54+ drm_pci_free(dev, dev_priv->status_page_dmah);
55+ dev_priv->status_page_dmah = NULL;
56+ }
57+
58+ if (dev_priv->status_gfx_addr) {
59+ dev_priv->status_gfx_addr = 0;
60+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
61+ }
62+
63+ /* Need to rewrite hardware status page */
64+ I915_WRITE(HWS_PGA, 0x1ffff000);
65+}
66+
67 void i915_kernel_lost_context(struct drm_device * dev)
68 {
69 drm_i915_private_t *dev_priv = dev->dev_private;
70@@ -103,18 +149,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
71 dev_priv->ring.map.size = 0;
72 }
73
74- if (dev_priv->status_page_dmah) {
75- drm_pci_free(dev, dev_priv->status_page_dmah);
76- dev_priv->status_page_dmah = NULL;
77- /* Need to rewrite hardware status page */
78- I915_WRITE(HWS_PGA, 0x1ffff000);
79- }
80-
81- if (dev_priv->status_gfx_addr) {
82- dev_priv->status_gfx_addr = 0;
83- drm_core_ioremapfree(&dev_priv->hws_map, dev);
84- I915_WRITE(HWS_PGA, 0x1ffff000);
85- }
86+ /* Clear the HWS virtual address at teardown */
87+ if (I915_NEED_GFX_HWS(dev))
88+ i915_free_hws(dev);
89
90 return 0;
91 }
92@@ -165,23 +202,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
93 */
94 dev_priv->allow_batchbuffer = 1;
95
96- /* Program Hardware Status Page */
97- if (!I915_NEED_GFX_HWS(dev)) {
98- dev_priv->status_page_dmah =
99- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
100-
101- if (!dev_priv->status_page_dmah) {
102- i915_dma_cleanup(dev);
103- DRM_ERROR("Can not allocate hardware status page\n");
104- return -ENOMEM;
105- }
106- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
107- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
108-
109- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
110- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
111- }
112- DRM_DEBUG("Enabled hardware status page\n");
113 return 0;
114 }
115
116@@ -773,6 +793,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
117 _DRM_KERNEL | _DRM_DRIVER,
118 &dev_priv->mmio_map);
119
120+ /* Init HWS */
121+ if (!I915_NEED_GFX_HWS(dev)) {
122+ ret = i915_init_phys_hws(dev);
123+ if (ret != 0)
124+ return ret;
125+ }
126
127 /* On the 945G/GM, the chipset reports the MSI capability on the
128 * integrated graphics even though the support isn't actually there
129@@ -796,6 +822,8 @@ int i915_driver_unload(struct drm_device *dev)
130 if (dev->pdev->msi_enabled)
131 pci_disable_msi(dev->pdev);
132
133+ i915_free_hws(dev);
134+
135 if (dev_priv->mmio_map)
136 drm_rmmap(dev, dev_priv->mmio_map);
137
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch
deleted file mode 100644
index afa6f96345..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch
+++ /dev/null
@@ -1,572 +0,0 @@
1commit 91c2ebb8e78aa64f4807399b506ec0090ae5f3d6
2Author: Matthew Garrett <mjg59@srcf.ucam.org>
3Date: Tue Aug 5 19:37:25 2008 +0100
4
5 Add Intel ACPI IGD OpRegion support
6
7 This adds the support necessary for allowing ACPI backlight control to
8 work on some newer Intel-based graphics systems. Tested on Thinkpad T61
9 and HP 2510p hardware.
10
11 Signed-off-by: Matthew Garrett <mjg@redhat.com>
12 Signed-off-by: Dave Airlie <airlied@linux.ie>
13
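The OpRegion is found through PCI config space: the ASLS dword at offset 0xfc holds the physical address of an 8 KB region whose header begins with the literal signature "IntelGraphicsMem", as the patch's intel_opregion_init() does below. The discovery step boils down to this (a sketch, error paths trimmed):

    u32 asls;
    void *base;

    pci_read_config_dword(pdev, 0xfc /* ASLS */, &asls);
    if (asls == 0)
            return -ENOTSUPP;                   /* firmware exposes no OpRegion */

    base = ioremap(asls, 8 * 1024);
    if (!base)
            return -ENOMEM;
    if (memcmp(base, "IntelGraphicsMem", 16)) {
            iounmap(base);
            return -EINVAL;                     /* not a valid OpRegion header */
    }
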
14diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
15index a9e6046..b032808 100644
16--- a/drivers/gpu/drm/i915/Makefile
17+++ b/drivers/gpu/drm/i915/Makefile
18@@ -3,7 +3,7 @@
19 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
20
21 ccflags-y := -Iinclude/drm
22-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
23+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o
24
25 i915-$(CONFIG_COMPAT) += i915_ioc32.o
26
27diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28index b3c4ac9..cead62f 100644
29--- a/drivers/gpu/drm/i915/i915_dma.c
30+++ b/drivers/gpu/drm/i915/i915_dma.c
31@@ -810,6 +810,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
32 if (!IS_I945G(dev) && !IS_I945GM(dev))
33 pci_enable_msi(dev->pdev);
34
35+ intel_opregion_init(dev);
36+
37 spin_lock_init(&dev_priv->user_irq_lock);
38
39 return ret;
40@@ -827,6 +829,8 @@ int i915_driver_unload(struct drm_device *dev)
41 if (dev_priv->mmio_map)
42 drm_rmmap(dev, dev_priv->mmio_map);
43
44+ intel_opregion_free(dev);
45+
46 drm_free(dev->dev_private, sizeof(drm_i915_private_t),
47 DRM_MEM_DRIVER);
48
49diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
50index 6c99aab..d95eca2 100644
51--- a/drivers/gpu/drm/i915/i915_drv.c
52+++ b/drivers/gpu/drm/i915/i915_drv.c
53@@ -371,6 +371,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
54
55 i915_save_vga(dev);
56
57+ intel_opregion_free(dev);
58+
59 if (state.event == PM_EVENT_SUSPEND) {
60 /* Shut down the device */
61 pci_disable_device(dev->pdev);
62@@ -532,6 +534,8 @@ static int i915_resume(struct drm_device *dev)
63
64 i915_restore_vga(dev);
65
66+ intel_opregion_init(dev);
67+
68 return 0;
69 }
70
71diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
72index 8daf0d8..e4bd01c 100644
73--- a/drivers/gpu/drm/i915/i915_drv.h
74+++ b/drivers/gpu/drm/i915/i915_drv.h
75@@ -82,6 +82,14 @@ typedef struct _drm_i915_vbl_swap {
76 unsigned int sequence;
77 } drm_i915_vbl_swap_t;
78
79+struct intel_opregion {
80+ struct opregion_header *header;
81+ struct opregion_acpi *acpi;
82+ struct opregion_swsci *swsci;
83+ struct opregion_asle *asle;
84+ int enabled;
85+};
86+
87 typedef struct drm_i915_private {
88 drm_local_map_t *sarea;
89 drm_local_map_t *mmio_map;
90@@ -122,6 +130,8 @@ typedef struct drm_i915_private {
91 drm_i915_vbl_swap_t vbl_swaps;
92 unsigned int swaps_pending;
93
94+ struct intel_opregion opregion;
95+
96 /* Register state */
97 u8 saveLBB;
98 u32 saveDSPACNTR;
99@@ -244,6 +254,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
100 struct drm_file *file_priv);
101 extern int i915_vblank_swap(struct drm_device *dev, void *data,
102 struct drm_file *file_priv);
103+extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
104
105 /* i915_mem.c */
106 extern int i915_mem_alloc(struct drm_device *dev, void *data,
107@@ -258,6 +269,12 @@ extern void i915_mem_takedown(struct mem_block **heap);
108 extern void i915_mem_release(struct drm_device * dev,
109 struct drm_file *file_priv, struct mem_block *heap);
110
111+/* i915_opregion.c */
112+extern int intel_opregion_init(struct drm_device *dev);
113+extern void intel_opregion_free(struct drm_device *dev);
114+extern void opregion_asle_intr(struct drm_device *dev);
115+extern void opregion_enable_asle(struct drm_device *dev);
116+
117 #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
118 #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
119 #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
120diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
121index 24d11ed..ae7d3a8 100644
122--- a/drivers/gpu/drm/i915/i915_irq.c
123+++ b/drivers/gpu/drm/i915/i915_irq.c
124@@ -36,9 +36,11 @@
125 /** These are the interrupts used by the driver */
126 #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
127 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
128- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
129+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \
130+ I915_ASLE_INTERRUPT | \
131+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
132
133-static inline void
134+void
135 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
136 {
137 if ((dev_priv->irq_mask_reg & mask) != 0) {
138@@ -274,6 +276,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
139 return IRQ_NONE;
140 }
141
142+ I915_WRITE(PIPEASTAT, pipea_stats);
143+ I915_WRITE(PIPEBSTAT, pipeb_stats);
144+
145 I915_WRITE(IIR, iir);
146 if (dev->pdev->msi_enabled)
147 I915_WRITE(IMR, dev_priv->irq_mask_reg);
148@@ -306,14 +311,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
149
150 if (dev_priv->swaps_pending > 0)
151 drm_locked_tasklet(dev, i915_vblank_tasklet);
152- I915_WRITE(PIPEASTAT,
153- pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
154- PIPE_VBLANK_INTERRUPT_STATUS);
155- I915_WRITE(PIPEBSTAT,
156- pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
157- PIPE_VBLANK_INTERRUPT_STATUS);
158 }
159
160+ if (iir & I915_ASLE_INTERRUPT)
161+ opregion_asle_intr(dev);
162+
163+ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
164+ opregion_asle_intr(dev);
165+
166 return IRQ_HANDLED;
167 }
168
169@@ -661,10 +666,14 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
170 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
171 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
172
173+ dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
174+
175 I915_WRITE(IMR, dev_priv->irq_mask_reg);
176 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
177 (void) I915_READ(IER);
178
179+ opregion_enable_asle(dev);
180+
181 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
182 }
183
184diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
185new file mode 100644
186index 0000000..1787a0c
187--- /dev/null
188+++ b/drivers/gpu/drm/i915/i915_opregion.c
189@@ -0,0 +1,371 @@
190+/*
191+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
192+ * Copyright 2008 Red Hat <mjg@redhat.com>
193+ *
194+ * Permission is hereby granted, free of charge, to any person obtaining
195+ * a copy of this software and associated documentation files (the
196+ * "Software"), to deal in the Software without restriction, including
197+ * without limitation the rights to use, copy, modify, merge, publish,
198+ * distribute, sub license, and/or sell copies of the Software, and to
199+ * permit persons to whom the Software is furnished to do so, subject to
200+ * the following conditions:
201+ *
202+ * The above copyright notice and this permission notice (including the
203+ * next paragraph) shall be included in all copies or substantial
204+ * portions of the Software.
205+ *
206+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
207+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
208+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
209+ * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
210+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
211+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
212+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
213+ * SOFTWARE.
214+ *
215+ */
216+
217+#include <linux/acpi.h>
218+
219+#include "drmP.h"
220+#include "i915_drm.h"
221+#include "i915_drv.h"
222+
223+#define PCI_ASLE 0xe4
224+#define PCI_LBPC 0xf4
225+#define PCI_ASLS 0xfc
226+
227+#define OPREGION_SZ (8*1024)
228+#define OPREGION_HEADER_OFFSET 0
229+#define OPREGION_ACPI_OFFSET 0x100
230+#define OPREGION_SWSCI_OFFSET 0x200
231+#define OPREGION_ASLE_OFFSET 0x300
232+#define OPREGION_VBT_OFFSET 0x1000
233+
234+#define OPREGION_SIGNATURE "IntelGraphicsMem"
235+#define MBOX_ACPI (1<<0)
236+#define MBOX_SWSCI (1<<1)
237+#define MBOX_ASLE (1<<2)
238+
239+struct opregion_header {
240+ u8 signature[16];
241+ u32 size;
242+ u32 opregion_ver;
243+ u8 bios_ver[32];
244+ u8 vbios_ver[16];
245+ u8 driver_ver[16];
246+ u32 mboxes;
247+ u8 reserved[164];
248+} __attribute__((packed));
249+
250+/* OpRegion mailbox #1: public ACPI methods */
251+struct opregion_acpi {
252+ u32 drdy; /* driver readiness */
253+ u32 csts; /* notification status */
254+ u32 cevt; /* current event */
255+ u8 rsvd1[20];
256+ u32 didl[8]; /* supported display devices ID list */
257+ u32 cpdl[8]; /* currently presented display list */
258+ u32 cadl[8]; /* currently active display list */
259+ u32 nadl[8]; /* next active devices list */
260+ u32 aslp; /* ASL sleep time-out */
261+ u32 tidx; /* toggle table index */
262+ u32 chpd; /* current hotplug enable indicator */
263+ u32 clid; /* current lid state*/
264+ u32 cdck; /* current docking state */
265+ u32 sxsw; /* Sx state resume */
266+ u32 evts; /* ASL supported events */
267+ u32 cnot; /* current OS notification */
268+ u32 nrdy; /* driver status */
269+ u8 rsvd2[60];
270+} __attribute__((packed));
271+
272+/* OpRegion mailbox #2: SWSCI */
273+struct opregion_swsci {
274+ u32 scic; /* SWSCI command|status|data */
275+ u32 parm; /* command parameters */
276+ u32 dslp; /* driver sleep time-out */
277+ u8 rsvd[244];
278+} __attribute__((packed));
279+
280+/* OpRegion mailbox #3: ASLE */
281+struct opregion_asle {
282+ u32 ardy; /* driver readiness */
283+ u32 aslc; /* ASLE interrupt command */
284+ u32 tche; /* technology enabled indicator */
285+ u32 alsi; /* current ALS illuminance reading */
286+ u32 bclp; /* backlight brightness to set */
287+ u32 pfit; /* panel fitting state */
288+ u32 cblv; /* current brightness level */
289+ u16 bclm[20]; /* backlight level duty cycle mapping table */
290+ u32 cpfm; /* current panel fitting mode */
291+ u32 epfm; /* enabled panel fitting modes */
292+ u8 plut[74]; /* panel LUT and identifier */
293+ u32 pfmb; /* PWM freq and min brightness */
294+ u8 rsvd[102];
295+} __attribute__((packed));
296+
297+/* ASLE irq request bits */
298+#define ASLE_SET_ALS_ILLUM (1 << 0)
299+#define ASLE_SET_BACKLIGHT (1 << 1)
300+#define ASLE_SET_PFIT (1 << 2)
301+#define ASLE_SET_PWM_FREQ (1 << 3)
302+#define ASLE_REQ_MSK 0xf
303+
304+/* response bits of ASLE irq request */
305+#define ASLE_ALS_ILLUM_FAIL (2<<10)
306+#define ASLE_BACKLIGHT_FAIL (2<<12)
307+#define ASLE_PFIT_FAIL (2<<14)
308+#define ASLE_PWM_FREQ_FAIL (2<<16)
309+
310+/* ASLE backlight brightness to set */
311+#define ASLE_BCLP_VALID (1<<31)
312+#define ASLE_BCLP_MSK (~(1<<31))
313+
314+/* ASLE panel fitting request */
315+#define ASLE_PFIT_VALID (1<<31)
316+#define ASLE_PFIT_CENTER (1<<0)
317+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
318+#define ASLE_PFIT_STRETCH_GFX (1<<2)
319+
320+/* PWM frequency and minimum brightness */
321+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
322+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
323+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
324+#define ASLE_PFMB_PWM_VALID (1<<31)
325+
326+#define ASLE_CBLV_VALID (1<<31)
327+
328+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
329+{
330+ struct drm_i915_private *dev_priv = dev->dev_private;
331+ struct opregion_asle *asle = dev_priv->opregion.asle;
332+ u32 blc_pwm_ctl, blc_pwm_ctl2;
333+
334+ if (!(bclp & ASLE_BCLP_VALID))
335+ return ASLE_BACKLIGHT_FAIL;
336+
337+ bclp &= ASLE_BCLP_MSK;
338+ if (bclp < 0 || bclp > 255)
339+ return ASLE_BACKLIGHT_FAIL;
340+
341+ blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
342+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
343+ blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
344+
345+ if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
346+ pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
347+ else
348+ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
349+
350+ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
351+
352+ return 0;
353+}
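/* Editorial aside, not part of the patch: the (bclp * 0x101) - 1 above scales
 * an 8-bit brightness (0..255) onto a 16-bit duty cycle, since
 * 0xff * 0x101 == 0xffff; cblv then reports it back as a percentage
 * ((bclp * 0x64) / 0xff, i.e. 0..100) with the ASLE_CBLV_VALID bit set.
 */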
354+
355+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
356+{
357+ /* alsi is the current ALS reading in lux. 0 indicates below sensor
358+ range, 0xffff indicates above sensor range. 1-0xfffe are valid */
359+ return 0;
360+}
361+
362+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
363+{
364+ struct drm_i915_private *dev_priv = dev->dev_private;
365+ if (pfmb & ASLE_PFMB_PWM_VALID) {
366+ u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
367+ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
368+ blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
369+ pwm = pwm >> 9;
370+ /* FIXME - what do we do with the PWM? */
371+ }
372+ return 0;
373+}
374+
375+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
376+{
377+ /* Panel fitting is currently controlled by the X code, so this is a
378+ noop until modesetting support works fully */
379+ if (!(pfit & ASLE_PFIT_VALID))
380+ return ASLE_PFIT_FAIL;
381+ return 0;
382+}
383+
384+void opregion_asle_intr(struct drm_device *dev)
385+{
386+ struct drm_i915_private *dev_priv = dev->dev_private;
387+ struct opregion_asle *asle = dev_priv->opregion.asle;
388+ u32 asle_stat = 0;
389+ u32 asle_req;
390+
391+ if (!asle)
392+ return;
393+
394+ asle_req = asle->aslc & ASLE_REQ_MSK;
395+
396+ if (!asle_req) {
397+ DRM_DEBUG("non asle set request??\n");
398+ return;
399+ }
400+
401+ if (asle_req & ASLE_SET_ALS_ILLUM)
402+ asle_stat |= asle_set_als_illum(dev, asle->alsi);
403+
404+ if (asle_req & ASLE_SET_BACKLIGHT)
405+ asle_stat |= asle_set_backlight(dev, asle->bclp);
406+
407+ if (asle_req & ASLE_SET_PFIT)
408+ asle_stat |= asle_set_pfit(dev, asle->pfit);
409+
410+ if (asle_req & ASLE_SET_PWM_FREQ)
411+ asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
412+
413+ asle->aslc = asle_stat;
414+}
415+
416+#define ASLE_ALS_EN (1<<0)
417+#define ASLE_BLC_EN (1<<1)
418+#define ASLE_PFIT_EN (1<<2)
419+#define ASLE_PFMB_EN (1<<3)
420+
421+void opregion_enable_asle(struct drm_device *dev)
422+{
423+ struct drm_i915_private *dev_priv = dev->dev_private;
424+ struct opregion_asle *asle = dev_priv->opregion.asle;
425+
426+ if (asle) {
427+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
428+ if (IS_MOBILE(dev)) {
429+ /* Many devices trigger events with a write to the
430+ legacy backlight controller, so we need to ensure
431+ that it's able to generate interrupts */
432+ I915_WRITE(PIPEBSTAT, pipeb_stats |=
433+ I915_LEGACY_BLC_EVENT_ENABLE);
434+ i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT |
435+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
436+ } else
437+ i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
438+
439+ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
440+ ASLE_PFMB_EN;
441+ asle->ardy = 1;
442+ }
443+}
444+
445+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
446+#define ACPI_EV_LID (1<<1)
447+#define ACPI_EV_DOCK (1<<2)
448+
449+static struct intel_opregion *system_opregion;
450+
451+int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
452+ void *data)
453+{
454+ /* The only video events relevant to opregion are 0x80. These indicate
455+ either a docking event, lid switch or display switch request. In
456+ Linux, these are handled by the dock, button and video drivers.
457+ We might want to fix the video driver to be opregion-aware in
458+ future, but right now we just indicate to the firmware that the
459+ request has been handled */
460+
461+ struct opregion_acpi *acpi;
462+
463+ if (!system_opregion)
464+ return NOTIFY_DONE;
465+
466+ acpi = system_opregion->acpi;
467+ acpi->csts = 0;
468+
469+ return NOTIFY_OK;
470+}
471+
472+static struct notifier_block intel_opregion_notifier = {
473+ .notifier_call = intel_opregion_video_event,
474+};
475+
476+int intel_opregion_init(struct drm_device *dev)
477+{
478+ struct drm_i915_private *dev_priv = dev->dev_private;
479+ struct intel_opregion *opregion = &dev_priv->opregion;
480+ void *base;
481+ u32 asls, mboxes;
482+ int err = 0;
483+
484+ pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
485+ DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
486+ if (asls == 0) {
487+ DRM_DEBUG("ACPI OpRegion not supported!\n");
488+ return -ENOTSUPP;
489+ }
490+
491+ base = ioremap(asls, OPREGION_SZ);
492+ if (!base)
493+ return -ENOMEM;
494+
495+ opregion->header = base;
496+ if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
497+ DRM_DEBUG("opregion signature mismatch\n");
498+ err = -EINVAL;
499+ goto err_out;
500+ }
501+
502+ mboxes = opregion->header->mboxes;
503+ if (mboxes & MBOX_ACPI) {
504+ DRM_DEBUG("Public ACPI methods supported\n");
505+ opregion->acpi = base + OPREGION_ACPI_OFFSET;
506+ } else {
507+ DRM_DEBUG("Public ACPI methods not supported\n");
508+ err = -ENOTSUPP;
509+ goto err_out;
510+ }
511+ opregion->enabled = 1;
512+
513+ if (mboxes & MBOX_SWSCI) {
514+ DRM_DEBUG("SWSCI supported\n");
515+ opregion->swsci = base + OPREGION_SWSCI_OFFSET;
516+ }
517+ if (mboxes & MBOX_ASLE) {
518+ DRM_DEBUG("ASLE supported\n");
519+ opregion->asle = base + OPREGION_ASLE_OFFSET;
520+ }
521+
522+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
523+ * Right now, all the events are handled by the ACPI video module.
524+ * We don't actually need to do anything with them. */
525+ opregion->acpi->csts = 0;
526+ opregion->acpi->drdy = 1;
527+
528+ system_opregion = opregion;
529+ register_acpi_notifier(&intel_opregion_notifier);
530+
531+ return 0;
532+
533+err_out:
534+ iounmap(opregion->header);
535+ opregion->header = NULL;
536+ return err;
537+}
538+
539+void intel_opregion_free(struct drm_device *dev)
540+{
541+ struct drm_i915_private *dev_priv = dev->dev_private;
542+ struct intel_opregion *opregion = &dev_priv->opregion;
543+
544+ if (!opregion->enabled)
545+ return;
546+
547+ opregion->acpi->drdy = 0;
548+
549+ system_opregion = NULL;
550+ unregister_acpi_notifier(&intel_opregion_notifier);
551+
552+ /* just clear all opregion memory pointers now */
553+ iounmap(opregion->header);
554+ opregion->header = NULL;
555+ opregion->acpi = NULL;
556+ opregion->swsci = NULL;
557+ opregion->asle = NULL;
558+
559+ opregion->enabled = 0;
560+}
561diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
562index 477c64e..43ad2cb 100644
563--- a/drivers/gpu/drm/i915/i915_reg.h
564+++ b/drivers/gpu/drm/i915/i915_reg.h
565@@ -740,6 +740,7 @@
566 #define BLC_PWM_CTL 0x61254
567 #define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
568 #define BLC_PWM_CTL2 0x61250 /* 965+ only */
569+#define BLM_COMBINATION_MODE (1 << 30)
570 /*
571 * This is the most significant 15 bits of the number of backlight cycles in a
572 * complete cycle of the modulated backlight control.
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0009-drm-fix-sysfs-error-path.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0009-drm-fix-sysfs-error-path.patch
deleted file mode 100644
index 8dea824804..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0009-drm-fix-sysfs-error-path.patch
+++ /dev/null
@@ -1,23 +0,0 @@
1commit 2e9c9eedfe0be777c051a2198dddf459adcc407b
2Author: Dave Airlie <airlied@redhat.com>
3Date: Tue Sep 2 10:06:06 2008 +1000
4
5 drm: fix sysfs error path.
6
7 Pointed out by Roel Kluin on dri-devel.
8
9 Signed-off-by: Dave Airlie <airlied@redhat.com>
10
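The one-character fix below is the canonical partial-unwind pattern: when the i-th creation fails, tear down entries 0..i-1 using the inner loop index rather than the failed index. In general form (illustrative create()/destroy() helpers, not the DRM API):

    for (i = 0; i < n; i++) {
            ret = create(i);
            if (ret)
                    goto unwind;
    }
    return 0;

    unwind:
    for (j = 0; j < i; j++)
            destroy(j);         /* must be j: entry 'i' was never created */
    return ret;
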
11diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
12index af211a0..1611b9b 100644
13--- a/drivers/gpu/drm/drm_sysfs.c
14+++ b/drivers/gpu/drm/drm_sysfs.c
15@@ -184,7 +184,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
16 err_out_files:
17 if (i > 0)
18 for (j = 0; j < i; j++)
19- device_remove_file(&minor->kdev, &device_attrs[i]);
20+ device_remove_file(&minor->kdev, &device_attrs[j]);
21 device_unregister(&minor->kdev);
22 err_out:
23
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0010-i915-separate-suspend-resume-functions.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0010-i915-separate-suspend-resume-functions.patch
deleted file mode 100644
index 897d50c39b..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0010-i915-separate-suspend-resume-functions.patch
+++ /dev/null
@@ -1,1079 +0,0 @@
1commit a850828c640735fb410c782717c9eb7f8474e356
2Author: Jesse Barnes <jbarnes@virtuousgeek.org>
3Date: Mon Aug 25 15:11:06 2008 -0700
4
5 separate i915 suspend/resume functions into their own file
6
7 [Patch against drm-next. Consider this a trial balloon for our new Linux
8 development model.]
9
10 This is a big chunk of code. Separating it out makes it easier to change
11 without churn on the main i915_drv.c file (and there will be churn as we
12 fix bugs and add things like kernel mode setting). Also makes it easier
13 to share this file with BSD.
14
15 Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
16 Signed-off-by: Dave Airlie <airlied@redhat.com>
17
18diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
19index b032808..c4bbda6 100644
20--- a/drivers/gpu/drm/i915/Makefile
21+++ b/drivers/gpu/drm/i915/Makefile
22@@ -3,7 +3,8 @@
23 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
24
25 ccflags-y := -Iinclude/drm
26-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o
27+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
28+ i915_suspend.o
29
30 i915-$(CONFIG_COMPAT) += i915_ioc32.o
31
32diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33index d95eca2..eff66ed 100644
34--- a/drivers/gpu/drm/i915/i915_drv.c
35+++ b/drivers/gpu/drm/i915/i915_drv.c
36@@ -38,211 +38,9 @@ static struct pci_device_id pciidlist[] = {
37 i915_PCI_IDS
38 };
39
40-enum pipe {
41- PIPE_A = 0,
42- PIPE_B,
43-};
44-
45-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
46-{
47- struct drm_i915_private *dev_priv = dev->dev_private;
48-
49- if (pipe == PIPE_A)
50- return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
51- else
52- return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
53-}
54-
55-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
56-{
57- struct drm_i915_private *dev_priv = dev->dev_private;
58- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
59- u32 *array;
60- int i;
61-
62- if (!i915_pipe_enabled(dev, pipe))
63- return;
64-
65- if (pipe == PIPE_A)
66- array = dev_priv->save_palette_a;
67- else
68- array = dev_priv->save_palette_b;
69-
70- for(i = 0; i < 256; i++)
71- array[i] = I915_READ(reg + (i << 2));
72-}
73-
74-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
75-{
76- struct drm_i915_private *dev_priv = dev->dev_private;
77- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
78- u32 *array;
79- int i;
80-
81- if (!i915_pipe_enabled(dev, pipe))
82- return;
83-
84- if (pipe == PIPE_A)
85- array = dev_priv->save_palette_a;
86- else
87- array = dev_priv->save_palette_b;
88-
89- for(i = 0; i < 256; i++)
90- I915_WRITE(reg + (i << 2), array[i]);
91-}
92-
93-static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
94-{
95- outb(reg, index_port);
96- return inb(data_port);
97-}
98-
99-static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
100-{
101- inb(st01);
102- outb(palette_enable | reg, VGA_AR_INDEX);
103- return inb(VGA_AR_DATA_READ);
104-}
105-
106-static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
107-{
108- inb(st01);
109- outb(palette_enable | reg, VGA_AR_INDEX);
110- outb(val, VGA_AR_DATA_WRITE);
111-}
112-
113-static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
114-{
115- outb(reg, index_port);
116- outb(val, data_port);
117-}
118-
119-static void i915_save_vga(struct drm_device *dev)
120-{
121- struct drm_i915_private *dev_priv = dev->dev_private;
122- int i;
123- u16 cr_index, cr_data, st01;
124-
125- /* VGA color palette registers */
126- dev_priv->saveDACMASK = inb(VGA_DACMASK);
127- /* DACCRX automatically increments during read */
128- outb(0, VGA_DACRX);
129- /* Read 3 bytes of color data from each index */
130- for (i = 0; i < 256 * 3; i++)
131- dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
132-
133- /* MSR bits */
134- dev_priv->saveMSR = inb(VGA_MSR_READ);
135- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
136- cr_index = VGA_CR_INDEX_CGA;
137- cr_data = VGA_CR_DATA_CGA;
138- st01 = VGA_ST01_CGA;
139- } else {
140- cr_index = VGA_CR_INDEX_MDA;
141- cr_data = VGA_CR_DATA_MDA;
142- st01 = VGA_ST01_MDA;
143- }
144-
145- /* CRT controller regs */
146- i915_write_indexed(cr_index, cr_data, 0x11,
147- i915_read_indexed(cr_index, cr_data, 0x11) &
148- (~0x80));
149- for (i = 0; i <= 0x24; i++)
150- dev_priv->saveCR[i] =
151- i915_read_indexed(cr_index, cr_data, i);
152- /* Make sure we don't turn off CR group 0 writes */
153- dev_priv->saveCR[0x11] &= ~0x80;
154-
155- /* Attribute controller registers */
156- inb(st01);
157- dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
158- for (i = 0; i <= 0x14; i++)
159- dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
160- inb(st01);
161- outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
162- inb(st01);
163-
164- /* Graphics controller registers */
165- for (i = 0; i < 9; i++)
166- dev_priv->saveGR[i] =
167- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
168-
169- dev_priv->saveGR[0x10] =
170- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
171- dev_priv->saveGR[0x11] =
172- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
173- dev_priv->saveGR[0x18] =
174- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
175-
176- /* Sequencer registers */
177- for (i = 0; i < 8; i++)
178- dev_priv->saveSR[i] =
179- i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
180-}
181-
182-static void i915_restore_vga(struct drm_device *dev)
183-{
184- struct drm_i915_private *dev_priv = dev->dev_private;
185- int i;
186- u16 cr_index, cr_data, st01;
187-
188- /* MSR bits */
189- outb(dev_priv->saveMSR, VGA_MSR_WRITE);
190- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
191- cr_index = VGA_CR_INDEX_CGA;
192- cr_data = VGA_CR_DATA_CGA;
193- st01 = VGA_ST01_CGA;
194- } else {
195- cr_index = VGA_CR_INDEX_MDA;
196- cr_data = VGA_CR_DATA_MDA;
197- st01 = VGA_ST01_MDA;
198- }
199-
200- /* Sequencer registers, don't write SR07 */
201- for (i = 0; i < 7; i++)
202- i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
203- dev_priv->saveSR[i]);
204-
205- /* CRT controller regs */
206- /* Enable CR group 0 writes */
207- i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
208- for (i = 0; i <= 0x24; i++)
209- i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
210-
211- /* Graphics controller regs */
212- for (i = 0; i < 9; i++)
213- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
214- dev_priv->saveGR[i]);
215-
216- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
217- dev_priv->saveGR[0x10]);
218- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
219- dev_priv->saveGR[0x11]);
220- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
221- dev_priv->saveGR[0x18]);
222-
223- /* Attribute controller registers */
224- inb(st01);
225- for (i = 0; i <= 0x14; i++)
226- i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
227- inb(st01); /* switch back to index mode */
228- outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
229- inb(st01);
230-
231- /* VGA color palette registers */
232- outb(dev_priv->saveDACMASK, VGA_DACMASK);
233- /* DACCRX automatically increments during read */
234- outb(0, VGA_DACWX);
235- /* Read 3 bytes of color data from each index */
236- for (i = 0; i < 256 * 3; i++)
237- outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
238-
239-}
240-
241 static int i915_suspend(struct drm_device *dev, pm_message_t state)
242 {
243 struct drm_i915_private *dev_priv = dev->dev_private;
244- int i;
245
246 if (!dev || !dev_priv) {
247 printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
248@@ -254,122 +52,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
249 return 0;
250
251 pci_save_state(dev->pdev);
252- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
253-
254- /* Display arbitration control */
255- dev_priv->saveDSPARB = I915_READ(DSPARB);
256-
257- /* Pipe & plane A info */
258- dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
259- dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
260- dev_priv->saveFPA0 = I915_READ(FPA0);
261- dev_priv->saveFPA1 = I915_READ(FPA1);
262- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
263- if (IS_I965G(dev))
264- dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
265- dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
266- dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
267- dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
268- dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
269- dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
270- dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
271- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
272-
273- dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
274- dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
275- dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
276- dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
277- dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
278- if (IS_I965G(dev)) {
279- dev_priv->saveDSPASURF = I915_READ(DSPASURF);
280- dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
281- }
282- i915_save_palette(dev, PIPE_A);
283- dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
284-
285- /* Pipe & plane B info */
286- dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
287- dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
288- dev_priv->saveFPB0 = I915_READ(FPB0);
289- dev_priv->saveFPB1 = I915_READ(FPB1);
290- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
291- if (IS_I965G(dev))
292- dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
293- dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
294- dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
295- dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
296- dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
297- dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
298- dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
299- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
300-
301- dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
302- dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
303- dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
304- dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
305- dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
306- if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
307- dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
308- dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
309- }
310- i915_save_palette(dev, PIPE_B);
311- dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
312-
313- /* CRT state */
314- dev_priv->saveADPA = I915_READ(ADPA);
315
316- /* LVDS state */
317- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
318- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
319- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
320- if (IS_I965G(dev))
321- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
322- if (IS_MOBILE(dev) && !IS_I830(dev))
323- dev_priv->saveLVDS = I915_READ(LVDS);
324- if (!IS_I830(dev) && !IS_845G(dev))
325- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
326- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
327- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
328- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
329-
330- /* FIXME: save TV & SDVO state */
331-
332- /* FBC state */
333- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
334- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
335- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
336- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
337-
338- /* Interrupt state */
339- dev_priv->saveIIR = I915_READ(IIR);
340- dev_priv->saveIER = I915_READ(IER);
341- dev_priv->saveIMR = I915_READ(IMR);
342-
343- /* VGA state */
344- dev_priv->saveVGA0 = I915_READ(VGA0);
345- dev_priv->saveVGA1 = I915_READ(VGA1);
346- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
347- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
348-
349- /* Clock gating state */
350- dev_priv->saveD_STATE = I915_READ(D_STATE);
351- dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
352-
353- /* Cache mode state */
354- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
355-
356- /* Memory Arbitration state */
357- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
358-
359- /* Scratch space */
360- for (i = 0; i < 16; i++) {
361- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
362- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
363- }
364- for (i = 0; i < 3; i++)
365- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
366-
367- i915_save_vga(dev);
368+ i915_save_state(dev);
369
370 intel_opregion_free(dev);
371
372@@ -384,155 +68,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
373
374 static int i915_resume(struct drm_device *dev)
375 {
376- struct drm_i915_private *dev_priv = dev->dev_private;
377- int i;
378-
379 pci_set_power_state(dev->pdev, PCI_D0);
380 pci_restore_state(dev->pdev);
381 if (pci_enable_device(dev->pdev))
382 return -1;
383 pci_set_master(dev->pdev);
384
385- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
386-
387- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
388-
389- /* Pipe & plane A info */
390- /* Prime the clock */
391- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
392- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
393- ~DPLL_VCO_ENABLE);
394- udelay(150);
395- }
396- I915_WRITE(FPA0, dev_priv->saveFPA0);
397- I915_WRITE(FPA1, dev_priv->saveFPA1);
398- /* Actually enable it */
399- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
400- udelay(150);
401- if (IS_I965G(dev))
402- I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
403- udelay(150);
404-
405- /* Restore mode */
406- I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
407- I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
408- I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
409- I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
410- I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
411- I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
412- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
413-
414- /* Restore plane info */
415- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
416- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
417- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
418- I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
419- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
420- if (IS_I965G(dev)) {
421- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
422- I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
423- }
424-
425- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
426-
427- i915_restore_palette(dev, PIPE_A);
428- /* Enable the plane */
429- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
430- I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
431-
432- /* Pipe & plane B info */
433- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
434- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
435- ~DPLL_VCO_ENABLE);
436- udelay(150);
437- }
438- I915_WRITE(FPB0, dev_priv->saveFPB0);
439- I915_WRITE(FPB1, dev_priv->saveFPB1);
440- /* Actually enable it */
441- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
442- udelay(150);
443- if (IS_I965G(dev))
444- I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
445- udelay(150);
446-
447- /* Restore mode */
448- I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
449- I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
450- I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
451- I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
452- I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
453- I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
454- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
455-
456- /* Restore plane info */
457- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
458- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
459- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
460- I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
461- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
462- if (IS_I965G(dev)) {
463- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
464- I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
465- }
466-
467- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
468-
469- i915_restore_palette(dev, PIPE_B);
470- /* Enable the plane */
471- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
472- I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
473-
474- /* CRT state */
475- I915_WRITE(ADPA, dev_priv->saveADPA);
476-
477- /* LVDS state */
478- if (IS_I965G(dev))
479- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
480- if (IS_MOBILE(dev) && !IS_I830(dev))
481- I915_WRITE(LVDS, dev_priv->saveLVDS);
482- if (!IS_I830(dev) && !IS_845G(dev))
483- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
484-
485- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
486- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
487- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
488- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
489- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
490- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
491-
492- /* FIXME: restore TV & SDVO state */
493-
494- /* FBC info */
495- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
496- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
497- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
498- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
499-
500- /* VGA state */
501- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
502- I915_WRITE(VGA0, dev_priv->saveVGA0);
503- I915_WRITE(VGA1, dev_priv->saveVGA1);
504- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
505- udelay(150);
506-
507- /* Clock gating state */
508- I915_WRITE (D_STATE, dev_priv->saveD_STATE);
509- I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
510-
511- /* Cache mode state */
512- I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
513-
514- /* Memory arbitration state */
515- I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
516-
517- for (i = 0; i < 16; i++) {
518- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
519-		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
520- }
521- for (i = 0; i < 3; i++)
522- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
523-
524- i915_restore_vga(dev);
525+ i915_restore_state(dev);
526
527 intel_opregion_init(dev);
528
529diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
530index e4bd01c..a82b487 100644
531--- a/drivers/gpu/drm/i915/i915_drv.h
532+++ b/drivers/gpu/drm/i915/i915_drv.h
533@@ -41,6 +41,11 @@
534 #define DRIVER_DESC "Intel Graphics"
535 #define DRIVER_DATE "20060119"
536
537+enum pipe {
538+ PIPE_A = 0,
539+ PIPE_B,
540+};
541+
542 /* Interface history:
543 *
544 * 1.1: Original.
545@@ -269,6 +274,10 @@ extern void i915_mem_takedown(struct mem_block **heap);
546 extern void i915_mem_release(struct drm_device * dev,
547 struct drm_file *file_priv, struct mem_block *heap);
548
549+/* i915_suspend.c */
550+extern int i915_save_state(struct drm_device *dev);
551+extern int i915_restore_state(struct drm_device *dev);
552+
553 /* i915_opregion.c */
554 extern int intel_opregion_init(struct drm_device *dev);
555 extern void intel_opregion_free(struct drm_device *dev);
556@@ -279,6 +288,8 @@ extern void opregion_enable_asle(struct drm_device *dev);
557 #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
558 #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
559 #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
560+#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg))
561+#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
562
563 #define I915_VERBOSE 0
564
565diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
566new file mode 100644
567index 0000000..e0c1fe4
568--- /dev/null
569+++ b/drivers/gpu/drm/i915/i915_suspend.c
570@@ -0,0 +1,509 @@
571+/*
572+ *
573+ * Copyright 2008 (c) Intel Corporation
574+ * Jesse Barnes <jbarnes@virtuousgeek.org>
575+ *
576+ * Permission is hereby granted, free of charge, to any person obtaining a
577+ * copy of this software and associated documentation files (the
578+ * "Software"), to deal in the Software without restriction, including
579+ * without limitation the rights to use, copy, modify, merge, publish,
580+ * distribute, sub license, and/or sell copies of the Software, and to
581+ * permit persons to whom the Software is furnished to do so, subject to
582+ * the following conditions:
583+ *
584+ * The above copyright notice and this permission notice (including the
585+ * next paragraph) shall be included in all copies or substantial portions
586+ * of the Software.
587+ *
588+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
589+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
590+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
591+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
592+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
593+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
594+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
595+ */
596+
597+#include "drmP.h"
598+#include "drm.h"
599+#include "i915_drm.h"
600+#include "i915_drv.h"
601+
602+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
603+{
604+ struct drm_i915_private *dev_priv = dev->dev_private;
605+
606+ if (pipe == PIPE_A)
607+ return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
608+ else
609+ return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
610+}
611+
612+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
613+{
614+ struct drm_i915_private *dev_priv = dev->dev_private;
615+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
616+ u32 *array;
617+ int i;
618+
619+ if (!i915_pipe_enabled(dev, pipe))
620+ return;
621+
622+ if (pipe == PIPE_A)
623+ array = dev_priv->save_palette_a;
624+ else
625+ array = dev_priv->save_palette_b;
626+
627+	for (i = 0; i < 256; i++)
628+ array[i] = I915_READ(reg + (i << 2));
629+}
630+
631+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
632+{
633+ struct drm_i915_private *dev_priv = dev->dev_private;
634+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
635+ u32 *array;
636+ int i;
637+
638+ if (!i915_pipe_enabled(dev, pipe))
639+ return;
640+
641+ if (pipe == PIPE_A)
642+ array = dev_priv->save_palette_a;
643+ else
644+ array = dev_priv->save_palette_b;
645+
646+	for (i = 0; i < 256; i++)
647+ I915_WRITE(reg + (i << 2), array[i]);
648+}
649+
650+static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
651+{
652+ struct drm_i915_private *dev_priv = dev->dev_private;
653+
654+ I915_WRITE8(index_port, reg);
655+ return I915_READ8(data_port);
656+}
657+
658+static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
659+{
660+ struct drm_i915_private *dev_priv = dev->dev_private;
661+
662+ I915_READ8(st01);
663+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
664+ return I915_READ8(VGA_AR_DATA_READ);
665+}
666+
667+static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
668+{
669+ struct drm_i915_private *dev_priv = dev->dev_private;
670+
671+ I915_READ8(st01);
672+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
673+ I915_WRITE8(VGA_AR_DATA_WRITE, val);
674+}
675+
676+static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
677+{
678+ struct drm_i915_private *dev_priv = dev->dev_private;
679+
680+ I915_WRITE8(index_port, reg);
681+ I915_WRITE8(data_port, val);
682+}
683+
684+static void i915_save_vga(struct drm_device *dev)
685+{
686+ struct drm_i915_private *dev_priv = dev->dev_private;
687+ int i;
688+ u16 cr_index, cr_data, st01;
689+
690+ /* VGA color palette registers */
691+ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
692+ /* DACCRX automatically increments during read */
693+ I915_WRITE8(VGA_DACRX, 0);
694+ /* Read 3 bytes of color data from each index */
695+ for (i = 0; i < 256 * 3; i++)
696+ dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
697+
698+ /* MSR bits */
699+ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
700+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
701+ cr_index = VGA_CR_INDEX_CGA;
702+ cr_data = VGA_CR_DATA_CGA;
703+ st01 = VGA_ST01_CGA;
704+ } else {
705+ cr_index = VGA_CR_INDEX_MDA;
706+ cr_data = VGA_CR_DATA_MDA;
707+ st01 = VGA_ST01_MDA;
708+ }
709+
710+ /* CRT controller regs */
711+ i915_write_indexed(dev, cr_index, cr_data, 0x11,
712+ i915_read_indexed(dev, cr_index, cr_data, 0x11) &
713+ (~0x80));
714+ for (i = 0; i <= 0x24; i++)
715+ dev_priv->saveCR[i] =
716+ i915_read_indexed(dev, cr_index, cr_data, i);
717+ /* Make sure we don't turn off CR group 0 writes */
718+ dev_priv->saveCR[0x11] &= ~0x80;
719+
720+ /* Attribute controller registers */
721+ I915_READ8(st01);
722+ dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
723+ for (i = 0; i <= 0x14; i++)
724+ dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
725+ I915_READ8(st01);
726+ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
727+ I915_READ8(st01);
728+
729+ /* Graphics controller registers */
730+ for (i = 0; i < 9; i++)
731+ dev_priv->saveGR[i] =
732+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
733+
734+ dev_priv->saveGR[0x10] =
735+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
736+ dev_priv->saveGR[0x11] =
737+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
738+ dev_priv->saveGR[0x18] =
739+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
740+
741+ /* Sequencer registers */
742+ for (i = 0; i < 8; i++)
743+ dev_priv->saveSR[i] =
744+ i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
745+}
746+
747+static void i915_restore_vga(struct drm_device *dev)
748+{
749+ struct drm_i915_private *dev_priv = dev->dev_private;
750+ int i;
751+ u16 cr_index, cr_data, st01;
752+
753+ /* MSR bits */
754+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
755+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
756+ cr_index = VGA_CR_INDEX_CGA;
757+ cr_data = VGA_CR_DATA_CGA;
758+ st01 = VGA_ST01_CGA;
759+ } else {
760+ cr_index = VGA_CR_INDEX_MDA;
761+ cr_data = VGA_CR_DATA_MDA;
762+ st01 = VGA_ST01_MDA;
763+ }
764+
765+ /* Sequencer registers, don't write SR07 */
766+ for (i = 0; i < 7; i++)
767+ i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
768+ dev_priv->saveSR[i]);
769+
770+ /* CRT controller regs */
771+ /* Enable CR group 0 writes */
772+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
773+ for (i = 0; i <= 0x24; i++)
774+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
775+
776+ /* Graphics controller regs */
777+ for (i = 0; i < 9; i++)
778+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
779+ dev_priv->saveGR[i]);
780+
781+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
782+ dev_priv->saveGR[0x10]);
783+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
784+ dev_priv->saveGR[0x11]);
785+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
786+ dev_priv->saveGR[0x18]);
787+
788+ /* Attribute controller registers */
789+ I915_READ8(st01); /* switch back to index mode */
790+ for (i = 0; i <= 0x14; i++)
791+ i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
792+ I915_READ8(st01); /* switch back to index mode */
793+ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
794+ I915_READ8(st01);
795+
796+ /* VGA color palette registers */
797+ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
798+	/* DACWX automatically increments during write */
799+	I915_WRITE8(VGA_DACWX, 0);
800+	/* Write 3 bytes of color data to each index */
801+ for (i = 0; i < 256 * 3; i++)
802+ I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
803+
804+}
805+
806+int i915_save_state(struct drm_device *dev)
807+{
808+ struct drm_i915_private *dev_priv = dev->dev_private;
809+ int i;
810+
811+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
812+
813+ /* Display arbitration control */
814+ dev_priv->saveDSPARB = I915_READ(DSPARB);
815+
816+ /* Pipe & plane A info */
817+ dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
818+ dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
819+ dev_priv->saveFPA0 = I915_READ(FPA0);
820+ dev_priv->saveFPA1 = I915_READ(FPA1);
821+ dev_priv->saveDPLL_A = I915_READ(DPLL_A);
822+ if (IS_I965G(dev))
823+ dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
824+ dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
825+ dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
826+ dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
827+ dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
828+ dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
829+ dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
830+ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
831+
832+ dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
833+ dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
834+ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
835+ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
836+ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
837+ if (IS_I965G(dev)) {
838+ dev_priv->saveDSPASURF = I915_READ(DSPASURF);
839+ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
840+ }
841+ i915_save_palette(dev, PIPE_A);
842+ dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
843+
844+ /* Pipe & plane B info */
845+ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
846+ dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
847+ dev_priv->saveFPB0 = I915_READ(FPB0);
848+ dev_priv->saveFPB1 = I915_READ(FPB1);
849+ dev_priv->saveDPLL_B = I915_READ(DPLL_B);
850+ if (IS_I965G(dev))
851+ dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
852+ dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
853+ dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
854+ dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
855+ dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
856+ dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
857+ dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
858+	dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
859+
860+ dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
861+ dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
862+ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
863+ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
864+ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
865+ if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
866+ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
867+ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
868+ }
869+ i915_save_palette(dev, PIPE_B);
870+ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
871+
872+ /* CRT state */
873+ dev_priv->saveADPA = I915_READ(ADPA);
874+
875+ /* LVDS state */
876+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
877+ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
878+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
879+ if (IS_I965G(dev))
880+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
881+ if (IS_MOBILE(dev) && !IS_I830(dev))
882+ dev_priv->saveLVDS = I915_READ(LVDS);
883+ if (!IS_I830(dev) && !IS_845G(dev))
884+ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
885+ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
886+ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
887+ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
888+
889+ /* FIXME: save TV & SDVO state */
890+
891+ /* FBC state */
892+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
893+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
894+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
895+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
896+
897+ /* Interrupt state */
898+ dev_priv->saveIIR = I915_READ(IIR);
899+ dev_priv->saveIER = I915_READ(IER);
900+ dev_priv->saveIMR = I915_READ(IMR);
901+
902+ /* VGA state */
903+ dev_priv->saveVGA0 = I915_READ(VGA0);
904+ dev_priv->saveVGA1 = I915_READ(VGA1);
905+ dev_priv->saveVGA_PD = I915_READ(VGA_PD);
906+ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
907+
908+ /* Clock gating state */
909+ dev_priv->saveD_STATE = I915_READ(D_STATE);
910+ dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
911+
912+ /* Cache mode state */
913+ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
914+
915+ /* Memory Arbitration state */
916+ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
917+
918+ /* Scratch space */
919+ for (i = 0; i < 16; i++) {
920+ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
921+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
922+ }
923+ for (i = 0; i < 3; i++)
924+ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
925+
926+ i915_save_vga(dev);
927+
928+ return 0;
929+}
930+
931+int i915_restore_state(struct drm_device *dev)
932+{
933+ struct drm_i915_private *dev_priv = dev->dev_private;
934+ int i;
935+
936+ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
937+
938+ I915_WRITE(DSPARB, dev_priv->saveDSPARB);
939+
940+ /* Pipe & plane A info */
941+ /* Prime the clock */
942+ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
943+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
944+ ~DPLL_VCO_ENABLE);
945+ DRM_UDELAY(150);
946+ }
947+ I915_WRITE(FPA0, dev_priv->saveFPA0);
948+ I915_WRITE(FPA1, dev_priv->saveFPA1);
949+ /* Actually enable it */
950+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
951+ DRM_UDELAY(150);
952+ if (IS_I965G(dev))
953+ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
954+ DRM_UDELAY(150);
955+
956+ /* Restore mode */
957+ I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
958+ I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
959+ I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
960+ I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
961+ I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
962+ I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
963+ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
964+
965+ /* Restore plane info */
966+ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
967+ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
968+ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
969+ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
970+ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
971+ if (IS_I965G(dev)) {
972+ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
973+ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
974+ }
975+
976+ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
977+
978+ i915_restore_palette(dev, PIPE_A);
979+ /* Enable the plane */
980+ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
981+ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
982+
983+ /* Pipe & plane B info */
984+ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
985+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
986+ ~DPLL_VCO_ENABLE);
987+ DRM_UDELAY(150);
988+ }
989+ I915_WRITE(FPB0, dev_priv->saveFPB0);
990+ I915_WRITE(FPB1, dev_priv->saveFPB1);
991+ /* Actually enable it */
992+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
993+ DRM_UDELAY(150);
994+ if (IS_I965G(dev))
995+ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
996+ DRM_UDELAY(150);
997+
998+ /* Restore mode */
999+ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
1000+ I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
1001+ I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
1002+ I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
1003+ I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
1004+ I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
1005+ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
1006+
1007+ /* Restore plane info */
1008+ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
1009+ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
1010+ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
1011+ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
1012+ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
1013+ if (IS_I965G(dev)) {
1014+ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
1015+ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
1016+ }
1017+
1018+ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
1019+
1020+ i915_restore_palette(dev, PIPE_B);
1021+ /* Enable the plane */
1022+ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
1023+ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
1024+
1025+ /* CRT state */
1026+ I915_WRITE(ADPA, dev_priv->saveADPA);
1027+
1028+ /* LVDS state */
1029+ if (IS_I965G(dev))
1030+ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
1031+ if (IS_MOBILE(dev) && !IS_I830(dev))
1032+ I915_WRITE(LVDS, dev_priv->saveLVDS);
1033+ if (!IS_I830(dev) && !IS_845G(dev))
1034+ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
1035+
1036+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
1037+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
1038+ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
1039+ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
1040+ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
1041+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
1042+
1043+ /* FIXME: restore TV & SDVO state */
1044+
1045+ /* FBC info */
1046+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
1047+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
1048+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
1049+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
1050+
1051+ /* VGA state */
1052+ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
1053+ I915_WRITE(VGA0, dev_priv->saveVGA0);
1054+ I915_WRITE(VGA1, dev_priv->saveVGA1);
1055+ I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
1056+ DRM_UDELAY(150);
1057+
1058+ /* Clock gating state */
1059+	I915_WRITE(D_STATE, dev_priv->saveD_STATE);
1060+	I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
1061+
1062+	/* Cache mode state */
1063+	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
1064+
1065+	/* Memory arbitration state */
1066+	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
1067+
1068+ for (i = 0; i < 16; i++) {
1069+ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
1070+		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
1071+ }
1072+ for (i = 0; i < 3; i++)
1073+ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
1074+
1075+ i915_restore_vga(dev);
1076+
1077+ return 0;
1078+}
1079+
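
A note on the VGA helpers added above: i915_read_indexed(), i915_write_indexed()
and i915_read_ar() all wrap the same classic VGA protocol, where a register is
selected through an index port and its value moved through the paired data
port, and where a read of the ST01 status port resets the attribute
controller's shared index/data flip-flop. A minimal standalone sketch of the
indexed pattern, assuming the standard graphics-controller ports at 0x3CE/0x3CF
and userspace port access via ioperm() (an illustration only, not the driver
code):

#include <sys/io.h>	/* outb()/inb(); needs ioperm() or iopl() first */

#define VGA_GR_INDEX	0x3ce
#define VGA_GR_DATA	0x3cf

static unsigned char vga_gr_read(unsigned char reg)
{
	outb(reg, VGA_GR_INDEX);	/* select the register */
	return inb(VGA_GR_DATA);	/* read its value through the data port */
}

static void vga_gr_write(unsigned char reg, unsigned char val)
{
	outb(reg, VGA_GR_INDEX);	/* select the register */
	outb(val, VGA_GR_DATA);		/* write the new value */
}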
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0011-drm-vblank-rework.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0011-drm-vblank-rework.patch
deleted file mode 100644
index 6161a71f04..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0011-drm-vblank-rework.patch
+++ /dev/null
@@ -1,1534 +0,0 @@
1commit 2aebb4e4e62d09b4a95be7be7c24a7f6528385b7
2Author: Jesse Barnes <jbarnes@virtuousgeek.org>
3Date: Tue Sep 30 12:14:26 2008 -0700
4
5 drm: Rework vblank-wait handling to allow interrupt reduction.
6
7 Previously, drivers supporting vblank interrupt waits would run the interrupt
8 all the time, or all the time that any 3d client was running, preventing the
9 CPU from sleeping for long when the system was otherwise idle. Now, interrupts
10 are disabled any time that no client is waiting on a vblank event. The new
11 method uses vblank counters on the chipsets when the interrupts are turned
12 off, rather than counting interrupts, so that we can continue to present
13 accurate vblank numbers.
14
15 Co-author: Michel Dänzer <michel@tungstengraphics.com>
16 Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
17 Signed-off-by: Eric Anholt <eric@anholt.net>
18 Signed-off-by: Dave Airlie <airlied@redhat.com>
19
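The heart of the rework is the "cooked" counter arithmetic: whenever vblank
interrupts are re-enabled, the driver reads the hardware frame counter and
folds the frames missed while interrupts were off into the software count,
compensating for a counter that wraps at max_vblank_count rather than at 2^32.
A sketch of that arithmetic, mirroring drm_update_vblank_count() in the hunks
below (names shortened and state kept in file-scope variables purely for
illustration):

#include <stdint.h>

static uint32_t vblank_count;	/* the "cooked", monotonic count */
static uint32_t last_hw_count;	/* hw frame counter when irqs went off */

/* Fold frames missed while the interrupt was disabled into the count. */
static void update_cooked_count(uint32_t cur_hw_count,
				uint32_t max_vblank_count)
{
	uint32_t diff = cur_hw_count - last_hw_count;

	/* the hardware counter wraps at max_vblank_count, not 2^32 */
	if (cur_hw_count < last_hw_count)
		diff += max_vblank_count;

	vblank_count += diff;
	last_hw_count = cur_hw_count;
}
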
20diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
21index 452c2d8..fb45fe7 100644
22--- a/drivers/gpu/drm/drm_drv.c
23+++ b/drivers/gpu/drm/drm_drv.c
24@@ -116,6 +116,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
25
26 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
27
28+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
29+
30 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
31 };
32
33diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
34index 61ed515..d0c13d9 100644
35--- a/drivers/gpu/drm/drm_irq.c
36+++ b/drivers/gpu/drm/drm_irq.c
37@@ -71,19 +71,131 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
38 return 0;
39 }
40
41+static void vblank_disable_fn(unsigned long arg)
42+{
43+ struct drm_device *dev = (struct drm_device *)arg;
44+ unsigned long irqflags;
45+ int i;
46+
47+ if (!dev->vblank_disable_allowed)
48+ return;
49+
50+ for (i = 0; i < dev->num_crtcs; i++) {
51+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
52+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
53+ dev->vblank_enabled[i]) {
54+ DRM_DEBUG("disabling vblank on crtc %d\n", i);
55+ dev->last_vblank[i] =
56+ dev->driver->get_vblank_counter(dev, i);
57+ dev->driver->disable_vblank(dev, i);
58+ dev->vblank_enabled[i] = 0;
59+ }
60+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
61+ }
62+}
63+
64+static void drm_vblank_cleanup(struct drm_device *dev)
65+{
66+ /* Bail if the driver didn't call drm_vblank_init() */
67+ if (dev->num_crtcs == 0)
68+ return;
69+
70+ del_timer(&dev->vblank_disable_timer);
71+
72+ vblank_disable_fn((unsigned long)dev);
73+
74+ drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
75+ DRM_MEM_DRIVER);
76+ drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
77+ DRM_MEM_DRIVER);
78+ drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
79+ dev->num_crtcs, DRM_MEM_DRIVER);
80+ drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
81+ dev->num_crtcs, DRM_MEM_DRIVER);
82+ drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
83+ dev->num_crtcs, DRM_MEM_DRIVER);
84+ drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
85+ DRM_MEM_DRIVER);
86+ drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
87+ dev->num_crtcs, DRM_MEM_DRIVER);
88+
89+ dev->num_crtcs = 0;
90+}
91+
92+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
93+{
94+ int i, ret = -ENOMEM;
95+
96+ setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
97+ (unsigned long)dev);
98+ spin_lock_init(&dev->vbl_lock);
99+ atomic_set(&dev->vbl_signal_pending, 0);
100+ dev->num_crtcs = num_crtcs;
101+
102+ dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
103+ DRM_MEM_DRIVER);
104+ if (!dev->vbl_queue)
105+ goto err;
106+
107+ dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
108+ DRM_MEM_DRIVER);
109+ if (!dev->vbl_sigs)
110+ goto err;
111+
112+ dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
113+ DRM_MEM_DRIVER);
114+ if (!dev->_vblank_count)
115+ goto err;
116+
117+ dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
118+ DRM_MEM_DRIVER);
119+ if (!dev->vblank_refcount)
120+ goto err;
121+
122+ dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
123+ DRM_MEM_DRIVER);
124+ if (!dev->vblank_enabled)
125+ goto err;
126+
127+ dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
128+ if (!dev->last_vblank)
129+ goto err;
130+
131+ dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
132+ DRM_MEM_DRIVER);
133+ if (!dev->vblank_inmodeset)
134+ goto err;
135+
136+ /* Zero per-crtc vblank stuff */
137+ for (i = 0; i < num_crtcs; i++) {
138+ init_waitqueue_head(&dev->vbl_queue[i]);
139+ INIT_LIST_HEAD(&dev->vbl_sigs[i]);
140+ atomic_set(&dev->_vblank_count[i], 0);
141+ atomic_set(&dev->vblank_refcount[i], 0);
142+ }
143+
144+ dev->vblank_disable_allowed = 0;
145+
146+ return 0;
147+
148+err:
149+ drm_vblank_cleanup(dev);
150+ return ret;
151+}
152+EXPORT_SYMBOL(drm_vblank_init);
153+
154 /**
155 * Install IRQ handler.
156 *
157 * \param dev DRM device.
158- * \param irq IRQ number.
159 *
160- * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
161+ * Initializes the IRQ related data. Installs the handler, calling the driver
162 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
163 * before and after the installation.
164 */
165-static int drm_irq_install(struct drm_device * dev)
166+int drm_irq_install(struct drm_device *dev)
167 {
168- int ret;
169+ int ret = 0;
170 unsigned long sh_flags = 0;
171
172 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
173@@ -109,17 +221,6 @@ static int drm_irq_install(struct drm_device * dev)
174
175 DRM_DEBUG("irq=%d\n", dev->pdev->irq);
176
177- if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
178- init_waitqueue_head(&dev->vbl_queue);
179-
180- spin_lock_init(&dev->vbl_lock);
181-
182- INIT_LIST_HEAD(&dev->vbl_sigs);
183- INIT_LIST_HEAD(&dev->vbl_sigs2);
184-
185- dev->vbl_pending = 0;
186- }
187-
188 /* Before installing handler */
189 dev->driver->irq_preinstall(dev);
190
191@@ -141,10 +242,16 @@ static int drm_irq_install(struct drm_device * dev)
192 }
193
194 /* After installing handler */
195- dev->driver->irq_postinstall(dev);
196+ ret = dev->driver->irq_postinstall(dev);
197+ if (ret < 0) {
198+ mutex_lock(&dev->struct_mutex);
199+ dev->irq_enabled = 0;
200+ mutex_unlock(&dev->struct_mutex);
201+ }
202
203- return 0;
204+ return ret;
205 }
206+EXPORT_SYMBOL(drm_irq_install);
207
208 /**
209 * Uninstall the IRQ handler.
210@@ -174,11 +281,12 @@ int drm_irq_uninstall(struct drm_device * dev)
211
212 free_irq(dev->pdev->irq, dev);
213
214+ drm_vblank_cleanup(dev);
215+
216 dev->locked_tasklet_func = NULL;
217
218 return 0;
219 }
220-
221 EXPORT_SYMBOL(drm_irq_uninstall);
222
223 /**
224@@ -218,6 +326,174 @@ int drm_control(struct drm_device *dev, void *data,
225 }
226
227 /**
228+ * drm_vblank_count - retrieve "cooked" vblank counter value
229+ * @dev: DRM device
230+ * @crtc: which counter to retrieve
231+ *
232+ * Fetches the "cooked" vblank count value that represents the number of
233+ * vblank events since the system was booted, including lost events due to
234+ * modesetting activity.
235+ */
236+u32 drm_vblank_count(struct drm_device *dev, int crtc)
237+{
238+ return atomic_read(&dev->_vblank_count[crtc]);
239+}
240+EXPORT_SYMBOL(drm_vblank_count);
241+
242+/**
243+ * drm_update_vblank_count - update the master vblank counter
244+ * @dev: DRM device
245+ * @crtc: counter to update
246+ *
247+ * Call back into the driver to update the appropriate vblank counter
248+ * (specified by @crtc). Deal with wraparound, if it occurred, and
249+ * update the last read value so we can deal with wraparound on the next
250+ * call if necessary.
251+ *
252+ * Only necessary when going from off->on, to account for frames we
253+ * didn't get an interrupt for.
254+ *
255+ * Note: caller must hold dev->vbl_lock since this reads & writes
256+ * device vblank fields.
257+ */
258+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
259+{
260+ u32 cur_vblank, diff;
261+
262+ /*
263+ * Interrupts were disabled prior to this call, so deal with counter
264+ * wrap if needed.
265+	 * NOTE! It's possible we lost a full dev->max_vblank_count worth of
266+	 * events here if the register is small or we had vblank interrupts off
267+	 * for a long time.
268+ */
269+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
270+ diff = cur_vblank - dev->last_vblank[crtc];
271+ if (cur_vblank < dev->last_vblank[crtc]) {
272+ diff += dev->max_vblank_count;
273+
274+ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
275+ crtc, dev->last_vblank[crtc], cur_vblank, diff);
276+ }
277+
278+ DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
279+ crtc, diff);
280+
281+ atomic_add(diff, &dev->_vblank_count[crtc]);
282+}
283+
284+/**
285+ * drm_vblank_get - get a reference count on vblank events
286+ * @dev: DRM device
287+ * @crtc: which CRTC to own
288+ *
289+ * Acquire a reference count on vblank events to avoid having them disabled
290+ * while in use.
291+ *
292+ * RETURNS
293+ * Zero on success, nonzero on failure.
294+ */
295+int drm_vblank_get(struct drm_device *dev, int crtc)
296+{
297+ unsigned long irqflags;
298+ int ret = 0;
299+
300+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
301+ /* Going from 0->1 means we have to enable interrupts again */
302+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
303+ !dev->vblank_enabled[crtc]) {
304+ ret = dev->driver->enable_vblank(dev, crtc);
305+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
306+ if (ret)
307+ atomic_dec(&dev->vblank_refcount[crtc]);
308+ else {
309+ dev->vblank_enabled[crtc] = 1;
310+ drm_update_vblank_count(dev, crtc);
311+ }
312+ }
313+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
314+
315+ return ret;
316+}
317+EXPORT_SYMBOL(drm_vblank_get);
318+
319+/**
320+ * drm_vblank_put - give up ownership of vblank events
321+ * @dev: DRM device
322+ * @crtc: which counter to give up
323+ *
324+ * Release ownership of a given vblank counter, turning off interrupts
325+ * if possible.
326+ */
327+void drm_vblank_put(struct drm_device *dev, int crtc)
328+{
329+ /* Last user schedules interrupt disable */
330+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
331+ mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
332+}
333+EXPORT_SYMBOL(drm_vblank_put);
334+
335+/**
336+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
337+ * @DRM_IOCTL_ARGS: standard ioctl arguments
338+ *
339+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
340+ * ioctls around modesetting so that any lost vblank events are accounted for.
341+ *
342+ * Generally the counter will reset across mode sets. If interrupts are
343+ * enabled around this call, we don't have to do anything since the counter
344+ * will have already been incremented.
345+ */
346+int drm_modeset_ctl(struct drm_device *dev, void *data,
347+ struct drm_file *file_priv)
348+{
349+ struct drm_modeset_ctl *modeset = data;
350+ unsigned long irqflags;
351+ int crtc, ret = 0;
352+
353+ /* If drm_vblank_init() hasn't been called yet, just no-op */
354+ if (!dev->num_crtcs)
355+ goto out;
356+
357+ crtc = modeset->crtc;
358+ if (crtc >= dev->num_crtcs) {
359+ ret = -EINVAL;
360+ goto out;
361+ }
362+
363+ /*
364+ * To avoid all the problems that might happen if interrupts
365+ * were enabled/disabled around or between these calls, we just
366+ * have the kernel take a reference on the CRTC (just once though
367+	 * to avoid corrupting the count if multiple, mismatched calls occur),
368+ * so that interrupts remain enabled in the interim.
369+ */
370+ switch (modeset->cmd) {
371+ case _DRM_PRE_MODESET:
372+ if (!dev->vblank_inmodeset[crtc]) {
373+ dev->vblank_inmodeset[crtc] = 1;
374+ drm_vblank_get(dev, crtc);
375+ }
376+ break;
377+ case _DRM_POST_MODESET:
378+ if (dev->vblank_inmodeset[crtc]) {
379+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
380+ dev->vblank_disable_allowed = 1;
381+ dev->vblank_inmodeset[crtc] = 0;
382+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
383+ drm_vblank_put(dev, crtc);
384+ }
385+ break;
386+ default:
387+ ret = -EINVAL;
388+ break;
389+ }
390+
391+out:
392+ return ret;
393+}
394+
395+/**
396 * Wait for VBLANK.
397 *
398 * \param inode device inode.
399@@ -236,12 +512,12 @@ int drm_control(struct drm_device *dev, void *data,
400 *
401 * If a signal is not requested, then calls vblank_wait().
402 */
403-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
404+int drm_wait_vblank(struct drm_device *dev, void *data,
405+ struct drm_file *file_priv)
406 {
407 union drm_wait_vblank *vblwait = data;
408- struct timeval now;
409 int ret = 0;
410- unsigned int flags, seq;
411+ unsigned int flags, seq, crtc;
412
413 if ((!dev->pdev->irq) || (!dev->irq_enabled))
414 return -EINVAL;
415@@ -255,13 +531,17 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
416 }
417
418 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
419+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
420
421- if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
422- DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
423+ if (crtc >= dev->num_crtcs)
424 return -EINVAL;
425
426- seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
427- : &dev->vbl_received);
428+ ret = drm_vblank_get(dev, crtc);
429+ if (ret) {
430+ DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
431+ return ret;
432+ }
433+ seq = drm_vblank_count(dev, crtc);
434
435 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
436 case _DRM_VBLANK_RELATIVE:
437@@ -270,7 +550,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
438 case _DRM_VBLANK_ABSOLUTE:
439 break;
440 default:
441- return -EINVAL;
442+ ret = -EINVAL;
443+ goto done;
444 }
445
446 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
447@@ -280,8 +561,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
448
449 if (flags & _DRM_VBLANK_SIGNAL) {
450 unsigned long irqflags;
451- struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
452- ? &dev->vbl_sigs2 : &dev->vbl_sigs;
453+ struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
454 struct drm_vbl_sig *vbl_sig;
455
456 spin_lock_irqsave(&dev->vbl_lock, irqflags);
457@@ -302,22 +582,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
458 }
459 }
460
461- if (dev->vbl_pending >= 100) {
462+ if (atomic_read(&dev->vbl_signal_pending) >= 100) {
463 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
464- return -EBUSY;
465+ ret = -EBUSY;
466+ goto done;
467 }
468
469- dev->vbl_pending++;
470-
471 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
472
473- if (!
474- (vbl_sig =
475- drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
476- return -ENOMEM;
477+ vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
478+ DRM_MEM_DRIVER);
479+ if (!vbl_sig) {
480+ ret = -ENOMEM;
481+ goto done;
482+ }
483+
484+ ret = drm_vblank_get(dev, crtc);
485+ if (ret) {
486+ drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
487+ DRM_MEM_DRIVER);
488+ return ret;
489 }
490
491- memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
492+ atomic_inc(&dev->vbl_signal_pending);
493
494 vbl_sig->sequence = vblwait->request.sequence;
495 vbl_sig->info.si_signo = vblwait->request.signal;
496@@ -331,20 +618,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
497
498 vblwait->reply.sequence = seq;
499 } else {
500- if (flags & _DRM_VBLANK_SECONDARY) {
501- if (dev->driver->vblank_wait2)
502- ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
503- } else if (dev->driver->vblank_wait)
504- ret =
505- dev->driver->vblank_wait(dev,
506- &vblwait->request.sequence);
507-
508- do_gettimeofday(&now);
509- vblwait->reply.tval_sec = now.tv_sec;
510- vblwait->reply.tval_usec = now.tv_usec;
511+ DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
512+ vblwait->request.sequence, crtc);
513+ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
514+ ((drm_vblank_count(dev, crtc)
515+ - vblwait->request.sequence) <= (1 << 23)));
516+
517+ if (ret != -EINTR) {
518+ struct timeval now;
519+
520+ do_gettimeofday(&now);
521+
522+ vblwait->reply.tval_sec = now.tv_sec;
523+ vblwait->reply.tval_usec = now.tv_usec;
524+ vblwait->reply.sequence = drm_vblank_count(dev, crtc);
525+ DRM_DEBUG("returning %d to client\n",
526+ vblwait->reply.sequence);
527+ } else {
528+ DRM_DEBUG("vblank wait interrupted by signal\n");
529+ }
530 }
531
532- done:
533+done:
534+ drm_vblank_put(dev, crtc);
535 return ret;
536 }
537
538@@ -352,44 +648,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
539 * Send the VBLANK signals.
540 *
541 * \param dev DRM device.
542+ * \param crtc CRTC where the vblank event occurred
543 *
544 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
545 *
546 * If a signal is not requested, then calls vblank_wait().
547 */
548-void drm_vbl_send_signals(struct drm_device * dev)
549+static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
550 {
551+ struct drm_vbl_sig *vbl_sig, *tmp;
552+ struct list_head *vbl_sigs;
553+ unsigned int vbl_seq;
554 unsigned long flags;
555- int i;
556
557 spin_lock_irqsave(&dev->vbl_lock, flags);
558
559- for (i = 0; i < 2; i++) {
560- struct drm_vbl_sig *vbl_sig, *tmp;
561- struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
562- unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
563- &dev->vbl_received);
564+ vbl_sigs = &dev->vbl_sigs[crtc];
565+ vbl_seq = drm_vblank_count(dev, crtc);
566
567- list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
568- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
569- vbl_sig->info.si_code = vbl_seq;
570- send_sig_info(vbl_sig->info.si_signo,
571- &vbl_sig->info, vbl_sig->task);
572+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
573+ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
574+ vbl_sig->info.si_code = vbl_seq;
575+ send_sig_info(vbl_sig->info.si_signo,
576+ &vbl_sig->info, vbl_sig->task);
577
578- list_del(&vbl_sig->head);
579-
580- drm_free(vbl_sig, sizeof(*vbl_sig),
581- DRM_MEM_DRIVER);
582+ list_del(&vbl_sig->head);
583
584- dev->vbl_pending--;
585- }
586- }
587+ drm_free(vbl_sig, sizeof(*vbl_sig),
588+ DRM_MEM_DRIVER);
589+ atomic_dec(&dev->vbl_signal_pending);
590+ drm_vblank_put(dev, crtc);
591+ }
592 }
593
594 spin_unlock_irqrestore(&dev->vbl_lock, flags);
595 }
596
597-EXPORT_SYMBOL(drm_vbl_send_signals);
598+/**
599+ * drm_handle_vblank - handle a vblank event
600+ * @dev: DRM device
601+ * @crtc: where this event occurred
602+ *
603+ * Drivers should call this routine in their vblank interrupt handlers to
604+ * update the vblank counter and send any signals that may be pending.
605+ */
606+void drm_handle_vblank(struct drm_device *dev, int crtc)
607+{
608+ atomic_inc(&dev->_vblank_count[crtc]);
609+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
610+ drm_vbl_send_signals(dev, crtc);
611+}
612+EXPORT_SYMBOL(drm_handle_vblank);
613
614 /**
615 * Tasklet wrapper function.
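
Before the driver-side hunks, a condensed picture of how a driver plugs into
the reworked API: advertise per-CRTC counter/enable/disable hooks in its
drm_driver, call drm_vblank_init() with the CRTC count at load time, and
forward each vblank event to drm_handle_vblank() from its interrupt handler.
The i915 hunks below do exactly this; the example_* names in this sketch are
purely hypothetical:

#include "drmP.h"

static u32  example_get_vblank_counter(struct drm_device *dev, int crtc);
static int  example_enable_vblank(struct drm_device *dev, int crtc);
static void example_disable_vblank(struct drm_device *dev, int crtc);
static int  example_vblank_happened(struct drm_device *dev, int crtc);
static irqreturn_t example_irq_handler(DRM_IRQ_ARGS);

static struct drm_driver example_driver = {
	.driver_features    = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
	.get_vblank_counter = example_get_vblank_counter, /* read hw frame counter */
	.enable_vblank      = example_enable_vblank,      /* unmask the irq source */
	.disable_vblank     = example_disable_vblank,     /* mask it again */
	.irq_handler        = example_irq_handler,
};

/* in the load hook, once the CRTC count is known: drm_vblank_init(dev, 2); */

static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = arg;

	if (example_vblank_happened(dev, 0))	/* hypothetical status check */
		drm_handle_vblank(dev, 0);	/* bump count, wake waiters, send signals */

	return IRQ_HANDLED;
}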
616diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
617index cead62f..8609ec2 100644
618--- a/drivers/gpu/drm/i915/i915_dma.c
619+++ b/drivers/gpu/drm/i915/i915_dma.c
620@@ -673,7 +673,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
621
622 switch (param->param) {
623 case I915_PARAM_IRQ_ACTIVE:
624- value = dev->irq_enabled;
625+ value = dev->pdev->irq ? 1 : 0;
626 break;
627 case I915_PARAM_ALLOW_BATCHBUFFER:
628 value = dev_priv->allow_batchbuffer ? 1 : 0;
629@@ -808,7 +808,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
630 * and the registers being closely associated.
631 */
632 if (!IS_I945G(dev) && !IS_I945GM(dev))
633- pci_enable_msi(dev->pdev);
634+ if (pci_enable_msi(dev->pdev))
635+ DRM_ERROR("failed to enable MSI\n");
636
637 intel_opregion_init(dev);
638
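One side effect of the i915_getparam change above is that I915_PARAM_IRQ_ACTIVE
now reports whether the device has an interrupt line at all, rather than
whether the DRM handler happens to be installed at that moment. A userspace
sketch of querying it through the stock i915 getparam ioctl (error handling
kept minimal for illustration):

#include <sys/ioctl.h>
#include "i915_drm.h"

/* returns nonzero when the device has an interrupt line */
static int i915_irq_active(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_IRQ_ACTIVE;
	gp.value = &value;

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* treat ioctl failure as "no irq" here */

	return value;
}
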
639diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
640index eff66ed..37af03f 100644
641--- a/drivers/gpu/drm/i915/i915_drv.c
642+++ b/drivers/gpu/drm/i915/i915_drv.c
643@@ -85,10 +85,8 @@ static struct drm_driver driver = {
644 /* don't use mtrr's here, the Xserver or user space app should
645 * deal with them for intel hardware.
646 */
647- .driver_features =
648- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
649- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
650- DRIVER_IRQ_VBL2,
651+ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
652+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
653 .load = i915_driver_load,
654 .unload = i915_driver_unload,
655 .lastclose = i915_driver_lastclose,
656@@ -96,8 +94,9 @@ static struct drm_driver driver = {
657 .suspend = i915_suspend,
658 .resume = i915_resume,
659 .device_is_agp = i915_driver_device_is_agp,
660- .vblank_wait = i915_driver_vblank_wait,
661- .vblank_wait2 = i915_driver_vblank_wait2,
662+ .get_vblank_counter = i915_get_vblank_counter,
663+ .enable_vblank = i915_enable_vblank,
664+ .disable_vblank = i915_disable_vblank,
665 .irq_preinstall = i915_driver_irq_preinstall,
666 .irq_postinstall = i915_driver_irq_postinstall,
667 .irq_uninstall = i915_driver_irq_uninstall,
668diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
669index 71326ca..d1a02be 100644
670--- a/drivers/gpu/drm/i915/i915_drv.h
671+++ b/drivers/gpu/drm/i915/i915_drv.h
672@@ -83,10 +83,15 @@ struct mem_block {
673 typedef struct _drm_i915_vbl_swap {
674 struct list_head head;
675 drm_drawable_t drw_id;
676- unsigned int pipe;
677+ unsigned int plane;
678 unsigned int sequence;
679 } drm_i915_vbl_swap_t;
680
681+struct opregion_header;
682+struct opregion_acpi;
683+struct opregion_swsci;
684+struct opregion_asle;
685+
686 struct intel_opregion {
687 struct opregion_header *header;
688 struct opregion_acpi *acpi;
689@@ -105,7 +110,7 @@ typedef struct drm_i915_private {
690 drm_dma_handle_t *status_page_dmah;
691 void *hw_status_page;
692 dma_addr_t dma_status_page;
693- unsigned long counter;
694+ uint32_t counter;
695 unsigned int status_gfx_addr;
696 drm_local_map_t hws_map;
697
698@@ -247,16 +252,17 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
699 extern int i915_irq_wait(struct drm_device *dev, void *data,
700 struct drm_file *file_priv);
701
702-extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
703-extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
704 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
705 extern void i915_driver_irq_preinstall(struct drm_device * dev);
706-extern void i915_driver_irq_postinstall(struct drm_device * dev);
707+extern int i915_driver_irq_postinstall(struct drm_device *dev);
708 extern void i915_driver_irq_uninstall(struct drm_device * dev);
709 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
710 struct drm_file *file_priv);
711 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
712 struct drm_file *file_priv);
713+extern int i915_enable_vblank(struct drm_device *dev, int crtc);
714+extern void i915_disable_vblank(struct drm_device *dev, int crtc);
715+extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
716 extern int i915_vblank_swap(struct drm_device *dev, void *data,
717 struct drm_file *file_priv);
718 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
719@@ -278,6 +284,10 @@ extern void i915_mem_release(struct drm_device * dev,
720 extern int i915_save_state(struct drm_device *dev);
721 extern int i915_restore_state(struct drm_device *dev);
722
723+/* i915_suspend.c */
724+extern int i915_save_state(struct drm_device *dev);
725+extern int i915_restore_state(struct drm_device *dev);
726+
727 /* i915_opregion.c */
728 extern int intel_opregion_init(struct drm_device *dev);
729 extern void intel_opregion_free(struct drm_device *dev);
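
Rounding out the core changes: the drm_modeset_ctl() ioctl added earlier is
meant to be bracketed around mode sets by userspace so that vblank counts stay
consistent while interrupts may be off. A sketch of that usage, assuming the
struct drm_modeset_ctl and _DRM_PRE/_POST_MODESET definitions this series adds
to the shared drm headers (not shown in this excerpt):

#include <sys/ioctl.h>
#include "drm.h"

static void set_mode_with_vblank_bracket(int fd, int crtc)
{
	struct drm_modeset_ctl ctl = { .crtc = crtc, .cmd = _DRM_PRE_MODESET };

	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);	/* core takes a vblank ref */

	/* ... program the new mode here ... */

	ctl.cmd = _DRM_POST_MODESET;
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);	/* ref dropped, disable allowed */
}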
730diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
731index ae7d3a8..f875959 100644
732--- a/drivers/gpu/drm/i915/i915_irq.c
733+++ b/drivers/gpu/drm/i915/i915_irq.c
734@@ -35,9 +35,8 @@
735
736 /** These are the interrupts used by the driver */
737 #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
738- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
739- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \
740 I915_ASLE_INTERRUPT | \
741+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
742 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
743
744 void
745@@ -61,6 +60,64 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
746 }
747
748 /**
749+ * i915_get_pipe - return the pipe associated with a given plane
750+ * @dev: DRM device
751+ * @plane: plane to look for
752+ *
753+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
754+ * rather than a pipe number, since they may not always be equal. This routine
755+ * maps the given @plane back to a pipe number.
756+ */
757+static int
758+i915_get_pipe(struct drm_device *dev, int plane)
759+{
760+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
761+ u32 dspcntr;
762+
763+ dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
764+
765+ return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
766+}
767+
768+/**
769+ * i915_get_plane - return the plane associated with a given pipe
770+ * @dev: DRM device
771+ * @pipe: pipe to look for
772+ *
773+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
774+ * rather than a pipe number, since they may not always be equal. This routine
775+ * maps the given @pipe back to a plane number.
776+ */
777+static int
778+i915_get_plane(struct drm_device *dev, int pipe)
779+{
780+ if (i915_get_pipe(dev, 0) == pipe)
781+ return 0;
782+ return 1;
783+}
784+
785+/**
786+ * i915_pipe_enabled - check if a pipe is enabled
787+ * @dev: DRM device
788+ * @pipe: pipe to check
789+ *
790+ * Reading certain registers when the pipe is disabled can hang the chip.
791+ * Use this routine to make sure the PLL is running and the pipe is active
792+ * before reading such registers if unsure.
793+ */
794+static int
795+i915_pipe_enabled(struct drm_device *dev, int pipe)
796+{
797+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
798+ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
799+
800+ if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
801+ return 1;
802+
803+ return 0;
804+}
805+
806+/**
807 * Emit blits for scheduled buffer swaps.
808 *
809 * This function will be called with the HW lock held.
810@@ -71,8 +128,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
811 unsigned long irqflags;
812 struct list_head *list, *tmp, hits, *hit;
813 int nhits, nrects, slice[2], upper[2], lower[2], i;
814- unsigned counter[2] = { atomic_read(&dev->vbl_received),
815- atomic_read(&dev->vbl_received2) };
816+ unsigned counter[2];
817 struct drm_drawable_info *drw;
818 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
819 u32 cpp = dev_priv->cpp;
820@@ -94,6 +150,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
821 src_pitch >>= 2;
822 }
823
824+ counter[0] = drm_vblank_count(dev, 0);
825+ counter[1] = drm_vblank_count(dev, 1);
826+
827 DRM_DEBUG("\n");
828
829 INIT_LIST_HEAD(&hits);
830@@ -106,12 +165,14 @@ static void i915_vblank_tasklet(struct drm_device *dev)
831 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
832 drm_i915_vbl_swap_t *vbl_swap =
833 list_entry(list, drm_i915_vbl_swap_t, head);
834+ int pipe = i915_get_pipe(dev, vbl_swap->plane);
835
836- if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
837+ if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
838 continue;
839
840 list_del(list);
841 dev_priv->swaps_pending--;
842+ drm_vblank_put(dev, pipe);
843
844 spin_unlock(&dev_priv->swaps_lock);
845 spin_lock(&dev->drw_lock);
846@@ -204,7 +265,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
847 drm_i915_vbl_swap_t *swap_hit =
848 list_entry(hit, drm_i915_vbl_swap_t, head);
849 struct drm_clip_rect *rect;
850- int num_rects, pipe;
851+ int num_rects, plane;
852 unsigned short top, bottom;
853
854 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
855@@ -213,9 +274,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
856 continue;
857
858 rect = drw->rects;
859- pipe = swap_hit->pipe;
860- top = upper[pipe];
861- bottom = lower[pipe];
862+ plane = swap_hit->plane;
863+ top = upper[plane];
864+ bottom = lower[plane];
865
866 for (num_rects = drw->num_rects; num_rects--; rect++) {
867 int y1 = max(rect->y1, top);
868@@ -252,22 +313,54 @@ static void i915_vblank_tasklet(struct drm_device *dev)
869 }
870 }
871
872+u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
873+{
874+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
875+ unsigned long high_frame;
876+ unsigned long low_frame;
877+ u32 high1, high2, low, count;
878+ int pipe;
879+
880+ pipe = i915_get_pipe(dev, plane);
881+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
882+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
883+
884+ if (!i915_pipe_enabled(dev, pipe)) {
885+ DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
886+ return 0;
887+ }
888+
889+ /*
890+ * High & low register fields aren't synchronized, so make sure
891+ * we get a low value that's stable across two reads of the high
892+ * register.
893+ */
894+ do {
895+ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
896+ PIPE_FRAME_HIGH_SHIFT);
897+ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
898+ PIPE_FRAME_LOW_SHIFT);
899+ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
900+ PIPE_FRAME_HIGH_SHIFT);
901+ } while (high1 != high2);
902+
903+ count = (high1 << 8) | low;
904+
905+ return count;
906+}
907+
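The frame count is split across two registers that cannot be read atomically, so i915_get_vblank_counter() above re-reads the high half until it is stable around the low read. The same idiom in isolation, with read_frame_high()/read_frame_low() as hypothetical stand-ins for the PIPE*FRAMEHIGH/PIPE*FRAMEPIXEL reads:

	extern unsigned int read_frame_high(void);	/* hypothetical: bits 8..23 */
	extern unsigned int read_frame_low(void);	/* hypothetical: bits 0..7 */

	/* Tear-free read of a 24-bit counter split across two registers:
	 * retry whenever the high half changed under the low read (sketch). */
	static unsigned int read_frame_counter(void)
	{
		unsigned int high1, high2, low;

		do {
			high1 = read_frame_high();
			low = read_frame_low();
			high2 = read_frame_high();
		} while (high1 != high2);

		return (high1 << 8) | low;
	}
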
908 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
909 {
910 struct drm_device *dev = (struct drm_device *) arg;
911 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
912- u32 pipea_stats, pipeb_stats;
913 u32 iir;
914-
915- pipea_stats = I915_READ(PIPEASTAT);
916- pipeb_stats = I915_READ(PIPEBSTAT);
917+ u32 pipea_stats, pipeb_stats;
918+ int vblank = 0;
919
920 if (dev->pdev->msi_enabled)
921 I915_WRITE(IMR, ~0);
922 iir = I915_READ(IIR);
923
924- DRM_DEBUG("iir=%08x\n", iir);
925-
926 if (iir == 0) {
927 if (dev->pdev->msi_enabled) {
928 I915_WRITE(IMR, dev_priv->irq_mask_reg);
929@@ -276,48 +369,56 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
930 return IRQ_NONE;
931 }
932
933- I915_WRITE(PIPEASTAT, pipea_stats);
934- I915_WRITE(PIPEBSTAT, pipeb_stats);
935-
936- I915_WRITE(IIR, iir);
937- if (dev->pdev->msi_enabled)
938- I915_WRITE(IMR, dev_priv->irq_mask_reg);
939- (void) I915_READ(IIR); /* Flush posted writes */
940-
941- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
942-
943- if (iir & I915_USER_INTERRUPT)
944- DRM_WAKEUP(&dev_priv->irq_queue);
945-
946- if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
947- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
948- int vblank_pipe = dev_priv->vblank_pipe;
949-
950- if ((vblank_pipe &
951- (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
952- == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
953- if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
954- atomic_inc(&dev->vbl_received);
955- if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
956- atomic_inc(&dev->vbl_received2);
957- } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
958- (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
959- ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
960- (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
961- atomic_inc(&dev->vbl_received);
962+ /*
963+ * Clear the PIPE(A|B)STAT regs before the IIR otherwise
964+ * we may get extra interrupts.
965+ */
966+ if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
967+ pipea_stats = I915_READ(PIPEASTAT);
968+ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
969+ pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
970+ PIPE_VBLANK_INTERRUPT_ENABLE);
971+ else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
972+ PIPE_VBLANK_INTERRUPT_STATUS)) {
973+ vblank++;
974+ drm_handle_vblank(dev, i915_get_plane(dev, 0));
975+ }
976
977- DRM_WAKEUP(&dev->vbl_queue);
978- drm_vbl_send_signals(dev);
979+ I915_WRITE(PIPEASTAT, pipea_stats);
980+ }
981+ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
982+ pipeb_stats = I915_READ(PIPEBSTAT);
983+ /* Ack the event */
984+ I915_WRITE(PIPEBSTAT, pipeb_stats);
985+
986+ /* The vblank interrupt gets enabled even if we didn't ask for
987+ it, so make sure it's shut down again */
988+ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
989+ pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
990+ PIPE_VBLANK_INTERRUPT_ENABLE);
991+ else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
992+ PIPE_VBLANK_INTERRUPT_STATUS)) {
993+ vblank++;
994+ drm_handle_vblank(dev, i915_get_plane(dev, 1));
995+ }
996
997- if (dev_priv->swaps_pending > 0)
998- drm_locked_tasklet(dev, i915_vblank_tasklet);
999+ if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
1000+ opregion_asle_intr(dev);
1001+ I915_WRITE(PIPEBSTAT, pipeb_stats);
1002 }
1003
1004 if (iir & I915_ASLE_INTERRUPT)
1005 opregion_asle_intr(dev);
1006
1007- if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
1008- opregion_asle_intr(dev);
1009+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1010+
1011+ if (dev->pdev->msi_enabled)
1012+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
1013+ I915_WRITE(IIR, iir);
1014+ (void) I915_READ(IIR);
1015+
1016+ if (vblank && dev_priv->swaps_pending > 0)
1017+ drm_locked_tasklet(dev, i915_vblank_tasklet);
1018
1019 return IRQ_HANDLED;
1020 }
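The reordering in this hunk is deliberate: the PIPE(A|B)STAT source bits are cleared before IIR, because a still-asserted source status would immediately re-latch the IIR bit and fire a spurious second interrupt. The ack sequence, condensed to its shape (a sketch for pipe A only, assuming the usual I915_READ/I915_WRITE macros with dev_priv in scope):

	static void ack_pipe_a(drm_i915_private_t *dev_priv, u32 iir)
	{
		u32 stats = I915_READ(PIPEASTAT);

		I915_WRITE(PIPEASTAT, stats);	/* clear source events first */
		I915_WRITE(IIR, iir);		/* then the summary bits */
		(void) I915_READ(IIR);		/* flush posted writes */
	}
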
1021@@ -358,7 +459,7 @@ static void i915_user_irq_get(struct drm_device *dev)
1022 spin_unlock(&dev_priv->user_irq_lock);
1023 }
1024
1025-static void i915_user_irq_put(struct drm_device *dev)
1026+void i915_user_irq_put(struct drm_device *dev)
1027 {
1028 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1029
1030@@ -395,41 +496,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1031 }
1032
1033 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
1034- return ret;
1035-}
1036-
1037-static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
1038- atomic_t *counter)
1039-{
1040- drm_i915_private_t *dev_priv = dev->dev_private;
1041- unsigned int cur_vblank;
1042- int ret = 0;
1043-
1044- if (!dev_priv) {
1045- DRM_ERROR("called with no initialization\n");
1046- return -EINVAL;
1047- }
1048-
1049- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
1050- (((cur_vblank = atomic_read(counter))
1051- - *sequence) <= (1<<23)));
1052-
1053- *sequence = cur_vblank;
1054
1055 return ret;
1056 }
1057
1058-
1059-int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
1060-{
1061- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
1062-}
1063-
1064-int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
1065-{
1066- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
1067-}
1068-
1069 /* Needs the lock as it touches the ring.
1070 */
1071 int i915_irq_emit(struct drm_device *dev, void *data,
1072@@ -472,40 +542,88 @@ int i915_irq_wait(struct drm_device *dev, void *data,
1073 return i915_wait_irq(dev, irqwait->irq_seq);
1074 }
1075
1076+int i915_enable_vblank(struct drm_device *dev, int plane)
1077+{
1078+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1079+ int pipe = i915_get_pipe(dev, plane);
1080+ u32 pipestat_reg = 0;
1081+ u32 pipestat;
1082+
1083+ switch (pipe) {
1084+ case 0:
1085+ pipestat_reg = PIPEASTAT;
1086+ i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
1087+ break;
1088+ case 1:
1089+ pipestat_reg = PIPEBSTAT;
1090+ i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
1091+ break;
1092+ default:
1093+ DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
1094+ pipe);
1095+ break;
1096+ }
1097+
1098+ if (pipestat_reg) {
1099+ pipestat = I915_READ(pipestat_reg);
1100+ if (IS_I965G(dev))
1101+ pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
1102+ else
1103+ pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
1104+ /* Clear any stale interrupt status */
1105+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
1106+ PIPE_VBLANK_INTERRUPT_STATUS);
1107+ I915_WRITE(pipestat_reg, pipestat);
1108+ }
1109+
1110+ return 0;
1111+}
1112+
1113+void i915_disable_vblank(struct drm_device *dev, int plane)
1114+{
1115+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1116+ int pipe = i915_get_pipe(dev, plane);
1117+ u32 pipestat_reg = 0;
1118+ u32 pipestat;
1119+
1120+ switch (pipe) {
1121+ case 0:
1122+ pipestat_reg = PIPEASTAT;
1123+ i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
1124+ break;
1125+ case 1:
1126+ pipestat_reg = PIPEBSTAT;
1127+ i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
1128+ break;
1129+ default:
1130+ DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
1131+ pipe);
1132+ break;
1133+ }
1134+
1135+ if (pipestat_reg) {
1136+ pipestat = I915_READ(pipestat_reg);
1137+ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
1138+ PIPE_VBLANK_INTERRUPT_ENABLE);
1139+ /* Clear any stale interrupt status */
1140+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
1141+ PIPE_VBLANK_INTERRUPT_STATUS);
1142+ I915_WRITE(pipestat_reg, pipestat);
1143+ }
1144+}
1145+
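Neither hook is meant to be called directly: the core code added by this patch refcounts vblank interest through drm_vblank_get()/drm_vblank_put(), calling enable_vblank() on the first user and arming a disable timer when the last one drops out. A sketch of the calling pattern, using the core API declared in the drmP.h hunk below:

	/* Hold a vblank reference only for the duration of one wait (sketch). */
	static int wait_one_vblank(struct drm_device *dev, int crtc)
	{
		u32 target;
		int ret;

		ret = drm_vblank_get(dev, crtc);	/* may enable the IRQ */
		if (ret)
			return ret;

		target = drm_vblank_count(dev, crtc) + 1;
		/* ... sleep until drm_vblank_count(dev, crtc) reaches target ... */

		drm_vblank_put(dev, crtc);		/* may arm the disable timer */
		return 0;
	}
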
1146 /* Set the vblank monitor pipe
1147 */
1148 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
1149 struct drm_file *file_priv)
1150 {
1151 drm_i915_private_t *dev_priv = dev->dev_private;
1152- drm_i915_vblank_pipe_t *pipe = data;
1153- u32 enable_mask = 0, disable_mask = 0;
1154
1155 if (!dev_priv) {
1156 DRM_ERROR("called with no initialization\n");
1157 return -EINVAL;
1158 }
1159
1160- if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
1161- DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
1162- return -EINVAL;
1163- }
1164-
1165- if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
1166- enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1167- else
1168- disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1169-
1170- if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
1171- enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1172- else
1173- disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1174-
1175- i915_enable_irq(dev_priv, enable_mask);
1176- i915_disable_irq(dev_priv, disable_mask);
1177-
1178- dev_priv->vblank_pipe = pipe->pipe;
1179-
1180 return 0;
1181 }
1182
1183@@ -514,19 +632,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
1184 {
1185 drm_i915_private_t *dev_priv = dev->dev_private;
1186 drm_i915_vblank_pipe_t *pipe = data;
1187- u16 flag;
1188
1189 if (!dev_priv) {
1190 DRM_ERROR("called with no initialization\n");
1191 return -EINVAL;
1192 }
1193
1194- flag = I915_READ(IMR);
1195- pipe->pipe = 0;
1196- if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
1197- pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
1198- if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
1199- pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
1200+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1201
1202 return 0;
1203 }
1204@@ -540,9 +652,10 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1205 drm_i915_private_t *dev_priv = dev->dev_private;
1206 drm_i915_vblank_swap_t *swap = data;
1207 drm_i915_vbl_swap_t *vbl_swap;
1208- unsigned int pipe, seqtype, curseq;
1209+ unsigned int pipe, seqtype, curseq, plane;
1210 unsigned long irqflags;
1211 struct list_head *list;
1212+ int ret;
1213
1214 if (!dev_priv) {
1215 DRM_ERROR("%s called with no initialization\n", __func__);
1216@@ -560,7 +673,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1217 return -EINVAL;
1218 }
1219
1220- pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
1221+ plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
1222+ pipe = i915_get_pipe(dev, plane);
1223
1224 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
1225
1226@@ -579,7 +693,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1227
1228 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
1229
1230- curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
1231+ /*
1232+ * We take the ref here and put it when the swap actually completes
1233+ * in the tasklet.
1234+ */
1235+ ret = drm_vblank_get(dev, pipe);
1236+ if (ret)
1237+ return ret;
1238+ curseq = drm_vblank_count(dev, pipe);
1239
1240 if (seqtype == _DRM_VBLANK_RELATIVE)
1241 swap->sequence += curseq;
1242@@ -589,6 +710,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1243 swap->sequence = curseq + 1;
1244 } else {
1245 DRM_DEBUG("Missed target sequence\n");
1246+ drm_vblank_put(dev, pipe);
1247 return -EINVAL;
1248 }
1249 }
1250@@ -599,7 +721,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1251 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
1252
1253 if (vbl_swap->drw_id == swap->drawable &&
1254- vbl_swap->pipe == pipe &&
1255+ vbl_swap->plane == plane &&
1256 vbl_swap->sequence == swap->sequence) {
1257 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
1258 DRM_DEBUG("Already scheduled\n");
1259@@ -611,6 +733,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1260
1261 if (dev_priv->swaps_pending >= 100) {
1262 DRM_DEBUG("Too many swaps queued\n");
1263+ drm_vblank_put(dev, pipe);
1264 return -EBUSY;
1265 }
1266
1267@@ -618,13 +741,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1268
1269 if (!vbl_swap) {
1270 DRM_ERROR("Failed to allocate memory to queue swap\n");
1271+ drm_vblank_put(dev, pipe);
1272 return -ENOMEM;
1273 }
1274
1275 DRM_DEBUG("\n");
1276
1277 vbl_swap->drw_id = swap->drawable;
1278- vbl_swap->pipe = pipe;
1279+ vbl_swap->plane = plane;
1280 vbl_swap->sequence = swap->sequence;
1281
1282 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
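Note how every early return added after the drm_vblank_get() above is paired with a drm_vblank_put(): the reference taken at schedule time is otherwise dropped only when the tasklet completes the swap, so a missing put would keep the interrupt enabled forever. One way to keep that pairing visible, sketched with hypothetical swap_queue_full()/enqueue_swap() helpers rather than the patch's actual structure:

	extern int swap_queue_full(void);			/* hypothetical */
	extern int enqueue_swap(int pipe, unsigned int seq);	/* hypothetical */

	static int schedule_vblank_swap(struct drm_device *dev, int pipe,
					unsigned int seq)
	{
		int ret = drm_vblank_get(dev, pipe);

		if (ret)
			return ret;

		if (swap_queue_full()) {
			ret = -EBUSY;
			goto out_put;
		}
		if (enqueue_swap(pipe, seq)) {
			ret = -ENOMEM;
			goto out_put;
		}
		return 0;	/* reference dropped later by the tasklet */

	out_put:
		drm_vblank_put(dev, pipe);
		return ret;
	}
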
1283@@ -643,28 +767,32 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1284 {
1285 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1286
1287- I915_WRITE(HWSTAM, 0xfffe);
1288- I915_WRITE(IMR, 0x0);
1289+ I915_WRITE(HWSTAM, 0xeffe);
1290+ I915_WRITE(IMR, 0xffffffff);
1291 I915_WRITE(IER, 0x0);
1292 }
1293
1294-void i915_driver_irq_postinstall(struct drm_device * dev)
1295+int i915_driver_irq_postinstall(struct drm_device *dev)
1296 {
1297 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1298+ int ret, num_pipes = 2;
1299
1300 spin_lock_init(&dev_priv->swaps_lock);
1301 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
1302 dev_priv->swaps_pending = 0;
1303
1304- if (!dev_priv->vblank_pipe)
1305- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
1306-
1307 /* Set initial unmasked IRQs to just the selected vblank pipes. */
1308 dev_priv->irq_mask_reg = ~0;
1309- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
1310- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1311- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
1312- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1313+
1314+ ret = drm_vblank_init(dev, num_pipes);
1315+ if (ret)
1316+ return ret;
1317+
1318+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1319+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1320+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1321+
1322+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1323
1324 dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
1325
1326@@ -673,22 +801,29 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
1327 (void) I915_READ(IER);
1328
1329 opregion_enable_asle(dev);
1330-
1331 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
1332+
1333+ return 0;
1334 }
1335
1336 void i915_driver_irq_uninstall(struct drm_device * dev)
1337 {
1338 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1339- u16 temp;
1340+ u32 temp;
1341
1342 if (!dev_priv)
1343 return;
1344
1345- I915_WRITE(HWSTAM, 0xffff);
1346- I915_WRITE(IMR, 0xffff);
1347+ dev_priv->vblank_pipe = 0;
1348+
1349+ I915_WRITE(HWSTAM, 0xffffffff);
1350+ I915_WRITE(IMR, 0xffffffff);
1351 I915_WRITE(IER, 0x0);
1352
1353+ temp = I915_READ(PIPEASTAT);
1354+ I915_WRITE(PIPEASTAT, temp);
1355+ temp = I915_READ(PIPEBSTAT);
1356+ I915_WRITE(PIPEBSTAT, temp);
1357 temp = I915_READ(IIR);
1358 I915_WRITE(IIR, temp);
1359 }
1360diff --git a/include/drm/drm.h b/include/drm/drm.h
1361index 0864c69..15e5503 100644
1362--- a/include/drm/drm.h
1363+++ b/include/drm/drm.h
1364@@ -454,6 +454,7 @@ struct drm_irq_busid {
1365 enum drm_vblank_seq_type {
1366 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
1367 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
1368+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
1369 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
1370 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
1371 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
1372@@ -486,6 +487,19 @@ union drm_wait_vblank {
1373 struct drm_wait_vblank_reply reply;
1374 };
1375
1376+#define _DRM_PRE_MODESET 1
1377+#define _DRM_POST_MODESET 2
1378+
1379+/**
1380+ * DRM_IOCTL_MODESET_CTL ioctl argument type
1381+ *
1382+ * \sa drmModesetCtl().
1383+ */
1384+struct drm_modeset_ctl {
1385+ uint32_t crtc;
1386+ uint32_t cmd;
1387+};
1388+
1389 /**
1390 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
1391 *
1392@@ -570,6 +584,7 @@ struct drm_set_version {
1393 #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
1394 #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
1395 #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
1396+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
1397
1398 #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
1399 #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
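Userspace brackets a mode set with this ioctl (libdrm wraps it as drmModesetCtl()) so the core can account for the hardware frame counter being reset by the mode change. Direct usage, assuming an already-open DRM file descriptor (sketch):

	#include <sys/ioctl.h>
	#include <drm/drm.h>

	/* Bracket a mode set on CRTC 0 so vblank counts stay monotonic. */
	static void bracket_modeset(int drm_fd)
	{
		struct drm_modeset_ctl ctl = { .crtc = 0, .cmd = _DRM_PRE_MODESET };

		ioctl(drm_fd, DRM_IOCTL_MODESET_CTL, &ctl);
		/* ... program the new mode here ... */
		ctl.cmd = _DRM_POST_MODESET;
		ioctl(drm_fd, DRM_IOCTL_MODESET_CTL, &ctl);
	}
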
1400diff --git a/include/drm/drmP.h b/include/drm/drmP.h
1401index 1c1b13e..e79ce07 100644
1402--- a/include/drm/drmP.h
1403+++ b/include/drm/drmP.h
1404@@ -580,11 +580,54 @@ struct drm_driver {
1405 int (*kernel_context_switch) (struct drm_device *dev, int old,
1406 int new);
1407 void (*kernel_context_switch_unlock) (struct drm_device *dev);
1408- int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
1409- int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
1410 int (*dri_library_name) (struct drm_device *dev, char *buf);
1411
1412 /**
1413+ * get_vblank_counter - get raw hardware vblank counter
1414+ * @dev: DRM device
1415+ * @crtc: counter to fetch
1416+ *
1417+ * Driver callback for fetching a raw hardware vblank counter
1418+ * for @crtc. If a device doesn't have a hardware counter, the
1419+ * driver can simply return the value of drm_vblank_count and
1420+ * make the enable_vblank() and disable_vblank() hooks into no-ops,
1421+ * leaving interrupts enabled at all times.
1422+ *
1423+ * Wraparound handling and loss of events due to modesetting are dealt
1424+ * with in the DRM core code.
1425+ *
1426+ * RETURNS
1427+ * Raw vblank counter value.
1428+ */
1429+ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
1430+
1431+ /**
1432+ * enable_vblank - enable vblank interrupt events
1433+ * @dev: DRM device
1434+ * @crtc: which irq to enable
1435+ *
1436+ * Enable vblank interrupts for @crtc. If the device doesn't have
1437+ * a hardware vblank counter, this routine should be a no-op, since
1438+ * interrupts will have to stay on to keep the count accurate.
1439+ *
1440+ * RETURNS
1441+ * Zero on success, appropriate errno if the given @crtc's vblank
1442+ * interrupt cannot be enabled.
1443+ */
1444+ int (*enable_vblank) (struct drm_device *dev, int crtc);
1445+
1446+ /**
1447+ * disable_vblank - disable vblank interrupt events
1448+ * @dev: DRM device
1449+ * @crtc: which irq to disable
1450+ *
1451+ * Disable vblank interrupts for @crtc. If the device doesn't have
1452+ * a hardware vblank counter, this routine should be a no-op, since
1453+ * interrupts will have to stay on to keep the count accurate.
1454+ */
1455+ void (*disable_vblank) (struct drm_device *dev, int crtc);
1456+
1457+ /**
1458 * Called by \c drm_device_is_agp. Typically used to determine if a
1459 * card is really attached to AGP or not.
1460 *
1461@@ -601,7 +644,7 @@ struct drm_driver {
1462
1463 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
1464 void (*irq_preinstall) (struct drm_device *dev);
1465- void (*irq_postinstall) (struct drm_device *dev);
1466+ int (*irq_postinstall) (struct drm_device *dev);
1467 void (*irq_uninstall) (struct drm_device *dev);
1468 void (*reclaim_buffers) (struct drm_device *dev,
1469 struct drm_file * file_priv);
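For hardware without a usable frame counter, the get_vblank_counter comment above already spells out the fallback; made concrete, the three hooks reduce to the following sketch (names hypothetical):

	static u32 nohw_get_vblank_counter(struct drm_device *dev, int crtc)
	{
		return drm_vblank_count(dev, crtc);	/* software count */
	}

	static int nohw_enable_vblank(struct drm_device *dev, int crtc)
	{
		return 0;	/* interrupt stays on at all times */
	}

	static void nohw_disable_vblank(struct drm_device *dev, int crtc)
	{
		/* never disabled, so the software count stays accurate */
	}
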
1470@@ -730,13 +773,28 @@ struct drm_device {
1471 /** \name VBLANK IRQ support */
1472 /*@{ */
1473
1474- wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
1475- atomic_t vbl_received;
1476- atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
1477+ /*
1478+ * At load time, disabling the vblank interrupt won't be allowed since
1479+ * old clients may not call the modeset ioctl and therefore misbehave.
1480+ * Once the modeset ioctl *has* been called though, we can safely
1481+ * disable them when unused.
1482+ */
1483+ int vblank_disable_allowed;
1484+
1485+ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
1486+ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
1487 spinlock_t vbl_lock;
1488- struct list_head vbl_sigs; /**< signal list to send on VBLANK */
1489- struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
1490- unsigned int vbl_pending;
1491+ struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
1492+ atomic_t vbl_signal_pending; /* number of signals pending on all crtcs */
1493+ atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
1494+ u32 *last_vblank; /* protected by dev->vbl_lock, used */
1495+ /* for wraparound handling */
1496+ int *vblank_enabled; /* so we don't call enable more than
1497+ once per disable */
1498+ int *vblank_inmodeset; /* Display driver is setting mode */
1499+ struct timer_list vblank_disable_timer;
1500+
1501+ u32 max_vblank_count; /**< size of vblank counter register */
1502 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
1503 void (*locked_tasklet_func)(struct drm_device *dev);
1504
1505@@ -757,6 +815,7 @@ struct drm_device {
1506 struct pci_controller *hose;
1507 #endif
1508 struct drm_sg_mem *sg; /**< Scatter gather memory */
1509+ int num_crtcs; /**< Number of CRTCs on this device */
1510 void *dev_private; /**< device private data */
1511 struct drm_sigdata sigdata; /**< For block_all_signals */
1512 sigset_t sigmask;
1513@@ -990,10 +1049,19 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev);
1514 extern void drm_driver_irq_postinstall(struct drm_device *dev);
1515 extern void drm_driver_irq_uninstall(struct drm_device *dev);
1516
1517+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
1518 extern int drm_wait_vblank(struct drm_device *dev, void *data,
1519- struct drm_file *file_priv);
1520+ struct drm_file *filp);
1521 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
1522-extern void drm_vbl_send_signals(struct drm_device *dev);
1523+extern void drm_locked_tasklet(struct drm_device *dev,
1524+ void(*func)(struct drm_device *));
1525+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1526+extern void drm_handle_vblank(struct drm_device *dev, int crtc);
1527+extern int drm_vblank_get(struct drm_device *dev, int crtc);
1528+extern void drm_vblank_put(struct drm_device *dev, int crtc);
1529+/* Modesetting support */
1530+extern int drm_modeset_ctl(struct drm_device *dev, void *data,
1531+ struct drm_file *file_priv);
1532 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
1533
1534 /* AGP/GART support (drm_agpsupport.h) */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0012-Export-shmem_file_setup-for-DRM-GEM.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0012-Export-shmem_file_setup-for-DRM-GEM.patch
deleted file mode 100644
index 642d89ba76..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0012-Export-shmem_file_setup-for-DRM-GEM.patch
+++ /dev/null
@@ -1,25 +0,0 @@
1commit 48e13db26a25ebaf61f1fc28f612d6b35ddf1965
2Author: Keith Packard <keithp@keithp.com>
3Date: Fri Jun 20 00:08:06 2008 -0700
4
5 Export shmem_file_setup for DRM-GEM
6
7 GEM needs to create shmem files to back buffer objects. Though currently
8 creation of files for objects could have been driven from userland, the
9 modesetting work will require allocation of buffer objects before userland
10 is running, for boot-time message display.
11
12 Signed-off-by: Eric Anholt <eric@anholt.net>
13
14diff --git a/mm/shmem.c b/mm/shmem.c
15index 04fb4f1..515909d 100644
16--- a/mm/shmem.c
17+++ b/mm/shmem.c
18@@ -2582,6 +2582,7 @@ put_memory:
19 shmem_unacct_size(flags, size);
20 return ERR_PTR(error);
21 }
22+EXPORT_SYMBOL(shmem_file_setup);
23
24 /**
25 * shmem_zero_setup - setup a shared anonymous mapping
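The export exists so GEM can create these files from kernel context; drm_gem_object_alloc(), added later in this series, is the real consumer. A minimal sketch of the call (header placement per the 2.6.27-era tree is assumed):

	#include <linux/err.h>
	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Allocate a swap-backed struct file to back a buffer object,
	 * as drm_gem_object_alloc() does (sketch). */
	static struct file *alloc_backing_store(size_t size)
	{
		struct file *filp = shmem_file_setup("drm mm object", size, 0);

		return IS_ERR(filp) ? NULL : filp;
	}
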
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch
deleted file mode 100644
index cc90d46262..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch
+++ /dev/null
@@ -1,24 +0,0 @@
1commit 25eaa97fc74b225e13cf11ed8d770192ddc9355d
2Author: Eric Anholt <eric@anholt.net>
3Date: Thu Aug 21 12:53:33 2008 -0700
4
5 Export kmap_atomic_pfn for DRM-GEM.
6
7 The driver would like to map IO space directly for copying data in when
8 appropriate, to avoid CPU cache flushing for streaming writes.
9 kmap_atomic_pfn lets us avoid IPIs associated with ioremap for this process.
10
11 Signed-off-by: Eric Anholt <eric@anholt.net>
12
13diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
14index 165c871..d52e91d 100644
15--- a/arch/x86/mm/highmem_32.c
16+++ b/arch/x86/mm/highmem_32.c
17@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
18
19 return (void*) vaddr;
20 }
21+EXPORT_SYMBOL(kmap_atomic_pfn);
22
23 struct page *kmap_atomic_to_page(void *ptr)
24 {
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch
deleted file mode 100644
index 95cca5d0c6..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch
+++ /dev/null
@@ -1,5483 +0,0 @@
1commit c97398223c6a505fac2c783a624dc80e0aa5d5d0
2Author: Eric Anholt <eric@anholt.net>
3Date: Wed Jul 30 12:06:12 2008 -0700
4
5 drm: Add GEM ("graphics execution manager") to i915 driver.
6
7 GEM allows the creation of persistent buffer objects accessible by the
8 graphics device through new ioctls for managing execution of commands on the
9 device. The userland API is almost entirely driver-specific to ensure that
10 any driver building on this model can easily map the interface to individual
11 driver requirements.
12
13 GEM is used by the 2d driver for managing its internal state allocations and
14 will be used for pixmap storage to reduce memory consumption and enable
15 zero-copy GLX_EXT_texture_from_pixmap, and in the 3d driver is used to enable
16 GL_EXT_framebuffer_object and GL_ARB_pixel_buffer_object.
17
18 Signed-off-by: Eric Anholt <eric@anholt.net>
19
20diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
21index e9f9a97..74da994 100644
22--- a/drivers/gpu/drm/Makefile
23+++ b/drivers/gpu/drm/Makefile
24@@ -4,8 +4,9 @@
25
26 ccflags-y := -Iinclude/drm
27
28-drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
29- drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
30+drm-y := drm_auth.o drm_bufs.o drm_cache.o \
31+ drm_context.o drm_dma.o drm_drawable.o \
32+ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
33 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
34 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
35 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
36diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
37index aefa5ac..2639be2 100644
38--- a/drivers/gpu/drm/drm_agpsupport.c
39+++ b/drivers/gpu/drm/drm_agpsupport.c
40@@ -33,6 +33,7 @@
41
42 #include "drmP.h"
43 #include <linux/module.h>
44+#include <asm/agp.h>
45
46 #if __OS_HAS_AGP
47
48@@ -452,4 +453,52 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
49 return agp_unbind_memory(handle);
50 }
51
52-#endif /* __OS_HAS_AGP */
53+/**
54+ * Binds a collection of pages into AGP memory at the given offset, returning
55+ * the AGP memory structure containing them.
56+ *
57+ * No reference is held on the pages during this time -- it is up to the
58+ * caller to handle that.
59+ */
60+DRM_AGP_MEM *
61+drm_agp_bind_pages(struct drm_device *dev,
62+ struct page **pages,
63+ unsigned long num_pages,
64+ uint32_t gtt_offset)
65+{
66+ DRM_AGP_MEM *mem;
67+ int ret, i;
68+
69+ DRM_DEBUG("\n");
70+
71+ mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
72+ AGP_USER_MEMORY);
73+ if (mem == NULL) {
74+ DRM_ERROR("Failed to allocate memory for %ld pages\n",
75+ num_pages);
76+ return NULL;
77+ }
78+
79+ for (i = 0; i < num_pages; i++)
80+ mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
81+ mem->page_count = num_pages;
82+
83+ mem->is_flushed = true;
84+ ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
85+ if (ret != 0) {
86+ DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
87+ agp_free_memory(mem);
88+ return NULL;
89+ }
90+
91+ return mem;
92+}
93+EXPORT_SYMBOL(drm_agp_bind_pages);
94+
95+void drm_agp_chipset_flush(struct drm_device *dev)
96+{
97+ agp_flush_chipset(dev->agp->bridge);
98+}
99+EXPORT_SYMBOL(drm_agp_chipset_flush);
100+
101+#endif /* __OS_HAS_AGP */
102diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
103new file mode 100644
104index 0000000..9475f7d
105--- /dev/null
106+++ b/drivers/gpu/drm/drm_cache.c
107@@ -0,0 +1,76 @@
108+/**************************************************************************
109+ *
110+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
111+ * All Rights Reserved.
112+ *
113+ * Permission is hereby granted, free of charge, to any person obtaining a
114+ * copy of this software and associated documentation files (the
115+ * "Software"), to deal in the Software without restriction, including
116+ * without limitation the rights to use, copy, modify, merge, publish,
117+ * distribute, sub license, and/or sell copies of the Software, and to
118+ * permit persons to whom the Software is furnished to do so, subject to
119+ * the following conditions:
120+ *
121+ * The above copyright notice and this permission notice (including the
122+ * next paragraph) shall be included in all copies or substantial portions
123+ * of the Software.
124+ *
125+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
126+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
127+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
128+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
129+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
130+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
131+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
132+ *
133+ **************************************************************************/
134+/*
135+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
136+ */
137+
138+#include "drmP.h"
139+
140+#if defined(CONFIG_X86)
141+static void
142+drm_clflush_page(struct page *page)
143+{
144+ uint8_t *page_virtual;
145+ unsigned int i;
146+
147+ if (unlikely(page == NULL))
148+ return;
149+
150+ page_virtual = kmap_atomic(page, KM_USER0);
151+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
152+ clflush(page_virtual + i);
153+ kunmap_atomic(page_virtual, KM_USER0);
154+}
155+#endif
156+
157+static void
158+drm_clflush_ipi_handler(void *null)
159+{
160+ wbinvd();
161+}
162+
163+void
164+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
165+{
166+
167+#if defined(CONFIG_X86)
168+ if (cpu_has_clflush) {
169+ unsigned long i;
170+
171+ mb();
172+ for (i = 0; i < num_pages; ++i)
173+ drm_clflush_page(*pages++);
174+ mb();
175+
176+ return;
177+ }
178+#endif
179+
180+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
181+ DRM_ERROR("Timed out waiting for cache flush.\n");
182+}
183+EXPORT_SYMBOL(drm_clflush_pages);
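drm_clflush_pages() uses per-cacheline clflush where the CPU supports it and otherwise broadcasts a wbinvd to every CPU via IPI, which is why callers flush whole page arrays in a single call. Typical caller shape, with a hypothetical buffer-object struct standing in for the driver's real bookkeeping:

	/* Hypothetical object layout; the driver keeps equivalent fields. */
	struct my_bo {
		struct page **pages;
		unsigned long page_count;
	};

	/* Push CPU-cached writes to memory before the GPU reads the pages
	 * through the GTT (sketch). */
	static void my_bo_flush_for_gpu(struct my_bo *bo)
	{
		drm_clflush_pages(bo->pages, bo->page_count);
	}
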
184diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
185index fb45fe7..96f416a 100644
186--- a/drivers/gpu/drm/drm_drv.c
187+++ b/drivers/gpu/drm/drm_drv.c
188@@ -119,6 +119,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
189 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
190
191 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
192+
193+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
194+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
195+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
196 };
197
198 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
199diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
200index dcf8b4d..0d46627 100644
201--- a/drivers/gpu/drm/drm_fops.c
202+++ b/drivers/gpu/drm/drm_fops.c
203@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
204
205 INIT_LIST_HEAD(&priv->lhead);
206
207+ if (dev->driver->driver_features & DRIVER_GEM)
208+ drm_gem_open(dev, priv);
209+
210 if (dev->driver->open) {
211 ret = dev->driver->open(dev, priv);
212 if (ret < 0)
213@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
214 dev->driver->reclaim_buffers(dev, file_priv);
215 }
216
217+ if (dev->driver->driver_features & DRIVER_GEM)
218+ drm_gem_release(dev, file_priv);
219+
220 drm_fasync(-1, filp, 0);
221
222 mutex_lock(&dev->ctxlist_mutex);
223diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
224new file mode 100644
225index 0000000..434155b
226--- /dev/null
227+++ b/drivers/gpu/drm/drm_gem.c
228@@ -0,0 +1,420 @@
229+/*
230+ * Copyright © 2008 Intel Corporation
231+ *
232+ * Permission is hereby granted, free of charge, to any person obtaining a
233+ * copy of this software and associated documentation files (the "Software"),
234+ * to deal in the Software without restriction, including without limitation
235+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
236+ * and/or sell copies of the Software, and to permit persons to whom the
237+ * Software is furnished to do so, subject to the following conditions:
238+ *
239+ * The above copyright notice and this permission notice (including the next
240+ * paragraph) shall be included in all copies or substantial portions of the
241+ * Software.
242+ *
243+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
244+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
245+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
246+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
247+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
248+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
249+ * IN THE SOFTWARE.
250+ *
251+ * Authors:
252+ * Eric Anholt <eric@anholt.net>
253+ *
254+ */
255+
256+#include <linux/types.h>
257+#include <linux/slab.h>
258+#include <linux/mm.h>
259+#include <linux/uaccess.h>
260+#include <linux/fs.h>
261+#include <linux/file.h>
262+#include <linux/module.h>
263+#include <linux/mman.h>
264+#include <linux/pagemap.h>
265+#include "drmP.h"
266+
267+/** @file drm_gem.c
268+ *
269+ * This file provides some of the base ioctls and library routines for
270+ * the graphics memory manager implemented by each device driver.
271+ *
272+ * Because various devices have different requirements in terms of
273+ * synchronization and migration strategies, implementing that is left up to
274+ * the driver, and all that the general API provides should be generic --
275+ * allocating objects, reading/writing data with the cpu, freeing objects.
276+ * Even there, platform-dependent optimizations for reading/writing data with
277+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
278+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
279+ *
280+ * The goal was to have swap-backed object allocation managed through
281+ * struct file. However, file descriptors as handles to a struct file have
282+ * two major failings:
283+ * - Process limits prevent more than 1024 or so being used at a time by
284+ * default.
285+ * - Inability to allocate high fds will aggravate the X Server's select()
286+ * handling, and likely that of many GL client applications as well.
287+ *
288+ * This led to a plan of using our own integer IDs (called handles, following
289+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
290+ * ioctls. The objects themselves will still include the struct file so
291+ * that we can transition to fds if the required kernel infrastructure shows
292+ * up at a later date, and as our interface with shmfs for memory allocation.
293+ */
294+
295+/**
296+ * Initialize the GEM device fields
297+ */
298+
299+int
300+drm_gem_init(struct drm_device *dev)
301+{
302+ spin_lock_init(&dev->object_name_lock);
303+ idr_init(&dev->object_name_idr);
304+ atomic_set(&dev->object_count, 0);
305+ atomic_set(&dev->object_memory, 0);
306+ atomic_set(&dev->pin_count, 0);
307+ atomic_set(&dev->pin_memory, 0);
308+ atomic_set(&dev->gtt_count, 0);
309+ atomic_set(&dev->gtt_memory, 0);
310+ return 0;
311+}
312+
313+/**
314+ * Allocate a GEM object of the specified size with shmfs backing store
315+ */
316+struct drm_gem_object *
317+drm_gem_object_alloc(struct drm_device *dev, size_t size)
318+{
319+ struct drm_gem_object *obj;
320+
321+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
322+
323+ obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
324+
325+ obj->dev = dev;
326+ obj->filp = shmem_file_setup("drm mm object", size, 0);
327+ if (IS_ERR(obj->filp)) {
328+ kfree(obj);
329+ return NULL;
330+ }
331+
332+ kref_init(&obj->refcount);
333+ kref_init(&obj->handlecount);
334+ obj->size = size;
335+ if (dev->driver->gem_init_object != NULL &&
336+ dev->driver->gem_init_object(obj) != 0) {
337+ fput(obj->filp);
338+ kfree(obj);
339+ return NULL;
340+ }
341+ atomic_inc(&dev->object_count);
342+ atomic_add(obj->size, &dev->object_memory);
343+ return obj;
344+}
345+EXPORT_SYMBOL(drm_gem_object_alloc);
346+
347+/**
348+ * Removes the mapping from handle to filp for this object.
349+ */
350+static int
351+drm_gem_handle_delete(struct drm_file *filp, int handle)
352+{
353+ struct drm_device *dev;
354+ struct drm_gem_object *obj;
355+
356+ /* This is gross. The idr system doesn't let us try a delete and
357+ * return an error code. It just spews if you fail at deleting.
358+ * So, we have to grab a lock around finding the object and then
359+ * doing the delete on it and dropping the refcount, or the user
360+ * could race us to double-decrement the refcount and cause a
361+ * use-after-free later. Given the frequency of our handle lookups,
362+ * we may want to use ida for number allocation and a hash table
363+ * for the pointers, anyway.
364+ */
365+ spin_lock(&filp->table_lock);
366+
367+ /* Check if we currently have a reference on the object */
368+ obj = idr_find(&filp->object_idr, handle);
369+ if (obj == NULL) {
370+ spin_unlock(&filp->table_lock);
371+ return -EINVAL;
372+ }
373+ dev = obj->dev;
374+
375+ /* Release reference and decrement refcount. */
376+ idr_remove(&filp->object_idr, handle);
377+ spin_unlock(&filp->table_lock);
378+
379+ mutex_lock(&dev->struct_mutex);
380+ drm_gem_object_handle_unreference(obj);
381+ mutex_unlock(&dev->struct_mutex);
382+
383+ return 0;
384+}
385+
386+/**
387+ * Create a handle for this object. This adds a handle reference
388+ * to the object, which includes a regular reference count. Callers
389+ * will likely want to dereference the object afterwards.
390+ */
391+int
392+drm_gem_handle_create(struct drm_file *file_priv,
393+ struct drm_gem_object *obj,
394+ int *handlep)
395+{
396+ int ret;
397+
398+ /*
399+ * Get the user-visible handle using idr.
400+ */
401+again:
402+ /* ensure there is space available to allocate a handle */
403+ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
404+ return -ENOMEM;
405+
406+ /* do the allocation under our spinlock */
407+ spin_lock(&file_priv->table_lock);
408+ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
409+ spin_unlock(&file_priv->table_lock);
410+ if (ret == -EAGAIN)
411+ goto again;
412+
413+ if (ret != 0)
414+ return ret;
415+
416+ drm_gem_object_handle_reference(obj);
417+ return 0;
418+}
419+EXPORT_SYMBOL(drm_gem_handle_create);
420+
421+/** Returns a reference to the object named by the handle. */
422+struct drm_gem_object *
423+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
424+ int handle)
425+{
426+ struct drm_gem_object *obj;
427+
428+ spin_lock(&filp->table_lock);
429+
430+ /* Check if we currently have a reference on the object */
431+ obj = idr_find(&filp->object_idr, handle);
432+ if (obj == NULL) {
433+ spin_unlock(&filp->table_lock);
434+ return NULL;
435+ }
436+
437+ drm_gem_object_reference(obj);
438+
439+ spin_unlock(&filp->table_lock);
440+
441+ return obj;
442+}
443+EXPORT_SYMBOL(drm_gem_object_lookup);
444+
445+/**
446+ * Releases the handle to an mm object.
447+ */
448+int
449+drm_gem_close_ioctl(struct drm_device *dev, void *data,
450+ struct drm_file *file_priv)
451+{
452+ struct drm_gem_close *args = data;
453+ int ret;
454+
455+ if (!(dev->driver->driver_features & DRIVER_GEM))
456+ return -ENODEV;
457+
458+ ret = drm_gem_handle_delete(file_priv, args->handle);
459+
460+ return ret;
461+}
462+
463+/**
464+ * Create a global name for an object, returning the name.
465+ *
466+ * Note that the name does not hold a reference; when the object
467+ * is freed, the name goes away.
468+ */
469+int
470+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
471+ struct drm_file *file_priv)
472+{
473+ struct drm_gem_flink *args = data;
474+ struct drm_gem_object *obj;
475+ int ret;
476+
477+ if (!(dev->driver->driver_features & DRIVER_GEM))
478+ return -ENODEV;
479+
480+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
481+ if (obj == NULL)
482+ return -EINVAL;
483+
484+again:
485+ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
486+ return -ENOMEM;
487+
488+ spin_lock(&dev->object_name_lock);
489+ if (obj->name) {
490+ spin_unlock(&dev->object_name_lock);
491+ return -EEXIST;
492+ }
493+ ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
494+ &obj->name);
495+ spin_unlock(&dev->object_name_lock);
496+ if (ret == -EAGAIN)
497+ goto again;
498+
499+ if (ret != 0) {
500+ mutex_lock(&dev->struct_mutex);
501+ drm_gem_object_unreference(obj);
502+ mutex_unlock(&dev->struct_mutex);
503+ return ret;
504+ }
505+
506+ /*
507+ * Leave the reference from the lookup around as the
508+ * name table now holds one
509+ */
510+ args->name = (uint64_t) obj->name;
511+
512+ return 0;
513+}
514+
515+/**
516+ * Open an object using the global name, returning a handle and the size.
517+ *
518+ * This handle (of course) holds a reference to the object, so the object
519+ * will not go away until the handle is deleted.
520+ */
521+int
522+drm_gem_open_ioctl(struct drm_device *dev, void *data,
523+ struct drm_file *file_priv)
524+{
525+ struct drm_gem_open *args = data;
526+ struct drm_gem_object *obj;
527+ int ret;
528+ int handle;
529+
530+ if (!(dev->driver->driver_features & DRIVER_GEM))
531+ return -ENODEV;
532+
533+ spin_lock(&dev->object_name_lock);
534+ obj = idr_find(&dev->object_name_idr, (int) args->name);
535+ if (obj)
536+ drm_gem_object_reference(obj);
537+ spin_unlock(&dev->object_name_lock);
538+ if (!obj)
539+ return -ENOENT;
540+
541+ ret = drm_gem_handle_create(file_priv, obj, &handle);
542+ mutex_lock(&dev->struct_mutex);
543+ drm_gem_object_unreference(obj);
544+ mutex_unlock(&dev->struct_mutex);
545+ if (ret)
546+ return ret;
547+
548+ args->handle = handle;
549+ args->size = obj->size;
550+
551+ return 0;
552+}
553+
554+/**
555+ * Called at device open time, sets up the structure for handling refcounting
556+ * of mm objects.
557+ */
558+void
559+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
560+{
561+ idr_init(&file_private->object_idr);
562+ spin_lock_init(&file_private->table_lock);
563+}
564+
565+/**
566+ * Called at device close to release the file's
567+ * handle references on objects.
568+ */
569+static int
570+drm_gem_object_release_handle(int id, void *ptr, void *data)
571+{
572+ struct drm_gem_object *obj = ptr;
573+
574+ drm_gem_object_handle_unreference(obj);
575+
576+ return 0;
577+}
578+
579+/**
580+ * Called at close time when the filp is going away.
581+ *
582+ * Releases any remaining references on objects by this filp.
583+ */
584+void
585+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
586+{
587+ mutex_lock(&dev->struct_mutex);
588+ idr_for_each(&file_private->object_idr,
589+ &drm_gem_object_release_handle, NULL);
590+
591+ idr_destroy(&file_private->object_idr);
592+ mutex_unlock(&dev->struct_mutex);
593+}
594+
595+/**
596+ * Called after the last reference to the object has been lost.
597+ *
598+ * Frees the object
599+ */
600+void
601+drm_gem_object_free(struct kref *kref)
602+{
603+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
604+ struct drm_device *dev = obj->dev;
605+
606+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
607+
608+ if (dev->driver->gem_free_object != NULL)
609+ dev->driver->gem_free_object(obj);
610+
611+ fput(obj->filp);
612+ atomic_dec(&dev->object_count);
613+ atomic_sub(obj->size, &dev->object_memory);
614+ kfree(obj);
615+}
616+EXPORT_SYMBOL(drm_gem_object_free);
617+
618+/**
619+ * Called after the last handle to the object has been closed
620+ *
621+ * Removes any name for the object. Note that this must be
622+ * called before drm_gem_object_free or we'll be touching
623+ * freed memory
624+ */
625+void
626+drm_gem_object_handle_free(struct kref *kref)
627+{
628+ struct drm_gem_object *obj = container_of(kref,
629+ struct drm_gem_object,
630+ handlecount);
631+ struct drm_device *dev = obj->dev;
632+
633+ /* Remove any name for this object */
634+ spin_lock(&dev->object_name_lock);
635+ if (obj->name) {
636+ idr_remove(&dev->object_name_idr, obj->name);
637+ spin_unlock(&dev->object_name_lock);
638+ /*
639+ * The object name held a reference to this object, drop
640+ * that now.
641+ */
642+ drm_gem_object_unreference(obj);
643+ } else
644+ spin_unlock(&dev->object_name_lock);
645+
646+}
647+EXPORT_SYMBOL(drm_gem_object_handle_free);
648+
649diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
650index 0177012..803bc9e 100644
651--- a/drivers/gpu/drm/drm_memory.c
652+++ b/drivers/gpu/drm/drm_memory.c
653@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
654 {
655 return drm_agp_free_memory(handle) ? 0 : -EINVAL;
656 }
657+EXPORT_SYMBOL(drm_free_agp);
658
659 /** Wrapper around agp_bind_memory() */
660 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
661@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
662 {
663 return drm_agp_unbind_memory(handle);
664 }
665+EXPORT_SYMBOL(drm_unbind_agp);
666
667 #else /* __OS_HAS_AGP */
668 static inline void *agp_remap(unsigned long offset, unsigned long size,
669diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
670index dcff9e9..217ad7d 100644
671--- a/drivers/gpu/drm/drm_mm.c
672+++ b/drivers/gpu/drm/drm_mm.c
673@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
674
675 return child;
676 }
677+EXPORT_SYMBOL(drm_mm_get_block);
678
679 /*
680 * Put a block. Merge with the previous and / or next block if they are free.
681@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
682 drm_free(cur, sizeof(*cur), DRM_MEM_MM);
683 }
684 }
685+EXPORT_SYMBOL(drm_mm_put_block);
686
687 struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
688 unsigned long size,
689@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
690
691 return (head->next->next == head);
692 }
693+EXPORT_SYMBOL(drm_mm_search_free);
694
695 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
696 {
697@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
698
699 return drm_mm_create_tail_node(mm, start, size);
700 }
701-
702+EXPORT_SYMBOL(drm_mm_init);
703
704 void drm_mm_takedown(struct drm_mm * mm)
705 {
706diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
707index 93b1e04..d490db4 100644
708--- a/drivers/gpu/drm/drm_proc.c
709+++ b/drivers/gpu/drm/drm_proc.c
710@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
711 int request, int *eof, void *data);
712 static int drm_bufs_info(char *buf, char **start, off_t offset,
713 int request, int *eof, void *data);
714+static int drm_gem_name_info(char *buf, char **start, off_t offset,
715+ int request, int *eof, void *data);
716+static int drm_gem_object_info(char *buf, char **start, off_t offset,
717+ int request, int *eof, void *data);
718 #if DRM_DEBUG_CODE
719 static int drm_vma_info(char *buf, char **start, off_t offset,
720 int request, int *eof, void *data);
721@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
722 static struct drm_proc_list {
723 const char *name; /**< file name */
724 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
725+ u32 driver_features; /**< Required driver features for this entry */
726 } drm_proc_list[] = {
727- {"name", drm_name_info},
728- {"mem", drm_mem_info},
729- {"vm", drm_vm_info},
730- {"clients", drm_clients_info},
731- {"queues", drm_queues_info},
732- {"bufs", drm_bufs_info},
733+ {"name", drm_name_info, 0},
734+ {"mem", drm_mem_info, 0},
735+ {"vm", drm_vm_info, 0},
736+ {"clients", drm_clients_info, 0},
737+ {"queues", drm_queues_info, 0},
738+ {"bufs", drm_bufs_info, 0},
739+ {"gem_names", drm_gem_name_info, DRIVER_GEM},
740+ {"gem_objects", drm_gem_object_info, DRIVER_GEM},
741 #if DRM_DEBUG_CODE
742 {"vma", drm_vma_info},
743 #endif
744@@ -90,8 +97,9 @@ static struct drm_proc_list {
745 int drm_proc_init(struct drm_minor *minor, int minor_id,
746 struct proc_dir_entry *root)
747 {
748+ struct drm_device *dev = minor->dev;
749 struct proc_dir_entry *ent;
750- int i, j;
751+ int i, j, ret;
752 char name[64];
753
754 sprintf(name, "%d", minor_id);
755@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
756 }
757
758 for (i = 0; i < DRM_PROC_ENTRIES; i++) {
759+ u32 features = drm_proc_list[i].driver_features;
760+
761+ if (features != 0 &&
762+ (dev->driver->driver_features & features) != features)
763+ continue;
764+
765 ent = create_proc_entry(drm_proc_list[i].name,
766 S_IFREG | S_IRUGO, minor->dev_root);
767 if (!ent) {
768 DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
769 name, drm_proc_list[i].name);
770- for (j = 0; j < i; j++)
771- remove_proc_entry(drm_proc_list[i].name,
772- minor->dev_root);
773- remove_proc_entry(name, root);
774- minor->dev_root = NULL;
775- return -1;
776+ ret = -1;
777+ goto fail;
778 }
779 ent->read_proc = drm_proc_list[i].f;
780 ent->data = minor;
781 }
782
783+ if (dev->driver->proc_init) {
784+ ret = dev->driver->proc_init(minor);
785+ if (ret) {
786+ DRM_ERROR("DRM: Driver failed to initialize "
787+ "/proc/dri.\n");
788+ goto fail;
789+ }
790+ }
791+
792 return 0;
793+ fail:
794+
795+ for (j = 0; j < i; j++)
796+ remove_proc_entry(drm_proc_list[i].name,
797+ minor->dev_root);
798+ remove_proc_entry(name, root);
799+ minor->dev_root = NULL;
800+ return ret;
801 }
802
803 /**
804@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
805 */
806 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
807 {
808+ struct drm_device *dev = minor->dev;
809 int i;
810 char name[64];
811
812 if (!root || !minor->dev_root)
813 return 0;
814
815+ if (dev->driver->proc_cleanup)
816+ dev->driver->proc_cleanup(minor);
817+
818 for (i = 0; i < DRM_PROC_ENTRIES; i++)
819 remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
820 sprintf(name, "%d", minor->index);
821@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
822 return ret;
823 }
824
825+struct drm_gem_name_info_data {
826+ int len;
827+ char *buf;
828+ int eof;
829+};
830+
831+static int drm_gem_one_name_info(int id, void *ptr, void *data)
832+{
833+ struct drm_gem_object *obj = ptr;
834+ struct drm_gem_name_info_data *nid = data;
835+
836+ DRM_INFO("name %d size %d\n", obj->name, obj->size);
837+ if (nid->eof)
838+ return 0;
839+
840+ nid->len += sprintf(&nid->buf[nid->len],
841+ "%6d%9d%8d%9d\n",
842+ obj->name, obj->size,
843+ atomic_read(&obj->handlecount.refcount),
844+ atomic_read(&obj->refcount.refcount));
845+ if (nid->len > DRM_PROC_LIMIT) {
846+ nid->eof = 1;
847+ return 0;
848+ }
849+ return 0;
850+}
851+
852+static int drm_gem_name_info(char *buf, char **start, off_t offset,
853+ int request, int *eof, void *data)
854+{
855+ struct drm_minor *minor = (struct drm_minor *) data;
856+ struct drm_device *dev = minor->dev;
857+ struct drm_gem_name_info_data nid;
858+
859+ if (offset > DRM_PROC_LIMIT) {
860+ *eof = 1;
861+ return 0;
862+ }
863+
864+ nid.len = sprintf(buf, " name size handles refcount\n");
865+ nid.buf = buf;
866+ nid.eof = 0;
867+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
868+
869+ *start = &buf[offset];
870+ *eof = 0;
871+ if (nid.len > request + offset)
872+ return request;
873+ *eof = 1;
874+ return nid.len - offset;
875+}
876+
877+static int drm_gem_object_info(char *buf, char **start, off_t offset,
878+ int request, int *eof, void *data)
879+{
880+ struct drm_minor *minor = (struct drm_minor *) data;
881+ struct drm_device *dev = minor->dev;
882+ int len = 0;
883+
884+ if (offset > DRM_PROC_LIMIT) {
885+ *eof = 1;
886+ return 0;
887+ }
888+
889+ *start = &buf[offset];
890+ *eof = 0;
891+ DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
892+ DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
893+ DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
894+ DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
895+ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
896+ DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
897+ if (len > request + offset)
898+ return request;
899+ *eof = 1;
900+ return len - offset;
901+}
902+
903 #if DRM_DEBUG_CODE
904
905 static int drm__vma_info(char *buf, char **start, off_t offset, int request,
906diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
907index c2f584f..82f4657 100644
908--- a/drivers/gpu/drm/drm_stub.c
909+++ b/drivers/gpu/drm/drm_stub.c
910@@ -152,6 +152,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
911 goto error_out_unreg;
912 }
913
914+ if (driver->driver_features & DRIVER_GEM) {
915+ retcode = drm_gem_init(dev);
916+ if (retcode) {
917+ DRM_ERROR("Cannot initialize graphics execution "
918+ "manager (GEM)\n");
919+ goto error_out_unreg;
920+ }
921+ }
922+
923 return 0;
924
925 error_out_unreg:
926@@ -317,6 +326,7 @@ int drm_put_dev(struct drm_device * dev)
927 int drm_put_minor(struct drm_minor **minor_p)
928 {
929 struct drm_minor *minor = *minor_p;
930+
931 DRM_DEBUG("release secondary minor %d\n", minor->index);
932
933 if (minor->type == DRM_MINOR_LEGACY)
934diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
935index c4bbda6..5ba78e4 100644
936--- a/drivers/gpu/drm/i915/Makefile
937+++ b/drivers/gpu/drm/i915/Makefile
938@@ -4,7 +4,11 @@
939
940 ccflags-y := -Iinclude/drm
941 i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
942- i915_suspend.o
943+ i915_suspend.o \
944+ i915_gem.o \
945+ i915_gem_debug.o \
946+ i915_gem_proc.o \
947+ i915_gem_tiling.o
948
949 i915-$(CONFIG_COMPAT) += i915_ioc32.o
950
951diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
952index 8609ec2..3b5aa74 100644
953--- a/drivers/gpu/drm/i915/i915_dma.c
954+++ b/drivers/gpu/drm/i915/i915_dma.c
955@@ -170,24 +170,31 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
956 dev_priv->sarea_priv = (drm_i915_sarea_t *)
957 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
958
959- dev_priv->ring.Start = init->ring_start;
960- dev_priv->ring.End = init->ring_end;
961- dev_priv->ring.Size = init->ring_size;
962- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
963+ if (init->ring_size != 0) {
964+ if (dev_priv->ring.ring_obj != NULL) {
965+ i915_dma_cleanup(dev);
966+ DRM_ERROR("Client tried to initialize ringbuffer in "
967+ "GEM mode\n");
968+ return -EINVAL;
969+ }
970
971- dev_priv->ring.map.offset = init->ring_start;
972- dev_priv->ring.map.size = init->ring_size;
973- dev_priv->ring.map.type = 0;
974- dev_priv->ring.map.flags = 0;
975- dev_priv->ring.map.mtrr = 0;
976+ dev_priv->ring.Size = init->ring_size;
977+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
978
979- drm_core_ioremap(&dev_priv->ring.map, dev);
980+ dev_priv->ring.map.offset = init->ring_start;
981+ dev_priv->ring.map.size = init->ring_size;
982+ dev_priv->ring.map.type = 0;
983+ dev_priv->ring.map.flags = 0;
984+ dev_priv->ring.map.mtrr = 0;
985
986- if (dev_priv->ring.map.handle == NULL) {
987- i915_dma_cleanup(dev);
988- DRM_ERROR("can not ioremap virtual address for"
989- " ring buffer\n");
990- return -ENOMEM;
991+ drm_core_ioremap(&dev_priv->ring.map, dev);
992+
993+ if (dev_priv->ring.map.handle == NULL) {
994+ i915_dma_cleanup(dev);
995+ DRM_ERROR("can not ioremap virtual address for"
996+ " ring buffer\n");
997+ return -ENOMEM;
998+ }
999 }
1000
1001 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
1002@@ -377,9 +384,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
1003 return 0;
1004 }
1005
1006-static int i915_emit_box(struct drm_device * dev,
1007- struct drm_clip_rect __user * boxes,
1008- int i, int DR1, int DR4)
1009+int
1010+i915_emit_box(struct drm_device *dev,
1011+ struct drm_clip_rect __user *boxes,
1012+ int i, int DR1, int DR4)
1013 {
1014 drm_i915_private_t *dev_priv = dev->dev_private;
1015 struct drm_clip_rect box;
1016@@ -681,6 +689,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
1017 case I915_PARAM_LAST_DISPATCH:
1018 value = READ_BREADCRUMB(dev_priv);
1019 break;
1020+ case I915_PARAM_HAS_GEM:
1021+ value = 1;
1022+ break;
1023 default:
1024 DRM_ERROR("Unknown parameter %d\n", param->param);
1025 return -EINVAL;
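Userspace can use the new parameter to detect GEM before touching any of its
ioctls. A hypothetical libdrm-style probe (sketch; pre-GEM kernels reject the
parameter with -EINVAL):

#include <sys/ioctl.h>
#include "i915_drm.h"

static int has_gem(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &value;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* old kernel: parameter unknown */
	return value;
}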
1026@@ -784,6 +795,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1027 memset(dev_priv, 0, sizeof(drm_i915_private_t));
1028
1029 dev->dev_private = (void *)dev_priv;
1030+ dev_priv->dev = dev;
1031
1032 /* Add register map (needed for suspend/resume) */
1033 base = drm_get_resource_start(dev, mmio_bar);
1034@@ -793,6 +805,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1035 _DRM_KERNEL | _DRM_DRIVER,
1036 &dev_priv->mmio_map);
1037
1038+ i915_gem_load(dev);
1039+
1040 /* Init HWS */
1041 if (!I915_NEED_GFX_HWS(dev)) {
1042 ret = i915_init_phys_hws(dev);
1043@@ -838,6 +852,25 @@ int i915_driver_unload(struct drm_device *dev)
1044 return 0;
1045 }
1046
1047+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1048+{
1049+ struct drm_i915_file_private *i915_file_priv;
1050+
1051+ DRM_DEBUG("\n");
1052+ i915_file_priv = (struct drm_i915_file_private *)
1053+ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
1054+
1055+ if (!i915_file_priv)
1056+ return -ENOMEM;
1057+
1058+ file_priv->driver_priv = i915_file_priv;
1059+
1060+ i915_file_priv->mm.last_gem_seqno = 0;
1061+ i915_file_priv->mm.last_gem_throttle_seqno = 0;
1062+
1063+ return 0;
1064+}
1065+
1066 void i915_driver_lastclose(struct drm_device * dev)
1067 {
1068 drm_i915_private_t *dev_priv = dev->dev_private;
1069@@ -845,6 +878,8 @@ void i915_driver_lastclose(struct drm_device * dev)
1070 if (!dev_priv)
1071 return;
1072
1073+ i915_gem_lastclose(dev);
1074+
1075 if (dev_priv->agp_heap)
1076 i915_mem_takedown(&(dev_priv->agp_heap));
1077
1078@@ -857,6 +892,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1079 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
1080 }
1081
1082+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1083+{
1084+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1085+
1086+ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
1087+}
1088+
1089 struct drm_ioctl_desc i915_ioctls[] = {
1090 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1091 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1092@@ -875,6 +917,22 @@ struct drm_ioctl_desc i915_ioctls[] = {
1093 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
1094 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1095 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
1096+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
1097+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
1098+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1099+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1100+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
1101+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
1102+ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
1103+ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
1104+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
1105+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
1106+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
1107+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
1108+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
1109+ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
1110+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
1111+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
1112 };
1113
1114 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
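To show how the new table entries fit together from userspace, here is a
hypothetical sequence that creates a 4 KiB object, fills it through pwrite,
and drops the handle again (a sketch assuming the uapi structures this patch
adds to i915_drm.h and the generic DRM_IOCTL_GEM_CLOSE):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static int create_fill_close(int fd)
{
	struct drm_i915_gem_create create = { .size = 4096 };
	struct drm_i915_gem_pwrite pwrite;
	struct drm_gem_close close_args;
	char data[4096];

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return -1;			/* no handle allocated */

	memset(data, 0xa5, sizeof(data));
	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = create.handle;
	pwrite.size = sizeof(data);
	pwrite.data_ptr = (uintptr_t)data;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
		return -1;

	memset(&close_args, 0, sizeof(close_args));
	close_args.handle = create.handle;
	return ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
}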
1115diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1116index 37af03f..a80ead2 100644
1117--- a/drivers/gpu/drm/i915/i915_drv.c
1118+++ b/drivers/gpu/drm/i915/i915_drv.c
1119@@ -85,12 +85,15 @@ static struct drm_driver driver = {
1120 /* don't use mtrr's here, the Xserver or user space app should
1121 * deal with them for intel hardware.
1122 */
1123- .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
1124- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
1125+ .driver_features =
1126+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
1127+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
1128 .load = i915_driver_load,
1129 .unload = i915_driver_unload,
1130+ .open = i915_driver_open,
1131 .lastclose = i915_driver_lastclose,
1132 .preclose = i915_driver_preclose,
1133+ .postclose = i915_driver_postclose,
1134 .suspend = i915_suspend,
1135 .resume = i915_resume,
1136 .device_is_agp = i915_driver_device_is_agp,
1137@@ -104,6 +107,10 @@ static struct drm_driver driver = {
1138 .reclaim_buffers = drm_core_reclaim_buffers,
1139 .get_map_ofs = drm_core_get_map_ofs,
1140 .get_reg_ofs = drm_core_get_reg_ofs,
1141+ .proc_init = i915_gem_proc_init,
1142+ .proc_cleanup = i915_gem_proc_cleanup,
1143+ .gem_init_object = i915_gem_init_object,
1144+ .gem_free_object = i915_gem_free_object,
1145 .ioctls = i915_ioctls,
1146 .fops = {
1147 .owner = THIS_MODULE,
1148diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1149index d1a02be..87b071a 100644
1150--- a/drivers/gpu/drm/i915/i915_drv.h
1151+++ b/drivers/gpu/drm/i915/i915_drv.h
1152@@ -39,7 +39,7 @@
1153
1154 #define DRIVER_NAME "i915"
1155 #define DRIVER_DESC "Intel Graphics"
1156-#define DRIVER_DATE "20060119"
1157+#define DRIVER_DATE "20080730"
1158
1159 enum pipe {
1160 PIPE_A = 0,
1161@@ -60,16 +60,23 @@ enum pipe {
1162 #define DRIVER_MINOR 6
1163 #define DRIVER_PATCHLEVEL 0
1164
1165+#define WATCH_COHERENCY 0
1166+#define WATCH_BUF 0
1167+#define WATCH_EXEC 0
1168+#define WATCH_LRU 0
1169+#define WATCH_RELOC 0
1170+#define WATCH_INACTIVE 0
1171+#define WATCH_PWRITE 0
1172+
1173 typedef struct _drm_i915_ring_buffer {
1174 int tail_mask;
1175- unsigned long Start;
1176- unsigned long End;
1177 unsigned long Size;
1178 u8 *virtual_start;
1179 int head;
1180 int tail;
1181 int space;
1182 drm_local_map_t map;
1183+ struct drm_gem_object *ring_obj;
1184 } drm_i915_ring_buffer_t;
1185
1186 struct mem_block {
1187@@ -101,6 +108,8 @@ struct intel_opregion {
1188 };
1189
1190 typedef struct drm_i915_private {
1191+ struct drm_device *dev;
1192+
1193 drm_local_map_t *sarea;
1194 drm_local_map_t *mmio_map;
1195
1196@@ -113,6 +122,7 @@ typedef struct drm_i915_private {
1197 uint32_t counter;
1198 unsigned int status_gfx_addr;
1199 drm_local_map_t hws_map;
1200+ struct drm_gem_object *hws_obj;
1201
1202 unsigned int cpp;
1203 int back_offset;
1204@@ -122,7 +132,6 @@ typedef struct drm_i915_private {
1205
1206 wait_queue_head_t irq_queue;
1207 atomic_t irq_received;
1208- atomic_t irq_emitted;
1209 /** Protects user_irq_refcount and irq_mask_reg */
1210 spinlock_t user_irq_lock;
1211 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
1212@@ -230,8 +239,174 @@ typedef struct drm_i915_private {
1213 u8 saveDACMASK;
1214 u8 saveDACDATA[256*3]; /* 256 3-byte colors */
1215 u8 saveCR[37];
1216+
1217+ struct {
1218+ struct drm_mm gtt_space;
1219+
1220+ /**
1221+ * List of objects currently involved in rendering from the
1222+ * ringbuffer.
1223+ *
1224+ * A reference is held on the buffer while on this list.
1225+ */
1226+ struct list_head active_list;
1227+
1228+ /**
1229+ * List of objects which are not in the ringbuffer but which
1230+ * still have a write_domain which needs to be flushed before
1231+ * unbinding.
1232+ *
1233+ * A reference is held on the buffer while on this list.
1234+ */
1235+ struct list_head flushing_list;
1236+
1237+ /**
1238+ * LRU list of objects which are not in the ringbuffer and
1239+ * are ready to unbind, but are still in the GTT.
1240+ *
1241+ * A reference is not held on the buffer while on this list,
1242+ * as merely being GTT-bound shouldn't prevent its being
1243+ * freed, and we'll pull it off the list in the free path.
1244+ */
1245+ struct list_head inactive_list;
1246+
1247+ /**
1248+ * List of breadcrumbs associated with GPU requests currently
1249+ * outstanding.
1250+ */
1251+ struct list_head request_list;
1252+
1253+ /**
1254+ * We leave the user IRQ off as much as possible,
1255+ * but this means that requests will finish and never
1256+ * be retired once the system goes idle. Set a timer to
1257+ * fire periodically while the ring is running. When it
1258+ * fires, go retire requests.
1259+ */
1260+ struct delayed_work retire_work;
1261+
1262+ uint32_t next_gem_seqno;
1263+
1264+ /**
1265+ * Waiting sequence number, if any
1266+ */
1267+ uint32_t waiting_gem_seqno;
1268+
1269+ /**
1270+ * Last seq seen at irq time
1271+ */
1272+ uint32_t irq_gem_seqno;
1273+
1274+ /**
1275+ * Flag if the X Server, and thus DRM, is not currently in
1276+ * control of the device.
1277+ *
1278+ * This is set between LeaveVT and EnterVT. It needs to be
1279+ * replaced with a semaphore. It also needs to be
1280+ * transitioned away from for kernel modesetting.
1281+ */
1282+ int suspended;
1283+
1284+ /**
1285+ * Flag if the hardware appears to be wedged.
1286+ *
1287+	 * This is set when attempts to idle the device time out.
1288+	 * It prevents command submission from occurring and makes
1289+	 * every pending request fail.
1290+ */
1291+ int wedged;
1292+
1293+ /** Bit 6 swizzling required for X tiling */
1294+ uint32_t bit_6_swizzle_x;
1295+ /** Bit 6 swizzling required for Y tiling */
1296+ uint32_t bit_6_swizzle_y;
1297+ } mm;
1298 } drm_i915_private_t;
1299
1300+/** driver private structure attached to each drm_gem_object */
1301+struct drm_i915_gem_object {
1302+ struct drm_gem_object *obj;
1303+
1304+ /** Current space allocated to this object in the GTT, if any. */
1305+ struct drm_mm_node *gtt_space;
1306+
1307+ /** This object's place on the active/flushing/inactive lists */
1308+ struct list_head list;
1309+
1310+ /**
1311+ * This is set if the object is on the active or flushing lists
1312+ * (has pending rendering), and is not set if it's on inactive (ready
1313+ * to be unbound).
1314+ */
1315+ int active;
1316+
1317+ /**
1318+ * This is set if the object has been written to since last bound
1319+ * to the GTT
1320+ */
1321+ int dirty;
1322+
1323+ /** AGP memory structure for our GTT binding. */
1324+ DRM_AGP_MEM *agp_mem;
1325+
1326+ struct page **page_list;
1327+
1328+ /**
1329+ * Current offset of the object in GTT space.
1330+ *
1331+ * This is the same as gtt_space->start
1332+ */
1333+ uint32_t gtt_offset;
1334+
1335+ /** Boolean whether this object has a valid gtt offset. */
1336+ int gtt_bound;
1337+
1338+ /** How many users have pinned this object in GTT space */
1339+ int pin_count;
1340+
1341+ /** Breadcrumb of last rendering to the buffer. */
1342+ uint32_t last_rendering_seqno;
1343+
1344+ /** Current tiling mode for the object. */
1345+ uint32_t tiling_mode;
1346+
1347+ /**
1348+ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
1349+ * GEM_DOMAIN_CPU is not in the object's read domain.
1350+ */
1351+ uint8_t *page_cpu_valid;
1352+};
1353+
1354+/**
1355+ * Request queue structure.
1356+ *
1357+ * The request queue allows us to note sequence numbers that have been emitted
1358+ * and may be associated with active buffers to be retired.
1359+ *
1360+ * By keeping this list, we can avoid having to do questionable
1361+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1362+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
1363+ */
1364+struct drm_i915_gem_request {
1365+ /** GEM sequence number associated with this request. */
1366+ uint32_t seqno;
1367+
1368+ /** Time at which this request was emitted, in jiffies. */
1369+ unsigned long emitted_jiffies;
1370+
1371+ /** Cache domains that were flushed at the start of the request. */
1372+ uint32_t flush_domains;
1373+
1374+ struct list_head list;
1375+};
1376+
1377+struct drm_i915_file_private {
1378+ struct {
1379+ uint32_t last_gem_seqno;
1380+ uint32_t last_gem_throttle_seqno;
1381+ } mm;
1382+};
1383+
1384 extern struct drm_ioctl_desc i915_ioctls[];
1385 extern int i915_max_ioctl;
1386
1387@@ -239,18 +414,26 @@ extern int i915_max_ioctl;
1388 extern void i915_kernel_lost_context(struct drm_device * dev);
1389 extern int i915_driver_load(struct drm_device *, unsigned long flags);
1390 extern int i915_driver_unload(struct drm_device *);
1391+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
1392 extern void i915_driver_lastclose(struct drm_device * dev);
1393 extern void i915_driver_preclose(struct drm_device *dev,
1394 struct drm_file *file_priv);
1395+extern void i915_driver_postclose(struct drm_device *dev,
1396+ struct drm_file *file_priv);
1397 extern int i915_driver_device_is_agp(struct drm_device * dev);
1398 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1399 unsigned long arg);
1400+extern int i915_emit_box(struct drm_device *dev,
1401+ struct drm_clip_rect __user *boxes,
1402+ int i, int DR1, int DR4);
1403
1404 /* i915_irq.c */
1405 extern int i915_irq_emit(struct drm_device *dev, void *data,
1406 struct drm_file *file_priv);
1407 extern int i915_irq_wait(struct drm_device *dev, void *data,
1408 struct drm_file *file_priv);
1409+void i915_user_irq_get(struct drm_device *dev);
1410+void i915_user_irq_put(struct drm_device *dev);
1411
1412 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
1413 extern void i915_driver_irq_preinstall(struct drm_device * dev);
1414@@ -279,6 +462,67 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
1415 extern void i915_mem_takedown(struct mem_block **heap);
1416 extern void i915_mem_release(struct drm_device * dev,
1417 struct drm_file *file_priv, struct mem_block *heap);
1418+/* i915_gem.c */
1419+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1420+ struct drm_file *file_priv);
1421+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1422+ struct drm_file *file_priv);
1423+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1424+ struct drm_file *file_priv);
1425+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1426+ struct drm_file *file_priv);
1427+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1428+ struct drm_file *file_priv);
1429+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1430+ struct drm_file *file_priv);
1431+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1432+ struct drm_file *file_priv);
1433+int i915_gem_execbuffer(struct drm_device *dev, void *data,
1434+ struct drm_file *file_priv);
1435+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1436+ struct drm_file *file_priv);
1437+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1438+ struct drm_file *file_priv);
1439+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1440+ struct drm_file *file_priv);
1441+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1442+ struct drm_file *file_priv);
1443+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1444+ struct drm_file *file_priv);
1445+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1446+ struct drm_file *file_priv);
1447+int i915_gem_set_tiling(struct drm_device *dev, void *data,
1448+ struct drm_file *file_priv);
1449+int i915_gem_get_tiling(struct drm_device *dev, void *data,
1450+ struct drm_file *file_priv);
1451+void i915_gem_load(struct drm_device *dev);
1452+int i915_gem_proc_init(struct drm_minor *minor);
1453+void i915_gem_proc_cleanup(struct drm_minor *minor);
1454+int i915_gem_init_object(struct drm_gem_object *obj);
1455+void i915_gem_free_object(struct drm_gem_object *obj);
1456+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
1457+void i915_gem_object_unpin(struct drm_gem_object *obj);
1458+void i915_gem_lastclose(struct drm_device *dev);
1459+uint32_t i915_get_gem_seqno(struct drm_device *dev);
1460+void i915_gem_retire_requests(struct drm_device *dev);
1461+void i915_gem_retire_work_handler(struct work_struct *work);
1462+void i915_gem_clflush_object(struct drm_gem_object *obj);
1463+
1464+/* i915_gem_tiling.c */
1465+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
1466+
1467+/* i915_gem_debug.c */
1468+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
1469+ const char *where, uint32_t mark);
1470+#if WATCH_INACTIVE
1471+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
1472+#else
1473+#define i915_verify_inactive(dev, file, line)
1474+#endif
1475+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
1478+void i915_dump_lru(struct drm_device *dev, const char *where);
1479
1480 /* i915_suspend.c */
1481 extern int i915_save_state(struct drm_device *dev);
1482@@ -347,6 +591,7 @@ extern void opregion_enable_asle(struct drm_device *dev);
1483 */
1484 #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
1485 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
1486+#define I915_GEM_HWS_INDEX 0x10
1487
1488 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1489
1490diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1491new file mode 100644
1492index 0000000..90ae8a0
1493--- /dev/null
1494+++ b/drivers/gpu/drm/i915/i915_gem.c
1495@@ -0,0 +1,2497 @@
1496+/*
1497+ * Copyright © 2008 Intel Corporation
1498+ *
1499+ * Permission is hereby granted, free of charge, to any person obtaining a
1500+ * copy of this software and associated documentation files (the "Software"),
1501+ * to deal in the Software without restriction, including without limitation
1502+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1503+ * and/or sell copies of the Software, and to permit persons to whom the
1504+ * Software is furnished to do so, subject to the following conditions:
1505+ *
1506+ * The above copyright notice and this permission notice (including the next
1507+ * paragraph) shall be included in all copies or substantial portions of the
1508+ * Software.
1509+ *
1510+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1511+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1512+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1513+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1514+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1515+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
1516+ * IN THE SOFTWARE.
1517+ *
1518+ * Authors:
1519+ * Eric Anholt <eric@anholt.net>
1520+ *
1521+ */
1522+
1523+#include "drmP.h"
1524+#include "drm.h"
1525+#include "i915_drm.h"
1526+#include "i915_drv.h"
1527+#include <linux/swap.h>
1528+
1529+static int
1530+i915_gem_object_set_domain(struct drm_gem_object *obj,
1531+ uint32_t read_domains,
1532+ uint32_t write_domain);
1533+static int
1534+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1535+ uint64_t offset,
1536+ uint64_t size,
1537+ uint32_t read_domains,
1538+ uint32_t write_domain);
1539+static int
1540+i915_gem_set_domain(struct drm_gem_object *obj,
1541+ struct drm_file *file_priv,
1542+ uint32_t read_domains,
1543+ uint32_t write_domain);
1544+static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
1545+static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
1546+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
1547+
1548+int
1549+i915_gem_init_ioctl(struct drm_device *dev, void *data,
1550+ struct drm_file *file_priv)
1551+{
1552+ drm_i915_private_t *dev_priv = dev->dev_private;
1553+ struct drm_i915_gem_init *args = data;
1554+
1555+ mutex_lock(&dev->struct_mutex);
1556+
1557+ if (args->gtt_start >= args->gtt_end ||
1558+ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
1559+ (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
1560+ mutex_unlock(&dev->struct_mutex);
1561+ return -EINVAL;
1562+ }
1563+
1564+ drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
1565+ args->gtt_end - args->gtt_start);
1566+
1567+ dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
1568+
1569+ mutex_unlock(&dev->struct_mutex);
1570+
1571+ return 0;
1572+}
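The X server is expected to call this once at startup to hand GEM the range
of the aperture it may manage. A hypothetical userspace caller (sketch; both
ends must be page-aligned or the ioctl returns -EINVAL, per the checks above):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int gem_init_gtt(int fd, uint64_t gtt_start, uint64_t gtt_end)
{
	struct drm_i915_gem_init arg;

	memset(&arg, 0, sizeof(arg));
	arg.gtt_start = gtt_start;
	arg.gtt_end = gtt_end;
	return ioctl(fd, DRM_IOCTL_I915_GEM_INIT, &arg);
}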
1573+
1574+
1575+/**
1576+ * Creates a new mm object and returns a handle to it.
1577+ */
1578+int
1579+i915_gem_create_ioctl(struct drm_device *dev, void *data,
1580+ struct drm_file *file_priv)
1581+{
1582+ struct drm_i915_gem_create *args = data;
1583+ struct drm_gem_object *obj;
1584+ int handle, ret;
1585+
1586+ args->size = roundup(args->size, PAGE_SIZE);
1587+
1588+ /* Allocate the new object */
1589+ obj = drm_gem_object_alloc(dev, args->size);
1590+ if (obj == NULL)
1591+ return -ENOMEM;
1592+
1593+ ret = drm_gem_handle_create(file_priv, obj, &handle);
1594+ mutex_lock(&dev->struct_mutex);
1595+ drm_gem_object_handle_unreference(obj);
1596+ mutex_unlock(&dev->struct_mutex);
1597+
1598+ if (ret)
1599+ return ret;
1600+
1601+ args->handle = handle;
1602+
1603+ return 0;
1604+}
1605+
1606+/**
1607+ * Reads data from the object referenced by handle.
1608+ *
1609+ * On error, the contents of *data are undefined.
1610+ */
1611+int
1612+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1613+ struct drm_file *file_priv)
1614+{
1615+ struct drm_i915_gem_pread *args = data;
1616+ struct drm_gem_object *obj;
1617+ struct drm_i915_gem_object *obj_priv;
1618+ ssize_t read;
1619+ loff_t offset;
1620+ int ret;
1621+
1622+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1623+ if (obj == NULL)
1624+ return -EBADF;
1625+ obj_priv = obj->driver_private;
1626+
1627+ /* Bounds check source.
1628+ *
1629+ * XXX: This could use review for overflow issues...
1630+ */
1631+ if (args->offset > obj->size || args->size > obj->size ||
1632+ args->offset + args->size > obj->size) {
1633+ drm_gem_object_unreference(obj);
1634+ return -EINVAL;
1635+ }
1636+
1637+ mutex_lock(&dev->struct_mutex);
1638+
1639+ ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
1640+ I915_GEM_DOMAIN_CPU, 0);
1641+ if (ret != 0) {
1642+ drm_gem_object_unreference(obj);
1643+ mutex_unlock(&dev->struct_mutex);
1644+ return ret;
1645+ }
1645+
1646+ offset = args->offset;
1647+
1648+ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
1649+ args->size, &offset);
1650+ if (read != args->size) {
1651+ drm_gem_object_unreference(obj);
1652+ mutex_unlock(&dev->struct_mutex);
1653+ if (read < 0)
1654+ return read;
1655+ else
1656+ return -EINVAL;
1657+ }
1658+
1659+ drm_gem_object_unreference(obj);
1660+ mutex_unlock(&dev->struct_mutex);
1661+
1662+ return 0;
1663+}
1664+
1665+static int
1666+i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
1667+ struct drm_i915_gem_pwrite *args,
1668+ struct drm_file *file_priv)
1669+{
1670+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
1671+ ssize_t remain;
1672+ loff_t offset;
1673+ char __user *user_data;
1674+ char *vaddr;
1675+ int i, o, l;
1676+ int ret = 0;
1677+ unsigned long pfn;
1678+ unsigned long unwritten;
1679+
1680+ user_data = (char __user *) (uintptr_t) args->data_ptr;
1681+ remain = args->size;
1682+ if (!access_ok(VERIFY_READ, user_data, remain))
1683+ return -EFAULT;
1684+
1686+ mutex_lock(&dev->struct_mutex);
1687+ ret = i915_gem_object_pin(obj, 0);
1688+ if (ret) {
1689+ mutex_unlock(&dev->struct_mutex);
1690+ return ret;
1691+ }
1692+ ret = i915_gem_set_domain(obj, file_priv,
1693+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
1694+ if (ret)
1695+ goto fail;
1696+
1697+ obj_priv = obj->driver_private;
1698+ offset = obj_priv->gtt_offset + args->offset;
1699+ obj_priv->dirty = 1;
1700+
1701+ while (remain > 0) {
1702+ /* Operation in this page
1703+ *
1704+ * i = page number
1705+ * o = offset within page
1706+ * l = bytes to copy
1707+ */
1708+ i = offset >> PAGE_SHIFT;
1709+ o = offset & (PAGE_SIZE-1);
1710+ l = remain;
1711+ if ((o + l) > PAGE_SIZE)
1712+ l = PAGE_SIZE - o;
1713+
1714+ pfn = (dev->agp->base >> PAGE_SHIFT) + i;
1715+
1716+#ifdef CONFIG_HIGHMEM
1717+ /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
1718+ */
1719+ vaddr = kmap_atomic_pfn(pfn, KM_USER0);
1720+#if WATCH_PWRITE
1721+ DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
1722+ i, o, l, pfn, vaddr);
1723+#endif
1724+ unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
1725+ user_data, l);
1726+ kunmap_atomic(vaddr, KM_USER0);
1727+
1728+ if (unwritten)
1729+#endif /* CONFIG_HIGHMEM */
1730+ {
1731+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
1732+#if WATCH_PWRITE
1733+ DRM_INFO("pwrite slow i %d o %d l %d "
1734+ "pfn %ld vaddr %p\n",
1735+ i, o, l, pfn, vaddr);
1736+#endif
1737+ if (vaddr == NULL) {
1738+ ret = -EFAULT;
1739+ goto fail;
1740+ }
1741+ unwritten = __copy_from_user(vaddr + o, user_data, l);
1742+#if WATCH_PWRITE
1743+ DRM_INFO("unwritten %ld\n", unwritten);
1744+#endif
1745+ iounmap(vaddr);
1746+ if (unwritten) {
1747+ ret = -EFAULT;
1748+ goto fail;
1749+ }
1750+ }
1751+
1752+ remain -= l;
1753+ user_data += l;
1754+ offset += l;
1755+ }
1756+#if WATCH_PWRITE && 1
1757+ i915_gem_clflush_object(obj);
1758+ i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
1759+ i915_gem_clflush_object(obj);
1760+#endif
1761+
1762+fail:
1763+ i915_gem_object_unpin(obj);
1764+ mutex_unlock(&dev->struct_mutex);
1765+
1766+ return ret;
1767+}
1768+
1769+int
1770+i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
1771+ struct drm_i915_gem_pwrite *args,
1772+ struct drm_file *file_priv)
1773+{
1774+ int ret;
1775+ loff_t offset;
1776+ ssize_t written;
1777+
1778+ mutex_lock(&dev->struct_mutex);
1779+
1780+ ret = i915_gem_set_domain(obj, file_priv,
1781+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
1782+ if (ret) {
1783+ mutex_unlock(&dev->struct_mutex);
1784+ return ret;
1785+ }
1786+
1787+ offset = args->offset;
1788+
1789+ written = vfs_write(obj->filp,
1790+ (char __user *)(uintptr_t) args->data_ptr,
1791+ args->size, &offset);
1792+ if (written != args->size) {
1793+ mutex_unlock(&dev->struct_mutex);
1794+ if (written < 0)
1795+ return written;
1796+ else
1797+ return -EINVAL;
1798+ }
1799+
1800+ mutex_unlock(&dev->struct_mutex);
1801+
1802+ return 0;
1803+}
1804+
1805+/**
1806+ * Writes data to the object referenced by handle.
1807+ *
1808+ * On error, the contents of the buffer that were to be modified are undefined.
1809+ */
1810+int
1811+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1812+ struct drm_file *file_priv)
1813+{
1814+ struct drm_i915_gem_pwrite *args = data;
1815+ struct drm_gem_object *obj;
1816+ struct drm_i915_gem_object *obj_priv;
1817+ int ret = 0;
1818+
1819+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1820+ if (obj == NULL)
1821+ return -EBADF;
1822+ obj_priv = obj->driver_private;
1823+
1824+ /* Bounds check destination.
1825+ *
1826+ * XXX: This could use review for overflow issues...
1827+ */
1828+ if (args->offset > obj->size || args->size > obj->size ||
1829+ args->offset + args->size > obj->size) {
1830+ drm_gem_object_unreference(obj);
1831+ return -EINVAL;
1832+ }
1833+
1834+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
1835+ * it would end up going through the fenced access, and we'll get
1836+ * different detiling behavior between reading and writing.
1837+ * pread/pwrite currently are reading and writing from the CPU
1838+ * perspective, requiring manual detiling by the client.
1839+ */
1840+ if (obj_priv->tiling_mode == I915_TILING_NONE &&
1841+ dev->gtt_total != 0)
1842+ ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
1843+ else
1844+ ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
1845+
1846+#if WATCH_PWRITE
1847+ if (ret)
1848+ DRM_INFO("pwrite failed %d\n", ret);
1849+#endif
1850+
1851+ drm_gem_object_unreference(obj);
1852+
1853+ return ret;
1854+}
1855+
1856+/**
1857+ * Called when user space prepares to use an object
1858+ */
1859+int
1860+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1861+ struct drm_file *file_priv)
1862+{
1863+ struct drm_i915_gem_set_domain *args = data;
1864+ struct drm_gem_object *obj;
1865+ int ret;
1866+
1867+ if (!(dev->driver->driver_features & DRIVER_GEM))
1868+ return -ENODEV;
1869+
1870+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1871+ if (obj == NULL)
1872+ return -EBADF;
1873+
1874+ mutex_lock(&dev->struct_mutex);
1875+#if WATCH_BUF
1876+ DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
1877+ obj, obj->size, args->read_domains, args->write_domain);
1878+#endif
1879+ ret = i915_gem_set_domain(obj, file_priv,
1880+ args->read_domains, args->write_domain);
1881+ drm_gem_object_unreference(obj);
1882+ mutex_unlock(&dev->struct_mutex);
1883+ return ret;
1884+}
1885+
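Clients are expected to move an object into the CPU domain before touching it
with the CPU, so the flush/invalidate bookkeeping described later stays
coherent. A hypothetical caller preparing for a CPU write (sketch):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int prep_cpu_write(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = I915_GEM_DOMAIN_CPU;	/* we intend to dirty it */
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}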
1886+/**
1887+ * Called when user space has done writes to this buffer
1888+ */
1889+int
1890+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1891+ struct drm_file *file_priv)
1892+{
1893+ struct drm_i915_gem_sw_finish *args = data;
1894+ struct drm_gem_object *obj;
1895+ struct drm_i915_gem_object *obj_priv;
1896+ int ret = 0;
1897+
1898+ if (!(dev->driver->driver_features & DRIVER_GEM))
1899+ return -ENODEV;
1900+
1901+ mutex_lock(&dev->struct_mutex);
1902+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1903+ if (obj == NULL) {
1904+ mutex_unlock(&dev->struct_mutex);
1905+ return -EBADF;
1906+ }
1907+
1908+#if WATCH_BUF
1909+ DRM_INFO("%s: sw_finish %d (%p %d)\n",
1910+ __func__, args->handle, obj, obj->size);
1911+#endif
1912+ obj_priv = obj->driver_private;
1913+
1914+ /* Pinned buffers may be scanout, so flush the cache */
1915+ if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
1916+ i915_gem_clflush_object(obj);
1917+ drm_agp_chipset_flush(dev);
1918+ }
1919+ drm_gem_object_unreference(obj);
1920+ mutex_unlock(&dev->struct_mutex);
1921+ return ret;
1922+}
1923+
1924+/**
1925+ * Maps the contents of an object, returning the address it is mapped
1926+ * into.
1927+ *
1928+ * While the mapping holds a reference on the contents of the object, it doesn't
1929+ * imply a ref on the object itself.
1930+ */
1931+int
1932+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1933+ struct drm_file *file_priv)
1934+{
1935+ struct drm_i915_gem_mmap *args = data;
1936+ struct drm_gem_object *obj;
1937+ loff_t offset;
1938+ unsigned long addr;
1939+
1940+ if (!(dev->driver->driver_features & DRIVER_GEM))
1941+ return -ENODEV;
1942+
1943+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1944+ if (obj == NULL)
1945+ return -EBADF;
1946+
1947+ offset = args->offset;
1948+
1949+ down_write(&current->mm->mmap_sem);
1950+ addr = do_mmap(obj->filp, 0, args->size,
1951+ PROT_READ | PROT_WRITE, MAP_SHARED,
1952+ args->offset);
1953+ up_write(&current->mm->mmap_sem);
1954+ mutex_lock(&dev->struct_mutex);
1955+ drm_gem_object_unreference(obj);
1956+ mutex_unlock(&dev->struct_mutex);
1957+ if (IS_ERR((void *)addr))
1958+ return addr;
1959+
1960+ args->addr_ptr = (uint64_t) addr;
1961+
1962+ return 0;
1963+}
1964+
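A hypothetical userspace wrapper for this ioctl (sketch): the kernel performs
the mmap on the object's shmem file and hands the resulting user address back
in addr_ptr.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static void *map_gem_object(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.offset = 0;		/* map from the start of the object */
	arg.size = size;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;
	return (void *)(uintptr_t)arg.addr_ptr;
}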
1965+static void
1966+i915_gem_object_free_page_list(struct drm_gem_object *obj)
1967+{
1968+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
1969+ int page_count = obj->size / PAGE_SIZE;
1970+ int i;
1971+
1972+ if (obj_priv->page_list == NULL)
1973+ return;
1974+
1975+
1976+ for (i = 0; i < page_count; i++)
1977+ if (obj_priv->page_list[i] != NULL) {
1978+ if (obj_priv->dirty)
1979+ set_page_dirty(obj_priv->page_list[i]);
1980+ mark_page_accessed(obj_priv->page_list[i]);
1981+ page_cache_release(obj_priv->page_list[i]);
1982+ }
1983+ obj_priv->dirty = 0;
1984+
1985+ drm_free(obj_priv->page_list,
1986+ page_count * sizeof(struct page *),
1987+ DRM_MEM_DRIVER);
1988+ obj_priv->page_list = NULL;
1989+}
1990+
1991+static void
1992+i915_gem_object_move_to_active(struct drm_gem_object *obj)
1993+{
1994+ struct drm_device *dev = obj->dev;
1995+ drm_i915_private_t *dev_priv = dev->dev_private;
1996+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
1997+
1998+ /* Add a reference if we're newly entering the active list. */
1999+ if (!obj_priv->active) {
2000+ drm_gem_object_reference(obj);
2001+ obj_priv->active = 1;
2002+ }
2003+ /* Move from whatever list we were on to the tail of execution. */
2004+ list_move_tail(&obj_priv->list,
2005+ &dev_priv->mm.active_list);
2006+}
2007+
2009+static void
2010+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
2011+{
2012+ struct drm_device *dev = obj->dev;
2013+ drm_i915_private_t *dev_priv = dev->dev_private;
2014+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2015+
2016+ i915_verify_inactive(dev, __FILE__, __LINE__);
2017+ if (obj_priv->pin_count != 0)
2018+ list_del_init(&obj_priv->list);
2019+ else
2020+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2021+
2022+ if (obj_priv->active) {
2023+ obj_priv->active = 0;
2024+ drm_gem_object_unreference(obj);
2025+ }
2026+ i915_verify_inactive(dev, __FILE__, __LINE__);
2027+}
2028+
2029+/**
2030+ * Creates a new sequence number, emitting a write of it to the status page
2031+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
2032+ *
2033+ * Must be called with struct_lock held.
2034+ *
2035+ * Returned sequence numbers are nonzero on success.
2036+ */
2037+static uint32_t
2038+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
2039+{
2040+ drm_i915_private_t *dev_priv = dev->dev_private;
2041+ struct drm_i915_gem_request *request;
2042+ uint32_t seqno;
2043+ int was_empty;
2044+ RING_LOCALS;
2045+
2046+ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
2047+ if (request == NULL)
2048+ return 0;
2049+
2050+ /* Grab the seqno we're going to make this request be, and bump the
2051+ * next (skipping 0 so it can be the reserved no-seqno value).
2052+ */
2053+ seqno = dev_priv->mm.next_gem_seqno;
2054+ dev_priv->mm.next_gem_seqno++;
2055+ if (dev_priv->mm.next_gem_seqno == 0)
2056+ dev_priv->mm.next_gem_seqno++;
2057+
2058+ BEGIN_LP_RING(4);
2059+ OUT_RING(MI_STORE_DWORD_INDEX);
2060+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
2061+ OUT_RING(seqno);
2062+
2063+ OUT_RING(MI_USER_INTERRUPT);
2064+ ADVANCE_LP_RING();
2065+
2066+ DRM_DEBUG("%d\n", seqno);
2067+
2068+ request->seqno = seqno;
2069+ request->emitted_jiffies = jiffies;
2070+ request->flush_domains = flush_domains;
2071+ was_empty = list_empty(&dev_priv->mm.request_list);
2072+ list_add_tail(&request->list, &dev_priv->mm.request_list);
2073+
2074+ if (was_empty)
2075+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
2076+ return seqno;
2077+}
2078+
2079+/**
2080+ * Command execution barrier
2081+ *
2082+ * Ensures that all commands in the ring are finished
2083+ * before signalling the CPU
2084+ */
2085+uint32_t
2086+i915_retire_commands(struct drm_device *dev)
2087+{
2088+ drm_i915_private_t *dev_priv = dev->dev_private;
2089+ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
2090+ uint32_t flush_domains = 0;
2091+ RING_LOCALS;
2092+
2093+ /* The sampler always gets flushed on i965 (sigh) */
2094+ if (IS_I965G(dev))
2095+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
2096+ BEGIN_LP_RING(2);
2097+ OUT_RING(cmd);
2098+ OUT_RING(0); /* noop */
2099+ ADVANCE_LP_RING();
2100+ return flush_domains;
2101+}
2102+
2103+/**
2104+ * Moves buffers associated only with the given active seqno from the active
2105+ * to inactive list, potentially freeing them.
2106+ */
2107+static void
2108+i915_gem_retire_request(struct drm_device *dev,
2109+ struct drm_i915_gem_request *request)
2110+{
2111+ drm_i915_private_t *dev_priv = dev->dev_private;
2112+
2113+ /* Move any buffers on the active list that are no longer referenced
2114+ * by the ringbuffer to the flushing/inactive lists as appropriate.
2115+ */
2116+ while (!list_empty(&dev_priv->mm.active_list)) {
2117+ struct drm_gem_object *obj;
2118+ struct drm_i915_gem_object *obj_priv;
2119+
2120+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
2121+ struct drm_i915_gem_object,
2122+ list);
2123+ obj = obj_priv->obj;
2124+
2125+ /* If the seqno being retired doesn't match the oldest in the
2126+ * list, then the oldest in the list must still be newer than
2127+ * this seqno.
2128+ */
2129+ if (obj_priv->last_rendering_seqno != request->seqno)
2130+ return;
2131+#if WATCH_LRU
2132+ DRM_INFO("%s: retire %d moves to inactive list %p\n",
2133+ __func__, request->seqno, obj);
2134+#endif
2135+
2136+ if (obj->write_domain != 0) {
2137+ list_move_tail(&obj_priv->list,
2138+ &dev_priv->mm.flushing_list);
2139+ } else {
2140+ i915_gem_object_move_to_inactive(obj);
2141+ }
2142+ }
2143+
2144+ if (request->flush_domains != 0) {
2145+ struct drm_i915_gem_object *obj_priv, *next;
2146+
2147+ /* Clear the write domain and activity from any buffers
2148+ * that are just waiting for a flush matching the one retired.
2149+ */
2150+ list_for_each_entry_safe(obj_priv, next,
2151+ &dev_priv->mm.flushing_list, list) {
2152+ struct drm_gem_object *obj = obj_priv->obj;
2153+
2154+ if (obj->write_domain & request->flush_domains) {
2155+ obj->write_domain = 0;
2156+ i915_gem_object_move_to_inactive(obj);
2157+ }
2158+ }
2159+
2160+ }
2161+}
2162+
2163+/**
2164+ * Returns true if seq1 is later than seq2.
2165+ */
2166+static int
2167+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
2168+{
2169+ return (int32_t)(seq1 - seq2) >= 0;
2170+}
2171+
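The signed-difference cast is what makes this comparison robust across 32-bit
wraparound of the sequence counter. A standalone check (hypothetical test
program, not part of the patch):

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Interpreting the difference as signed makes small "forward"
	 * distances positive even when the counter wraps past 0xffffffff. */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(2, 1));		/* ordinary ordering */
	assert(seqno_passed(1, 0xfffffffeu));	/* across the wrap */
	assert(!seqno_passed(0xfffffffeu, 1));
	return 0;
}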
2172+uint32_t
2173+i915_get_gem_seqno(struct drm_device *dev)
2174+{
2175+ drm_i915_private_t *dev_priv = dev->dev_private;
2176+
2177+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
2178+}
2179+
2180+/**
2181+ * This function clears the request list as sequence numbers are passed.
2182+ */
2183+void
2184+i915_gem_retire_requests(struct drm_device *dev)
2185+{
2186+ drm_i915_private_t *dev_priv = dev->dev_private;
2187+ uint32_t seqno;
2188+
2189+ seqno = i915_get_gem_seqno(dev);
2190+
2191+ while (!list_empty(&dev_priv->mm.request_list)) {
2192+ struct drm_i915_gem_request *request;
2193+ uint32_t retiring_seqno;
2194+
2195+ request = list_first_entry(&dev_priv->mm.request_list,
2196+ struct drm_i915_gem_request,
2197+ list);
2198+ retiring_seqno = request->seqno;
2199+
2200+ if (i915_seqno_passed(seqno, retiring_seqno) ||
2201+ dev_priv->mm.wedged) {
2202+ i915_gem_retire_request(dev, request);
2203+
2204+ list_del(&request->list);
2205+ drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
2206+ } else
2207+ break;
2208+ }
2209+}
2210+
2211+void
2212+i915_gem_retire_work_handler(struct work_struct *work)
2213+{
2214+ drm_i915_private_t *dev_priv;
2215+ struct drm_device *dev;
2216+
2217+ dev_priv = container_of(work, drm_i915_private_t,
2218+ mm.retire_work.work);
2219+ dev = dev_priv->dev;
2220+
2221+ mutex_lock(&dev->struct_mutex);
2222+ i915_gem_retire_requests(dev);
2223+ if (!list_empty(&dev_priv->mm.request_list))
2224+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
2225+ mutex_unlock(&dev->struct_mutex);
2226+}
2227+
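The handler re-arms itself only while requests remain outstanding, so the
one-second timer dies out naturally once the GPU goes idle. A minimal sketch
of the same self-rearming delayed-work pattern (hypothetical names):

#include <linux/workqueue.h>

static struct delayed_work poll_work;
static int work_outstanding;	/* stands in for the request-list check */

static void poll_handler(struct work_struct *work)
{
	/* ... one round of housekeeping ... */
	if (work_outstanding)
		schedule_delayed_work(&poll_work, HZ);	/* re-arm in ~1s */
}

static void poll_start(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_handler);
	schedule_delayed_work(&poll_work, HZ);
}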
2228+/**
2229+ * Waits for a sequence number to be signaled, and cleans up the
2230+ * request and object lists appropriately for that event.
2231+ */
2232+int
2233+i915_wait_request(struct drm_device *dev, uint32_t seqno)
2234+{
2235+ drm_i915_private_t *dev_priv = dev->dev_private;
2236+ int ret = 0;
2237+
2238+ BUG_ON(seqno == 0);
2239+
2240+ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
2241+ dev_priv->mm.waiting_gem_seqno = seqno;
2242+ i915_user_irq_get(dev);
2243+ ret = wait_event_interruptible(dev_priv->irq_queue,
2244+ i915_seqno_passed(i915_get_gem_seqno(dev),
2245+ seqno) ||
2246+ dev_priv->mm.wedged);
2247+ i915_user_irq_put(dev);
2248+ dev_priv->mm.waiting_gem_seqno = 0;
2249+ }
2250+ if (dev_priv->mm.wedged)
2251+ ret = -EIO;
2252+
2253+ if (ret && ret != -ERESTARTSYS)
2254+ DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
2255+ __func__, ret, seqno, i915_get_gem_seqno(dev));
2256+
2257+ /* Directly dispatch request retiring. While we have the work queue
2258+ * to handle this, the waiter on a request often wants an associated
2259+ * buffer to have made it to the inactive list, and we would need
2260+ * a separate wait queue to handle that.
2261+ */
2262+ if (ret == 0)
2263+ i915_gem_retire_requests(dev);
2264+
2265+ return ret;
2266+}
2267+
2268+static void
2269+i915_gem_flush(struct drm_device *dev,
2270+ uint32_t invalidate_domains,
2271+ uint32_t flush_domains)
2272+{
2273+ drm_i915_private_t *dev_priv = dev->dev_private;
2274+ uint32_t cmd;
2275+ RING_LOCALS;
2276+
2277+#if WATCH_EXEC
2278+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
2279+ invalidate_domains, flush_domains);
2280+#endif
2281+
2282+ if (flush_domains & I915_GEM_DOMAIN_CPU)
2283+ drm_agp_chipset_flush(dev);
2284+
2285+ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
2286+ I915_GEM_DOMAIN_GTT)) {
2287+ /*
2288+ * read/write caches:
2289+ *
2290+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
2291+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
2292+ * also flushed at 2d versus 3d pipeline switches.
2293+ *
2294+ * read-only caches:
2295+ *
2296+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
2297+ * MI_READ_FLUSH is set, and is always flushed on 965.
2298+ *
2299+ * I915_GEM_DOMAIN_COMMAND may not exist?
2300+ *
2301+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
2302+ * invalidated when MI_EXE_FLUSH is set.
2303+ *
2304+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
2305+ * invalidated with every MI_FLUSH.
2306+ *
2307+ * TLBs:
2308+ *
2309+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
2310+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
2311+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
2312+ * are flushed at any MI_FLUSH.
2313+ */
2314+
2315+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
2316+ if ((invalidate_domains|flush_domains) &
2317+ I915_GEM_DOMAIN_RENDER)
2318+ cmd &= ~MI_NO_WRITE_FLUSH;
2319+ if (!IS_I965G(dev)) {
2320+ /*
2321+ * On the 965, the sampler cache always gets flushed
2322+ * and this bit is reserved.
2323+ */
2324+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
2325+ cmd |= MI_READ_FLUSH;
2326+ }
2327+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
2328+ cmd |= MI_EXE_FLUSH;
2329+
2330+#if WATCH_EXEC
2331+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
2332+#endif
2333+ BEGIN_LP_RING(2);
2334+ OUT_RING(cmd);
2335+ OUT_RING(0); /* noop */
2336+ ADVANCE_LP_RING();
2337+ }
2338+}
2339+
2340+/**
2341+ * Ensures that all rendering to the object has completed and the object is
2342+ * safe to unbind from the GTT or access from the CPU.
2343+ */
2344+static int
2345+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
2346+{
2347+ struct drm_device *dev = obj->dev;
2348+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2349+ int ret;
2350+
2351+ /* If there are writes queued to the buffer, flush and
2352+ * create a new seqno to wait for.
2353+ */
2354+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
2355+ uint32_t write_domain = obj->write_domain;
2356+#if WATCH_BUF
2357+ DRM_INFO("%s: flushing object %p from write domain %08x\n",
2358+ __func__, obj, write_domain);
2359+#endif
2360+ i915_gem_flush(dev, 0, write_domain);
2361+
2362+ i915_gem_object_move_to_active(obj);
2363+ obj_priv->last_rendering_seqno = i915_add_request(dev,
2364+ write_domain);
2365+ BUG_ON(obj_priv->last_rendering_seqno == 0);
2366+#if WATCH_LRU
2367+ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
2368+#endif
2369+ }
2370+
2371+ /* If there is rendering queued on the buffer being evicted, wait for
2372+ * it.
2373+ */
2374+ if (obj_priv->active) {
2375+#if WATCH_BUF
2376+ DRM_INFO("%s: object %p wait for seqno %08x\n",
2377+ __func__, obj, obj_priv->last_rendering_seqno);
2378+#endif
2379+ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
2380+ if (ret != 0)
2381+ return ret;
2382+ }
2383+
2384+ return 0;
2385+}
2386+
2387+/**
2388+ * Unbinds an object from the GTT aperture.
2389+ */
2390+static int
2391+i915_gem_object_unbind(struct drm_gem_object *obj)
2392+{
2393+ struct drm_device *dev = obj->dev;
2394+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2395+ int ret = 0;
2396+
2397+#if WATCH_BUF
2398+ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
2399+ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
2400+#endif
2401+ if (obj_priv->gtt_space == NULL)
2402+ return 0;
2403+
2404+ if (obj_priv->pin_count != 0) {
2405+ DRM_ERROR("Attempting to unbind pinned buffer\n");
2406+ return -EINVAL;
2407+ }
2408+
2409+ /* Wait for any rendering to complete
2410+ */
2411+ ret = i915_gem_object_wait_rendering(obj);
2412+ if (ret) {
2413+ DRM_ERROR("wait_rendering failed: %d\n", ret);
2414+ return ret;
2415+ }
2416+
2417+ /* Move the object to the CPU domain to ensure that
2418+ * any possible CPU writes while it's not in the GTT
2419+ * are flushed when we go to remap it. This will
2420+ * also ensure that all pending GPU writes are finished
2421+ * before we unbind.
2422+ */
2423+ ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
2424+ I915_GEM_DOMAIN_CPU);
2425+ if (ret) {
2426+ DRM_ERROR("set_domain failed: %d\n", ret);
2427+ return ret;
2428+ }
2429+
2430+ if (obj_priv->agp_mem != NULL) {
2431+ drm_unbind_agp(obj_priv->agp_mem);
2432+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2433+ obj_priv->agp_mem = NULL;
2434+ }
2435+
2436+ BUG_ON(obj_priv->active);
2437+
2438+ i915_gem_object_free_page_list(obj);
2439+
2440+ if (obj_priv->gtt_space) {
2441+ atomic_dec(&dev->gtt_count);
2442+ atomic_sub(obj->size, &dev->gtt_memory);
2443+
2444+ drm_mm_put_block(obj_priv->gtt_space);
2445+ obj_priv->gtt_space = NULL;
2446+ }
2447+
2448+ /* Remove ourselves from the LRU list if present. */
2449+ if (!list_empty(&obj_priv->list))
2450+ list_del_init(&obj_priv->list);
2451+
2452+ return 0;
2453+}
2454+
2455+static int
2456+i915_gem_evict_something(struct drm_device *dev)
2457+{
2458+ drm_i915_private_t *dev_priv = dev->dev_private;
2459+ struct drm_gem_object *obj;
2460+ struct drm_i915_gem_object *obj_priv;
2461+ int ret = 0;
2462+
2463+ for (;;) {
2464+ /* If there's an inactive buffer available now, grab it
2465+ * and be done.
2466+ */
2467+ if (!list_empty(&dev_priv->mm.inactive_list)) {
2468+ obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
2469+ struct drm_i915_gem_object,
2470+ list);
2471+ obj = obj_priv->obj;
2472+ BUG_ON(obj_priv->pin_count != 0);
2473+#if WATCH_LRU
2474+ DRM_INFO("%s: evicting %p\n", __func__, obj);
2475+#endif
2476+ BUG_ON(obj_priv->active);
2477+
2478+ /* Wait on the rendering and unbind the buffer. */
2479+ ret = i915_gem_object_unbind(obj);
2480+ break;
2481+ }
2482+
2483+ /* If we didn't get anything, but the ring is still processing
2484+ * things, wait for one of those things to finish and hopefully
2485+ * leave us a buffer to evict.
2486+ */
2487+ if (!list_empty(&dev_priv->mm.request_list)) {
2488+ struct drm_i915_gem_request *request;
2489+
2490+ request = list_first_entry(&dev_priv->mm.request_list,
2491+ struct drm_i915_gem_request,
2492+ list);
2493+
2494+ ret = i915_wait_request(dev, request->seqno);
2495+ if (ret)
2496+ break;
2497+
2498+ /* If waiting caused an object to become inactive,
2499+ * then loop around and grab it. Otherwise, we
2500+ * assume that waiting freed and unbound something,
2501+ * so there should now be some space in the GTT.
2502+ */
2503+ if (!list_empty(&dev_priv->mm.inactive_list))
2504+ continue;
2505+ break;
2506+ }
2507+
2508+ /* If we didn't have anything on the request list but there
2509+ * are buffers awaiting a flush, emit one and try again.
2510+ * When we wait on it, those buffers waiting for that flush
2511+ * will get moved to inactive.
2512+ */
2513+ if (!list_empty(&dev_priv->mm.flushing_list)) {
2514+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2515+ struct drm_i915_gem_object,
2516+ list);
2517+ obj = obj_priv->obj;
2518+
2519+ i915_gem_flush(dev,
2520+ obj->write_domain,
2521+ obj->write_domain);
2522+ i915_add_request(dev, obj->write_domain);
2523+
2524+ obj = NULL;
2525+ continue;
2526+ }
2527+
2528+ DRM_ERROR("inactive empty %d request empty %d "
2529+ "flushing empty %d\n",
2530+ list_empty(&dev_priv->mm.inactive_list),
2531+ list_empty(&dev_priv->mm.request_list),
2532+ list_empty(&dev_priv->mm.flushing_list));
2533+ /* If we didn't do any of the above, there's nothing to be done
2534+ * and we just can't fit it in.
2535+ */
2536+ return -ENOMEM;
2537+ }
2538+ return ret;
2539+}
2540+
2541+static int
2542+i915_gem_object_get_page_list(struct drm_gem_object *obj)
2543+{
2544+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2545+ int page_count, i;
2546+ struct address_space *mapping;
2547+ struct inode *inode;
2548+ struct page *page;
2549+ int ret;
2550+
2551+ if (obj_priv->page_list)
2552+ return 0;
2553+
2554+ /* Get the list of pages out of our struct file. They'll be pinned
2555+ * at this point until we release them.
2556+ */
2557+ page_count = obj->size / PAGE_SIZE;
2558+ BUG_ON(obj_priv->page_list != NULL);
2559+ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
2560+ DRM_MEM_DRIVER);
2561+ if (obj_priv->page_list == NULL) {
2562+ DRM_ERROR("Failed to allocate page list\n");
2563+ return -ENOMEM;
2564+ }
2565+
2566+ inode = obj->filp->f_path.dentry->d_inode;
2567+ mapping = inode->i_mapping;
2568+ for (i = 0; i < page_count; i++) {
2569+ page = read_mapping_page(mapping, i, NULL);
2570+ if (IS_ERR(page)) {
2571+ ret = PTR_ERR(page);
2572+ DRM_ERROR("read_mapping_page failed: %d\n", ret);
2573+ i915_gem_object_free_page_list(obj);
2574+ return ret;
2575+ }
2576+ obj_priv->page_list[i] = page;
2577+ }
2578+ return 0;
2579+}
2580+
2581+/**
2582+ * Finds free space in the GTT aperture and binds the object there.
2583+ */
2584+static int
2585+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2586+{
2587+ struct drm_device *dev = obj->dev;
2588+ drm_i915_private_t *dev_priv = dev->dev_private;
2589+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2590+ struct drm_mm_node *free_space;
2591+ int page_count, ret;
2592+
2593+ if (alignment == 0)
2594+ alignment = PAGE_SIZE;
2595+ if (alignment & (PAGE_SIZE - 1)) {
2596+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2597+ return -EINVAL;
2598+ }
2599+
2600+ search_free:
2601+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2602+ obj->size, alignment, 0);
2603+ if (free_space != NULL) {
2604+ obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2605+ alignment);
2606+ if (obj_priv->gtt_space != NULL) {
2607+ obj_priv->gtt_space->private = obj;
2608+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
2609+ }
2610+ }
2611+ if (obj_priv->gtt_space == NULL) {
2612+ /* If the gtt is empty and we're still having trouble
2613+ * fitting our object in, we're out of memory.
2614+ */
2615+#if WATCH_LRU
2616+ DRM_INFO("%s: GTT full, evicting something\n", __func__);
2617+#endif
2618+ if (list_empty(&dev_priv->mm.inactive_list) &&
2619+ list_empty(&dev_priv->mm.flushing_list) &&
2620+ list_empty(&dev_priv->mm.active_list)) {
2621+ DRM_ERROR("GTT full, but LRU list empty\n");
2622+ return -ENOMEM;
2623+ }
2624+
2625+ ret = i915_gem_evict_something(dev);
2626+ if (ret != 0) {
2627+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
2628+ return ret;
2629+ }
2630+ goto search_free;
2631+ }
2632+
2633+#if WATCH_BUF
2634+ DRM_INFO("Binding object of size %d at 0x%08x\n",
2635+ obj->size, obj_priv->gtt_offset);
2636+#endif
2637+ ret = i915_gem_object_get_page_list(obj);
2638+ if (ret) {
2639+ drm_mm_put_block(obj_priv->gtt_space);
2640+ obj_priv->gtt_space = NULL;
2641+ return ret;
2642+ }
2643+
2644+ page_count = obj->size / PAGE_SIZE;
2645+ /* Create an AGP memory structure pointing at our pages, and bind it
2646+ * into the GTT.
2647+ */
2648+ obj_priv->agp_mem = drm_agp_bind_pages(dev,
2649+ obj_priv->page_list,
2650+ page_count,
2651+ obj_priv->gtt_offset);
2652+ if (obj_priv->agp_mem == NULL) {
2653+ i915_gem_object_free_page_list(obj);
2654+ drm_mm_put_block(obj_priv->gtt_space);
2655+ obj_priv->gtt_space = NULL;
2656+ return -ENOMEM;
2657+ }
2658+ atomic_inc(&dev->gtt_count);
2659+ atomic_add(obj->size, &dev->gtt_memory);
2660+
2661+ /* Assert that the object is not currently in any GPU domain. As it
2662+ * wasn't in the GTT, there shouldn't be any way it could have been in
2663+ * a GPU cache
2664+ */
2665+ BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2666+ BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2667+
2668+ return 0;
2669+}
2670+
2671+void
2672+i915_gem_clflush_object(struct drm_gem_object *obj)
2673+{
2674+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2675+
2676+ /* If we don't have a page list set up, then we're not pinned
2677+ * to GPU, and we can ignore the cache flush because it'll happen
2678+ * again at bind time.
2679+ */
2680+ if (obj_priv->page_list == NULL)
2681+ return;
2682+
2683+ drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
2684+}
2685+
2686+/*
2687+ * Set the next domain for the specified object. This
2688+ * may not actually perform the necessary flushing/invalidating though,
2689+ * as that may want to be batched with other set_domain operations.
2690+ *
2691+ * This is (we hope) the only really tricky part of gem. The goal
2692+ * is fairly simple -- track which caches hold bits of the object
2693+ * and make sure they remain coherent. A few concrete examples may
2694+ * help to explain how it works. For shorthand, we use the notation
2695+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2696+ * a pair of read and write domain masks.
2697+ *
2698+ * Case 1: the batch buffer
2699+ *
2700+ * 1. Allocated
2701+ * 2. Written by CPU
2702+ * 3. Mapped to GTT
2703+ * 4. Read by GPU
2704+ * 5. Unmapped from GTT
2705+ * 6. Freed
2706+ *
2707+ * Let's take these a step at a time
2708+ *
2709+ * 1. Allocated
2710+ * Pages allocated from the kernel may still have
2711+ * cache contents, so we set them to (CPU, CPU) always.
2712+ * 2. Written by CPU (using pwrite)
2713+ * The pwrite function calls set_domain (CPU, CPU) and
2714+ * this function does nothing (as nothing changes)
2715+ * 3. Mapped by GTT
2716+ * This function asserts that the object is not
2717+ * currently in any GPU-based read or write domains
2718+ * 4. Read by GPU
2719+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2720+ * As write_domain is zero, this function adds in the
2721+ * current read domains (CPU+COMMAND, 0).
2722+ * flush_domains is set to CPU.
2723+ * invalidate_domains is set to COMMAND
2724+ * clflush is run to get data out of the CPU caches
2725+ * then i915_dev_set_domain calls i915_gem_flush to
2726+ * emit an MI_FLUSH and drm_agp_chipset_flush
2727+ * 5. Unmapped from GTT
2728+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
2729+ * flush_domains and invalidate_domains end up both zero
2730+ * so no flushing/invalidating happens
2731+ * 6. Freed
2732+ * yay, done
2733+ *
2734+ * Case 2: The shared render buffer
2735+ *
2736+ * 1. Allocated
2737+ * 2. Mapped to GTT
2738+ * 3. Read/written by GPU
2739+ * 4. set_domain to (CPU,CPU)
2740+ * 5. Read/written by CPU
2741+ * 6. Read/written by GPU
2742+ *
2743+ * 1. Allocated
2744+ * Same as last example, (CPU, CPU)
2745+ * 2. Mapped to GTT
2746+ * Nothing changes (assertions find that it is not in the GPU)
2747+ * 3. Read/written by GPU
2748+ * execbuffer calls set_domain (RENDER, RENDER)
2749+ * flush_domains gets CPU
2750+ * invalidate_domains gets GPU
2751+ * clflush (obj)
2752+ * MI_FLUSH and drm_agp_chipset_flush
2753+ * 4. set_domain (CPU, CPU)
2754+ * flush_domains gets GPU
2755+ * invalidate_domains gets CPU
2756+ * wait_rendering (obj) to make sure all drawing is complete.
2757+ * This will include an MI_FLUSH to get the data from GPU
2758+ * to memory
2759+ * clflush (obj) to invalidate the CPU cache
2760+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2761+ * 5. Read/written by CPU
2762+ * cache lines are loaded and dirtied
2763+ * 6. Read/written by GPU
2764+ * Same as last GPU access
2765+ *
2766+ * Case 3: The constant buffer
2767+ *
2768+ * 1. Allocated
2769+ * 2. Written by CPU
2770+ * 3. Read by GPU
2771+ * 4. Updated (written) by CPU again
2772+ * 5. Read by GPU
2773+ *
2774+ * 1. Allocated
2775+ * (CPU, CPU)
2776+ * 2. Written by CPU
2777+ * (CPU, CPU)
2778+ * 3. Read by GPU
2779+ * (CPU+RENDER, 0)
2780+ * flush_domains = CPU
2781+ * invalidate_domains = RENDER
2782+ * clflush (obj)
2783+ * MI_FLUSH
2784+ * drm_agp_chipset_flush
2785+ * 4. Updated (written) by CPU again
2786+ * (CPU, CPU)
2787+ * flush_domains = 0 (no previous write domain)
2788+ * invalidate_domains = 0 (no new read domains)
2789+ * 5. Read by GPU
2790+ * (CPU+RENDER, 0)
2791+ * flush_domains = CPU
2792+ * invalidate_domains = RENDER
2793+ * clflush (obj)
2794+ * MI_FLUSH
2795+ * drm_agp_chipset_flush
2796+ */
2797+static int
2798+i915_gem_object_set_domain(struct drm_gem_object *obj,
2799+ uint32_t read_domains,
2800+ uint32_t write_domain)
2801+{
2802+ struct drm_device *dev = obj->dev;
2803+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2804+ uint32_t invalidate_domains = 0;
2805+ uint32_t flush_domains = 0;
2806+ int ret;
2807+
2808+#if WATCH_BUF
2809+ DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2810+ __func__, obj,
2811+ obj->read_domains, read_domains,
2812+ obj->write_domain, write_domain);
2813+#endif
2814+ /*
2815+ * If the object isn't moving to a new write domain,
2816+ * let the object stay in multiple read domains
2817+ */
2818+ if (write_domain == 0)
2819+ read_domains |= obj->read_domains;
2820+ else
2821+ obj_priv->dirty = 1;
2822+
2823+ /*
2824+ * Flush the current write domain if
2825+ * the new read domains don't match. Invalidate
2826+ * any read domains which differ from the old
2827+ * write domain
2828+ */
2829+ if (obj->write_domain && obj->write_domain != read_domains) {
2830+ flush_domains |= obj->write_domain;
2831+ invalidate_domains |= read_domains & ~obj->write_domain;
2832+ }
2833+ /*
2834+ * Invalidate any read caches which may have
2835+ * stale data. That is, any new read domains.
2836+ */
2837+ invalidate_domains |= read_domains & ~obj->read_domains;
2838+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2839+#if WATCH_BUF
2840+ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2841+ __func__, flush_domains, invalidate_domains);
2842+#endif
2843+ /*
2844+ * If we're invalidating the CPU cache and flushing a GPU cache,
2845+ * then pause for rendering so that the GPU caches will be
2846+ * flushed before the cpu cache is invalidated
2847+ */
2848+ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
2849+ (flush_domains & ~(I915_GEM_DOMAIN_CPU |
2850+ I915_GEM_DOMAIN_GTT))) {
2851+ ret = i915_gem_object_wait_rendering(obj);
2852+ if (ret)
2853+ return ret;
2854+ }
2855+ i915_gem_clflush_object(obj);
2856+ }
2857+
2858+ if ((write_domain | flush_domains) != 0)
2859+ obj->write_domain = write_domain;
2860+
2861+ /* If we're invalidating the CPU domain, clear the per-page CPU
2862+ * domain list as well.
2863+ */
2864+ if (obj_priv->page_cpu_valid != NULL &&
2865+ (write_domain != 0 ||
2866+ read_domains & I915_GEM_DOMAIN_CPU)) {
2867+ drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
2868+ DRM_MEM_DRIVER);
2869+ obj_priv->page_cpu_valid = NULL;
2870+ }
2871+ obj->read_domains = read_domains;
2872+
2873+ dev->invalidate_domains |= invalidate_domains;
2874+ dev->flush_domains |= flush_domains;
2875+#if WATCH_BUF
2876+ DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2877+ __func__,
2878+ obj->read_domains, obj->write_domain,
2879+ dev->invalidate_domains, dev->flush_domains);
2880+#endif
2881+ return 0;
2882+}
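/* Illustrative sketch (not part of the patch): the domain bookkeeping above,
 * reduced to a standalone helper using the same (read_domains, write_domain)
 * notation as the big comment.  The domain bit values mirror i915_drm.h; the
 * helper name and struct are invented for illustration.
 */
#include <stdint.h>

#define GEM_DOMAIN_CPU    0x1
#define GEM_DOMAIN_RENDER 0x2

struct domain_delta { uint32_t flush, invalidate; };

static struct domain_delta
domain_delta(uint32_t old_read, uint32_t old_write,
	     uint32_t new_read, uint32_t new_write)
{
	struct domain_delta d = { 0, 0 };

	if (new_write == 0)		/* pure readers accumulate */
		new_read |= old_read;

	if (old_write && old_write != new_read) {
		d.flush = old_write;	/* old writer must be flushed */
		d.invalidate = new_read & ~old_write;
	}
	d.invalidate |= new_read & ~old_read;	/* brand-new readers */
	return d;
}

/* Case 3, step 3 above: old (CPU, CPU) -> new (RENDER, 0) yields
 * flush = CPU, invalidate = RENDER, matching the walkthrough.
 */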
2883+
2884+/**
2885+ * Set the read/write domain on a range of the object.
2886+ *
2887+ * Currently only implemented for CPU reads, otherwise drops to normal
2888+ * i915_gem_object_set_domain().
2889+ */
2890+static int
2891+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
2892+ uint64_t offset,
2893+ uint64_t size,
2894+ uint32_t read_domains,
2895+ uint32_t write_domain)
2896+{
2897+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2898+ int ret, i;
2899+
2900+ if (obj->read_domains & I915_GEM_DOMAIN_CPU)
2901+ return 0;
2902+
2903+ if (read_domains != I915_GEM_DOMAIN_CPU ||
2904+ write_domain != 0)
2905+ return i915_gem_object_set_domain(obj,
2906+ read_domains, write_domain);
2907+
2908+ /* Wait on any GPU rendering to the object to be flushed. */
2909+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
2910+ ret = i915_gem_object_wait_rendering(obj);
2911+ if (ret)
2912+ return ret;
2913+ }
2914+
2915+ if (obj_priv->page_cpu_valid == NULL) {
2916+ obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
2917+ DRM_MEM_DRIVER);
2918+ }
2919+
2920+ /* Flush the cache on any pages that are still invalid from the CPU's
2921+ * perspective.
2922+ */
2923+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
2924+ if (obj_priv->page_cpu_valid[i])
2925+ continue;
2926+
2927+ drm_clflush_pages(obj_priv->page_list + i, 1);
2928+
2929+ obj_priv->page_cpu_valid[i] = 1;
2930+ }
2931+
2932+ return 0;
2933+}
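/* Illustrative sketch (not part of the patch): the loop bounds above visit
 * every page the byte range [offset, offset + size) touches.  Hypothetical
 * helper with 4096-byte pages:
 */
#include <stdint.h>

#define PG 4096u

static void page_span(uint64_t offset, uint64_t size,
		      uint64_t *first, uint64_t *last)
{
	*first = offset / PG;
	*last = (offset + size - 1) / PG;
	/* e.g. offset 4000, size 200 gives pages 0..1: the range crosses
	 * the page boundary at byte 4096.
	 */
}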
2934+
2935+/**
2936+ * Once all of the objects have been set in the proper domain,
2937+ * perform the necessary flush and invalidate operations.
2938+ *
2939+ * Returns the write domains flushed, for use in flush tracking.
2940+ */
2941+static uint32_t
2942+i915_gem_dev_set_domain(struct drm_device *dev)
2943+{
2944+ uint32_t flush_domains = dev->flush_domains;
2945+
2946+ /*
2947+ * Now that all the buffers are synced to the proper domains,
2948+ * flush and invalidate the collected domains
2949+ */
2950+ if (dev->invalidate_domains | dev->flush_domains) {
2951+#if WATCH_EXEC
2952+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2953+ __func__,
2954+ dev->invalidate_domains,
2955+ dev->flush_domains);
2956+#endif
2957+ i915_gem_flush(dev,
2958+ dev->invalidate_domains,
2959+ dev->flush_domains);
2960+ dev->invalidate_domains = 0;
2961+ dev->flush_domains = 0;
2962+ }
2963+
2964+ return flush_domains;
2965+}
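/* Illustrative sketch (not part of the patch): per-object set_domain calls
 * only OR their needs into dev->invalidate_domains / dev->flush_domains;
 * this function then issues one combined flush for the whole execbuffer
 * rather than one per object.  The gather-then-flush pattern in miniature:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pending_invalidate, pending_flush;

static void note_object(uint32_t inv, uint32_t flush)
{
	pending_invalidate |= inv;	/* gather ... */
	pending_flush |= flush;
}

static void flush_once(void)
{
	if (pending_invalidate | pending_flush) {	/* ... flush once */
		printf("flush inv=%08x flush=%08x\n",
		       pending_invalidate, pending_flush);
		pending_invalidate = pending_flush = 0;
	}
}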
2966+
2967+/**
2968+ * Pin an object to the GTT and evaluate the relocations landing in it.
2969+ */
2970+static int
2971+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2972+ struct drm_file *file_priv,
2973+ struct drm_i915_gem_exec_object *entry)
2974+{
2975+ struct drm_device *dev = obj->dev;
2976+ struct drm_i915_gem_relocation_entry reloc;
2977+ struct drm_i915_gem_relocation_entry __user *relocs;
2978+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
2979+ int i, ret;
2980+ uint32_t last_reloc_offset = -1;
2981+ void *reloc_page = NULL;
2982+
2983+ /* Choose the GTT offset for our buffer and put it there. */
2984+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2985+ if (ret)
2986+ return ret;
2987+
2988+ entry->offset = obj_priv->gtt_offset;
2989+
2990+ relocs = (struct drm_i915_gem_relocation_entry __user *)
2991+ (uintptr_t) entry->relocs_ptr;
2992+ /* Apply the relocations, using the GTT aperture to avoid cache
2993+ * flushing requirements.
2994+ */
2995+ for (i = 0; i < entry->relocation_count; i++) {
2996+ struct drm_gem_object *target_obj;
2997+ struct drm_i915_gem_object *target_obj_priv;
2998+ uint32_t reloc_val, reloc_offset, *reloc_entry;
2999+ int ret;
3000+
3001+ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
3002+ if (ret != 0) {
3003+ i915_gem_object_unpin(obj);
3004+ return ret;
3005+ }
3006+
3007+ target_obj = drm_gem_object_lookup(obj->dev, file_priv,
3008+ reloc.target_handle);
3009+ if (target_obj == NULL) {
3010+ i915_gem_object_unpin(obj);
3011+ return -EBADF;
3012+ }
3013+ target_obj_priv = target_obj->driver_private;
3014+
3015+ /* The target buffer should have appeared before us in the
3016+ * exec_object list, so it should have a GTT space bound by now.
3017+ */
3018+ if (target_obj_priv->gtt_space == NULL) {
3019+ DRM_ERROR("No GTT space found for object %d\n",
3020+ reloc.target_handle);
3021+ drm_gem_object_unreference(target_obj);
3022+ i915_gem_object_unpin(obj);
3023+ return -EINVAL;
3024+ }
3025+
3026+ if (reloc.offset > obj->size - 4) {
3027+ DRM_ERROR("Relocation beyond object bounds: "
3028+ "obj %p target %d offset %d size %d.\n",
3029+ obj, reloc.target_handle,
3030+ (int) reloc.offset, (int) obj->size);
3031+ drm_gem_object_unreference(target_obj);
3032+ i915_gem_object_unpin(obj);
3033+ return -EINVAL;
3034+ }
3035+ if (reloc.offset & 3) {
3036+ DRM_ERROR("Relocation not 4-byte aligned: "
3037+ "obj %p target %d offset %d.\n",
3038+ obj, reloc.target_handle,
3039+ (int) reloc.offset);
3040+ drm_gem_object_unreference(target_obj);
3041+ i915_gem_object_unpin(obj);
3042+ return -EINVAL;
3043+ }
3044+
3045+ if (reloc.write_domain && target_obj->pending_write_domain &&
3046+ reloc.write_domain != target_obj->pending_write_domain) {
3047+ DRM_ERROR("Write domain conflict: "
3048+ "obj %p target %d offset %d "
3049+ "new %08x old %08x\n",
3050+ obj, reloc.target_handle,
3051+ (int) reloc.offset,
3052+ reloc.write_domain,
3053+ target_obj->pending_write_domain);
3054+ drm_gem_object_unreference(target_obj);
3055+ i915_gem_object_unpin(obj);
3056+ return -EINVAL;
3057+ }
3058+
3059+#if WATCH_RELOC
3060+ DRM_INFO("%s: obj %p offset %08x target %d "
3061+ "read %08x write %08x gtt %08x "
3062+ "presumed %08x delta %08x\n",
3063+ __func__,
3064+ obj,
3065+ (int) reloc.offset,
3066+ (int) reloc.target_handle,
3067+ (int) reloc.read_domains,
3068+ (int) reloc.write_domain,
3069+ (int) target_obj_priv->gtt_offset,
3070+ (int) reloc.presumed_offset,
3071+ reloc.delta);
3072+#endif
3073+
3074+ target_obj->pending_read_domains |= reloc.read_domains;
3075+ target_obj->pending_write_domain |= reloc.write_domain;
3076+
3077+ /* If the relocation already has the right value in it, no
3078+ * more work needs to be done.
3079+ */
3080+ if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
3081+ drm_gem_object_unreference(target_obj);
3082+ continue;
3083+ }
3084+
3085+ /* Now that we're going to actually write some data in,
3086+ * make sure that any rendering using this buffer's contents
3087+ * is completed.
3088+ */
3089+ i915_gem_object_wait_rendering(obj);
3090+
3091+ /* As we're writing through the gtt, flush
3092+ * any CPU writes before we write the relocations
3093+ */
3094+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
3095+ i915_gem_clflush_object(obj);
3096+ drm_agp_chipset_flush(dev);
3097+ obj->write_domain = 0;
3098+ }
3099+
3100+ /* Map the page containing the relocation we're going to
3101+ * perform.
3102+ */
3103+ reloc_offset = obj_priv->gtt_offset + reloc.offset;
3104+ if (reloc_page == NULL ||
3105+ (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
3106+ (reloc_offset & ~(PAGE_SIZE - 1))) {
3107+ if (reloc_page != NULL)
3108+ iounmap(reloc_page);
3109+
3110+ reloc_page = ioremap(dev->agp->base +
3111+ (reloc_offset & ~(PAGE_SIZE - 1)),
3112+ PAGE_SIZE);
3113+ last_reloc_offset = reloc_offset;
3114+ if (reloc_page == NULL) {
3115+ drm_gem_object_unreference(target_obj);
3116+ i915_gem_object_unpin(obj);
3117+ return -ENOMEM;
3118+ }
3119+ }
3120+
3121+ reloc_entry = (uint32_t *)((char *)reloc_page +
3122+ (reloc_offset & (PAGE_SIZE - 1)));
3123+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
3124+
3125+#if WATCH_BUF
3126+ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3127+ obj, (unsigned int) reloc.offset,
3128+ readl(reloc_entry), reloc_val);
3129+#endif
3130+ writel(reloc_val, reloc_entry);
3131+
3132+ /* Write the updated presumed offset for this entry back out
3133+ * to the user.
3134+ */
3135+ reloc.presumed_offset = target_obj_priv->gtt_offset;
3136+ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
3137+ if (ret != 0) {
3138+ drm_gem_object_unreference(target_obj);
3139+ i915_gem_object_unpin(obj);
3140+ return ret;
3141+ }
3142+
3143+ drm_gem_object_unreference(target_obj);
3144+ }
3145+
3146+ if (reloc_page != NULL)
3147+ iounmap(reloc_page);
3148+
3149+#if WATCH_BUF
3150+ if (0)
3151+ i915_gem_dump_object(obj, 128, __func__, ~0);
3152+#endif
3153+ return 0;
3154+}
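/* Illustrative sketch (not part of the patch): what a single relocation
 * entry asks for, seen from userspace.  Field names follow struct
 * drm_i915_gem_relocation_entry introduced by this patch; apply_reloc()
 * is invented for illustration.
 */
#include <stdint.h>

struct reloc {
	uint32_t target_handle;   /* buffer whose GTT address is needed */
	uint32_t delta;           /* byte offset within the target */
	uint64_t offset;          /* dword-aligned patch site in this buffer */
	uint64_t presumed_offset; /* userspace's guess at the target address */
};

static void apply_reloc(uint32_t *buf, const struct reloc *r,
			uint64_t target_gtt_offset)
{
	/* If the guess was right, the buffer already holds the correct
	 * value and the kernel skips the write (and the GTT mapping).
	 */
	if (r->presumed_offset == target_gtt_offset)
		return;
	buf[r->offset / 4] = (uint32_t)(target_gtt_offset + r->delta);
}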
3155+
3156+/** Dispatch a batchbuffer to the ring
3157+ */
3158+static int
3159+i915_dispatch_gem_execbuffer(struct drm_device *dev,
3160+ struct drm_i915_gem_execbuffer *exec,
3161+ uint64_t exec_offset)
3162+{
3163+ drm_i915_private_t *dev_priv = dev->dev_private;
3164+ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
3165+ (uintptr_t) exec->cliprects_ptr;
3166+ int nbox = exec->num_cliprects;
3167+ int i = 0, count;
3168+ uint32_t exec_start, exec_len;
3169+ RING_LOCALS;
3170+
3171+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3172+ exec_len = (uint32_t) exec->batch_len;
3173+
3174+ if ((exec_start | exec_len) & 0x7) {
3175+ DRM_ERROR("alignment\n");
3176+ return -EINVAL;
3177+ }
3178+
3179+ if (!exec_start)
3180+ return -EINVAL;
3181+
3182+ count = nbox ? nbox : 1;
3183+
3184+ for (i = 0; i < count; i++) {
3185+ if (i < nbox) {
3186+ int ret = i915_emit_box(dev, boxes, i,
3187+ exec->DR1, exec->DR4);
3188+ if (ret)
3189+ return ret;
3190+ }
3191+
3192+ if (IS_I830(dev) || IS_845G(dev)) {
3193+ BEGIN_LP_RING(4);
3194+ OUT_RING(MI_BATCH_BUFFER);
3195+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3196+ OUT_RING(exec_start + exec_len - 4);
3197+ OUT_RING(0);
3198+ ADVANCE_LP_RING();
3199+ } else {
3200+ BEGIN_LP_RING(2);
3201+ if (IS_I965G(dev)) {
3202+ OUT_RING(MI_BATCH_BUFFER_START |
3203+ (2 << 6) |
3204+ MI_BATCH_NON_SECURE_I965);
3205+ OUT_RING(exec_start);
3206+ } else {
3207+ OUT_RING(MI_BATCH_BUFFER_START |
3208+ (2 << 6));
3209+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3210+ }
3211+ ADVANCE_LP_RING();
3212+ }
3213+ }
3214+
3215+ /* XXX breadcrumb */
3216+ return 0;
3217+}
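/* Illustrative sketch (not part of the patch): the (exec_start | exec_len)
 * & 0x7 test above checks both values for 8-byte alignment in a single
 * comparison, since OR-ing preserves any low bit set in either operand.
 */
#include <stdint.h>

static int both_qword_aligned(uint32_t start, uint32_t len)
{
	return ((start | len) & 0x7) == 0;
}
/* both_qword_aligned(0x1000, 0x40) -> 1; both_qword_aligned(0x1004, 0x40) -> 0 */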
3218+
3219+/* Throttle our rendering by waiting until the ring has completed our requests
3220+ * emitted over 20 msec ago.
3221+ *
3222+ * This should get us reasonable parallelism between CPU and GPU but also
3223+ * relatively low latency when blocking on a particular request to finish.
3224+ */
3225+static int
3226+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3227+{
3228+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3229+ int ret = 0;
3230+ uint32_t seqno;
3231+
3232+ mutex_lock(&dev->struct_mutex);
3233+ seqno = i915_file_priv->mm.last_gem_throttle_seqno;
3234+ i915_file_priv->mm.last_gem_throttle_seqno =
3235+ i915_file_priv->mm.last_gem_seqno;
3236+ if (seqno)
3237+ ret = i915_wait_request(dev, seqno);
3238+ mutex_unlock(&dev->struct_mutex);
3239+ return ret;
3240+}
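/* Illustrative sketch (not part of the patch): the throttle above is a
 * one-slot delay line -- each ioctl waits on the seqno recorded by the
 * previous one, so roughly one throttled submission stays in flight.
 * A standalone model of the same pattern:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t last_throttle_seqno;	/* cf. mm.last_gem_throttle_seqno */

static void throttle(uint32_t newest_seqno)
{
	uint32_t wait_for = last_throttle_seqno;

	last_throttle_seqno = newest_seqno;
	if (wait_for)
		printf("block until seqno %u retires\n", wait_for);
}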
3241+
3242+int
3243+i915_gem_execbuffer(struct drm_device *dev, void *data,
3244+ struct drm_file *file_priv)
3245+{
3246+ drm_i915_private_t *dev_priv = dev->dev_private;
3247+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3248+ struct drm_i915_gem_execbuffer *args = data;
3249+ struct drm_i915_gem_exec_object *exec_list = NULL;
3250+ struct drm_gem_object **object_list = NULL;
3251+ struct drm_gem_object *batch_obj;
3252+ int ret, i, pinned = 0;
3253+ uint64_t exec_offset;
3254+ uint32_t seqno, flush_domains;
3255+
3256+#if WATCH_EXEC
3257+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3258+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3259+#endif
3260+
3261+ /* Copy in the exec list from userland */
3262+ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
3263+ DRM_MEM_DRIVER);
3264+ object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
3265+ DRM_MEM_DRIVER);
3266+ if (exec_list == NULL || object_list == NULL) {
3267+ DRM_ERROR("Failed to allocate exec or object list "
3268+ "for %d buffers\n",
3269+ args->buffer_count);
3270+ ret = -ENOMEM;
3271+ goto pre_mutex_err;
3272+ }
3273+ ret = copy_from_user(exec_list,
3274+ (struct drm_i915_relocation_entry __user *)
3275+ (uintptr_t) args->buffers_ptr,
3276+ sizeof(*exec_list) * args->buffer_count);
3277+ if (ret != 0) {
3278+ DRM_ERROR("copy %d exec entries failed %d\n",
3279+ args->buffer_count, ret);
3280+ goto pre_mutex_err;
3281+ }
3282+
3283+ mutex_lock(&dev->struct_mutex);
3284+
3285+ i915_verify_inactive(dev, __FILE__, __LINE__);
3286+
3287+ if (dev_priv->mm.wedged) {
3288+ DRM_ERROR("Execbuf while wedged\n");
3289+ mutex_unlock(&dev->struct_mutex);
3290+ return -EIO;
3291+ }
3292+
3293+ if (dev_priv->mm.suspended) {
3294+ DRM_ERROR("Execbuf while VT-switched.\n");
3295+ mutex_unlock(&dev->struct_mutex);
3296+ return -EBUSY;
3297+ }
3298+
3299+ /* Zero the global flush/invalidate flags. These
3300+ * will be modified as each object is bound to the
3301+ * gtt
3302+ */
3303+ dev->invalidate_domains = 0;
3304+ dev->flush_domains = 0;
3305+
3306+ /* Look up object handles and perform the relocations */
3307+ for (i = 0; i < args->buffer_count; i++) {
3308+ object_list[i] = drm_gem_object_lookup(dev, file_priv,
3309+ exec_list[i].handle);
3310+ if (object_list[i] == NULL) {
3311+ DRM_ERROR("Invalid object handle %d at index %d\n",
3312+ exec_list[i].handle, i);
3313+ ret = -EBADF;
3314+ goto err;
3315+ }
3316+
3317+ object_list[i]->pending_read_domains = 0;
3318+ object_list[i]->pending_write_domain = 0;
3319+ ret = i915_gem_object_pin_and_relocate(object_list[i],
3320+ file_priv,
3321+ &exec_list[i]);
3322+ if (ret) {
3323+ DRM_ERROR("object bind and relocate failed %d\n", ret);
3324+ goto err;
3325+ }
3326+ pinned = i + 1;
3327+ }
3328+
3329+ /* Set the pending read domains for the batch buffer to COMMAND */
3330+ batch_obj = object_list[args->buffer_count-1];
3331+ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
3332+ batch_obj->pending_write_domain = 0;
3333+
3334+ i915_verify_inactive(dev, __FILE__, __LINE__);
3335+
3336+ for (i = 0; i < args->buffer_count; i++) {
3337+ struct drm_gem_object *obj = object_list[i];
3338+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
3339+
3340+ if (obj_priv->gtt_space == NULL) {
3341+ /* We evicted the buffer in the process of validating
3342+ * our set of buffers. We could try to recover by
3343+ * kicking everything out and trying again from
3344+ * the start.
3345+ */
3346+ ret = -ENOMEM;
3347+ goto err;
3348+ }
3349+
3350+ /* make sure all previous memory operations have passed */
3351+ ret = i915_gem_object_set_domain(obj,
3352+ obj->pending_read_domains,
3353+ obj->pending_write_domain);
3354+ if (ret)
3355+ goto err;
3356+ }
3357+
3358+ i915_verify_inactive(dev, __FILE__, __LINE__);
3359+
3360+ /* Flush/invalidate caches and chipset buffer */
3361+ flush_domains = i915_gem_dev_set_domain(dev);
3362+
3363+ i915_verify_inactive(dev, __FILE__, __LINE__);
3364+
3365+#if WATCH_COHERENCY
3366+ for (i = 0; i < args->buffer_count; i++) {
3367+ i915_gem_object_check_coherency(object_list[i],
3368+ exec_list[i].handle);
3369+ }
3370+#endif
3371+
3372+ exec_offset = exec_list[args->buffer_count - 1].offset;
3373+
3374+#if WATCH_EXEC
3375+ i915_gem_dump_object(object_list[args->buffer_count - 1],
3376+ args->batch_len,
3377+ __func__,
3378+ ~0);
3379+#endif
3380+
3381+ (void)i915_add_request(dev, flush_domains);
3382+
3383+ /* Exec the batchbuffer */
3384+ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
3385+ if (ret) {
3386+ DRM_ERROR("dispatch failed %d\n", ret);
3387+ goto err;
3388+ }
3389+
3390+ /*
3391+ * Ensure that the commands in the batch buffer are
3392+ * finished before the interrupt fires
3393+ */
3394+ flush_domains = i915_retire_commands(dev);
3395+
3396+ i915_verify_inactive(dev, __FILE__, __LINE__);
3397+
3398+ /*
3399+ * Get a seqno representing the execution of the current buffer,
3400+ * which we can wait on. We would like to mitigate these interrupts,
3401+ * likely by only creating seqnos occasionally (so that we have
3402+ * *some* interrupts representing completion of buffers that we can
3403+ * wait on when trying to clear up gtt space).
3404+ */
3405+ seqno = i915_add_request(dev, flush_domains);
3406+ BUG_ON(seqno == 0);
3407+ i915_file_priv->mm.last_gem_seqno = seqno;
3408+ for (i = 0; i < args->buffer_count; i++) {
3409+ struct drm_gem_object *obj = object_list[i];
3410+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
3411+
3412+ i915_gem_object_move_to_active(obj);
3413+ obj_priv->last_rendering_seqno = seqno;
3414+#if WATCH_LRU
3415+ DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3416+#endif
3417+ }
3418+#if WATCH_LRU
3419+ i915_dump_lru(dev, __func__);
3420+#endif
3421+
3422+ i915_verify_inactive(dev, __FILE__, __LINE__);
3423+
3424+ /* Copy the new buffer offsets back to the user's exec list. */
3425+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3426+ (uintptr_t) args->buffers_ptr,
3427+ exec_list,
3428+ sizeof(*exec_list) * args->buffer_count);
3429+ if (ret)
3430+ DRM_ERROR("failed to copy %d exec entries "
3431+ "back to user (%d)\n",
3432+ args->buffer_count, ret);
3433+err:
3434+ if (object_list != NULL) {
3435+ for (i = 0; i < pinned; i++)
3436+ i915_gem_object_unpin(object_list[i]);
3437+
3438+ for (i = 0; i < args->buffer_count; i++)
3439+ drm_gem_object_unreference(object_list[i]);
3440+ }
3441+ mutex_unlock(&dev->struct_mutex);
3442+
3443+pre_mutex_err:
3444+ drm_free(object_list, sizeof(*object_list) * args->buffer_count,
3445+ DRM_MEM_DRIVER);
3446+ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
3447+ DRM_MEM_DRIVER);
3448+
3449+ return ret;
3450+}
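/* Illustrative sketch (not part of the patch): the err: path above relies
 * on `pinned` counting only the successfully pinned objects, so a failure
 * part-way through unwinds exactly the work already done.  The idiom in
 * miniature (acquire/release are stand-ins):
 */
#include <stdio.h>

static int acquire(int i) { return i != 3; }	/* fail on the 4th */
static void release(int i) { printf("release %d\n", i); }

static int acquire_all(int n)
{
	int i, pinned = 0;

	for (i = 0; i < n; i++) {
		if (!acquire(i))
			goto err;
		pinned = i + 1;
	}
	return 0;
err:
	while (pinned--)
		release(pinned);
	return -1;
}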
3451+
3452+int
3453+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3454+{
3455+ struct drm_device *dev = obj->dev;
3456+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
3457+ int ret;
3458+
3459+ i915_verify_inactive(dev, __FILE__, __LINE__);
3460+ if (obj_priv->gtt_space == NULL) {
3461+ ret = i915_gem_object_bind_to_gtt(obj, alignment);
3462+ if (ret != 0) {
3463+ DRM_ERROR("Failure to bind: %d", ret);
3464+ return ret;
3465+ }
3466+ }
3467+ obj_priv->pin_count++;
3468+
3469+ /* If the object is not active and not pending a flush,
3470+ * remove it from the inactive list
3471+ */
3472+ if (obj_priv->pin_count == 1) {
3473+ atomic_inc(&dev->pin_count);
3474+ atomic_add(obj->size, &dev->pin_memory);
3475+ if (!obj_priv->active &&
3476+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3477+ I915_GEM_DOMAIN_GTT)) == 0 &&
3478+ !list_empty(&obj_priv->list))
3479+ list_del_init(&obj_priv->list);
3480+ }
3481+ i915_verify_inactive(dev, __FILE__, __LINE__);
3482+
3483+ return 0;
3484+}
3485+
3486+void
3487+i915_gem_object_unpin(struct drm_gem_object *obj)
3488+{
3489+ struct drm_device *dev = obj->dev;
3490+ drm_i915_private_t *dev_priv = dev->dev_private;
3491+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
3492+
3493+ i915_verify_inactive(dev, __FILE__, __LINE__);
3494+ obj_priv->pin_count--;
3495+ BUG_ON(obj_priv->pin_count < 0);
3496+ BUG_ON(obj_priv->gtt_space == NULL);
3497+
3498+ /* If the object is no longer pinned, and is
3499+ * neither active nor being flushed, then stick it on
3500+ * the inactive list
3501+ */
3502+ if (obj_priv->pin_count == 0) {
3503+ if (!obj_priv->active &&
3504+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3505+ I915_GEM_DOMAIN_GTT)) == 0)
3506+ list_move_tail(&obj_priv->list,
3507+ &dev_priv->mm.inactive_list);
3508+ atomic_dec(&dev->pin_count);
3509+ atomic_sub(obj->size, &dev->pin_memory);
3510+ }
3511+ i915_verify_inactive(dev, __FILE__, __LINE__);
3512+}
3513+
3514+int
3515+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3516+ struct drm_file *file_priv)
3517+{
3518+ struct drm_i915_gem_pin *args = data;
3519+ struct drm_gem_object *obj;
3520+ struct drm_i915_gem_object *obj_priv;
3521+ int ret;
3522+
3523+ mutex_lock(&dev->struct_mutex);
3524+
3525+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3526+ if (obj == NULL) {
3527+ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3528+ args->handle);
3529+ mutex_unlock(&dev->struct_mutex);
3530+ return -EBADF;
3531+ }
3532+ obj_priv = obj->driver_private;
3533+
3534+ ret = i915_gem_object_pin(obj, args->alignment);
3535+ if (ret != 0) {
3536+ drm_gem_object_unreference(obj);
3537+ mutex_unlock(&dev->struct_mutex);
3538+ return ret;
3539+ }
3540+
3541+ /* XXX - flush the CPU caches for pinned objects
3542+ * as the X server doesn't manage domains yet
3543+ */
3544+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
3545+ i915_gem_clflush_object(obj);
3546+ drm_agp_chipset_flush(dev);
3547+ obj->write_domain = 0;
3548+ }
3549+ args->offset = obj_priv->gtt_offset;
3550+ drm_gem_object_unreference(obj);
3551+ mutex_unlock(&dev->struct_mutex);
3552+
3553+ return 0;
3554+}
3555+
3556+int
3557+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3558+ struct drm_file *file_priv)
3559+{
3560+ struct drm_i915_gem_pin *args = data;
3561+ struct drm_gem_object *obj;
3562+
3563+ mutex_lock(&dev->struct_mutex);
3564+
3565+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3566+ if (obj == NULL) {
3567+ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3568+ args->handle);
3569+ mutex_unlock(&dev->struct_mutex);
3570+ return -EBADF;
3571+ }
3572+
3573+ i915_gem_object_unpin(obj);
3574+
3575+ drm_gem_object_unreference(obj);
3576+ mutex_unlock(&dev->struct_mutex);
3577+ return 0;
3578+}
3579+
3580+int
3581+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3582+ struct drm_file *file_priv)
3583+{
3584+ struct drm_i915_gem_busy *args = data;
3585+ struct drm_gem_object *obj;
3586+ struct drm_i915_gem_object *obj_priv;
3587+
3588+ mutex_lock(&dev->struct_mutex);
3589+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3590+ if (obj == NULL) {
3591+ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3592+ args->handle);
3593+ mutex_unlock(&dev->struct_mutex);
3594+ return -EBADF;
3595+ }
3596+
3597+ obj_priv = obj->driver_private;
3598+ args->busy = obj_priv->active;
3599+
3600+ drm_gem_object_unreference(obj);
3601+ mutex_unlock(&dev->struct_mutex);
3602+ return 0;
3603+}
3604+
3605+int
3606+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3607+ struct drm_file *file_priv)
3608+{
3609+ return i915_gem_ring_throttle(dev, file_priv);
3610+}
3611+
3612+int i915_gem_init_object(struct drm_gem_object *obj)
3613+{
3614+ struct drm_i915_gem_object *obj_priv;
3615+
3616+ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
3617+ if (obj_priv == NULL)
3618+ return -ENOMEM;
3619+
3620+ /*
3621+ * We've just allocated pages from the kernel,
3622+ * so they've just been written by the CPU with
3623+ * zeros. They'll need to be clflushed before we
3624+ * use them with the GPU.
3625+ */
3626+ obj->write_domain = I915_GEM_DOMAIN_CPU;
3627+ obj->read_domains = I915_GEM_DOMAIN_CPU;
3628+
3629+ obj->driver_private = obj_priv;
3630+ obj_priv->obj = obj;
3631+ INIT_LIST_HEAD(&obj_priv->list);
3632+ return 0;
3633+}
3634+
3635+void i915_gem_free_object(struct drm_gem_object *obj)
3636+{
3637+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
3638+
3639+ while (obj_priv->pin_count > 0)
3640+ i915_gem_object_unpin(obj);
3641+
3642+ i915_gem_object_unbind(obj);
3643+
3644+ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
3645+ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3646+}
3647+
3648+static int
3649+i915_gem_set_domain(struct drm_gem_object *obj,
3650+ struct drm_file *file_priv,
3651+ uint32_t read_domains,
3652+ uint32_t write_domain)
3653+{
3654+ struct drm_device *dev = obj->dev;
3655+ int ret;
3656+ uint32_t flush_domains;
3657+
3658+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
3659+
3660+ ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
3661+ if (ret)
3662+ return ret;
3663+ flush_domains = i915_gem_dev_set_domain(obj->dev);
3664+
3665+ if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
3666+ (void) i915_add_request(dev, flush_domains);
3667+
3668+ return 0;
3669+}
3670+
3671+/** Unbinds all objects that are on the given buffer list. */
3672+static int
3673+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3674+{
3675+ struct drm_gem_object *obj;
3676+ struct drm_i915_gem_object *obj_priv;
3677+ int ret;
3678+
3679+ while (!list_empty(head)) {
3680+ obj_priv = list_first_entry(head,
3681+ struct drm_i915_gem_object,
3682+ list);
3683+ obj = obj_priv->obj;
3684+
3685+ if (obj_priv->pin_count != 0) {
3686+ DRM_ERROR("Pinned object in unbind list\n");
3687+ mutex_unlock(&dev->struct_mutex);
3688+ return -EINVAL;
3689+ }
3690+
3691+ ret = i915_gem_object_unbind(obj);
3692+ if (ret != 0) {
3693+ DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3694+ ret);
3695+ mutex_unlock(&dev->struct_mutex);
3696+ return ret;
3697+ }
3698+ }
3699+
3700+
3701+ return 0;
3702+}
3703+
3704+static int
3705+i915_gem_idle(struct drm_device *dev)
3706+{
3707+ drm_i915_private_t *dev_priv = dev->dev_private;
3708+ uint32_t seqno, cur_seqno, last_seqno;
3709+ int stuck, ret;
3710+
3711+ if (dev_priv->mm.suspended)
3712+ return 0;
3713+
3714+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
3715+ * We need to replace this with a semaphore, or something.
3716+ */
3717+ dev_priv->mm.suspended = 1;
3718+
3719+ i915_kernel_lost_context(dev);
3720+
3721+ /* Flush the GPU along with all non-CPU write domains
3722+ */
3723+ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
3724+ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
3725+ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
3726+ I915_GEM_DOMAIN_GTT));
3727+
3728+ if (seqno == 0) {
3729+ mutex_unlock(&dev->struct_mutex);
3730+ return -ENOMEM;
3731+ }
3732+
3733+ dev_priv->mm.waiting_gem_seqno = seqno;
3734+ last_seqno = 0;
3735+ stuck = 0;
3736+ for (;;) {
3737+ cur_seqno = i915_get_gem_seqno(dev);
3738+ if (i915_seqno_passed(cur_seqno, seqno))
3739+ break;
3740+ if (last_seqno == cur_seqno) {
3741+ if (stuck++ > 100) {
3742+ DRM_ERROR("hardware wedged\n");
3743+ dev_priv->mm.wedged = 1;
3744+ DRM_WAKEUP(&dev_priv->irq_queue);
3745+ break;
3746+ }
3747+ }
3748+ msleep(10);
3749+ last_seqno = cur_seqno;
3750+ }
3751+ dev_priv->mm.waiting_gem_seqno = 0;
3752+
3753+ i915_gem_retire_requests(dev);
3754+
3755+ /* Active and flushing should now be empty as we've
3756+ * waited for a sequence higher than any pending execbuffer
3757+ */
3758+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
3759+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3760+
3761+ /* Request should now be empty as we've also waited
3762+ * for the last request in the list
3763+ */
3764+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
3765+
3766+ /* Move all buffers out of the GTT. */
3767+ ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
3768+ if (ret)
3769+ return ret;
3770+
3771+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
3772+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3773+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3774+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
3775+ return 0;
3776+}
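/* Illustrative sketch (not part of the patch): i915_seqno_passed(), used in
 * the polling loop above and defined earlier in this patch, must survive
 * 32-bit sequence-number wraparound.  The usual serial-arithmetic idiom:
 */
#include <stdint.h>

static int seqno_passed(uint32_t cur, uint32_t target)
{
	/* True while cur is within 2^31 ahead of target, even across wrap. */
	return (int32_t)(cur - target) >= 0;
}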
3777+
3778+static int
3779+i915_gem_init_hws(struct drm_device *dev)
3780+{
3781+ drm_i915_private_t *dev_priv = dev->dev_private;
3782+ struct drm_gem_object *obj;
3783+ struct drm_i915_gem_object *obj_priv;
3784+ int ret;
3785+
3786+ /* If we need a physical address for the status page, it's already
3787+ * initialized at driver load time.
3788+ */
3789+ if (!I915_NEED_GFX_HWS(dev))
3790+ return 0;
3791+
3792+ obj = drm_gem_object_alloc(dev, 4096);
3793+ if (obj == NULL) {
3794+ DRM_ERROR("Failed to allocate status page\n");
3795+ return -ENOMEM;
3796+ }
3797+ obj_priv = obj->driver_private;
3798+
3799+ ret = i915_gem_object_pin(obj, 4096);
3800+ if (ret != 0) {
3801+ drm_gem_object_unreference(obj);
3802+ return ret;
3803+ }
3804+
3805+ dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3806+ dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
3807+ dev_priv->hws_map.size = 4096;
3808+ dev_priv->hws_map.type = 0;
3809+ dev_priv->hws_map.flags = 0;
3810+ dev_priv->hws_map.mtrr = 0;
3811+
3812+ drm_core_ioremap(&dev_priv->hws_map, dev);
3813+ if (dev_priv->hws_map.handle == NULL) {
3814+ DRM_ERROR("Failed to map status page.\n");
3815+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3816+ drm_gem_object_unreference(obj);
3817+ return -EINVAL;
3818+ }
3819+ dev_priv->hws_obj = obj;
3820+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
3821+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
3822+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
3823+ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
3824+
3825+ return 0;
3826+}
3827+
3828+static int
3829+i915_gem_init_ringbuffer(struct drm_device *dev)
3830+{
3831+ drm_i915_private_t *dev_priv = dev->dev_private;
3832+ struct drm_gem_object *obj;
3833+ struct drm_i915_gem_object *obj_priv;
3834+ int ret;
3835+
3836+ ret = i915_gem_init_hws(dev);
3837+ if (ret != 0)
3838+ return ret;
3839+
3840+ obj = drm_gem_object_alloc(dev, 128 * 1024);
3841+ if (obj == NULL) {
3842+ DRM_ERROR("Failed to allocate ringbuffer\n");
3843+ return -ENOMEM;
3844+ }
3845+ obj_priv = obj->driver_private;
3846+
3847+ ret = i915_gem_object_pin(obj, 4096);
3848+ if (ret != 0) {
3849+ drm_gem_object_unreference(obj);
3850+ return ret;
3851+ }
3852+
3853+ /* Set up the kernel mapping for the ring. */
3854+ dev_priv->ring.Size = obj->size;
3855+ dev_priv->ring.tail_mask = obj->size - 1;
3856+
3857+ dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
3858+ dev_priv->ring.map.size = obj->size;
3859+ dev_priv->ring.map.type = 0;
3860+ dev_priv->ring.map.flags = 0;
3861+ dev_priv->ring.map.mtrr = 0;
3862+
3863+ drm_core_ioremap(&dev_priv->ring.map, dev);
3864+ if (dev_priv->ring.map.handle == NULL) {
3865+ DRM_ERROR("Failed to map ringbuffer.\n");
3866+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3867+ drm_gem_object_unreference(obj);
3868+ return -EINVAL;
3869+ }
3870+ dev_priv->ring.ring_obj = obj;
3871+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
3872+
3873+ /* Stop the ring if it's running. */
3874+ I915_WRITE(PRB0_CTL, 0);
3875+ I915_WRITE(PRB0_HEAD, 0);
3876+ I915_WRITE(PRB0_TAIL, 0);
3877+ I915_WRITE(PRB0_START, 0);
3878+
3879+ /* Initialize the ring. */
3880+ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
3881+ I915_WRITE(PRB0_CTL,
3882+ ((obj->size - 4096) & RING_NR_PAGES) |
3883+ RING_NO_REPORT |
3884+ RING_VALID);
3885+
3886+ /* Update our cache of the ring state */
3887+ i915_kernel_lost_context(dev);
3888+
3889+ return 0;
3890+}
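/* Illustrative sketch (not part of the patch): ring.tail_mask = size - 1
 * above assumes a power-of-two ring (128 KiB here), so advancing the tail
 * wraps with a single AND instead of a modulo:
 */
#include <stdint.h>

#define RING_SIZE (128 * 1024)	/* must stay a power of two */

static uint32_t advance_tail(uint32_t tail, uint32_t bytes)
{
	return (tail + bytes) & (RING_SIZE - 1);
}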
3891+
3892+static void
3893+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3894+{
3895+ drm_i915_private_t *dev_priv = dev->dev_private;
3896+
3897+ if (dev_priv->ring.ring_obj == NULL)
3898+ return;
3899+
3900+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
3901+
3902+ i915_gem_object_unpin(dev_priv->ring.ring_obj);
3903+ drm_gem_object_unreference(dev_priv->ring.ring_obj);
3904+ dev_priv->ring.ring_obj = NULL;
3905+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3906+
3907+ if (dev_priv->hws_obj != NULL) {
3908+ i915_gem_object_unpin(dev_priv->hws_obj);
3909+ drm_gem_object_unreference(dev_priv->hws_obj);
3910+ dev_priv->hws_obj = NULL;
3911+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3912+
3913+ /* Write high address into HWS_PGA when disabling. */
3914+ I915_WRITE(HWS_PGA, 0x1ffff000);
3915+ }
3916+}
3917+
3918+int
3919+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3920+ struct drm_file *file_priv)
3921+{
3922+ drm_i915_private_t *dev_priv = dev->dev_private;
3923+ int ret;
3924+
3925+ if (dev_priv->mm.wedged) {
3926+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
3927+ dev_priv->mm.wedged = 0;
3928+ }
3929+
3930+ ret = i915_gem_init_ringbuffer(dev);
3931+ if (ret != 0)
3932+ return ret;
3933+
3934+ mutex_lock(&dev->struct_mutex);
3935+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
3936+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3937+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3938+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
3939+ dev_priv->mm.suspended = 0;
3940+ mutex_unlock(&dev->struct_mutex);
3941+ return 0;
3942+}
3943+
3944+int
3945+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3946+ struct drm_file *file_priv)
3947+{
3948+ int ret;
3949+
3950+ mutex_lock(&dev->struct_mutex);
3951+ ret = i915_gem_idle(dev);
3952+ if (ret == 0)
3953+ i915_gem_cleanup_ringbuffer(dev);
3954+ mutex_unlock(&dev->struct_mutex);
3955+
3956+ return ret;
3957+}
3958+
3959+void
3960+i915_gem_lastclose(struct drm_device *dev)
3961+{
3962+ int ret;
3963+ drm_i915_private_t *dev_priv = dev->dev_private;
3964+
3965+ mutex_lock(&dev->struct_mutex);
3966+
3967+ if (dev_priv->ring.ring_obj != NULL) {
3968+ ret = i915_gem_idle(dev);
3969+ if (ret)
3970+ DRM_ERROR("failed to idle hardware: %d\n", ret);
3971+
3972+ i915_gem_cleanup_ringbuffer(dev);
3973+ }
3974+
3975+ mutex_unlock(&dev->struct_mutex);
3976+}
3977+
3978+void
3979+i915_gem_load(struct drm_device *dev)
3980+{
3981+ drm_i915_private_t *dev_priv = dev->dev_private;
3982+
3983+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
3984+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3985+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3986+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
3987+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3988+ i915_gem_retire_work_handler);
3989+ dev_priv->mm.next_gem_seqno = 1;
3990+
3991+ i915_gem_detect_bit_6_swizzle(dev);
3992+}
3993diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
3994new file mode 100644
3995index 0000000..131c088
3996--- /dev/null
3997+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
3998@@ -0,0 +1,201 @@
3999+/*
4000+ * Copyright © 2008 Intel Corporation
4001+ *
4002+ * Permission is hereby granted, free of charge, to any person obtaining a
4003+ * copy of this software and associated documentation files (the "Software"),
4004+ * to deal in the Software without restriction, including without limitation
4005+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
4006+ * and/or sell copies of the Software, and to permit persons to whom the
4007+ * Software is furnished to do so, subject to the following conditions:
4008+ *
4009+ * The above copyright notice and this permission notice (including the next
4010+ * paragraph) shall be included in all copies or substantial portions of the
4011+ * Software.
4012+ *
4013+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4014+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4015+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
4016+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4017+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4018+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
4019+ * IN THE SOFTWARE.
4020+ *
4021+ * Authors:
4022+ * Keith Packard <keithp@keithp.com>
4023+ *
4024+ */
4025+
4026+#include "drmP.h"
4027+#include "drm.h"
4028+#include "i915_drm.h"
4029+#include "i915_drv.h"
4030+
4031+#if WATCH_INACTIVE
4032+void
4033+i915_verify_inactive(struct drm_device *dev, char *file, int line)
4034+{
4035+ drm_i915_private_t *dev_priv = dev->dev_private;
4036+ struct drm_gem_object *obj;
4037+ struct drm_i915_gem_object *obj_priv;
4038+
4039+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
4040+ obj = obj_priv->obj;
4041+ if (obj_priv->pin_count || obj_priv->active ||
4042+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
4043+ I915_GEM_DOMAIN_GTT)))
4044+ DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
4045+ obj,
4046+ obj_priv->pin_count, obj_priv->active,
4047+ obj->write_domain, file, line);
4048+ }
4049+}
4050+#endif /* WATCH_INACTIVE */
4051+
4052+
4053+#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
4054+static void
4055+i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
4056+ uint32_t bias, uint32_t mark)
4057+{
4058+ uint32_t *mem = kmap_atomic(page, KM_USER0);
4059+ int i;
4060+ for (i = start; i < end; i += 4)
4061+ DRM_INFO("%08x: %08x%s\n",
4062+ (int) (bias + i), mem[i / 4],
4063+ (bias + i == mark) ? " ********" : "");
4064+ kunmap_atomic(mem, KM_USER0);
4065+ /* give syslog time to catch up */
4066+ msleep(1);
4067+}
4068+
4069+void
4070+i915_gem_dump_object(struct drm_gem_object *obj, int len,
4071+ const char *where, uint32_t mark)
4072+{
4073+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
4074+ int page;
4075+
4076+ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
4077+ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
4078+ int page_len, chunk, chunk_len;
4079+
4080+ page_len = len - page * PAGE_SIZE;
4081+ if (page_len > PAGE_SIZE)
4082+ page_len = PAGE_SIZE;
4083+
4084+ for (chunk = 0; chunk < page_len; chunk += 128) {
4085+ chunk_len = page_len - chunk;
4086+ if (chunk_len > 128)
4087+ chunk_len = 128;
4088+ i915_gem_dump_page(obj_priv->page_list[page],
4089+ chunk, chunk + chunk_len,
4090+ obj_priv->gtt_offset +
4091+ page * PAGE_SIZE,
4092+ mark);
4093+ }
4094+ }
4095+}
4096+#endif
4097+
4098+#if WATCH_LRU
4099+void
4100+i915_dump_lru(struct drm_device *dev, const char *where)
4101+{
4102+ drm_i915_private_t *dev_priv = dev->dev_private;
4103+ struct drm_i915_gem_object *obj_priv;
4104+
4105+ DRM_INFO("active list %s {\n", where);
4106+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
4107+ list)
4108+ {
4109+ DRM_INFO(" %p: %08x\n", obj_priv,
4110+ obj_priv->last_rendering_seqno);
4111+ }
4112+ DRM_INFO("}\n");
4113+ DRM_INFO("flushing list %s {\n", where);
4114+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
4115+ list)
4116+ {
4117+ DRM_INFO(" %p: %08x\n", obj_priv,
4118+ obj_priv->last_rendering_seqno);
4119+ }
4120+ DRM_INFO("}\n");
4121+ DRM_INFO("inactive %s {\n", where);
4122+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
4123+ DRM_INFO(" %p: %08x\n", obj_priv,
4124+ obj_priv->last_rendering_seqno);
4125+ }
4126+ DRM_INFO("}\n");
4127+}
4128+#endif
4129+
4130+
4131+#if WATCH_COHERENCY
4132+void
4133+i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
4134+{
4135+ struct drm_device *dev = obj->dev;
4136+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
4137+ int page;
4138+ uint32_t *gtt_mapping;
4139+ uint32_t *backing_map = NULL;
4140+ int bad_count = 0;
4141+
4142+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
4143+ __func__, obj, obj_priv->gtt_offset, handle,
4144+ obj->size / 1024);
4145+
4146+ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
4147+ obj->size);
4148+ if (gtt_mapping == NULL) {
4149+ DRM_ERROR("failed to map GTT space\n");
4150+ return;
4151+ }
4152+
4153+ for (page = 0; page < obj->size / PAGE_SIZE; page++) {
4154+ int i;
4155+
4156+ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
4157+
4158+ if (backing_map == NULL) {
4159+ DRM_ERROR("failed to map backing page\n");
4160+ goto out;
4161+ }
4162+
4163+ for (i = 0; i < PAGE_SIZE / 4; i++) {
4164+ uint32_t cpuval = backing_map[i];
4165+ uint32_t gttval = readl(gtt_mapping +
4166+ page * 1024 + i);
4167+
4168+ if (cpuval != gttval) {
4169+ DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
4170+ "0x%08x vs 0x%08x\n",
4171+ (int)(obj_priv->gtt_offset +
4172+ page * PAGE_SIZE + i * 4),
4173+ cpuval, gttval);
4174+ if (bad_count++ >= 8) {
4175+ DRM_INFO("...\n");
4176+ goto out;
4177+ }
4178+ }
4179+ }
4180+ kunmap_atomic(backing_map, KM_USER0);
4181+ backing_map = NULL;
4182+ }
4183+
4184+ out:
4185+ if (backing_map != NULL)
4186+ kunmap_atomic(backing_map, KM_USER0);
4187+ iounmap(gtt_mapping);
4188+
4189+ /* give syslog time to catch up */
4190+ msleep(1);
4191+
4192+ /* Directly flush the object, since we just loaded values with the CPU
4193+ * from the backing pages and we don't want to disturb the cache
4194+ * management that we're trying to observe.
4195+ */
4196+
4197+ i915_gem_clflush_object(obj);
4198+}
4199+#endif
4200diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
4201new file mode 100644
4202index 0000000..15d4160
4203--- /dev/null
4204+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
4205@@ -0,0 +1,292 @@
4206+/*
4207+ * Copyright © 2008 Intel Corporation
4208+ *
4209+ * Permission is hereby granted, free of charge, to any person obtaining a
4210+ * copy of this software and associated documentation files (the "Software"),
4211+ * to deal in the Software without restriction, including without limitation
4212+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
4213+ * and/or sell copies of the Software, and to permit persons to whom the
4214+ * Software is furnished to do so, subject to the following conditions:
4215+ *
4216+ * The above copyright notice and this permission notice (including the next
4217+ * paragraph) shall be included in all copies or substantial portions of the
4218+ * Software.
4219+ *
4220+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4221+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4222+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
4223+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4224+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4225+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
4226+ * IN THE SOFTWARE.
4227+ *
4228+ * Authors:
4229+ * Eric Anholt <eric@anholt.net>
4230+ * Keith Packard <keithp@keithp.com>
4231+ *
4232+ */
4233+
4234+#include "drmP.h"
4235+#include "drm.h"
4236+#include "i915_drm.h"
4237+#include "i915_drv.h"
4238+
4239+static int i915_gem_active_info(char *buf, char **start, off_t offset,
4240+ int request, int *eof, void *data)
4241+{
4242+ struct drm_minor *minor = (struct drm_minor *) data;
4243+ struct drm_device *dev = minor->dev;
4244+ drm_i915_private_t *dev_priv = dev->dev_private;
4245+ struct drm_i915_gem_object *obj_priv;
4246+ int len = 0;
4247+
4248+ if (offset > DRM_PROC_LIMIT) {
4249+ *eof = 1;
4250+ return 0;
4251+ }
4252+
4253+ *start = &buf[offset];
4254+ *eof = 0;
4255+ DRM_PROC_PRINT("Active:\n");
4256+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
4257+ list)
4258+ {
4259+ struct drm_gem_object *obj = obj_priv->obj;
4260+ if (obj->name) {
4261+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
4262+ obj, obj->name,
4263+ obj->read_domains, obj->write_domain,
4264+ obj_priv->last_rendering_seqno);
4265+ } else {
4266+ DRM_PROC_PRINT(" %p: %08x %08x %d\n",
4267+ obj,
4268+ obj->read_domains, obj->write_domain,
4269+ obj_priv->last_rendering_seqno);
4270+ }
4271+ }
4272+ if (len > request + offset)
4273+ return request;
4274+ *eof = 1;
4275+ return len - offset;
4276+}
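/* Illustrative sketch (not part of the patch): each handler in this file
 * follows the legacy procfs read_proc contract -- fill buf, point *start at
 * the caller's offset, set *eof once the whole report fits, and return the
 * bytes remaining past `offset`.  A hypothetical minimal handler of the
 * same shape (userspace stand-in, snprintf in place of DRM_PROC_PRINT):
 */
#include <stdio.h>
#include <sys/types.h>

static int tiny_info(char *buf, char **start, off_t offset,
		     int request, int *eof, void *data)
{
	int len = snprintf(buf, request, "hello\n");

	*start = &buf[offset];
	if (len > request + offset)
		return request;
	*eof = 1;
	return len > offset ? len - offset : 0;
}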
4277+
4278+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
4279+ int request, int *eof, void *data)
4280+{
4281+ struct drm_minor *minor = (struct drm_minor *) data;
4282+ struct drm_device *dev = minor->dev;
4283+ drm_i915_private_t *dev_priv = dev->dev_private;
4284+ struct drm_i915_gem_object *obj_priv;
4285+ int len = 0;
4286+
4287+ if (offset > DRM_PROC_LIMIT) {
4288+ *eof = 1;
4289+ return 0;
4290+ }
4291+
4292+ *start = &buf[offset];
4293+ *eof = 0;
4294+ DRM_PROC_PRINT("Flushing:\n");
4295+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
4296+ list)
4297+ {
4298+ struct drm_gem_object *obj = obj_priv->obj;
4299+ if (obj->name) {
4300+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
4301+ obj, obj->name,
4302+ obj->read_domains, obj->write_domain,
4303+ obj_priv->last_rendering_seqno);
4304+ } else {
4305+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
4306+ obj->read_domains, obj->write_domain,
4307+ obj_priv->last_rendering_seqno);
4308+ }
4309+ }
4310+ if (len > request + offset)
4311+ return request;
4312+ *eof = 1;
4313+ return len - offset;
4314+}
4315+
4316+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
4317+ int request, int *eof, void *data)
4318+{
4319+ struct drm_minor *minor = (struct drm_minor *) data;
4320+ struct drm_device *dev = minor->dev;
4321+ drm_i915_private_t *dev_priv = dev->dev_private;
4322+ struct drm_i915_gem_object *obj_priv;
4323+ int len = 0;
4324+
4325+ if (offset > DRM_PROC_LIMIT) {
4326+ *eof = 1;
4327+ return 0;
4328+ }
4329+
4330+ *start = &buf[offset];
4331+ *eof = 0;
4332+ DRM_PROC_PRINT("Inactive:\n");
4333+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
4334+ list)
4335+ {
4336+ struct drm_gem_object *obj = obj_priv->obj;
4337+ if (obj->name) {
4338+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
4339+ obj, obj->name,
4340+ obj->read_domains, obj->write_domain,
4341+ obj_priv->last_rendering_seqno);
4342+ } else {
4343+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
4344+ obj->read_domains, obj->write_domain,
4345+ obj_priv->last_rendering_seqno);
4346+ }
4347+ }
4348+ if (len > request + offset)
4349+ return request;
4350+ *eof = 1;
4351+ return len - offset;
4352+}
4353+
4354+static int i915_gem_request_info(char *buf, char **start, off_t offset,
4355+ int request, int *eof, void *data)
4356+{
4357+ struct drm_minor *minor = (struct drm_minor *) data;
4358+ struct drm_device *dev = minor->dev;
4359+ drm_i915_private_t *dev_priv = dev->dev_private;
4360+ struct drm_i915_gem_request *gem_request;
4361+ int len = 0;
4362+
4363+ if (offset > DRM_PROC_LIMIT) {
4364+ *eof = 1;
4365+ return 0;
4366+ }
4367+
4368+ *start = &buf[offset];
4369+ *eof = 0;
4370+ DRM_PROC_PRINT("Request:\n");
4371+ list_for_each_entry(gem_request, &dev_priv->mm.request_list,
4372+ list)
4373+ {
4374+ DRM_PROC_PRINT(" %d @ %d %08x\n",
4375+ gem_request->seqno,
4376+ (int) (jiffies - gem_request->emitted_jiffies),
4377+ gem_request->flush_domains);
4378+ }
4379+ if (len > request + offset)
4380+ return request;
4381+ *eof = 1;
4382+ return len - offset;
4383+}
4384+
4385+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
4386+ int request, int *eof, void *data)
4387+{
4388+ struct drm_minor *minor = (struct drm_minor *) data;
4389+ struct drm_device *dev = minor->dev;
4390+ drm_i915_private_t *dev_priv = dev->dev_private;
4391+ int len = 0;
4392+
4393+ if (offset > DRM_PROC_LIMIT) {
4394+ *eof = 1;
4395+ return 0;
4396+ }
4397+
4398+ *start = &buf[offset];
4399+ *eof = 0;
4400+ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
4401+ DRM_PROC_PRINT("Waiter sequence: %d\n",
4402+ dev_priv->mm.waiting_gem_seqno);
4403+ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
4404+ if (len > request + offset)
4405+ return request;
4406+ *eof = 1;
4407+ return len - offset;
4408+}
4409+
4410+
4411+static int i915_interrupt_info(char *buf, char **start, off_t offset,
4412+ int request, int *eof, void *data)
4413+{
4414+ struct drm_minor *minor = (struct drm_minor *) data;
4415+ struct drm_device *dev = minor->dev;
4416+ drm_i915_private_t *dev_priv = dev->dev_private;
4417+ int len = 0;
4418+
4419+ if (offset > DRM_PROC_LIMIT) {
4420+ *eof = 1;
4421+ return 0;
4422+ }
4423+
4424+ *start = &buf[offset];
4425+ *eof = 0;
4426+ DRM_PROC_PRINT("Interrupt enable: %08x\n",
4427+ I915_READ(IER));
4428+ DRM_PROC_PRINT("Interrupt identity: %08x\n",
4429+ I915_READ(IIR));
4430+ DRM_PROC_PRINT("Interrupt mask: %08x\n",
4431+ I915_READ(IMR));
4432+ DRM_PROC_PRINT("Pipe A stat: %08x\n",
4433+ I915_READ(PIPEASTAT));
4434+ DRM_PROC_PRINT("Pipe B stat: %08x\n",
4435+ I915_READ(PIPEBSTAT));
4436+ DRM_PROC_PRINT("Interrupts received: %d\n",
4437+ atomic_read(&dev_priv->irq_received));
4438+ DRM_PROC_PRINT("Current sequence: %d\n",
4439+ i915_get_gem_seqno(dev));
4440+ DRM_PROC_PRINT("Waiter sequence: %d\n",
4441+ dev_priv->mm.waiting_gem_seqno);
4442+ DRM_PROC_PRINT("IRQ sequence: %d\n",
4443+ dev_priv->mm.irq_gem_seqno);
4444+ if (len > request + offset)
4445+ return request;
4446+ *eof = 1;
4447+ return len - offset;
4448+}
4449+
4450+static struct drm_proc_list {
4451+ /** file name */
4452+ const char *name;
4453+ /** proc callback*/
4454+ int (*f) (char *, char **, off_t, int, int *, void *);
4455+} i915_gem_proc_list[] = {
4456+ {"i915_gem_active", i915_gem_active_info},
4457+ {"i915_gem_flushing", i915_gem_flushing_info},
4458+ {"i915_gem_inactive", i915_gem_inactive_info},
4459+ {"i915_gem_request", i915_gem_request_info},
4460+ {"i915_gem_seqno", i915_gem_seqno_info},
4461+ {"i915_gem_interrupt", i915_interrupt_info},
4462+};
4463+
4464+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
4465+
4466+int i915_gem_proc_init(struct drm_minor *minor)
4467+{
4468+ struct proc_dir_entry *ent;
4469+ int i, j;
4470+
4471+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
4472+ ent = create_proc_entry(i915_gem_proc_list[i].name,
4473+ S_IFREG | S_IRUGO, minor->dev_root);
4474+ if (!ent) {
4475+ DRM_ERROR("Cannot create /proc/dri/.../%s\n",
4476+ i915_gem_proc_list[i].name);
4477+ for (j = 0; j < i; j++)
4478+ remove_proc_entry(i915_gem_proc_list[j].name,
4479+ minor->dev_root);
4480+ return -1;
4481+ }
4482+ ent->read_proc = i915_gem_proc_list[i].f;
4483+ ent->data = minor;
4484+ }
4485+ return 0;
4486+}
4487+
4488+void i915_gem_proc_cleanup(struct drm_minor *minor)
4489+{
4490+ int i;
4491+
4492+ if (!minor->dev_root)
4493+ return;
4494+
4495+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
4496+ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
4497+}
4498diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
4499new file mode 100644
4500index 0000000..0c1b3a0
4501--- /dev/null
4502+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
4503@@ -0,0 +1,256 @@
4504+/*
4505+ * Copyright © 2008 Intel Corporation
4506+ *
4507+ * Permission is hereby granted, free of charge, to any person obtaining a
4508+ * copy of this software and associated documentation files (the "Software"),
4509+ * to deal in the Software without restriction, including without limitation
4510+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
4511+ * and/or sell copies of the Software, and to permit persons to whom the
4512+ * Software is furnished to do so, subject to the following conditions:
4513+ *
4514+ * The above copyright notice and this permission notice (including the next
4515+ * paragraph) shall be included in all copies or substantial portions of the
4516+ * Software.
4517+ *
4518+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4519+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4520+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
4521+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4522+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4523+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
4524+ * IN THE SOFTWARE.
4525+ *
4526+ * Authors:
4527+ * Eric Anholt <eric@anholt.net>
4528+ *
4529+ */
4530+
4531+#include "drmP.h"
4532+#include "drm.h"
4533+#include "i915_drm.h"
4534+#include "i915_drv.h"
4535+
4536+/** @file i915_gem_tiling.c
4537+ *
4538+ * Support for managing tiling state of buffer objects.
4539+ *
4540+ * The idea behind tiling is to increase cache hit rates by rearranging
4541+ * pixel data so that a group of pixel accesses are in the same cacheline.
4542+ * Performance improvement from doing this on the back/depth buffer are on
4543+ * the order of 30%.
4544+ *
4545+ * Intel architectures make this somewhat more complicated, though, by
4546+ * adjustments made to addressing of data when the memory is in interleaved
4547+ * mode (matched pairs of DIMMs) to improve memory bandwidth.
4548+ * For interleaved memory, the CPU sends every sequential 64 bytes
4549+ * to an alternate memory channel so it can get the bandwidth from both.
4550+ *
4551+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
4552+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
4553+ * it does it a little differently, since one walks addresses not just in the
4554+ * X direction but also Y. So, along with alternating channels when bit
4555+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
4556+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
4557+ * are common to both the 915 and 965-class hardware.
4558+ *
4559+ * The CPU sometimes XORs in higher bits as well, to improve
4560+ * bandwidth doing strided access like we do so frequently in graphics. This
4561+ * is called "Channel XOR Randomization" in the MCH documentation. The result
4562+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
4563+ * decode.
4564+ *
4565+ * All of this bit 6 XORing has an effect on our memory management,
4566+ * as we need to make sure that the 3d driver can correctly address object
4567+ * contents.
4568+ *
4569+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
4570+ * required.
4571+ *
4572+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
4573+ * 17 is not just a page offset, so as we page an object out and back in,
4574+ * individual pages in it will have different bit 17 addresses, resulting in
4575+ * each 64 bytes being swapped with its neighbor!
4576+ *
4577+ * Otherwise, if interleaved, we have to tell the 3d driver what address
4578+ * swizzling it needs to do, since it's writing with the CPU to the pages
4579+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
4580+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
4581+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
4582+ * to match what the GPU expects.
4583+ */
4584+
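As a concrete sketch of the arithmetic above (a hypothetical helper, not part of this patch): under I915_BIT_6_SWIZZLE_9_10, bit 6 of a linear byte offset is flipped whenever bits 9 and 10 of that offset differ.

/* Hypothetical illustration only: map a linear byte offset to the
 * offset the MCH actually touches under I915_BIT_6_SWIZZLE_9_10.
 * bit6' = bit6 ^ bit9 ^ bit10; all other bits are unchanged.
 */
static unsigned long swizzle_9_10(unsigned long offset)
{
	unsigned long bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;

	return offset ^ (bit6 << 6);
}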
4585+/**
4586+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
4587+ * access through main memory.
4588+ */
4589+void
4590+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
4591+{
4592+ drm_i915_private_t *dev_priv = dev->dev_private;
4593+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
4594+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
4595+
4596+ if (!IS_I9XX(dev)) {
4597+ /* As far as we know, the 865 doesn't have these bit 6
4598+ * swizzling issues.
4599+ */
4600+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
4601+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
4602+ } else if (!IS_I965G(dev) || IS_I965GM(dev)) {
4603+ uint32_t dcc;
4604+
4605+ /* On 915-945 and GM965, channel interleave by the CPU is
4606+ * determined by DCC. The CPU will alternate based on bit 6
4607+ * in interleaved mode, and the GPU will then also alternate
4608+ * on bit 6, 9, and 10 for X, but the CPU may also optionally
4609+ * alternate based on bit 17 (XOR not disabled and XOR
4610+ * bit == 17).
4611+ */
4612+ dcc = I915_READ(DCC);
4613+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
4614+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
4615+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
4616+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
4617+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
4618+ break;
4619+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
4620+ if (IS_I915G(dev) || IS_I915GM(dev) ||
4621+ dcc & DCC_CHANNEL_XOR_DISABLE) {
4622+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
4623+ swizzle_y = I915_BIT_6_SWIZZLE_9;
4624+ } else if (IS_I965GM(dev)) {
4625+ /* GM965 only does bit 11-based channel
4626+ * randomization
4627+ */
4628+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
4629+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
4630+ } else {
4631+ /* Bit 17 or perhaps other swizzling */
4632+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
4633+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
4634+ }
4635+ break;
4636+ }
4637+ if (dcc == 0xffffffff) {
4638+ DRM_ERROR("Couldn't read from MCHBAR. "
4639+ "Disabling tiling.\n");
4640+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
4641+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
4642+ }
4643+ } else {
4644+ /* The 965, G33, and newer, have a very flexible memory
4645+ * configuration. It will enable dual-channel mode
4646+ * (interleaving) on as much memory as it can, and the GPU
4647+ * will additionally sometimes enable different bit 6
4648+ * swizzling for tiled objects from the CPU.
4649+ *
4650+ * Here's what I found on the G965:
4651+ *    slot fill            memory size   swizzling
4652+ *    0A   0B   1A   1B    1-ch   2-ch
4653+ *    512  0    0    0     512    0      O
4654+ *    512  0    512  0     16     1008   X
4655+ *    512  0    0    512   16     1008   X
4656+ *    0    512  0    512   16     1008   X
4657+ *    1024 1024 1024 0     2048   1024   O
4658+ *
4659+ * We could probably detect this based on either the DRB
4660+ * matching, which was the case for the swizzling required in
4661+ * the table above, or from the 1-ch value being less than
4662+ * the minimum size of a rank.
4663+ */
4664+ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
4665+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
4666+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
4667+ } else {
4668+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
4669+ swizzle_y = I915_BIT_6_SWIZZLE_9;
4670+ }
4671+ }
4672+
4673+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
4674+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
4675+}
4676+
4677+/**
4678+ * Sets the tiling mode of an object, returning the required swizzling of
4679+ * bit 6 of addresses in the object.
4680+ */
4681+int
4682+i915_gem_set_tiling(struct drm_device *dev, void *data,
4683+ struct drm_file *file_priv)
4684+{
4685+ struct drm_i915_gem_set_tiling *args = data;
4686+ drm_i915_private_t *dev_priv = dev->dev_private;
4687+ struct drm_gem_object *obj;
4688+ struct drm_i915_gem_object *obj_priv;
4689+
4690+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4691+ if (obj == NULL)
4692+ return -EINVAL;
4693+ obj_priv = obj->driver_private;
4694+
4695+ mutex_lock(&dev->struct_mutex);
4696+
4697+ if (args->tiling_mode == I915_TILING_NONE) {
4698+ obj_priv->tiling_mode = I915_TILING_NONE;
4699+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
4700+ } else {
4701+ if (args->tiling_mode == I915_TILING_X)
4702+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
4703+ else
4704+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
4705+ /* If we can't handle the swizzling, make it untiled. */
4706+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
4707+ args->tiling_mode = I915_TILING_NONE;
4708+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
4709+ }
4710+ }
4711+ obj_priv->tiling_mode = args->tiling_mode;
4712+
4713+ mutex_unlock(&dev->struct_mutex);
4714+
4715+ drm_gem_object_unreference(obj);
4716+
4717+ return 0;
4718+}
4719+
4720+/**
4721+ * Returns the current tiling mode and required bit 6 swizzling for the object.
4722+ */
4723+int
4724+i915_gem_get_tiling(struct drm_device *dev, void *data,
4725+ struct drm_file *file_priv)
4726+{
4727+ struct drm_i915_gem_get_tiling *args = data;
4728+ drm_i915_private_t *dev_priv = dev->dev_private;
4729+ struct drm_gem_object *obj;
4730+ struct drm_i915_gem_object *obj_priv;
4731+
4732+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4733+ if (obj == NULL)
4734+ return -EINVAL;
4735+ obj_priv = obj->driver_private;
4736+
4737+ mutex_lock(&dev->struct_mutex);
4738+
4739+ args->tiling_mode = obj_priv->tiling_mode;
4740+ switch (obj_priv->tiling_mode) {
4741+ case I915_TILING_X:
4742+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
4743+ break;
4744+ case I915_TILING_Y:
4745+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
4746+ break;
4747+ case I915_TILING_NONE:
4748+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
4749+ break;
4750+ default:
4751+ DRM_ERROR("unknown tiling mode\n");
4752+ }
4753+
4754+ mutex_unlock(&dev->struct_mutex);
4755+
4756+ drm_gem_object_unreference(obj);
4757+
4758+ return 0;
4759+}
4760diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
4761index f875959..f295bdf 100644
4762--- a/drivers/gpu/drm/i915/i915_irq.c
4763+++ b/drivers/gpu/drm/i915/i915_irq.c
4764@@ -407,15 +407,20 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
4765 I915_WRITE(PIPEBSTAT, pipeb_stats);
4766 }
4767
4768- if (iir & I915_ASLE_INTERRUPT)
4769- opregion_asle_intr(dev);
4770+ I915_WRITE(IIR, iir);
4771+ if (dev->pdev->msi_enabled)
4772+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
4773+ (void) I915_READ(IIR); /* Flush posted writes */
4774
4775 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
4776
4777- if (dev->pdev->msi_enabled)
4778- I915_WRITE(IMR, dev_priv->irq_mask_reg);
4779- I915_WRITE(IIR, iir);
4780- (void) I915_READ(IIR);
4781+ if (iir & I915_USER_INTERRUPT) {
4782+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
4783+ DRM_WAKEUP(&dev_priv->irq_queue);
4784+ }
4785+
4786+ if (iir & I915_ASLE_INTERRUPT)
4787+ opregion_asle_intr(dev);
4788
4789 if (vblank && dev_priv->swaps_pending > 0)
4790 drm_locked_tasklet(dev, i915_vblank_tasklet);
4791@@ -449,7 +454,7 @@ static int i915_emit_irq(struct drm_device * dev)
4792 return dev_priv->counter;
4793 }
4794
4795-static void i915_user_irq_get(struct drm_device *dev)
4796+void i915_user_irq_get(struct drm_device *dev)
4797 {
4798 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
4799
4800diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
4801index 43ad2cb..5c2d9f2 100644
4802--- a/drivers/gpu/drm/i915/i915_reg.h
4803+++ b/drivers/gpu/drm/i915/i915_reg.h
4804@@ -25,19 +25,6 @@
4805 #ifndef _I915_REG_H_
4806 #define _I915_REG_H_
4807
4808-/* MCH MMIO space */
4809-/** 915-945 and GM965 MCH register controlling DRAM channel access */
4810-#define DCC 0x200
4811-#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
4812-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
4813-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
4814-#define DCC_ADDRESSING_MODE_MASK (3 << 0)
4815-#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
4816-
4817-/** 965 MCH register controlling DRAM channel configuration */
4818-#define CHDECMISC 0x111
4819-#define CHDECMISC_FLEXMEMORY (1 << 1)
4820-
4821 /*
4822 * The Bridge device's PCI config space has information about the
4823 * fb aperture size and the amount of pre-reserved memory.
4824@@ -516,6 +503,30 @@
4825 #define PALETTE_A 0x0a000
4826 #define PALETTE_B 0x0a800
4827
4828+/* MCH MMIO space */
4829+
4830+/*
4831+ * MCHBAR mirror.
4832+ *
4833+ * This mirrors the MCHBAR MMIO space whose location is determined by
4834+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
4835+ * every way. It is not accessible from the CP register read instructions.
4836+ *
4837+ */
4838+#define MCHBAR_MIRROR_BASE 0x10000
4839+
4840+/** 915-945 and GM965 MCH register controlling DRAM channel access */
4841+#define DCC 0x10200
4842+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
4843+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
4844+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
4845+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
4846+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
4847+
4848+/** 965 MCH register controlling DRAM channel configuration */
4849+#define C0DRB3 0x10206
4850+#define C1DRB3 0x10606
4851+
4852 /*
4853 * Overlay regs
4854 */
4855diff --git a/include/drm/drm.h b/include/drm/drm.h
4856index 15e5503..f46ba4b 100644
4857--- a/include/drm/drm.h
4858+++ b/include/drm/drm.h
4859@@ -570,6 +570,34 @@ struct drm_set_version {
4860 int drm_dd_minor;
4861 };
4862
4863+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
4864+struct drm_gem_close {
4865+ /** Handle of the object to be closed. */
4866+ uint32_t handle;
4867+ uint32_t pad;
4868+};
4869+
4870+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
4871+struct drm_gem_flink {
4872+ /** Handle for the object being named */
4873+ uint32_t handle;
4874+
4875+ /** Returned global name */
4876+ uint32_t name;
4877+};
4878+
4879+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
4880+struct drm_gem_open {
4881+ /** Name of object being opened */
4882+ uint32_t name;
4883+
4884+ /** Returned handle for the object */
4885+ uint32_t handle;
4886+
4887+ /** Returned size of the object */
4888+ uint64_t size;
4889+};
4890+
4891 #define DRM_IOCTL_BASE 'd'
4892 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
4893 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
4894@@ -585,6 +613,9 @@ struct drm_set_version {
4895 #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
4896 #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
4897 #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
4898+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
4899+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
4900+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
4901
4902 #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
4903 #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
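As a user-space usage sketch of the naming ioctls above (illustrative only; assumes <stdint.h>, <sys/ioctl.h> and this header): flink turns a local handle into a global name, and open turns that name back into a handle on another fd.

/* Illustrative only: share a GEM object between two DRM file
 * descriptors via flink/open.  Returns 0 on success, -1 on error.
 */
static int share_gem_object(int exporter_fd, int importer_fd,
			    uint32_t handle, uint32_t *imported_handle)
{
	struct drm_gem_flink flink = { .handle = handle };
	struct drm_gem_open open_arg = { 0 };

	if (ioctl(exporter_fd, DRM_IOCTL_GEM_FLINK, &flink))
		return -1;	/* no global name was assigned */

	open_arg.name = flink.name;
	if (ioctl(importer_fd, DRM_IOCTL_GEM_OPEN, &open_arg))
		return -1;

	/* open_arg.handle is only meaningful on importer_fd. */
	*imported_handle = open_arg.handle;
	return 0;
}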
4904diff --git a/include/drm/drmP.h b/include/drm/drmP.h
4905index e79ce07..1469a1b 100644
4906--- a/include/drm/drmP.h
4907+++ b/include/drm/drmP.h
4908@@ -104,6 +104,7 @@ struct drm_device;
4909 #define DRIVER_DMA_QUEUE 0x200
4910 #define DRIVER_FB_DMA 0x400
4911 #define DRIVER_IRQ_VBL2 0x800
4912+#define DRIVER_GEM 0x1000
4913
4914 /***********************************************************************/
4915 /** \name Begin the DRM... */
4916@@ -387,6 +388,10 @@ struct drm_file {
4917 struct drm_minor *minor;
4918 int remove_auth_on_close;
4919 unsigned long lock_count;
4920+ /** Mapping of mm object handles to object pointers. */
4921+ struct idr object_idr;
4922+ /** Lock for synchronization of access to object_idr. */
4923+ spinlock_t table_lock;
4924 struct file *filp;
4925 void *driver_priv;
4926 };
4927@@ -558,6 +563,56 @@ struct drm_ati_pcigart_info {
4928 };
4929
4930 /**
4931+ * This structure defines the drm_mm memory object, which will be used by the
4932+ * DRM for its buffer objects.
4933+ */
4934+struct drm_gem_object {
4935+ /** Reference count of this object */
4936+ struct kref refcount;
4937+
4938+ /** Handle count of this object. Each handle also holds a reference */
4939+ struct kref handlecount;
4940+
4941+ /** Related drm device */
4942+ struct drm_device *dev;
4943+
4944+ /** File representing the shmem storage */
4945+ struct file *filp;
4946+
4947+ /**
4948+ * Size of the object, in bytes. Immutable over the object's
4949+ * lifetime.
4950+ */
4951+ size_t size;
4952+
4953+ /**
4954+ * Global name for this object, starts at 1. 0 means unnamed.
4955+ * Access is covered by the object_name_lock in the related drm_device
4956+ */
4957+ int name;
4958+
4959+ /**
4960+ * Memory domains. These monitor which caches contain read/write data
4961+ * related to the object. When transitioning from one set of domains
4962+ * to another, the driver is called to ensure that caches are suitably
4963+ * flushed and invalidated
4964+ */
4965+ uint32_t read_domains;
4966+ uint32_t write_domain;
4967+
4968+ /**
4969+ * While validating an exec operation, the
4970+ * new read/write domain values are computed here.
4971+ * They will be transferred to the above values
4972+ * at the point that any cache flushing occurs
4973+ */
4974+ uint32_t pending_read_domains;
4975+ uint32_t pending_write_domain;
4976+
4977+ void *driver_private;
4978+};
4979+
4980+/**
4981 * DRM driver structure. This structure represents the common code for
4982 * a family of cards. There will be one drm_device for each card present
4983 * in this family
4984@@ -657,6 +712,18 @@ struct drm_driver {
4985 void (*set_version) (struct drm_device *dev,
4986 struct drm_set_version *sv);
4987
4988+ int (*proc_init)(struct drm_minor *minor);
4989+ void (*proc_cleanup)(struct drm_minor *minor);
4990+
4991+ /**
4992+ * Driver-specific constructor for drm_gem_objects, to set up
4993+ * obj->driver_private.
4994+ *
4995+ * Returns 0 on success.
4996+ */
4997+ int (*gem_init_object) (struct drm_gem_object *obj);
4998+ void (*gem_free_object) (struct drm_gem_object *obj);
4999+
5000 int major;
5001 int minor;
5002 int patchlevel;
5003@@ -830,6 +897,22 @@ struct drm_device {
5004 spinlock_t drw_lock;
5005 struct idr drw_idr;
5006 /*@} */
5007+
5008+ /** \name GEM information */
5009+ /*@{ */
5010+ spinlock_t object_name_lock;
5011+ struct idr object_name_idr;
5012+ atomic_t object_count;
5013+ atomic_t object_memory;
5014+ atomic_t pin_count;
5015+ atomic_t pin_memory;
5016+ atomic_t gtt_count;
5017+ atomic_t gtt_memory;
5018+ uint32_t gtt_total;
5019+ uint32_t invalidate_domains; /* domains pending invalidation */
5020+ uint32_t flush_domains; /* domains pending flush */
5021+ /*@} */
5022+
5023 };
5024
5025 static __inline__ int drm_core_check_feature(struct drm_device *dev,
5026@@ -926,6 +1009,10 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
5027 extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
5028 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
5029 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
5030+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
5031+ struct page **pages,
5032+ unsigned long num_pages,
5033+ uint32_t gtt_offset);
5034 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
5035
5036 /* Misc. IOCTL support (drm_ioctl.h) */
5037@@ -988,6 +1075,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
5038 extern int drm_authmagic(struct drm_device *dev, void *data,
5039 struct drm_file *file_priv);
5040
5041+/* Cache management (drm_cache.c) */
5042+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
5043+
5044 /* Locking IOCTL support (drm_lock.h) */
5045 extern int drm_lock(struct drm_device *dev, void *data,
5046 struct drm_file *file_priv);
5047@@ -1094,6 +1184,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
5048 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
5049 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
5050 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
5051+extern void drm_agp_chipset_flush(struct drm_device *dev);
5052
5053 /* Stub support (drm_stub.h) */
5054 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
5055@@ -1156,6 +1247,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
5056 extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
5057 extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
5058
5059+/* Graphics Execution Manager library functions (drm_gem.c) */
5060+int drm_gem_init(struct drm_device *dev);
5061+void drm_gem_object_free(struct kref *kref);
5062+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
5063+ size_t size);
5064+void drm_gem_object_handle_free(struct kref *kref);
5065+
5066+static inline void
5067+drm_gem_object_reference(struct drm_gem_object *obj)
5068+{
5069+ kref_get(&obj->refcount);
5070+}
5071+
5072+static inline void
5073+drm_gem_object_unreference(struct drm_gem_object *obj)
5074+{
5075+ if (obj == NULL)
5076+ return;
5077+
5078+ kref_put(&obj->refcount, drm_gem_object_free);
5079+}
5080+
5081+int drm_gem_handle_create(struct drm_file *file_priv,
5082+ struct drm_gem_object *obj,
5083+ int *handlep);
5084+
5085+static inline void
5086+drm_gem_object_handle_reference(struct drm_gem_object *obj)
5087+{
5088+ drm_gem_object_reference(obj);
5089+ kref_get(&obj->handlecount);
5090+}
5091+
5092+static inline void
5093+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
5094+{
5095+ if (obj == NULL)
5096+ return;
5097+
5098+ /*
5099+ * Must drop the handle count first, as this may be the last
5100+ * ref, in which case the object would disappear before we
5101+ * checked for a name
5102+ */
5103+ kref_put(&obj->handlecount, drm_gem_object_handle_free);
5104+ drm_gem_object_unreference(obj);
5105+}
5106+
5107+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
5108+ struct drm_file *filp,
5109+ int handle);
5110+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
5111+ struct drm_file *file_priv);
5112+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
5113+ struct drm_file *file_priv);
5114+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
5115+ struct drm_file *file_priv);
5116+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
5117+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
5118+
5119 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
5120 extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
5121 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
5122diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
5123index 05c66cf..59d08fc 100644
5124--- a/include/drm/i915_drm.h
5125+++ b/include/drm/i915_drm.h
5126@@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
5127 #define DRM_I915_GET_VBLANK_PIPE 0x0e
5128 #define DRM_I915_VBLANK_SWAP 0x0f
5129 #define DRM_I915_HWS_ADDR 0x11
5130+#define DRM_I915_GEM_INIT 0x13
5131+#define DRM_I915_GEM_EXECBUFFER 0x14
5132+#define DRM_I915_GEM_PIN 0x15
5133+#define DRM_I915_GEM_UNPIN 0x16
5134+#define DRM_I915_GEM_BUSY 0x17
5135+#define DRM_I915_GEM_THROTTLE 0x18
5136+#define DRM_I915_GEM_ENTERVT 0x19
5137+#define DRM_I915_GEM_LEAVEVT 0x1a
5138+#define DRM_I915_GEM_CREATE 0x1b
5139+#define DRM_I915_GEM_PREAD 0x1c
5140+#define DRM_I915_GEM_PWRITE 0x1d
5141+#define DRM_I915_GEM_MMAP 0x1e
5142+#define DRM_I915_GEM_SET_DOMAIN 0x1f
5143+#define DRM_I915_GEM_SW_FINISH 0x20
5144+#define DRM_I915_GEM_SET_TILING 0x21
5145+#define DRM_I915_GEM_GET_TILING 0x22
5146
5147 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
5148 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
5149@@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
5150 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
5151 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
5152 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
5153+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
5154+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
5155+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
5156+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
5157+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
5158+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
5159+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
5160+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
5161+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
5162+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
5163+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
5164+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
5165+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
5166+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
5167
5168 /* Allow drivers to submit batchbuffers directly to hardware, relying
5169 * on the security mechanisms provided by hardware.
5170@@ -200,6 +230,7 @@ typedef struct drm_i915_irq_wait {
5171 #define I915_PARAM_IRQ_ACTIVE 1
5172 #define I915_PARAM_ALLOW_BATCHBUFFER 2
5173 #define I915_PARAM_LAST_DISPATCH 3
5174+#define I915_PARAM_HAS_GEM 5
5175
5176 typedef struct drm_i915_getparam {
5177 int param;
5178@@ -267,4 +298,305 @@ typedef struct drm_i915_hws_addr {
5179 uint64_t addr;
5180 } drm_i915_hws_addr_t;
5181
5182+struct drm_i915_gem_init {
5183+ /**
5184+ * Beginning offset in the GTT to be managed by the DRM memory
5185+ * manager.
5186+ */
5187+ uint64_t gtt_start;
5188+ /**
5189+ * Ending offset in the GTT to be managed by the DRM memory
5190+ * manager.
5191+ */
5192+ uint64_t gtt_end;
5193+};
5194+
5195+struct drm_i915_gem_create {
5196+ /**
5197+ * Requested size for the object.
5198+ *
5199+ * The (page-aligned) allocated size for the object will be returned.
5200+ */
5201+ uint64_t size;
5202+ /**
5203+ * Returned handle for the object.
5204+ *
5205+ * Object handles are nonzero.
5206+ */
5207+ uint32_t handle;
5208+ uint32_t pad;
5209+};
5210+
5211+struct drm_i915_gem_pread {
5212+ /** Handle for the object being read. */
5213+ uint32_t handle;
5214+ uint32_t pad;
5215+ /** Offset into the object to read from */
5216+ uint64_t offset;
5217+ /** Length of data to read */
5218+ uint64_t size;
5219+ /**
5220+ * Pointer to write the data into.
5221+ *
5222+ * This is a fixed-size type for 32/64 compatibility.
5223+ */
5224+ uint64_t data_ptr;
5225+};
5226+
5227+struct drm_i915_gem_pwrite {
5228+ /** Handle for the object being written to. */
5229+ uint32_t handle;
5230+ uint32_t pad;
5231+ /** Offset into the object to write to */
5232+ uint64_t offset;
5233+ /** Length of data to write */
5234+ uint64_t size;
5235+ /**
5236+ * Pointer to read the data from.
5237+ *
5238+ * This is a fixed-size type for 32/64 compatibility.
5239+ */
5240+ uint64_t data_ptr;
5241+};
5242+
5243+struct drm_i915_gem_mmap {
5244+ /** Handle for the object being mapped. */
5245+ uint32_t handle;
5246+ uint32_t pad;
5247+ /** Offset in the object to map. */
5248+ uint64_t offset;
5249+ /**
5250+ * Length of data to map.
5251+ *
5252+ * The value will be page-aligned.
5253+ */
5254+ uint64_t size;
5255+ /**
5256+ * Returned pointer to where the data was mapped.
5257+ *
5258+ * This is a fixed-size type for 32/64 compatibility.
5259+ */
5260+ uint64_t addr_ptr;
5261+};
5262+
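Taken together, create/pwrite/mmap cover the basic allocation path. A hedged user-space sketch (illustrative only; the helper name is hypothetical and <stdint.h>, <sys/ioctl.h> are assumed):

/* Illustrative only: create a GEM object, upload data into it, then
 * map it for CPU access.
 */
static void *gem_create_upload_map(int fd, uint64_t size,
				   const void *data, uint64_t len,
				   uint32_t *handle_out)
{
	struct drm_i915_gem_create create = { .size = size };
	struct drm_i915_gem_pwrite pwrite = { 0 };
	struct drm_i915_gem_mmap mmap_arg = { 0 };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return NULL;

	pwrite.handle = create.handle;
	pwrite.offset = 0;
	pwrite.size = len;
	pwrite.data_ptr = (uint64_t)(uintptr_t)data;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
		return NULL;

	mmap_arg.handle = create.handle;
	mmap_arg.size = create.size;	/* page-aligned size from the kernel */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
		return NULL;

	*handle_out = create.handle;
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}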
5263+struct drm_i915_gem_set_domain {
5264+ /** Handle for the object */
5265+ uint32_t handle;
5266+
5267+ /** New read domains */
5268+ uint32_t read_domains;
5269+
5270+ /** New write domain */
5271+ uint32_t write_domain;
5272+};
5273+
5274+struct drm_i915_gem_sw_finish {
5275+ /** Handle for the object */
5276+ uint32_t handle;
5277+};
5278+
5279+struct drm_i915_gem_relocation_entry {
5280+ /**
5281+ * Handle of the buffer being pointed to by this relocation entry.
5282+ *
5283+ * It's appealing to make this an index into the mm_validate_entry
5284+ * list to refer to the buffer, but this allows the driver to create
5285+ * a relocation list for state buffers and not re-write it per
5286+ * exec using the buffer.
5287+ */
5288+ uint32_t target_handle;
5289+
5290+ /**
5291+ * Value to be added to the offset of the target buffer to make up
5292+ * the relocation entry.
5293+ */
5294+ uint32_t delta;
5295+
5296+ /** Offset in the buffer the relocation entry will be written into */
5297+ uint64_t offset;
5298+
5299+ /**
5300+ * Offset value of the target buffer that the relocation entry was last
5301+ * written as.
5302+ *
5303+ * If the buffer has the same offset as last time, we can skip syncing
5304+ * and writing the relocation. This value is written back out by
5305+ * the execbuffer ioctl when the relocation is written.
5306+ */
5307+ uint64_t presumed_offset;
5308+
5309+ /**
5310+ * Target memory domains read by this operation.
5311+ */
5312+ uint32_t read_domains;
5313+
5314+ /**
5315+ * Target memory domains written by this operation.
5316+ *
5317+ * Note that only one domain may be written by the whole
5318+ * execbuffer operation, so that where there are conflicts,
5319+ * the application will get -EINVAL back.
5320+ */
5321+ uint32_t write_domain;
5322+};
5323+
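Conceptually, each entry drives a rewrite like the following simplified model (not the patch's code): if the target still sits at presumed_offset the write is skipped, otherwise the dword at 'offset' becomes the target's new offset plus 'delta'.

/* Simplified model only: consume one relocation entry against a
 * CPU-visible mapping of the buffer containing it.
 */
static int apply_relocation(uint32_t *buf_cpu_map,
			    struct drm_i915_gem_relocation_entry *reloc,
			    uint64_t target_offset)
{
	if (reloc->presumed_offset == target_offset)
		return 0;	/* nothing to rewrite */

	buf_cpu_map[reloc->offset / 4] =
		(uint32_t)(target_offset + reloc->delta);
	reloc->presumed_offset = target_offset;
	return 1;		/* relocation written */
}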
5324+/** @{
5325+ * Intel memory domains
5326+ *
5327+ * Most of these just align with the various caches in
5328+ * the system and are used to flush and invalidate as
5329+ * objects end up cached in different domains.
5330+ */
5331+/** CPU cache */
5332+#define I915_GEM_DOMAIN_CPU 0x00000001
5333+/** Render cache, used by 2D and 3D drawing */
5334+#define I915_GEM_DOMAIN_RENDER 0x00000002
5335+/** Sampler cache, used by texture engine */
5336+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
5337+/** Command queue, used to load batch buffers */
5338+#define I915_GEM_DOMAIN_COMMAND 0x00000008
5339+/** Instruction cache, used by shader programs */
5340+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
5341+/** Vertex address cache */
5342+#define I915_GEM_DOMAIN_VERTEX 0x00000020
5343+/** GTT domain - aperture and scanout */
5344+#define I915_GEM_DOMAIN_GTT 0x00000040
5345+/** @} */
5346+
5347+struct drm_i915_gem_exec_object {
5348+ /**
5349+ * User's handle for a buffer to be bound into the GTT for this
5350+ * operation.
5351+ */
5352+ uint32_t handle;
5353+
5354+ /** Number of relocations to be performed on this buffer */
5355+ uint32_t relocation_count;
5356+ /**
5357+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
5358+ * the relocations to be performed in this buffer.
5359+ */
5360+ uint64_t relocs_ptr;
5361+
5362+ /** Required alignment in graphics aperture */
5363+ uint64_t alignment;
5364+
5365+ /**
5366+ * Returned value of the updated offset of the object, for future
5367+ * presumed_offset writes.
5368+ */
5369+ uint64_t offset;
5370+};
5371+
5372+struct drm_i915_gem_execbuffer {
5373+ /**
5374+ * List of buffers to be validated with their relocations to be
5375+ * performed on them.
5376+ *
5377+ * This is a pointer to an array of struct drm_i915_gem_exec_object.
5378+ *
5379+ * These buffers must be listed in an order such that all relocations
5380+ * a buffer is performing refer to buffers that have already appeared
5381+ * in the validate list.
5382+ */
5383+ uint64_t buffers_ptr;
5384+ uint32_t buffer_count;
5385+
5386+ /** Offset in the batchbuffer to start execution from. */
5387+ uint32_t batch_start_offset;
5388+ /** Bytes used in batchbuffer from batch_start_offset */
5389+ uint32_t batch_len;
5390+ uint32_t DR1;
5391+ uint32_t DR4;
5392+ uint32_t num_cliprects;
5393+ /** This is a struct drm_clip_rect *cliprects */
5394+ uint64_t cliprects_ptr;
5395+};
5396+
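The ordering rule means a relocation target always precedes the buffer relocating against it. A hypothetical submission of one batch referencing one vertex buffer (illustrative only; DRM_IOCTL_I915_GEM_EXECBUFFER is assumed to be defined alongside the other GEM ioctls in this header):

/* Illustrative only: the vertex buffer is listed before the batch, so
 * its offset is known when the batch's relocation is processed.
 */
static int submit_batch(int fd, uint32_t vbo_handle, uint32_t batch_handle,
			struct drm_i915_gem_relocation_entry *reloc_to_vbo,
			uint32_t batch_bytes)
{
	struct drm_i915_gem_exec_object objs[2] = {
		{ .handle = vbo_handle },
		{ .handle = batch_handle,
		  .relocation_count = 1,
		  .relocs_ptr = (uint64_t)(uintptr_t)reloc_to_vbo },
	};
	struct drm_i915_gem_execbuffer execbuf = {
		.buffers_ptr = (uint64_t)(uintptr_t)objs,
		.buffer_count = 2,
		.batch_len = batch_bytes,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}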
5397+struct drm_i915_gem_pin {
5398+ /** Handle of the buffer to be pinned. */
5399+ uint32_t handle;
5400+ uint32_t pad;
5401+
5402+ /** alignment required within the aperture */
5403+ uint64_t alignment;
5404+
5405+ /** Returned GTT offset of the buffer. */
5406+ uint64_t offset;
5407+};
5408+
5409+struct drm_i915_gem_unpin {
5410+ /** Handle of the buffer to be unpinned. */
5411+ uint32_t handle;
5412+ uint32_t pad;
5413+};
5414+
5415+struct drm_i915_gem_busy {
5416+ /** Handle of the buffer to check for busy */
5417+ uint32_t handle;
5418+
5419+ /** Return busy status (1 if busy, 0 if idle) */
5420+ uint32_t busy;
5421+};
5422+
5423+#define I915_TILING_NONE 0
5424+#define I915_TILING_X 1
5425+#define I915_TILING_Y 2
5426+
5427+#define I915_BIT_6_SWIZZLE_NONE 0
5428+#define I915_BIT_6_SWIZZLE_9 1
5429+#define I915_BIT_6_SWIZZLE_9_10 2
5430+#define I915_BIT_6_SWIZZLE_9_11 3
5431+#define I915_BIT_6_SWIZZLE_9_10_11 4
5432+/* Not seen by userland */
5433+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
5434+
5435+struct drm_i915_gem_set_tiling {
5436+ /** Handle of the buffer to have its tiling state updated */
5437+ uint32_t handle;
5438+
5439+ /**
5440+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
5441+ * I915_TILING_Y).
5442+ *
5443+ * This value is to be set on request, and will be updated by the
5444+ * kernel on successful return with the actual chosen tiling layout.
5445+ *
5446+ * The tiling mode may be demoted to I915_TILING_NONE when the system
5447+ * has bit 6 swizzling that can't be managed correctly by GEM.
5448+ *
5449+ * Buffer contents become undefined when changing tiling_mode.
5450+ */
5451+ uint32_t tiling_mode;
5452+
5453+ /**
5454+ * Stride in bytes for the object when in I915_TILING_X or
5455+ * I915_TILING_Y.
5456+ */
5457+ uint32_t stride;
5458+
5459+ /**
5460+ * Returned address bit 6 swizzling required for CPU access through
5461+ * mmap mapping.
5462+ */
5463+ uint32_t swizzle_mode;
5464+};
5465+
5466+struct drm_i915_gem_get_tiling {
5467+ /** Handle of the buffer to get tiling state for. */
5468+ uint32_t handle;
5469+
5470+ /**
5471+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
5472+ * I915_TILING_Y).
5473+ */
5474+ uint32_t tiling_mode;
5475+
5476+ /**
5477+ * Returned address bit 6 swizzling required for CPU access through
5478+ * mmap mapping.
5479+ */
5480+ uint32_t swizzle_mode;
5481+};
5482+
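A short user-space sketch of the tiling handshake (illustrative only; helper name hypothetical): request X tiling, then honor whatever mode and swizzle the kernel actually chose.

/* Illustrative only: the kernel may demote the request to
 * I915_TILING_NONE when bit 17 swizzling makes tiling unmanageable.
 */
static int request_x_tiling(int fd, uint32_t handle, uint32_t stride,
			    uint32_t *swizzle_out)
{
	struct drm_i915_gem_set_tiling st = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
		.stride = stride,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st))
		return -1;

	*swizzle_out = st.swizzle_mode;	/* what CPU access must apply */
	return st.tiling_mode;		/* I915_TILING_X, or NONE if demoted */
}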
5483 #endif /* _I915_DRM_H_ */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0015-i915-Add-chip-set-ID-param.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0015-i915-Add-chip-set-ID-param.patch
deleted file mode 100644
index c3bf8ebd13..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0015-i915-Add-chip-set-ID-param.patch
+++ /dev/null
@@ -1,35 +0,0 @@
1commit 26ead293ddf664f33dc0ba12b726887c40ce3957
2Author: Kristian Høgsberg <krh@redhat.com>
3Date: Wed Aug 20 11:08:52 2008 -0400
4
5 i915: Add chip set ID param.
6
7 Signed-off-by: Kristian Høgsberg <krh@redhat.com>
8 Signed-off-by: Eric Anholt <eric@anholt.net>
9
10diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
11index 3b5aa74..205d21e 100644
12--- a/drivers/gpu/drm/i915/i915_dma.c
13+++ b/drivers/gpu/drm/i915/i915_dma.c
14@@ -689,6 +689,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
15 case I915_PARAM_LAST_DISPATCH:
16 value = READ_BREADCRUMB(dev_priv);
17 break;
18+ case I915_PARAM_CHIPSET_ID:
19+ value = dev->pci_device;
20+ break;
21 case I915_PARAM_HAS_GEM:
22 value = 1;
23 break;
24diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
25index 59d08fc..eb4b350 100644
26--- a/include/drm/i915_drm.h
27+++ b/include/drm/i915_drm.h
28@@ -230,6 +230,7 @@ typedef struct drm_i915_irq_wait {
29 #define I915_PARAM_IRQ_ACTIVE 1
30 #define I915_PARAM_ALLOW_BATCHBUFFER 2
31 #define I915_PARAM_LAST_DISPATCH 3
32+#define I915_PARAM_CHIPSET_ID 4
33 #define I915_PARAM_HAS_GEM 5
34
35 typedef struct drm_i915_getparam {
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch
deleted file mode 100644
index 910f37e9c5..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch
+++ /dev/null
@@ -1,205 +0,0 @@
1commit 8a524209fce67d3b6d2e831b5dad4eced796ce98
2Author: Eric Anholt <eric@anholt.net>
3Date: Mon Sep 1 16:45:29 2008 -0700
4
5 i915: Use struct_mutex to protect ring in GEM mode.
6
7 In the conversion for GEM, we had stopped using the hardware lock to protect
8 ring usage, since it was all internal to the DRM now. However, some paths
9 weren't converted to using struct_mutex to prevent multiple threads from
10 concurrently working on the ring, in particular between the vblank swap handler
11 and ioctls.
12
13 Signed-off-by: Eric Anholt <eric@anholt.net>
14
15diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
16index 205d21e..25f59c1 100644
17--- a/drivers/gpu/drm/i915/i915_dma.c
18+++ b/drivers/gpu/drm/i915/i915_dma.c
19@@ -588,9 +588,15 @@ static int i915_quiescent(struct drm_device * dev)
20 static int i915_flush_ioctl(struct drm_device *dev, void *data,
21 struct drm_file *file_priv)
22 {
23- LOCK_TEST_WITH_RETURN(dev, file_priv);
24+ int ret;
25+
26+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
27
28- return i915_quiescent(dev);
29+ mutex_lock(&dev->struct_mutex);
30+ ret = i915_quiescent(dev);
31+ mutex_unlock(&dev->struct_mutex);
32+
33+ return ret;
34 }
35
36 static int i915_batchbuffer(struct drm_device *dev, void *data,
37@@ -611,14 +617,16 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
38 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
39 batch->start, batch->used, batch->num_cliprects);
40
41- LOCK_TEST_WITH_RETURN(dev, file_priv);
42+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
43
44 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
45 batch->num_cliprects *
46 sizeof(struct drm_clip_rect)))
47 return -EFAULT;
48
49+ mutex_lock(&dev->struct_mutex);
50 ret = i915_dispatch_batchbuffer(dev, batch);
51+ mutex_unlock(&dev->struct_mutex);
52
53 sarea_priv->last_dispatch = (int)hw_status[5];
54 return ret;
55@@ -637,7 +645,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
56 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
57 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
58
59- LOCK_TEST_WITH_RETURN(dev, file_priv);
60+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
61
62 if (cmdbuf->num_cliprects &&
63 DRM_VERIFYAREA_READ(cmdbuf->cliprects,
64@@ -647,7 +655,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
65 return -EFAULT;
66 }
67
68+ mutex_lock(&dev->struct_mutex);
69 ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
70+ mutex_unlock(&dev->struct_mutex);
71 if (ret) {
72 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
73 return ret;
74@@ -660,11 +670,17 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
75 static int i915_flip_bufs(struct drm_device *dev, void *data,
76 struct drm_file *file_priv)
77 {
78+ int ret;
79+
80 DRM_DEBUG("%s\n", __FUNCTION__);
81
82- LOCK_TEST_WITH_RETURN(dev, file_priv);
83+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
84
85- return i915_dispatch_flip(dev);
86+ mutex_lock(&dev->struct_mutex);
87+ ret = i915_dispatch_flip(dev);
88+ mutex_unlock(&dev->struct_mutex);
89+
90+ return ret;
91 }
92
93 static int i915_getparam(struct drm_device *dev, void *data,
94diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
95index 87b071a..8547f0a 100644
96--- a/drivers/gpu/drm/i915/i915_drv.h
97+++ b/drivers/gpu/drm/i915/i915_drv.h
98@@ -285,6 +285,9 @@ typedef struct drm_i915_private {
99 */
100 struct delayed_work retire_work;
101
102+ /** Work task for vblank-related ring access */
103+ struct work_struct vblank_work;
104+
105 uint32_t next_gem_seqno;
106
107 /**
108@@ -435,6 +438,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
109 void i915_user_irq_get(struct drm_device *dev);
110 void i915_user_irq_put(struct drm_device *dev);
111
112+extern void i915_gem_vblank_work_handler(struct work_struct *work);
113 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
114 extern void i915_driver_irq_preinstall(struct drm_device * dev);
115 extern int i915_driver_irq_postinstall(struct drm_device *dev);
116@@ -538,6 +542,17 @@ extern void intel_opregion_free(struct drm_device *dev);
117 extern void opregion_asle_intr(struct drm_device *dev);
118 extern void opregion_enable_asle(struct drm_device *dev);
119
120+/**
121+ * Lock test for when it's just for synchronization of ring access.
122+ *
123+ * In that case, we don't need to do it when GEM is initialized as nobody else
124+ * has access to the ring.
125+ */
126+#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
127+ if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
128+ LOCK_TEST_WITH_RETURN(dev, file_priv); \
129+} while (0)
130+
131 #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
132 #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
133 #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
134diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
135index 90ae8a0..bb6e5a3 100644
136--- a/drivers/gpu/drm/i915/i915_gem.c
137+++ b/drivers/gpu/drm/i915/i915_gem.c
138@@ -2491,6 +2491,8 @@ i915_gem_load(struct drm_device *dev)
139 INIT_LIST_HEAD(&dev_priv->mm.request_list);
140 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
141 i915_gem_retire_work_handler);
142+ INIT_WORK(&dev_priv->mm.vblank_work,
143+ i915_gem_vblank_work_handler);
144 dev_priv->mm.next_gem_seqno = 1;
145
146 i915_gem_detect_bit_6_swizzle(dev);
147diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
148index f295bdf..d04c526 100644
149--- a/drivers/gpu/drm/i915/i915_irq.c
150+++ b/drivers/gpu/drm/i915/i915_irq.c
151@@ -349,6 +349,21 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
152 return count;
153 }
154
155+void
156+i915_gem_vblank_work_handler(struct work_struct *work)
157+{
158+ drm_i915_private_t *dev_priv;
159+ struct drm_device *dev;
160+
161+ dev_priv = container_of(work, drm_i915_private_t,
162+ mm.vblank_work);
163+ dev = dev_priv->dev;
164+
165+ mutex_lock(&dev->struct_mutex);
166+ i915_vblank_tasklet(dev);
167+ mutex_unlock(&dev->struct_mutex);
168+}
169+
170 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
171 {
172 struct drm_device *dev = (struct drm_device *) arg;
173@@ -422,8 +437,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
174 if (iir & I915_ASLE_INTERRUPT)
175 opregion_asle_intr(dev);
176
177- if (vblank && dev_priv->swaps_pending > 0)
178- drm_locked_tasklet(dev, i915_vblank_tasklet);
179+ if (vblank && dev_priv->swaps_pending > 0) {
180+ if (dev_priv->ring.ring_obj == NULL)
181+ drm_locked_tasklet(dev, i915_vblank_tasklet);
182+ else
183+ schedule_work(&dev_priv->mm.vblank_work);
184+ }
185
186 return IRQ_HANDLED;
187 }
188@@ -514,14 +533,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
189 drm_i915_irq_emit_t *emit = data;
190 int result;
191
192- LOCK_TEST_WITH_RETURN(dev, file_priv);
193+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
194
195 if (!dev_priv) {
196 DRM_ERROR("called with no initialization\n");
197 return -EINVAL;
198 }
199-
200+ mutex_lock(&dev->struct_mutex);
201 result = i915_emit_irq(dev);
202+ mutex_unlock(&dev->struct_mutex);
203
204 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
205 DRM_ERROR("copy_to_user\n");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0017-i915-Make-use-of-sarea_priv-conditional.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0017-i915-Make-use-of-sarea_priv-conditional.patch
deleted file mode 100644
index 542b69dd52..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0017-i915-Make-use-of-sarea_priv-conditional.patch
+++ /dev/null
@@ -1,147 +0,0 @@
1commit 69749cf99189a8a78de201ac24990c91ee111469
2Author: Kristian Høgsberg <krh@redhat.com>
3Date: Wed Aug 20 11:20:13 2008 -0400
4
5 i915: Make use of sarea_priv conditional.
6
7 We fail ioctls that depend on the sarea_priv with EINVAL.
8
9 Signed-off-by: Kristian Høgsberg <krh@redhat.com>
10 Signed-off-by: Eric Anholt <eric@anholt.net>
11
12diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
13index 25f59c1..dbd3f49 100644
14--- a/drivers/gpu/drm/i915/i915_dma.c
15+++ b/drivers/gpu/drm/i915/i915_dma.c
16@@ -55,7 +55,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
17 if (ring->space >= n)
18 return 0;
19
20- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
21+ if (dev_priv->sarea_priv)
22+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
23
24 if (ring->head != last_head)
25 i = 0;
26@@ -128,7 +129,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
27 if (ring->space < 0)
28 ring->space += ring->Size;
29
30- if (ring->head == ring->tail)
31+ if (ring->head == ring->tail && dev_priv->sarea_priv)
32 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
33 }
34
35@@ -433,10 +434,11 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
36 drm_i915_private_t *dev_priv = dev->dev_private;
37 RING_LOCALS;
38
39- dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
40-
41+ dev_priv->counter++;
42 if (dev_priv->counter > 0x7FFFFFFFUL)
43- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
44+ dev_priv->counter = 0;
45+ if (dev_priv->sarea_priv)
46+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
47
48 BEGIN_LP_RING(4);
49 OUT_RING(MI_STORE_DWORD_INDEX);
50@@ -534,6 +536,9 @@ static int i915_dispatch_flip(struct drm_device * dev)
51 drm_i915_private_t *dev_priv = dev->dev_private;
52 RING_LOCALS;
53
54+ if (!dev_priv->sarea_priv)
55+ return -EINVAL;
56+
57 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
58 __FUNCTION__,
59 dev_priv->current_page,
60@@ -628,7 +633,8 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
61 ret = i915_dispatch_batchbuffer(dev, batch);
62 mutex_unlock(&dev->struct_mutex);
63
64- sarea_priv->last_dispatch = (int)hw_status[5];
65+ if (sarea_priv)
66+ sarea_priv->last_dispatch = (int)hw_status[5];
67 return ret;
68 }
69
70@@ -663,7 +669,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
71 return ret;
72 }
73
74- sarea_priv->last_dispatch = (int)hw_status[5];
75+ if (sarea_priv)
76+ sarea_priv->last_dispatch = (int)hw_status[5];
77 return 0;
78 }
79
80diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
81index d04c526..ef03a59 100644
82--- a/drivers/gpu/drm/i915/i915_irq.c
83+++ b/drivers/gpu/drm/i915/i915_irq.c
84@@ -427,7 +427,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
85 I915_WRITE(IMR, dev_priv->irq_mask_reg);
86 (void) I915_READ(IIR); /* Flush posted writes */
87
88- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
89+ if (dev_priv->sarea_priv)
90+ dev_priv->sarea_priv->last_dispatch =
91+ READ_BREADCRUMB(dev_priv);
92
93 if (iir & I915_USER_INTERRUPT) {
94 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
95@@ -456,10 +458,11 @@ static int i915_emit_irq(struct drm_device * dev)
96
97 DRM_DEBUG("\n");
98
99- dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
100-
101+ dev_priv->counter++;
102 if (dev_priv->counter > 0x7FFFFFFFUL)
103- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
104+ dev_priv->counter = 1;
105+ if (dev_priv->sarea_priv)
106+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
107
108 BEGIN_LP_RING(6);
109 OUT_RING(MI_STORE_DWORD_INDEX);
110@@ -503,11 +506,15 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
111 READ_BREADCRUMB(dev_priv));
112
113 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
114- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
115+ if (dev_priv->sarea_priv) {
116+ dev_priv->sarea_priv->last_dispatch =
117+ READ_BREADCRUMB(dev_priv);
118+ }
119 return 0;
120 }
121
122- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
123+ if (dev_priv->sarea_priv)
124+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
125
126 i915_user_irq_get(dev);
127 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
128@@ -519,7 +526,9 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
129 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
130 }
131
132- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
133+ if (dev_priv->sarea_priv)
134+ dev_priv->sarea_priv->last_dispatch =
135+ READ_BREADCRUMB(dev_priv);
136
137 return ret;
138 }
139@@ -682,7 +691,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
140 struct list_head *list;
141 int ret;
142
143- if (!dev_priv) {
144+ if (!dev_priv || !dev_priv->sarea_priv) {
145 DRM_ERROR("%s called with no initialization\n", __func__);
146 return -EINVAL;
147 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch
deleted file mode 100644
index 3593fa5826..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch
+++ /dev/null
@@ -1,44 +0,0 @@
1commit 7ad6d5861b04bbb2cdc36d1dcf8989e16f86e659
2Author: Kristian Høgsberg <krh@redhat.com>
3Date: Wed Aug 20 11:04:27 2008 -0400
4
5 i915 gem: install and uninstall irq handler in entervt and leavevt ioctls.
6
7 Signed-off-by: Kristian Høgsberg <krh@redhat.com>
8 Signed-off-by: Eric Anholt <eric@anholt.net>
9
10diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
11index bb6e5a3..5fe5034 100644
12--- a/drivers/gpu/drm/i915/i915_gem.c
13+++ b/drivers/gpu/drm/i915/i915_gem.c
14@@ -2443,6 +2443,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
15 BUG_ON(!list_empty(&dev_priv->mm.request_list));
16 dev_priv->mm.suspended = 0;
17 mutex_unlock(&dev->struct_mutex);
18+
19+ drm_irq_install(dev);
20+
21 return 0;
22 }
23
24@@ -2458,6 +2461,8 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
25 i915_gem_cleanup_ringbuffer(dev);
26 mutex_unlock(&dev->struct_mutex);
27
28+ drm_irq_uninstall(dev);
29+
30 return 0;
31 }
32
33diff --git a/include/drm/drmP.h b/include/drm/drmP.h
34index 1469a1b..51ee72c 100644
35--- a/include/drm/drmP.h
36+++ b/include/drm/drmP.h
37@@ -1134,6 +1134,7 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
38 extern int drm_control(struct drm_device *dev, void *data,
39 struct drm_file *file_priv);
40 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
41+extern int drm_irq_install(struct drm_device *dev);
42 extern int drm_irq_uninstall(struct drm_device *dev);
43 extern void drm_driver_irq_preinstall(struct drm_device *dev);
44 extern void drm_driver_irq_postinstall(struct drm_device *dev);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch
deleted file mode 100644
index 6de4514e28..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch
+++ /dev/null
@@ -1,32 +0,0 @@
1commit c3de45b0488762a9161e9b9e8bf419f63c100c47
2Author: Eric Anholt <eric@anholt.net>
3Date: Tue Sep 9 11:40:34 2008 -0700
4
5 DRM: Return -EBADF on bad object in flink, and return current name if it exists.
6
7 Signed-off-by: Eric Anholt <eric@anholt.net>
8
9diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
10index 434155b..ccd1afd 100644
11--- a/drivers/gpu/drm/drm_gem.c
12+++ b/drivers/gpu/drm/drm_gem.c
13@@ -251,7 +251,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
14
15 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
16 if (obj == NULL)
17- return -EINVAL;
18+ return -EBADF;
19
20 again:
21 if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
22@@ -259,8 +259,9 @@ again:
23
24 spin_lock(&dev->object_name_lock);
25 if (obj->name) {
26+ args->name = obj->name;
27 spin_unlock(&dev->object_name_lock);
28- return -EEXIST;
29+ return 0;
30 }
31 ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
32 &obj->name);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch
deleted file mode 100644
index 7080907cde..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch
+++ /dev/null
@@ -1,23 +0,0 @@
1commit 880db7a8dbed226d638b3a48aa1a3996f8624911
2Author: Eric Anholt <eric@anholt.net>
3Date: Wed Sep 10 14:22:49 2008 -0700
4
5 drm: Avoid oops in GEM execbuffers with bad arguments.
6
7 Signed-off-by: Eric Anholt <eric@anholt.net>
8
9diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
10index 5fe5034..29d9d21 100644
11--- a/drivers/gpu/drm/i915/i915_gem.c
12+++ b/drivers/gpu/drm/i915/i915_gem.c
13@@ -1763,6 +1763,10 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
14 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
15 #endif
16
17+ if (args->buffer_count < 1) {
18+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
19+ return -EINVAL;
20+ }
21 /* Copy in the exec list from userland */
22 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
23 DRM_MEM_DRIVER);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch
deleted file mode 100644
index f5481d7d85..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch
+++ /dev/null
@@ -1,23 +0,0 @@
1commit 930469634910fa87c21f0a7423c98b270d35d8c6
2Author: Eric Anholt <eric@anholt.net>
3Date: Mon Sep 15 13:13:34 2008 -0700
4
5 drm: G33-class hardware has a newer 965-style MCH (no DCC register).
6
7 Fixes bad software fallback rendering in Mesa in dual-channel configurations.
8
9 d9a2470012588dc5313a5ac8bb2f03575af00e99
10
11diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
12index 0c1b3a0..6b3f1e4 100644
13--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
14+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
15@@ -96,7 +96,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
16 */
17 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
18 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
19- } else if (!IS_I965G(dev) || IS_I965GM(dev)) {
20+ } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev)) {
21 uint32_t dcc;
22
23 /* On 915-945 and GM965, channel interleave by the CPU is
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch
deleted file mode 100644
index 8e6cbe95a4..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch
+++ /dev/null
@@ -1,58 +0,0 @@
1commit d9f2382adde582f8792ad96e9570716bcbea21a0
2Author: Eric Anholt <eric@anholt.net>
3Date: Tue Sep 23 14:50:57 2008 -0700
4
5 drm: Use ioremap_wc in i915_driver instead of ioremap, since we always want WC.
6
7 Fixes failure to map the ringbuffer when PAT tells us we don't get to do
8 uncached on something that's already mapped WC, or something along those lines.
9
10 Signed-off-by: Eric Anholt <eric@anholt.net>
11
12diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
13index 29d9d21..6ecfd10 100644
14--- a/drivers/gpu/drm/i915/i915_gem.c
15+++ b/drivers/gpu/drm/i915/i915_gem.c
16@@ -233,7 +233,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
17 if (unwritten)
18 #endif /* CONFIG_HIGHMEM */
19 {
20- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
21+ vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
22 #if WATCH_PWRITE
23 DRM_INFO("pwrite slow i %d o %d l %d "
24 "pfn %ld vaddr %p\n",
25@@ -1612,9 +1612,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
26 if (reloc_page != NULL)
27 iounmap(reloc_page);
28
29- reloc_page = ioremap(dev->agp->base +
30- (reloc_offset & ~(PAGE_SIZE - 1)),
31- PAGE_SIZE);
32+ reloc_page = ioremap_wc(dev->agp->base +
33+ (reloc_offset &
34+ ~(PAGE_SIZE - 1)),
35+ PAGE_SIZE);
36 last_reloc_offset = reloc_offset;
37 if (reloc_page == NULL) {
38 drm_gem_object_unreference(target_obj);
39@@ -2318,7 +2319,9 @@ i915_gem_init_hws(struct drm_device *dev)
40 dev_priv->hws_map.flags = 0;
41 dev_priv->hws_map.mtrr = 0;
42
43- drm_core_ioremap(&dev_priv->hws_map, dev);
44+ /* Ioremapping here is the wrong thing to do. We want cached access.
45+ */
46+ drm_core_ioremap_wc(&dev_priv->hws_map, dev);
47 if (dev_priv->hws_map.handle == NULL) {
48 DRM_ERROR("Failed to map status page.\n");
49 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
50@@ -2369,7 +2372,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
51 dev_priv->ring.map.flags = 0;
52 dev_priv->ring.map.mtrr = 0;
53
54- drm_core_ioremap(&dev_priv->ring.map, dev);
55+ drm_core_ioremap_wc(&dev_priv->ring.map, dev);
56 if (dev_priv->ring.map.handle == NULL) {
57 DRM_ERROR("Failed to map ringbuffer.\n");
58 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0023-drm-clean-up-many-sparse-warnings-in-i915.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0023-drm-clean-up-many-sparse-warnings-in-i915.patch
deleted file mode 100644
index 236b161587..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0023-drm-clean-up-many-sparse-warnings-in-i915.patch
+++ /dev/null
@@ -1,192 +0,0 @@
1commit 034994cfffbb2371b720e3f49378031ebc12645e
2Author: Eric Anholt <eric@anholt.net>
3Date: Thu Oct 2 12:24:47 2008 -0700
4
5 drm: Clean up many sparse warnings in i915.
6
7 Signed-off-by: Eric Anholt <eric@anholt.net>
8
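(Most of the churn below is sparse address-space annotation. A minimal sketch of the pattern, with a made-up register offset:)

    /* __iomem marks a pointer into device memory for sparse: it must be
     * accessed via readl()/writel(), never dereferenced directly. */
    static u32 read_status(void __iomem *regs)
    {
            return readl(regs + 0x10);      /* 0x10 is a made-up offset */
            /* sparse would warn about: *(u32 *)(regs + 0x10) */
    }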
9diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
10index dbd3f49..814cc12 100644
11--- a/drivers/gpu/drm/i915/i915_dma.c
12+++ b/drivers/gpu/drm/i915/i915_dma.c
13@@ -76,7 +76,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
14 * Sets up the hardware status page for devices that need a physical address
15 * in the register.
16 */
17-int i915_init_phys_hws(struct drm_device *dev)
18+static int i915_init_phys_hws(struct drm_device *dev)
19 {
20 drm_i915_private_t *dev_priv = dev->dev_private;
21 /* Program Hardware Status Page */
22@@ -101,7 +101,7 @@ int i915_init_phys_hws(struct drm_device *dev)
23 * Frees the hardware status page, whether it's a physical address or a virtual
24 * address set up by the X Server.
25 */
26-void i915_free_hws(struct drm_device *dev)
27+static void i915_free_hws(struct drm_device *dev)
28 {
29 drm_i915_private_t *dev_priv = dev->dev_private;
30 if (dev_priv->status_page_dmah) {
31@@ -145,8 +145,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
32
33 if (dev_priv->ring.virtual_start) {
34 drm_core_ioremapfree(&dev_priv->ring.map, dev);
35- dev_priv->ring.virtual_start = 0;
36- dev_priv->ring.map.handle = 0;
37+ dev_priv->ring.virtual_start = NULL;
38+ dev_priv->ring.map.handle = NULL;
39 dev_priv->ring.map.size = 0;
40 }
41
42@@ -827,9 +827,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
43 base = drm_get_resource_start(dev, mmio_bar);
44 size = drm_get_resource_len(dev, mmio_bar);
45
46- ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
47- _DRM_KERNEL | _DRM_DRIVER,
48- &dev_priv->mmio_map);
49+ dev_priv->regs = ioremap(base, size);
50
51 i915_gem_load(dev);
52
53@@ -867,8 +865,8 @@ int i915_driver_unload(struct drm_device *dev)
54
55 i915_free_hws(dev);
56
57- if (dev_priv->mmio_map)
58- drm_rmmap(dev, dev_priv->mmio_map);
59+ if (dev_priv->regs != NULL)
60+ iounmap(dev_priv->regs);
61
62 intel_opregion_free(dev);
63
64diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
65index 8547f0a..b184d54 100644
66--- a/drivers/gpu/drm/i915/i915_drv.h
67+++ b/drivers/gpu/drm/i915/i915_drv.h
68@@ -110,8 +110,8 @@ struct intel_opregion {
69 typedef struct drm_i915_private {
70 struct drm_device *dev;
71
72+ void __iomem *regs;
73 drm_local_map_t *sarea;
74- drm_local_map_t *mmio_map;
75
76 drm_i915_sarea_t *sarea_priv;
77 drm_i915_ring_buffer_t ring;
78@@ -553,12 +553,12 @@ extern void opregion_enable_asle(struct drm_device *dev);
79 LOCK_TEST_WITH_RETURN(dev, file_priv); \
80 } while (0)
81
82-#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
83-#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
84-#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
85-#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
86-#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg))
87-#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
88+#define I915_READ(reg) readl(dev_priv->regs + (reg))
89+#define I915_WRITE(reg,val) writel(val, dev_priv->regs + (reg))
90+#define I915_READ16(reg) readw(dev_priv->regs + (reg))
91+#define I915_WRITE16(reg,val) writel(val, dev_priv->regs + (reg))
92+#define I915_READ8(reg) readb(dev_priv->regs + (reg))
93+#define I915_WRITE8(reg,val) writeb(val, dev_priv->regs + (reg))
94
95 #define I915_VERBOSE 0
96
97diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
98index 6ecfd10..6a89449 100644
99--- a/drivers/gpu/drm/i915/i915_gem.c
100+++ b/drivers/gpu/drm/i915/i915_gem.c
101@@ -176,7 +176,8 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
102 ssize_t remain;
103 loff_t offset;
104 char __user *user_data;
105- char *vaddr;
106+ char __iomem *vaddr;
107+ char *vaddr_atomic;
108 int i, o, l;
109 int ret = 0;
110 unsigned long pfn;
111@@ -219,16 +220,20 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
112 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
113
114 #ifdef CONFIG_HIGHMEM
115- /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
116+ /* This is a workaround for the low performance of iounmap
117+ * (approximate 10% cpu cost on normal 3D workloads).
118+ * kmap_atomic on HIGHMEM kernels happens to let us map card
119+ * memory without taking IPIs. When the vmap rework lands
120+ * we should be able to dump this hack.
121 */
122- vaddr = kmap_atomic_pfn(pfn, KM_USER0);
123+ vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
124 #if WATCH_PWRITE
125 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
126- i, o, l, pfn, vaddr);
127+ i, o, l, pfn, vaddr_atomic);
128 #endif
129- unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
130+ unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
131 user_data, l);
132- kunmap_atomic(vaddr, KM_USER0);
133+ kunmap_atomic(vaddr_atomic, KM_USER0);
134
135 if (unwritten)
136 #endif /* CONFIG_HIGHMEM */
137@@ -271,7 +276,7 @@ fail:
138 return ret;
139 }
140
141-int
142+static int
143 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
144 struct drm_i915_gem_pwrite *args,
145 struct drm_file *file_priv)
146@@ -587,7 +592,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
147 * Ensures that all commands in the ring are finished
148 * before signalling the CPU
149 */
150-uint32_t
151+static uint32_t
152 i915_retire_commands(struct drm_device *dev)
153 {
154 drm_i915_private_t *dev_priv = dev->dev_private;
155@@ -734,7 +739,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
156 * Waits for a sequence number to be signaled, and cleans up the
157 * request and object lists appropriately for that event.
158 */
159-int
160+static int
161 i915_wait_request(struct drm_device *dev, uint32_t seqno)
162 {
163 drm_i915_private_t *dev_priv = dev->dev_private;
164@@ -1483,7 +1488,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
165 struct drm_i915_gem_object *obj_priv = obj->driver_private;
166 int i, ret;
167 uint32_t last_reloc_offset = -1;
168- void *reloc_page = NULL;
169+ void __iomem *reloc_page = NULL;
170
171 /* Choose the GTT offset for our buffer and put it there. */
172 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
173@@ -1500,8 +1505,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
174 for (i = 0; i < entry->relocation_count; i++) {
175 struct drm_gem_object *target_obj;
176 struct drm_i915_gem_object *target_obj_priv;
177- uint32_t reloc_val, reloc_offset, *reloc_entry;
178- int ret;
179+ uint32_t reloc_val, reloc_offset;
180+ uint32_t __iomem *reloc_entry;
181
182 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
183 if (ret != 0) {
184@@ -1624,7 +1629,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
185 }
186 }
187
188- reloc_entry = (uint32_t *)((char *)reloc_page +
189+ reloc_entry = (uint32_t __iomem *)(reloc_page +
190 (reloc_offset & (PAGE_SIZE - 1)));
191 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
192
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0024-fastboot-create-a-asynchronous-initlevel.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0024-fastboot-create-a-asynchronous-initlevel.patch
deleted file mode 100644
index db518b36eb..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0024-fastboot-create-a-asynchronous-initlevel.patch
+++ /dev/null
@@ -1,133 +0,0 @@
1From ac9103dd8e4dc65c110d6cba9a3380c6c617ffa7 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Fri, 18 Jul 2008 15:16:08 -0700
4Subject: [PATCH] fastboot: create a "asynchronous" initlevel
5
6This patch creates an asynchronous initlevel (6a) which is at the same
7level as the normal device initcalls, but with the difference that they
8are run asynchronously from all the other initcalls. The purpose of this
9*selective* level is that we can move long-waiting inits that are not
10boot-critical to this level one at a time.
11
12To keep things not totally insane, the asynchronous initcalls are async
13to the other initcalls, but are still ordered among themselves; think of it
14as "bottom-half-not-softirq". This has the benefit that async drivers
15still have stable device ordering between them.
16
17Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
18Signed-off-by: Ingo Molnar <mingo@elte.hu>
19---
20 include/asm-generic/vmlinux.lds.h | 3 +++
21 include/linux/init.h | 6 ++++++
22 init/main.c | 35 ++++++++++++++++++++++++++++++++---
23 3 files changed, 41 insertions(+), 3 deletions(-)
24
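(Usage sketch, with a hypothetical driver: a non-boot-critical driver opts into level 6a simply by switching to the registration macro this patch introduces.)

    static int __init slowdev_init(void)
    {
            /* long, non-boot-critical hardware init would live here */
            return 0;
    }
    /* level 6a: runs asynchronously to level 6, but still ordered
     * with respect to the other 6a initcalls */
    module_init_async(slowdev_init);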
25Index: linux-2.6.27/include/asm-generic/vmlinux.lds.h
26===================================================================
27--- linux-2.6.27.orig/include/asm-generic/vmlinux.lds.h 2008-10-14 16:55:43.000000000 +0200
28+++ linux-2.6.27/include/asm-generic/vmlinux.lds.h 2008-10-14 17:00:59.000000000 +0200
29@@ -376,6 +376,9 @@
30 *(.initcall5.init) \
31 *(.initcall5s.init) \
32 *(.initcallrootfs.init) \
33+ __async_initcall_start = .; \
34+ *(.initcall6a.init) \
35+ __async_initcall_end = .; \
36 *(.initcall6.init) \
37 *(.initcall6s.init) \
38 *(.initcall7.init) \
39Index: linux-2.6.27/include/linux/init.h
40===================================================================
41--- linux-2.6.27.orig/include/linux/init.h 2008-10-14 16:55:45.000000000 +0200
42+++ linux-2.6.27/include/linux/init.h 2008-10-14 17:00:59.000000000 +0200
43@@ -197,11 +197,13 @@ extern void (*late_time_init)(void);
44 #define fs_initcall_sync(fn) __define_initcall("5s",fn,5s)
45 #define rootfs_initcall(fn) __define_initcall("rootfs",fn,rootfs)
46 #define device_initcall(fn) __define_initcall("6",fn,6)
47+#define device_initcall_async(fn) __define_initcall("6a", fn, 6a)
48 #define device_initcall_sync(fn) __define_initcall("6s",fn,6s)
49 #define late_initcall(fn) __define_initcall("7",fn,7)
50 #define late_initcall_sync(fn) __define_initcall("7s",fn,7s)
51
52 #define __initcall(fn) device_initcall(fn)
53+#define __initcall_async(fn) device_initcall_async(fn)
54
55 #define __exitcall(fn) \
56 static exitcall_t __exitcall_##fn __exit_call = fn
57@@ -257,6 +259,7 @@ void __init parse_early_param(void);
58 * be one per module.
59 */
60 #define module_init(x) __initcall(x);
61+#define module_init_async(x) __initcall_async(x);
62
63 /**
64 * module_exit() - driver exit entry point
65@@ -279,10 +282,13 @@ void __init parse_early_param(void);
66 #define subsys_initcall(fn) module_init(fn)
67 #define fs_initcall(fn) module_init(fn)
68 #define device_initcall(fn) module_init(fn)
69+#define device_initcall_async(fn) module_init(fn)
70 #define late_initcall(fn) module_init(fn)
71
72 #define security_initcall(fn) module_init(fn)
73
74+#define module_init_async(fn) module_init(fn)
75+
76 /* Each module must use one module_init(). */
77 #define module_init(initfn) \
78 static inline initcall_t __inittest(void) \
79Index: linux-2.6.27/init/main.c
80===================================================================
81--- linux-2.6.27.orig/init/main.c 2008-10-14 16:55:47.000000000 +0200
82+++ linux-2.6.27/init/main.c 2008-10-14 17:00:59.000000000 +0200
83@@ -745,18 +745,47 @@ int do_one_initcall(initcall_t fn)
84
85
86 extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
87+extern initcall_t __async_initcall_start[], __async_initcall_end[];
88
89-static void __init do_initcalls(void)
90+static void __init do_async_initcalls(struct work_struct *dummy)
91 {
92 initcall_t *call;
93
94- for (call = __early_initcall_end; call < __initcall_end; call++)
95+ for (call = __async_initcall_start; call < __async_initcall_end; call++)
96 do_one_initcall(*call);
97+}
98+
99+static struct workqueue_struct *async_init_wq;
100+
101+static void __init do_initcalls(void)
102+{
103+ initcall_t *call;
104+ static DECLARE_WORK(async_work, do_async_initcalls);
105+ int phase = 0; /* 0 = levels 0 - 6, 1 = level 6a, 2 = after level 6a */
106+
107+ async_init_wq = create_singlethread_workqueue("kasyncinit");
108+
109+ for (call = __early_initcall_end; call < __initcall_end; call++) {
110+ if (phase == 0 && call >= __async_initcall_start) {
111+ phase = 1;
112+ queue_work(async_init_wq, &async_work);
113+ }
114+ if (phase == 1 && call >= __async_initcall_end)
115+ phase = 2;
116+ if (phase != 1)
117+ do_one_initcall(*call);
118+ }
119
120- /* Make sure there is no pending stuff from the initcall sequence */
121+ /*
122+ * Make sure there is no pending stuff from the initcall sequence,
123+ * including the async initcalls
124+ */
125 flush_scheduled_work();
126+ flush_workqueue(async_init_wq);
127+ destroy_workqueue(async_init_wq);
128 }
129
130+
131 /*
132 * Ok, the machine is now initialized. None of the devices
133 * have been touched yet, but the CPU subsystem is up and
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch
deleted file mode 100644
index f6db800c71..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch
+++ /dev/null
@@ -1,59 +0,0 @@
1From d1a26186ee222329a797bb0b2c8e2b5bc7d94d42 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Fri, 18 Jul 2008 15:16:53 -0700
4Subject: [PATCH] fastboot: turn the USB hostcontroller initcalls into async initcalls
5
6The USB host controller init calls take a long time, mostly due to a
7"minimally 100 msec" delay *per port* during initialization.
8These are prime candidates for going in parallel to everything else.
9
10The USB device ordering is not affected by this due to the
11serialized-within-each-other property of async initcalls.
12
13Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
14Signed-off-by: Ingo Molnar <mingo@elte.hu>
15---
16 drivers/usb/host/ehci-hcd.c | 2 +-
17 drivers/usb/host/ohci-hcd.c | 2 +-
18 drivers/usb/host/uhci-hcd.c | 2 +-
19 3 files changed, 3 insertions(+), 3 deletions(-)
20
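(Illustrative sketch of where the time goes; the helper and port count are made up, only the "minimally 100 msec" per-port settle time is from the description above.)

    static void __init settle_ports(int nports)
    {
            int i;

            /* a mandatory >= 100 ms delay per port serializes into,
             * e.g., 8 ports * 100 ms = ~800 ms of pure waiting */
            for (i = 0; i < nports; i++)
                    msleep(100);
    }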
21Index: linux-2.6.27/drivers/usb/host/ehci-hcd.c
22===================================================================
23--- linux-2.6.27.orig/drivers/usb/host/ehci-hcd.c 2008-10-14 16:55:35.000000000 +0200
24+++ linux-2.6.27/drivers/usb/host/ehci-hcd.c 2008-10-14 17:01:27.000000000 +0200
25@@ -1107,7 +1107,7 @@ clean0:
26 #endif
27 return retval;
28 }
29-module_init(ehci_hcd_init);
30+module_init_async(ehci_hcd_init);
31
32 static void __exit ehci_hcd_cleanup(void)
33 {
34Index: linux-2.6.27/drivers/usb/host/ohci-hcd.c
35===================================================================
36--- linux-2.6.27.orig/drivers/usb/host/ohci-hcd.c 2008-10-14 16:55:35.000000000 +0200
37+++ linux-2.6.27/drivers/usb/host/ohci-hcd.c 2008-10-14 17:01:27.000000000 +0200
38@@ -1186,7 +1186,7 @@ static int __init ohci_hcd_mod_init(void
39
40 return retval;
41 }
42-module_init(ohci_hcd_mod_init);
43+module_init_async(ohci_hcd_mod_init);
44
45 static void __exit ohci_hcd_mod_exit(void)
46 {
47Index: linux-2.6.27/drivers/usb/host/uhci-hcd.c
48===================================================================
49--- linux-2.6.27.orig/drivers/usb/host/uhci-hcd.c 2008-10-14 16:55:35.000000000 +0200
50+++ linux-2.6.27/drivers/usb/host/uhci-hcd.c 2008-10-14 17:01:27.000000000 +0200
51@@ -999,7 +999,7 @@ static void __exit uhci_hcd_cleanup(void
52 kfree(errbuf);
53 }
54
55-module_init(uhci_hcd_init);
56+module_init_async(uhci_hcd_init);
57 module_exit(uhci_hcd_cleanup);
58
59 MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch
deleted file mode 100644
index 4b10a93108..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch
+++ /dev/null
@@ -1,51 +0,0 @@
1From 60ddc2e5c44b4b9f5fcb440065469eacbeabf5eb Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Fri, 18 Jul 2008 15:17:35 -0700
4Subject: [PATCH] fastboot: convert a few non-critical ACPI drivers to async initcalls
5
6This patch converts a few non-critical ACPI drivers to async initcalls;
7these initcalls (battery, button and thermal) tend to take quite a bit of
8time (hundreds of milliseconds) due to the hardware they need to talk to,
9but are otherwise clearly non-essential for the boot process.
10
11Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
12Signed-off-by: Ingo Molnar <mingo@elte.hu>
13---
14 drivers/acpi/battery.c | 2 +-
15 drivers/acpi/button.c | 2 +-
16 drivers/acpi/thermal.c | 2 +-
17 3 files changed, 3 insertions(+), 3 deletions(-)
18
19Index: linux-2.6.27/drivers/acpi/battery.c
20===================================================================
21--- linux-2.6.27.orig/drivers/acpi/battery.c 2008-10-14 16:55:15.000000000 +0200
22+++ linux-2.6.27/drivers/acpi/battery.c 2008-10-14 17:01:33.000000000 +0200
23@@ -904,5 +904,5 @@ static void __exit acpi_battery_exit(voi
24 #endif
25 }
26
27-module_init(acpi_battery_init);
28+module_init_async(acpi_battery_init);
29 module_exit(acpi_battery_exit);
30Index: linux-2.6.27/drivers/acpi/button.c
31===================================================================
32--- linux-2.6.27.orig/drivers/acpi/button.c 2008-10-14 16:55:15.000000000 +0200
33+++ linux-2.6.27/drivers/acpi/button.c 2008-10-14 17:01:33.000000000 +0200
34@@ -545,5 +545,5 @@ static void __exit acpi_button_exit(void
35 remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
36 }
37
38-module_init(acpi_button_init);
39+module_init_async(acpi_button_init);
40 module_exit(acpi_button_exit);
41Index: linux-2.6.27/drivers/acpi/thermal.c
42===================================================================
43--- linux-2.6.27.orig/drivers/acpi/thermal.c 2008-10-14 16:55:15.000000000 +0200
44+++ linux-2.6.27/drivers/acpi/thermal.c 2008-10-14 17:01:33.000000000 +0200
45@@ -1876,5 +1876,5 @@ static void __exit acpi_thermal_exit(voi
46 return;
47 }
48
49-module_init(acpi_thermal_init);
50+module_init_async(acpi_thermal_init);
51 module_exit(acpi_thermal_exit);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch
deleted file mode 100644
index 11fb34dd91..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch
+++ /dev/null
@@ -1,37 +0,0 @@
1From 3e6558b693dd1e69e3177bc248977f067a769f14 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 08:59:24 -0700
4Subject: [PATCH] fastboot: hold the BKL over the async init call sequence
5
6Regular init calls are called with the BKL held; make sure
7the async init calls are also called with the BKL held.
8While this reduces parallelism a little, it does provide
9lock-for-lock compatibility. The hit to parallelism isn't too
10bad; most of the init calls are done immediately or actually
11block for their delays.
12
13Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
14Signed-off-by: Ingo Molnar <mingo@elte.hu>
15---
16 init/main.c | 6 ++++++
17 1 file changed, 6 insertions(+)
18
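(What "lock-for-lock compatibility" buys, as a sketch; `legacy_registry_add' is hypothetical shared state, not real kernel code.)

    static int __init foo_init(void)        /* ordinary level 6 */
    {
            legacy_registry_add("foo");
            return 0;
    }

    static int __init bar_init(void)        /* async level 6a */
    {
            /* cannot interleave with foo_init(): both run under
             * lock_kernel(), exactly as before this series */
            legacy_registry_add("bar");
            return 0;
    }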
19Index: linux-2.6.27/init/main.c
20===================================================================
21--- linux-2.6.27.orig/init/main.c 2008-10-14 17:00:59.000000000 +0200
22+++ linux-2.6.27/init/main.c 2008-10-14 17:01:38.000000000 +0200
23@@ -751,8 +751,14 @@ static void __init do_async_initcalls(st
24 {
25 initcall_t *call;
26
27+ /*
28+ * For compatibility with normal init calls... take the BKL
29+ * not pretty, not desirable, but compatibility first
30+ */
31+ lock_kernel();
32 for (call = __async_initcall_start; call < __async_initcall_end; call++)
33 do_one_initcall(*call);
34+ unlock_kernel();
35 }
36
37 static struct workqueue_struct *async_init_wq;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0028-fastboot-sync-the-async-execution-before-late_initc.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0028-fastboot-sync-the-async-execution-before-late_initc.patch
deleted file mode 100644
index d1ff95a39f..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0028-fastboot-sync-the-async-execution-before-late_initc.patch
+++ /dev/null
@@ -1,92 +0,0 @@
1From 660625fb93f2fc0e633da9cb71d13d895b385f64 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 09:00:41 -0700
4Subject: [PATCH] fastboot: sync the async execution before late_initcall and move level 6s (sync) first
5
6Rene Herman points out several cases where it's basically needed to have
7all level 6/6a/6s calls done before the level 7 (late_initcall) code
8runs. This patch adds a sync point in the transition from the 6's to the
97's.
10
11Second, this patch makes sure that level 6s (sync) happens before the
12async code starts, and moves one user in drivers/pci into this category
13because it needs to run before device init.
14
15Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
16Signed-off-by: Ingo Molnar <mingo@elte.hu>
17---
18 drivers/pci/pci.c | 2 +-
19 include/asm-generic/vmlinux.lds.h | 3 ++-
20 init/main.c | 14 +++++++++++++-
21 3 files changed, 16 insertions(+), 3 deletions(-)
22
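(The resulting ordering, summarized as a sketch rather than literal code:)

    /*
     *  ... 5, 5s, rootfs, 6s (sync)   - run inline, in order
     *  6a (async)                     - queued to the kasyncinit workqueue
     *  6 (device)                     - run inline, overlapping with 6a
     *  flush_workqueue(async_init_wq) - barrier: all 6a work done
     *  7, 7s (late)                   - run inline
     */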
23Index: linux-2.6.27/drivers/pci/pci.c
24===================================================================
25--- linux-2.6.27.orig/drivers/pci/pci.c 2008-10-14 16:55:30.000000000 +0200
26+++ linux-2.6.27/drivers/pci/pci.c 2008-10-14 17:01:42.000000000 +0200
27@@ -1909,7 +1909,7 @@ static int __devinit pci_setup(char *str
28 }
29 early_param("pci", pci_setup);
30
31-device_initcall(pci_init);
32+device_initcall_sync(pci_init);
33
34 EXPORT_SYMBOL(pci_reenable_device);
35 EXPORT_SYMBOL(pci_enable_device_io);
36Index: linux-2.6.27/include/asm-generic/vmlinux.lds.h
37===================================================================
38--- linux-2.6.27.orig/include/asm-generic/vmlinux.lds.h 2008-10-14 17:00:59.000000000 +0200
39+++ linux-2.6.27/include/asm-generic/vmlinux.lds.h 2008-10-14 17:01:42.000000000 +0200
40@@ -376,11 +376,12 @@
41 *(.initcall5.init) \
42 *(.initcall5s.init) \
43 *(.initcallrootfs.init) \
44+ *(.initcall6s.init) \
45 __async_initcall_start = .; \
46 *(.initcall6a.init) \
47 __async_initcall_end = .; \
48 *(.initcall6.init) \
49- *(.initcall6s.init) \
50+ __device_initcall_end = .; \
51 *(.initcall7.init) \
52 *(.initcall7s.init)
53
54Index: linux-2.6.27/init/main.c
55===================================================================
56--- linux-2.6.27.orig/init/main.c 2008-10-14 17:01:38.000000000 +0200
57+++ linux-2.6.27/init/main.c 2008-10-14 17:01:42.000000000 +0200
58@@ -746,6 +746,7 @@ int do_one_initcall(initcall_t fn)
59
60 extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
61 extern initcall_t __async_initcall_start[], __async_initcall_end[];
62+extern initcall_t __device_initcall_end[];
63
64 static void __init do_async_initcalls(struct work_struct *dummy)
65 {
66@@ -767,7 +768,13 @@ static void __init do_initcalls(void)
67 {
68 initcall_t *call;
69 static DECLARE_WORK(async_work, do_async_initcalls);
70- int phase = 0; /* 0 = levels 0 - 6, 1 = level 6a, 2 = after level 6a */
71+ /*
72+ * 0 = levels 0 - 6,
73+ * 1 = level 6a,
74+ * 2 = after level 6a,
75+ * 3 = after level 6
76+ */
77+ int phase = 0;
78
79 async_init_wq = create_singlethread_workqueue("kasyncinit");
80
81@@ -778,6 +785,11 @@ static void __init do_initcalls(void)
82 }
83 if (phase == 1 && call >= __async_initcall_end)
84 phase = 2;
85+ if (phase == 2 && call >= __device_initcall_end) {
86+ phase = 3;
87+ /* make sure all async work is done before level 7 */
88+ flush_workqueue(async_init_wq);
89+ }
90 if (phase != 1)
91 do_one_initcall(*call);
92 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0029-fastboot-make-fastboot-a-config-option.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0029-fastboot-make-fastboot-a-config-option.patch
deleted file mode 100644
index 73b856372f..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0029-fastboot-make-fastboot-a-config-option.patch
+++ /dev/null
@@ -1,53 +0,0 @@
1From 50b6962016b824dfac254b8f36fc6cac301c8a8d Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 10:20:49 -0700
4Subject: [PATCH] fastboot: make fastboot a config option
5
6To mitigate the risks of async bootup, make fastboot a configuration
7option...
8
9Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
10Signed-off-by: Ingo Molnar <mingo@elte.hu>
11---
12 init/Kconfig | 11 +++++++++++
13 init/main.c | 4 ++++
14 2 files changed, 15 insertions(+)
15
16Index: linux-2.6.27/init/Kconfig
17===================================================================
18--- linux-2.6.27.orig/init/Kconfig 2008-10-14 16:55:47.000000000 +0200
19+++ linux-2.6.27/init/Kconfig 2008-10-14 17:01:48.000000000 +0200
20@@ -524,6 +524,17 @@ config CC_OPTIMIZE_FOR_SIZE
21
22 If unsure, say Y.
23
24+config FASTBOOT
25+ bool "Fast boot support"
26+ help
27+ The fastboot option will cause the kernel to try to optimize
28+ for faster boot.
29+
30+ This includes doing some of the device initialization asynchronous
31+ as well as opportunistically trying to mount the root fs early.
32+
33+ If unsure, say N.
34+
35 config SYSCTL
36 bool
37
38Index: linux-2.6.27/init/main.c
39===================================================================
40--- linux-2.6.27.orig/init/main.c 2008-10-14 17:01:42.000000000 +0200
41+++ linux-2.6.27/init/main.c 2008-10-14 17:01:48.000000000 +0200
42@@ -781,7 +781,11 @@ static void __init do_initcalls(void)
43 for (call = __early_initcall_end; call < __initcall_end; call++) {
44 if (phase == 0 && call >= __async_initcall_start) {
45 phase = 1;
46+#ifdef CONFIG_FASTBOOT
47 queue_work(async_init_wq, &async_work);
48+#else
49+ do_async_initcalls(NULL);
50+#endif
51 }
52 if (phase == 1 && call >= __async_initcall_end)
53 phase = 2;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch
deleted file mode 100644
index 0e0c7fa84f..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch
+++ /dev/null
@@ -1,64 +0,0 @@
1From db62cd29f9b9142c19c574ca00916f66ff22ed4a Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 13:01:28 -0700
4Subject: [PATCH] fastboot: retry mounting the root fs if we can't find init
5
6Currently we wait until all device init is done before trying to mount
7the root fs and, consequently, before executing init.
8
9In preparation for relaxing the first delay, this patch adds a retry
10attempt in case /sbin/init is not found. Before retrying, the code
11will wait for all device init to complete.
12
13While this patch by itself doesn't gain boot time yet (it needs follow on
14patches), the alternative already is to panic()...
15
16Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
17---
18 init/main.c | 19 +++++++++++++++++++
19 1 file changed, 19 insertions(+)
20
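(The control flow added below, condensed into a sketch; this is a simplification of the hunk that follows, not separate code.)

    run_init_process("/sbin/init");         /* returns only on failure */
    if (retry_count-- > 0) {
            flush_scheduled_work();          /* flush keventd and friends */
            while (driver_probe_done() != 0)
                    msleep(100);             /* wait for device probing */
            prepare_namespace();             /* try mounting root again */
            goto retry;                      /* back to run_init_process() */
    }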
21Index: linux-2.6.27/init/main.c
22===================================================================
23--- linux-2.6.27.orig/init/main.c 2008-10-14 17:01:48.000000000 +0200
24+++ linux-2.6.27/init/main.c 2008-10-14 17:02:42.000000000 +0200
25@@ -845,6 +845,7 @@ static void run_init_process(char *init_
26 */
27 static int noinline init_post(void)
28 {
29+ int retry_count = 1;
30 free_initmem();
31 unlock_kernel();
32 mark_rodata_ro();
33@@ -865,6 +866,7 @@ static int noinline init_post(void)
34 ramdisk_execute_command);
35 }
36
37+retry:
38 /*
39 * We try each of these until one succeeds.
40 *
41@@ -877,6 +879,23 @@ static int noinline init_post(void)
42 "defaults...\n", execute_command);
43 }
44 run_init_process("/sbin/init");
45+
46+ if (retry_count > 0) {
47+ retry_count--;
48+ /*
49+ * We haven't found init yet... potentially because the device
50+ * is still being probed. We need to
51+ * - flush keventd and friends
52+ * - wait for the known devices to complete their probing
53+ * - try to mount the root fs again
54+ */
55+ flush_scheduled_work();
56+ while (driver_probe_done() != 0)
57+ msleep(100);
58+ prepare_namespace();
59+ goto retry;
60+ }
61+
62 run_init_process("/etc/init");
63 run_init_process("/bin/init");
64 run_init_process("/bin/sh");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch
deleted file mode 100644
index 03b3b82202..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch
+++ /dev/null
@@ -1,41 +0,0 @@
1From b52c36a95ed8026b6925fe8595ebcab6921ae62d Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 13:07:09 -0700
4Subject: [PATCH] fastboot: make the raid autodetect code wait for all devices to init
5
6The raid autodetect code really needs to have all devices probed before
7it can detect raid arrays; not doing so would give rather messy situations
8where arrays would get detected as degraded when they shouldn't be, etc.
9
10This is in preparation for removing the "wait for everything to init"
11code that makes everyone pay, not just raid users.
12
13Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
14---
15 init/do_mounts_md.c | 7 +++++++
16 1 files changed, 7 insertions(+), 0 deletions(-)
17
18diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
19index 693d246..c0412a9 100644
20--- a/init/do_mounts_md.c
21+++ b/init/do_mounts_md.c
22@@ -267,9 +267,16 @@ __setup("md=", md_setup);
23 void __init md_run_setup(void)
24 {
25 create_dev("/dev/md0", MKDEV(MD_MAJOR, 0));
26+
27 if (raid_noautodetect)
28 printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
29 else {
30+ /*
31+ * Since we don't want to detect and use half a raid array, we need to
32+ * wait for the known devices to complete their probing
33+ */
34+ while (driver_probe_done() != 0)
35+ msleep(100);
36 int fd = sys_open("/dev/md0", 0, 0);
37 if (fd >= 0) {
38 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
39--
401.5.4.3
41
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch
deleted file mode 100644
index c963d4eaf3..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch
+++ /dev/null
@@ -1,41 +0,0 @@
1From 1b5a2bd0602010398cb473d1b821a9f1c1399caf Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 13:12:16 -0700
4Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay
5
6In the non-initrd case, we wait for all devices to finish their
7probing before we try to mount the rootfs.
8In practice, this means that we end up waiting 2 extra seconds for
9the PS/2 mouse probing even though the root-holding device has been
10ready for a long time.
11
12The previous two patches in this series made the RAID autodetect code
13do its own "wait for probing to be done" logic, and added
14"wait and retry" functionality in case the root device isn't actually
15available.
16
17These two changes should make it safe to remove the delay itself,
18and this patch does this. On my test laptop, this reduces the boot time
19by 2 seconds (kernel time goes from 3.9 to 1.9 seconds).
20
21Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
22---
23 init/do_mounts.c | 2 ++
24 1 file changed, 2 insertions(+)
25
26Index: linux-2.6.27/init/do_mounts.c
27===================================================================
28--- linux-2.6.27.orig/init/do_mounts.c 2008-10-14 16:57:34.000000000 +0200
29+++ linux-2.6.27/init/do_mounts.c 2008-10-14 17:02:51.000000000 +0200
30@@ -365,9 +365,11 @@ void __init prepare_namespace(void)
31 ssleep(root_delay);
32 }
33
34+#ifndef CONFIG_FASTBOOT
35 /* wait for the known devices to complete their probing */
36 while (driver_probe_done() != 0)
37 msleep(100);
38+#endif
39
40 md_run_setup();
41
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch
deleted file mode 100644
index 55c6c1adae..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch
+++ /dev/null
@@ -1,32 +0,0 @@
1From 799d0da9e645258b9d1ae11d4aac73c9474906e3 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 20 Jul 2008 16:30:29 -0700
4Subject: [PATCH] fastboot: make the RAID autostart code print a message just before waiting
5
6As requested/suggested by Neil Brown: make the raid code print that it's
7about to wait for probing to be done as well as give a suggestion on how
8to disable the probing if the user doesn't use raid.
9
10Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
11---
12 init/do_mounts_md.c | 4 +++-
13 1 files changed, 3 insertions(+), 1 deletions(-)
14
15diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
16index c0412a9..1ec5c41 100644
17--- a/init/do_mounts_md.c
18+++ b/init/do_mounts_md.c
19@@ -275,7 +275,9 @@ void __init md_run_setup(void)
20 * Since we don't want to detect and use half a raid array, we need to
21 * wait for the known devices to complete their probing
22 */
23- while (driver_probe_done() != 0)
24+ printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
25+ printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
26+ while (driver_probe_done() < 0)
27 msleep(100);
28 int fd = sys_open("/dev/md0", 0, 0);
29 if (fd >= 0) {
30--
311.5.4.3
32
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0034-fastboot-fix-typo-in-init-Kconfig-text.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0034-fastboot-fix-typo-in-init-Kconfig-text.patch
deleted file mode 100644
index c6f3b8e9ac..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0034-fastboot-fix-typo-in-init-Kconfig-text.patch
+++ /dev/null
@@ -1,26 +0,0 @@
1From 1a23ed42e1baf0481cc70c2f71d97b0bf0f1be70 Mon Sep 17 00:00:00 2001
2From: Ingo Molnar <mingo@elte.hu>
3Date: Thu, 31 Jul 2008 12:52:29 +0200
4Subject: [PATCH] fastboot: fix typo in init/Kconfig text
5
6Noticed by Randy Dunlap.
7
8Reported-by: Randy Dunlap <randy.dunlap@oracle.com>
9Signed-off-by: Ingo Molnar <mingo@elte.hu>
10---
11 init/Kconfig | 2 +-
12 1 file changed, 1 insertion(+), 1 deletion(-)
13
14Index: linux-2.6.27/init/Kconfig
15===================================================================
16--- linux-2.6.27.orig/init/Kconfig 2008-10-14 17:02:39.000000000 +0200
17+++ linux-2.6.27/init/Kconfig 2008-10-14 17:02:56.000000000 +0200
18@@ -530,7 +530,7 @@ config FASTBOOT
19 The fastboot option will cause the kernel to try to optimize
20 for faster boot.
21
22- This includes doing some of the device initialization asynchronous
23+ This includes doing some of the device initialization asynchronously
24 as well as opportunistically trying to mount the root fs early.
25
26 If unsure, say N.
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch
deleted file mode 100644
index b8af74eaf2..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch
+++ /dev/null
@@ -1,161 +0,0 @@
1From 8929dda869d51b953c8f300864da62297db8a74e Mon Sep 17 00:00:00 2001
2From: Li, Shaohua <shaohua.li@intel.com>
3Date: Wed, 13 Aug 2008 17:26:01 +0800
4Subject: [PATCH] fastboot: remove duplicate unpack_to_rootfs()
5
6We check if the initrd is an initramfs first and then do the real
7unpack. The check isn't required; we can unpack directly. If the initrd
8isn't an initramfs, we can remove the garbage. On my laptop, this saves
90.1s of boot time. This penalizes the non-initramfs case, but initramfs
10is now the most widely used format.
11
12Signed-off-by: Shaohua Li <shaohua.li@intel.com>
13Acked-by: Arjan van de Ven <arjan@infradead.org>
14Signed-off-by: Ingo Molnar <mingo@elte.hu>
15---
16 init/initramfs.c | 71 ++++++++++++++++++++++++++++++++++++++++++-----------
17 1 files changed, 56 insertions(+), 15 deletions(-)
18
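(The new control flow, condensed from the hunks below:)

    err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
    if (!err) {
            free_initrd();          /* it was an initramfs: done */
    } else {
            clean_rootfs();         /* drop the partial unpack */
            unpack_to_rootfs(__initramfs_start,
                             __initramfs_end - __initramfs_start);
            /* ...and fall back to treating it as an old-style initrd */
    }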
19diff --git a/init/initramfs.c b/init/initramfs.c
20index 644fc01..da8d030 100644
21--- a/init/initramfs.c
22+++ b/init/initramfs.c
23@@ -5,6 +5,7 @@
24 #include <linux/fcntl.h>
25 #include <linux/delay.h>
26 #include <linux/string.h>
27+#include <linux/dirent.h>
28 #include <linux/syscalls.h>
29
30 static __initdata char *message;
31@@ -121,8 +122,6 @@ static __initdata char *victim;
32 static __initdata unsigned count;
33 static __initdata loff_t this_header, next_header;
34
35-static __initdata int dry_run;
36-
37 static inline void __init eat(unsigned n)
38 {
39 victim += n;
40@@ -183,10 +182,6 @@ static int __init do_header(void)
41 parse_header(collected);
42 next_header = this_header + N_ALIGN(name_len) + body_len;
43 next_header = (next_header + 3) & ~3;
44- if (dry_run) {
45- read_into(name_buf, N_ALIGN(name_len), GotName);
46- return 0;
47- }
48 state = SkipIt;
49 if (name_len <= 0 || name_len > PATH_MAX)
50 return 0;
51@@ -257,8 +252,6 @@ static int __init do_name(void)
52 free_hash();
53 return 0;
54 }
55- if (dry_run)
56- return 0;
57 clean_path(collected, mode);
58 if (S_ISREG(mode)) {
59 int ml = maybe_link();
60@@ -423,10 +416,9 @@ static void __init flush_window(void)
61 outcnt = 0;
62 }
63
64-static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
65+static char * __init unpack_to_rootfs(char *buf, unsigned len)
66 {
67 int written;
68- dry_run = check_only;
69 header_buf = kmalloc(110, GFP_KERNEL);
70 symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
71 name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
72@@ -520,10 +512,57 @@ skip:
73 initrd_end = 0;
74 }
75
76+#define BUF_SIZE 1024
77+static void __init clean_rootfs(void)
78+{
79+ int fd;
80+ void *buf;
81+ struct linux_dirent64 *dirp;
82+ int count;
83+
84+ fd = sys_open("/", O_RDONLY, 0);
85+ WARN_ON(fd < 0);
86+ if (fd < 0)
87+ return;
88+ buf = kzalloc(BUF_SIZE, GFP_KERNEL);
89+ WARN_ON(!buf);
90+ if (!buf) {
91+ sys_close(fd);
92+ return;
93+ }
94+
95+ dirp = buf;
96+ count = sys_getdents64(fd, dirp, BUF_SIZE);
97+ while (count > 0) {
98+ while (count > 0) {
99+ struct stat st;
100+ int ret;
101+
102+ ret = sys_newlstat(dirp->d_name, &st);
103+ WARN_ON_ONCE(ret);
104+ if (!ret) {
105+ if (S_ISDIR(st.st_mode))
106+ sys_rmdir(dirp->d_name);
107+ else
108+ sys_unlink(dirp->d_name);
109+ }
110+
111+ count -= dirp->d_reclen;
112+ dirp = (void *)dirp + dirp->d_reclen;
113+ }
114+ dirp = buf;
115+ memset(buf, 0, BUF_SIZE);
116+ count = sys_getdents64(fd, dirp, BUF_SIZE);
117+ }
118+
119+ sys_close(fd);
120+ kfree(buf);
121+}
122+
123 static int __init populate_rootfs(void)
124 {
125 char *err = unpack_to_rootfs(__initramfs_start,
126- __initramfs_end - __initramfs_start, 0);
127+ __initramfs_end - __initramfs_start);
128 if (err)
129 panic(err);
130 if (initrd_start) {
131@@ -531,13 +570,15 @@ static int __init populate_rootfs(void)
132 int fd;
133 printk(KERN_INFO "checking if image is initramfs...");
134 err = unpack_to_rootfs((char *)initrd_start,
135- initrd_end - initrd_start, 1);
136+ initrd_end - initrd_start);
137 if (!err) {
138 printk(" it is\n");
139- unpack_to_rootfs((char *)initrd_start,
140- initrd_end - initrd_start, 0);
141 free_initrd();
142 return 0;
143+ } else {
144+ clean_rootfs();
145+ unpack_to_rootfs(__initramfs_start,
146+ __initramfs_end - __initramfs_start);
147 }
148 printk("it isn't (%s); looks like an initrd\n", err);
149 fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
150@@ -550,7 +591,7 @@ static int __init populate_rootfs(void)
151 #else
152 printk(KERN_INFO "Unpacking initramfs...");
153 err = unpack_to_rootfs((char *)initrd_start,
154- initrd_end - initrd_start, 0);
155+ initrd_end - initrd_start);
156 if (err)
157 panic(err);
158 printk(" done\n");
159--
1601.5.4.3
161
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0036-warning-fix-init-do_mounts_md-c.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0036-warning-fix-init-do_mounts_md-c.patch
deleted file mode 100644
index 9ba44a892d..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0036-warning-fix-init-do_mounts_md-c.patch
+++ /dev/null
@@ -1,82 +0,0 @@
1From fa3038625d7df2a1244c5b753069e7fdf99af3b5 Mon Sep 17 00:00:00 2001
2From: Ingo Molnar <mingo@elte.hu>
3Date: Mon, 18 Aug 2008 12:54:00 +0200
4Subject: [PATCH] warning: fix init do_mounts_md c
5MIME-Version: 1.0
6Content-Type: text/plain; charset=utf-8
7Content-Transfer-Encoding: 8bit
8
9fix warning:
10
11 init/do_mounts_md.c: In function ‘md_run_setup’:
12 init/do_mounts_md.c:282: warning: ISO C90 forbids mixed declarations and code
13
14also, use the opportunity to put the RAID autodetection code
15into a separate function - this also solves a checkpatch style warning.
16
17No code changed:
18
19md5:
20 aa36a35faef371b05f1974ad583bdbbd do_mounts_md.o.before.asm
21 aa36a35faef371b05f1974ad583bdbbd do_mounts_md.o.after.asm
22
23Signed-off-by: Ingo Molnar <mingo@elte.hu>
24---
25 init/do_mounts_md.c | 36 +++++++++++++++++++++---------------
26 1 files changed, 21 insertions(+), 15 deletions(-)
27
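(A minimal reproduction of the C90 rule involved, unrelated to the md code; all names here are made up.)

    void f(void)
    {
            do_work();              /* a statement ... */
            int fd = get_fd();      /* ... then a declaration: ISO C90
                                     * forbids this; C99 allows it, so
                                     * gcc only warns */
            use(fd);
    }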
28diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
29index 1ec5c41..c0dfd3c 100644
30--- a/init/do_mounts_md.c
31+++ b/init/do_mounts_md.c
32@@ -264,26 +264,32 @@ static int __init raid_setup(char *str)
33 __setup("raid=", raid_setup);
34 __setup("md=", md_setup);
35
36+static void autodetect_raid(void)
37+{
38+ int fd;
39+
40+ /*
41+ * Since we don't want to detect and use half a raid array, we need to
42+ * wait for the known devices to complete their probing
43+ */
44+ printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
45+ printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
46+ while (driver_probe_done() < 0)
47+ msleep(100);
48+ fd = sys_open("/dev/md0", 0, 0);
49+ if (fd >= 0) {
50+ sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
51+ sys_close(fd);
52+ }
53+}
54+
55 void __init md_run_setup(void)
56 {
57 create_dev("/dev/md0", MKDEV(MD_MAJOR, 0));
58
59 if (raid_noautodetect)
60 printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
61- else {
62- /*
63- * Since we don't want to detect and use half a raid array, we need to
64- * wait for the known devices to complete their probing
65- */
66- printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
67- printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
68- while (driver_probe_done() < 0)
69- msleep(100);
70- int fd = sys_open("/dev/md0", 0, 0);
71- if (fd >= 0) {
72- sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
73- sys_close(fd);
74- }
75- }
76+ else
77+ autodetect_raid();
78 md_setup_drive();
79 }
80--
811.5.4.3
82
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0037-init-initramfs.c-unused-function-when-compiling-wit.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0037-init-initramfs.c-unused-function-when-compiling-wit.patch
deleted file mode 100644
index 159f988670..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0037-init-initramfs.c-unused-function-when-compiling-wit.patch
+++ /dev/null
@@ -1,37 +0,0 @@
1From b4931e6c151acad06b4c12dc7cdb634366d7d27a Mon Sep 17 00:00:00 2001
2From: Steven Noonan <steven@uplinklabs.net>
3Date: Mon, 8 Sep 2008 16:19:10 -0700
4Subject: [PATCH] init/initramfs.c: unused function when compiling without CONFIG_BLK_DEV_RAM
5
6Fixes a compiler warning when the kernel isn't compiled with RAM
7block device support (CONFIG_BLK_DEV_RAM).
8
9Signed-off-by: Steven Noonan <steven@uplinklabs.net>
10Signed-off-by: Ingo Molnar <mingo@elte.hu>
11---
12 init/initramfs.c | 2 ++
13 1 files changed, 2 insertions(+), 0 deletions(-)
14
15diff --git a/init/initramfs.c b/init/initramfs.c
16index da8d030..2f056e2 100644
17--- a/init/initramfs.c
18+++ b/init/initramfs.c
19@@ -512,6 +512,7 @@ skip:
20 initrd_end = 0;
21 }
22
23+#ifdef CONFIG_BLK_DEV_RAM
24 #define BUF_SIZE 1024
25 static void __init clean_rootfs(void)
26 {
27@@ -558,6 +559,7 @@ static void __init clean_rootfs(void)
28 sys_close(fd);
29 kfree(buf);
30 }
31+#endif
32
33 static int __init populate_rootfs(void)
34 {
35--
361.5.4.3
37
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch
deleted file mode 100644
index 8d1e3f22f1..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch
+++ /dev/null
@@ -1,38 +0,0 @@
1From 5e4f25d1f43991324794657655bbbc43983522a2 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@infradead.org>
3Date: Wed, 10 Sep 2008 08:25:34 -0700
4Subject: [PATCH] fastboot: fix blackfin breakage due to vmlinux.lds change
5
6As reported by Mike Frysinger, the vmlinux.lds changes should
7have used VMLINUX_SYMBOL()...
8
9Reported-by: Mike Frysinger <vapier.adi@gmail.com>
10Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
11Acked-by: Bryan Wu <cooloney@kernel.org>
12Signed-off-by: Ingo Molnar <mingo@elte.hu>
13---
14 include/asm-generic/vmlinux.lds.h | 6 +++---
15 1 files changed, 3 insertions(+), 3 deletions(-)
16
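(Roughly what VMLINUX_SYMBOL() does — paraphrased, not quoted verbatim from the 2.6.27 header: on targets whose C symbols carry a prefix, such as the leading underscore on blackfin, linker-script references must carry it too.)

    #ifndef SYMBOL_PREFIX
    #define VMLINUX_SYMBOL(sym) sym
    #else                                   /* e.g. "_" on blackfin */
    #define PASTE2(x, y) x##y
    #define PASTE(x, y)  PASTE2(x, y)
    #define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
    #endif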
17diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
18index b9be858..ccabc4e 100644
19--- a/include/asm-generic/vmlinux.lds.h
20+++ b/include/asm-generic/vmlinux.lds.h
21@@ -377,11 +377,11 @@
22 *(.initcall5s.init) \
23 *(.initcallrootfs.init) \
24 *(.initcall6s.init) \
25- __async_initcall_start = .; \
26+ VMLINUX_SYMBOL(__async_initcall_start) = .; \
27 *(.initcall6a.init) \
28- __async_initcall_end = .; \
29+ VMLINUX_SYMBOL(__async_initcall_end) = .; \
30 *(.initcall6.init) \
31- __device_initcall_end = .; \
32+ VMLINUX_SYMBOL(__device_initcall_end) = .; \
33 *(.initcall7.init) \
34 *(.initcall7s.init)
35
36--
371.5.4.3
38
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch
deleted file mode 100644
index 6bcaab1087..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch
+++ /dev/null
@@ -1,177 +0,0 @@
1From 77e9695b9d5c9ce761dedc193045d9cb64b8e245 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sat, 13 Sep 2008 09:36:06 -0700
4Subject: [PATCH] Add a script to visualize the kernel boot process / time
5
6When optimizing the kernel boot time, it's very valuable to visualize
7what is going on at which time. In addition, with the fastboot asynchronous
8initcall level, it's also useful to see which initcall gets run where
9and when.
10
11This patch adds a script to turn a dmesg into an SVG graph (that can be
12shown with tools such as InkScape, Gimp or Firefox) and a small change
13to the initcall code to print the PID of the thread calling the initcall
14(so that the script can work out the parallelism).
15
16Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
17---
18 init/main.c | 1
19 scripts/bootgraph.pl | 138 +++++++++++++++++++++++++++++++++++++++++++++++++++
20 2 files changed, 139 insertions(+)
21 create mode 100644 scripts/bootgraph.pl
22
23Index: linux-2.6.27/init/main.c
24===================================================================
25--- linux-2.6.27.orig/init/main.c 2008-10-14 17:02:46.000000000 +0200
26+++ linux-2.6.27/init/main.c 2008-10-14 17:05:23.000000000 +0200
27@@ -709,6 +709,7 @@ int do_one_initcall(initcall_t fn)
28
29 if (initcall_debug) {
30 printk("calling %pF\n", fn);
31+ printk(" @ %i\n", task_pid_nr(current));
32 t0 = ktime_get();
33 }
34
35Index: linux-2.6.27/scripts/bootgraph.pl
36===================================================================
37--- /dev/null 1970-01-01 00:00:00.000000000 +0000
38+++ linux-2.6.27/scripts/bootgraph.pl 2008-10-14 17:03:34.000000000 +0200
39@@ -0,0 +1,138 @@
40+#!/usr/bin/perl
41+
42+# Copyright 2008, Intel Corporation
43+#
44+# This file is part of the Linux kernel
45+#
46+# This program file is free software; you can redistribute it and/or modify it
47+# under the terms of the GNU General Public License as published by the
48+# Free Software Foundation; version 2 of the License.
49+#
50+# This program is distributed in the hope that it will be useful, but WITHOUT
51+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
52+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
53+# for more details.
54+#
55+# You should have received a copy of the GNU General Public License
56+# along with this program in a file named COPYING; if not, write to the
57+# Free Software Foundation, Inc.,
58+# 51 Franklin Street, Fifth Floor,
59+# Boston, MA 02110-1301 USA
60+#
61+# Authors:
62+# Arjan van de Ven <arjan@linux.intel.com>
63+
64+
65+#
66+# This script turns a dmesg output into a SVG graphic that shows which
67+# functions take how much time. You can view SVG graphics with various
68+# programs, including Inkscape, The Gimp and Firefox.
69+#
70+#
71+# For this script to work, the kernel needs to be compiled with the
72+# CONFIG_PRINTK_TIME configuration option enabled, and with
73+# "initcall_debug" passed on the kernel command line.
74+#
75+# usage:
76+# dmesg | perl scripts/bootgraph.pl > output.svg
77+#
78+
79+my @rows;
80+my %start, %end, %row;
81+my $done = 0;
82+my $rowcount = 0;
83+my $maxtime = 0;
84+my $count = 0;
85+while (<>) {
86+ my $line = $_;
87+ if ($line =~ /([0-9\.]+)\] calling ([a-zA-Z\_]+)\+/) {
88+ my $func = $2;
89+ if ($done == 0) {
90+ $start{$func} = $1;
91+ }
92+ $row{$func} = 1;
93+ if ($line =~ /\@ ([0-9]+)/) {
94+ my $pid = $1;
95+ if (!defined($rows[$pid])) {
96+ $rowcount = $rowcount + 1;
97+ $rows[$pid] = $rowcount;
98+ }
99+ $row{$func} = $rows[$pid];
100+ }
101+ $count = $count + 1;
102+ }
103+
104+ if ($line =~ /([0-9\.]+)\] initcall ([a-zA-Z\_]+)\+.*returned/) {
105+ if ($done == 0) {
106+ $end{$2} = $1;
107+ $maxtime = $1;
108+ }
109+ }
110+ if ($line =~ /Write protecting the/) {
111+ $done = 1;
112+ }
113+}
114+
115+if ($count == 0) {
116+ print "No data found in the dmesg. Make sure CONFIG_PRINTK_TIME is enabled and\n";
117+ print "that initcall_debug is passed on the kernel command line.\n\n";
118+ print "Usage: \n";
119+ print " dmesg | perl scripts/bootgraph.pl > output.svg\n\n";
120+ exit;
121+}
122+
123+print "<?xml version=\"1.0\" standalone=\"no\"?> \n";
124+print "<svg width=\"1000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n";
125+
126+my @styles;
127+
128+$styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
129+$styles[1] = "fill:rgb(0,255,0);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
130+$styles[2] = "fill:rgb(255,0,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
131+$styles[3] = "fill:rgb(255,255,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
132+$styles[4] = "fill:rgb(255,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
133+$styles[5] = "fill:rgb(0,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
134+$styles[6] = "fill:rgb(0,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
135+$styles[7] = "fill:rgb(0,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
136+$styles[8] = "fill:rgb(255,0,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
137+$styles[9] = "fill:rgb(255,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
138+$styles[10] = "fill:rgb(255,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
139+$styles[11] = "fill:rgb(128,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
140+
141+my $mult = 950.0 / $maxtime;
142+my $threshold = 0.0500 / $maxtime;
143+my $stylecounter = 0;
144+while (($key,$value) = each %start) {
145+ my $duration = $end{$key} - $start{$key};
146+
147+ if ($duration >= $threshold) {
148+ my $s, $s2, $e, $y;
149+ $s = $value * $mult;
150+ $s2 = $s + 6;
151+ $e = $end{$key} * $mult;
152+ $w = $e - $s;
153+
154+ $y = $row{$key} * 150;
155+ $y2 = $y + 4;
156+
157+ $style = $styles[$stylecounter];
158+ $stylecounter = $stylecounter + 1;
159+ if ($stylecounter > 11) {
160+ $stylecounter = 0;
161+ };
162+
163+ print "<rect x=\"$s\" width=\"$w\" y=\"$y\" height=\"145\" style=\"$style\"/>\n";
164+ print "<text transform=\"translate($s2,$y2) rotate(90)\">$key</text>\n";
165+ }
166+}
167+
168+
169+# print the time line on top
170+my $time = 0.0;
171+while ($time < $maxtime) {
172+ my $s2 = $time * $mult;
173+ print "<text transform=\"translate($s2,89) rotate(90)\">$time</text>\n";
174+ $time = $time + 0.1;
175+}
176+
177+print "</svg>\n";
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch
deleted file mode 100644
index 0daba9d2cf..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch
+++ /dev/null
@@ -1,91 +0,0 @@
1From 5470e09b98074974316bbf98c8b8da01d670c2a4 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Sun, 14 Sep 2008 15:30:52 -0700
4Subject: [PATCH] fastboot: fix issues and improve output of bootgraph.pl
5
6David Sanders reported some issues with bootgraph.pl's display
7of his system's bootup; this commit fixes these by scaling the graph
8not from 0 to the end time but from the first initcall to the end time;
9the minimum display size etc. also now needs to scale with this, as does
10the axis display.
11
12Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
13---
14 scripts/bootgraph.pl | 25 +++++++++++++++++--------
15 1 files changed, 17 insertions(+), 8 deletions(-)
16
17diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
18index d459b8b..4e5f4ab 100644
19--- a/scripts/bootgraph.pl
20+++ b/scripts/bootgraph.pl
21@@ -42,6 +42,7 @@ my %start, %end, %row;
22 my $done = 0;
23 my $rowcount = 0;
24 my $maxtime = 0;
25+my $firsttime = 100;
26 my $count = 0;
27 while (<>) {
28 my $line = $_;
29@@ -49,6 +50,9 @@ while (<>) {
30 my $func = $2;
31 if ($done == 0) {
32 $start{$func} = $1;
33+ if ($1 < $firsttime) {
34+ $firsttime = $1;
35+ }
36 }
37 $row{$func} = 1;
38 if ($line =~ /\@ ([0-9]+)/) {
39@@ -71,6 +75,9 @@ while (<>) {
40 if ($line =~ /Write protecting the/) {
41 $done = 1;
42 }
43+ if ($line =~ /Freeing unused kernel memory/) {
44+ $done = 1;
45+ }
46 }
47
48 if ($count == 0) {
49@@ -99,17 +106,17 @@ $styles[9] = "fill:rgb(255,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0
50 $styles[10] = "fill:rgb(255,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
51 $styles[11] = "fill:rgb(128,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
52
53-my $mult = 950.0 / $maxtime;
54-my $threshold = 0.0500 / $maxtime;
55+my $mult = 950.0 / ($maxtime - $firsttime);
56+my $threshold = ($maxtime - $firsttime) / 60.0;
57 my $stylecounter = 0;
58 while (($key,$value) = each %start) {
59 my $duration = $end{$key} - $start{$key};
60
61 if ($duration >= $threshold) {
62 my $s, $s2, $e, $y;
63- $s = $value * $mult;
64+ $s = ($value - $firsttime) * $mult;
65 $s2 = $s + 6;
66- $e = $end{$key} * $mult;
67+ $e = ($end{$key} - $firsttime) * $mult;
68 $w = $e - $s;
69
70 $y = $row{$key} * 150;
71@@ -128,11 +135,13 @@ while (($key,$value) = each %start) {
72
73
74 # print the time line on top
75-my $time = 0.0;
76+my $time = $firsttime;
77+my $step = ($maxtime - $firsttime) / 15;
78 while ($time < $maxtime) {
79- my $s2 = $time * $mult;
80- print "<text transform=\"translate($s2,89) rotate(90)\">$time</text>\n";
81- $time = $time + 0.1;
82+ my $s2 = ($time - $firsttime) * $mult;
83+ my $tm = int($time * 100) / 100.0;
84+ print "<text transform=\"translate($s2,89) rotate(90)\">$tm</text>\n";
85+ $time = $time + $step;
86 }
87
88 print "</svg>\n";
89--
901.5.4.3
91
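The rescaling described above is a plain affine transform of the time axis: scaling from the first initcall rather than from 0 stops a late-starting boot from compressing the graph into the right-hand edge. A minimal sketch of the same arithmetic in C (the 950-unit SVG width, the range/60 threshold, and the 15 axis steps are the patch's own constants; the function and variable names are illustrative):

#include <stdio.h>

static double scale_x(double t, double firsttime, double mult)
{
	/* Map an absolute timestamp onto the 950-unit SVG x axis. */
	return (t - firsttime) * mult;
}

int main(void)
{
	double firsttime = 0.8, maxtime = 5.3;	/* example values */
	double range = maxtime - firsttime;
	double mult = 950.0 / range;		/* was 950.0 / maxtime */
	double threshold = range / 60.0;	/* drop initcalls shorter than this */
	double step = range / 15;		/* 15 axis labels, not fixed 0.1s ticks */
	double t;

	for (t = firsttime; t < maxtime; t += step)
		printf("tick x=%.1f t=%.2f\n", scale_x(t, firsttime, mult), t);
	printf("threshold=%.3fs\n", threshold);
	return 0;
}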
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0041-r8169-8101e.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0041-r8169-8101e.patch
deleted file mode 100644
index 781c9a127e..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0041-r8169-8101e.patch
+++ /dev/null
@@ -1,940 +0,0 @@
1From 771c0d99c0ab3ca7f1a9bc400e8259171b518d5f Mon Sep 17 00:00:00 2001
2From: Francois Romieu <romieu@fr.zoreil.com>
3Date: Thu, 21 Aug 2008 23:20:40 +0200
4Subject: [PATCH] r8169: fix RxMissed register access
5
6- the register location is defined for the 8169 chipset only and
7 there is no 8169 beyond RTL_GIGA_MAC_VER_06
8- only the lower 3 bytes of the register are valid (a masking sketch follows this patch)
9
10Fixes:
111. http://bugzilla.kernel.org/show_bug.cgi?id=10180
122. http://bugzilla.kernel.org/show_bug.cgi?id=11062 (bits of)
13
14Tested by Hermann Gausterer and Adam Huffman.
15
16Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
17Cc: Edward Hsu <edward_hsu@realtek.com.tw>
18---
19 drivers/net/r8169.c | 25 ++++++++++++++-----------
20 1 files changed, 14 insertions(+), 11 deletions(-)
21
22diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
23index 0f6f974..4190ee7 100644
24--- a/drivers/net/r8169.c
25+++ b/drivers/net/r8169.c
26@@ -2099,8 +2099,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
27
28 RTL_R8(IntrMask);
29
30- RTL_W32(RxMissed, 0);
31-
32 rtl_set_rx_mode(dev);
33
34 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
35@@ -2143,8 +2141,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
36
37 RTL_R8(IntrMask);
38
39- RTL_W32(RxMissed, 0);
40-
41 rtl_set_rx_mode(dev);
42
43 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
44@@ -2922,6 +2918,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
45 return work_done;
46 }
47
48+static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
49+{
50+ struct rtl8169_private *tp = netdev_priv(dev);
51+
52+ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
53+ return;
54+
55+ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
56+ RTL_W32(RxMissed, 0);
57+}
58+
59 static void rtl8169_down(struct net_device *dev)
60 {
61 struct rtl8169_private *tp = netdev_priv(dev);
62@@ -2939,9 +2946,7 @@ core_down:
63
64 rtl8169_asic_down(ioaddr);
65
66- /* Update the error counts. */
67- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
68- RTL_W32(RxMissed, 0);
69+ rtl8169_rx_missed(dev, ioaddr);
70
71 spin_unlock_irq(&tp->lock);
72
73@@ -3063,8 +3068,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
74
75 if (netif_running(dev)) {
76 spin_lock_irqsave(&tp->lock, flags);
77- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
78- RTL_W32(RxMissed, 0);
79+ rtl8169_rx_missed(dev, ioaddr);
80 spin_unlock_irqrestore(&tp->lock, flags);
81 }
82
83@@ -3089,8 +3093,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
84
85 rtl8169_asic_down(ioaddr);
86
87- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
88- RTL_W32(RxMissed, 0);
89+ rtl8169_rx_missed(dev, ioaddr);
90
91 spin_unlock_irq(&tp->lock);
92
93--
941.5.3.3
95
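The "lower 3 bytes" remark above is the crux of the new helper: the counter is read through a 32-bit register, but only bits 23:0 are valid, so the value is masked before being accumulated and the register is then cleared to restart the count. A minimal sketch, assuming hypothetical read32()/write32() accessors in place of RTL_R32/RTL_W32:

#include <stdint.h>

#define RX_MISSED_MASK	0xffffffu	/* bits 23:0 are the valid bytes */

/* Hypothetical MMIO accessors standing in for RTL_R32/RTL_W32. */
extern uint32_t read32(unsigned int reg);
extern void write32(unsigned int reg, uint32_t val);

static void accumulate_rx_missed(uint64_t *rx_missed_errors, unsigned int reg)
{
	/* Mask off the undefined top byte, then clear to restart counting. */
	*rx_missed_errors += read32(reg) & RX_MISSED_MASK;
	write32(reg, 0);
}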
96From 6ee4bc96d446a9c466a18b715c7ab2d662c03ebd Mon Sep 17 00:00:00 2001
97From: Francois Romieu <romieu@fr.zoreil.com>
98Date: Sat, 26 Jul 2008 14:26:06 +0200
99Subject: [PATCH] r8169: get ethtool settings through the generic mii helper
100
101It avoids reporting unsupported link capabilities with
102the fast-Ethernet-only 8101/8102 (see the sketch after this patch).
103
104Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
105Tested-by: Martin Capitanio <martin@capitanio.org>
106Fixed-by: Ivan Vecera <ivecera@redhat.com>
107Cc: Edward Hsu <edward_hsu@realtek.com.tw>
108---
109 drivers/net/r8169.c | 99 +++++++++++++++++++++++---------------------------
110 1 files changed, 46 insertions(+), 53 deletions(-)
111
112diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
113index 4190ee7..7e026a6 100644
114--- a/drivers/net/r8169.c
115+++ b/drivers/net/r8169.c
116@@ -370,8 +370,9 @@ struct ring_info {
117 };
118
119 enum features {
120- RTL_FEATURE_WOL = (1 << 0),
121- RTL_FEATURE_MSI = (1 << 1),
122+ RTL_FEATURE_WOL = (1 << 0),
123+ RTL_FEATURE_MSI = (1 << 1),
124+ RTL_FEATURE_GMII = (1 << 2),
125 };
126
127 struct rtl8169_private {
128@@ -406,13 +407,15 @@ struct rtl8169_private {
129 struct vlan_group *vlgrp;
130 #endif
131 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
132- void (*get_settings)(struct net_device *, struct ethtool_cmd *);
133+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
134 void (*phy_reset_enable)(void __iomem *);
135 void (*hw_start)(struct net_device *);
136 unsigned int (*phy_reset_pending)(void __iomem *);
137 unsigned int (*link_ok)(void __iomem *);
138 struct delayed_work task;
139 unsigned features;
140+
141+ struct mii_if_info mii;
142 };
143
144 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
145@@ -482,6 +485,23 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
146 return value;
147 }
148
149+static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
150+ int val)
151+{
152+ struct rtl8169_private *tp = netdev_priv(dev);
153+ void __iomem *ioaddr = tp->mmio_addr;
154+
155+ mdio_write(ioaddr, location, val);
156+}
157+
158+static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
159+{
160+ struct rtl8169_private *tp = netdev_priv(dev);
161+ void __iomem *ioaddr = tp->mmio_addr;
162+
163+ return mdio_read(ioaddr, location);
164+}
165+
166 static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
167 {
168 RTL_W16(IntrMask, 0x0000);
169@@ -850,7 +870,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
170
171 #endif
172
173-static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
174+static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
175 {
176 struct rtl8169_private *tp = netdev_priv(dev);
177 void __iomem *ioaddr = tp->mmio_addr;
178@@ -867,65 +887,29 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
179
180 cmd->speed = SPEED_1000;
181 cmd->duplex = DUPLEX_FULL; /* Always set */
182+
183+ return 0;
184 }
185
186-static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
187+static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
188 {
189 struct rtl8169_private *tp = netdev_priv(dev);
190- void __iomem *ioaddr = tp->mmio_addr;
191- u8 status;
192-
193- cmd->supported = SUPPORTED_10baseT_Half |
194- SUPPORTED_10baseT_Full |
195- SUPPORTED_100baseT_Half |
196- SUPPORTED_100baseT_Full |
197- SUPPORTED_1000baseT_Full |
198- SUPPORTED_Autoneg |
199- SUPPORTED_TP;
200-
201- cmd->autoneg = 1;
202- cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
203-
204- if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
205- cmd->advertising |= ADVERTISED_10baseT_Half;
206- if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
207- cmd->advertising |= ADVERTISED_10baseT_Full;
208- if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
209- cmd->advertising |= ADVERTISED_100baseT_Half;
210- if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
211- cmd->advertising |= ADVERTISED_100baseT_Full;
212- if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
213- cmd->advertising |= ADVERTISED_1000baseT_Full;
214-
215- status = RTL_R8(PHYstatus);
216-
217- if (status & _1000bpsF)
218- cmd->speed = SPEED_1000;
219- else if (status & _100bps)
220- cmd->speed = SPEED_100;
221- else if (status & _10bps)
222- cmd->speed = SPEED_10;
223-
224- if (status & TxFlowCtrl)
225- cmd->advertising |= ADVERTISED_Asym_Pause;
226- if (status & RxFlowCtrl)
227- cmd->advertising |= ADVERTISED_Pause;
228-
229- cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
230- DUPLEX_FULL : DUPLEX_HALF;
231+
232+ return mii_ethtool_gset(&tp->mii, cmd);
233 }
234
235 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
236 {
237 struct rtl8169_private *tp = netdev_priv(dev);
238 unsigned long flags;
239+ int rc;
240
241 spin_lock_irqsave(&tp->lock, flags);
242
243- tp->get_settings(dev, cmd);
244+ rc = tp->get_settings(dev, cmd);
245
246 spin_unlock_irqrestore(&tp->lock, flags);
247- return 0;
248+ return rc;
249 }
250
251 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
252@@ -1513,7 +1497,7 @@ static const struct rtl_cfg_info {
253 unsigned int align;
254 u16 intr_event;
255 u16 napi_event;
256- unsigned msi;
257+ unsigned features;
258 } rtl_cfg_infos [] = {
259 [RTL_CFG_0] = {
260 .hw_start = rtl_hw_start_8169,
261@@ -1522,7 +1506,7 @@ static const struct rtl_cfg_info {
262 .intr_event = SYSErr | LinkChg | RxOverflow |
263 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
264 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
265- .msi = 0
266+ .features = RTL_FEATURE_GMII
267 },
268 [RTL_CFG_1] = {
269 .hw_start = rtl_hw_start_8168,
270@@ -1531,7 +1515,7 @@ static const struct rtl_cfg_info {
271 .intr_event = SYSErr | LinkChg | RxOverflow |
272 TxErr | TxOK | RxOK | RxErr,
273 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
274- .msi = RTL_FEATURE_MSI
275+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
276 },
277 [RTL_CFG_2] = {
278 .hw_start = rtl_hw_start_8101,
279@@ -1540,7 +1524,7 @@ static const struct rtl_cfg_info {
280 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
281 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
282 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
283- .msi = RTL_FEATURE_MSI
284+ .features = RTL_FEATURE_MSI
285 }
286 };
287
288@@ -1552,7 +1536,7 @@ static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
289 u8 cfg2;
290
291 cfg2 = RTL_R8(Config2) & ~MSIEnable;
292- if (cfg->msi) {
293+ if (cfg->features & RTL_FEATURE_MSI) {
294 if (pci_enable_msi(pdev)) {
295 dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
296 } else {
297@@ -1578,6 +1562,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
298 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
299 const unsigned int region = cfg->region;
300 struct rtl8169_private *tp;
301+ struct mii_if_info *mii;
302 struct net_device *dev;
303 void __iomem *ioaddr;
304 unsigned int i;
305@@ -1602,6 +1587,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
306 tp->pci_dev = pdev;
307 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
308
309+ mii = &tp->mii;
310+ mii->dev = dev;
311+ mii->mdio_read = rtl_mdio_read;
312+ mii->mdio_write = rtl_mdio_write;
313+ mii->phy_id_mask = 0x1f;
314+ mii->reg_num_mask = 0x1f;
315+ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
316+
317 /* enable device (incl. PCI PM wakeup and hotplug setup) */
318 rc = pci_enable_device(pdev);
319 if (rc < 0) {
320--
3211.5.3.3
322
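The generic helper used above lives in <linux/mii.h>: the driver hands its MDIO accessors to a struct mii_if_info, and mii_ethtool_gset() then derives the supported/advertised masks from the PHY registers, which is why a fast-Ethernet-only chip stops claiming gigabit modes. A condensed sketch of that contract, mirroring what the patch wires up in rtl8169_init_one (the my_* names are placeholders):

#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>

/* my_mdio_* stand in for the driver's accessors
 * (rtl_mdio_read/rtl_mdio_write in the patch). */
extern int my_mdio_read(struct net_device *dev, int phy_id, int location);
extern void my_mdio_write(struct net_device *dev, int phy_id, int location,
			  int val);

static void my_mii_setup(struct mii_if_info *mii, struct net_device *dev,
			 bool gmii_capable)
{
	mii->dev = dev;
	mii->mdio_read = my_mdio_read;
	mii->mdio_write = my_mdio_write;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->supports_gmii = gmii_capable;	/* false on the 8101/8102 */
}

/* The ethtool ->get_settings hook then simply delegates: */
static int my_get_settings(struct mii_if_info *mii, struct ethtool_cmd *cmd)
{
	return mii_ethtool_gset(mii, cmd);
}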
323From ef60b2a38e223a331e13ef503aee7cd5d4d5c12c Mon Sep 17 00:00:00 2001
324From: Hugh Dickins <hugh@veritas.com>
325Date: Mon, 8 Sep 2008 21:49:01 +0100
326Subject: [PATCH] r8169: select MII in Kconfig
327
328drivers/built-in.o: In function `rtl8169_gset_xmii':
329r8169.c:(.text+0x82259): undefined reference to `mii_ethtool_gset'
330suggests that the r8169 driver now needs to select MII.
331
332Signed-off-by: Hugh Dickins <hugh@veritas.com>
333Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
334Cc: Edward Hsu <edward_hsu@realtek.com.tw>
335---
336 drivers/net/Kconfig | 1 +
337 1 files changed, 1 insertions(+), 0 deletions(-)
338
339diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
340index 4a11296..60a0453 100644
341--- a/drivers/net/Kconfig
342+++ b/drivers/net/Kconfig
343@@ -2046,6 +2046,7 @@ config R8169
344 tristate "Realtek 8169 gigabit ethernet support"
345 depends on PCI
346 select CRC32
347+ select MII
348 ---help---
349 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
350
351--
3521.5.3.3
353
354From bca31864fca6004c4a4a9bd549e95c93b3c3bb10 Mon Sep 17 00:00:00 2001
355From: Francois Romieu <romieu@fr.zoreil.com>
356Date: Sat, 2 Aug 2008 15:50:02 +0200
357Subject: [PATCH] r8169: Tx performance tweak helper
358
359Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
360Cc: Edward Hsu <edward_hsu@realtek.com.tw>
361---
362 drivers/net/r8169.c | 15 ++++++++++-----
363 1 files changed, 10 insertions(+), 5 deletions(-)
364
365diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
366index 7e026a6..eea96fb 100644
367--- a/drivers/net/r8169.c
368+++ b/drivers/net/r8169.c
369@@ -2054,12 +2054,20 @@ static void rtl_hw_start_8169(struct net_device *dev)
370 RTL_W16(IntrMask, tp->intr_event);
371 }
372
373+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u8 force)
374+{
375+ u8 ctl;
376+
377+ pci_read_config_byte(pdev, 0x69, &ctl);
378+ ctl = (ctl & ~0x70) | force;
379+ pci_write_config_byte(pdev, 0x69, ctl);
380+}
381+
382 static void rtl_hw_start_8168(struct net_device *dev)
383 {
384 struct rtl8169_private *tp = netdev_priv(dev);
385 void __iomem *ioaddr = tp->mmio_addr;
386 struct pci_dev *pdev = tp->pci_dev;
387- u8 ctl;
388
389 RTL_W8(Cfg9346, Cfg9346_Unlock);
390
391@@ -2073,10 +2081,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
392
393 RTL_W16(CPlusCmd, tp->cp_cmd);
394
395- /* Tx performance tweak. */
396- pci_read_config_byte(pdev, 0x69, &ctl);
397- ctl = (ctl & ~0x70) | 0x50;
398- pci_write_config_byte(pdev, 0x69, ctl);
399+ rtl_tx_performance_tweak(pdev, 0x50);
400
401 RTL_W16(IntrMitigate, 0x5151);
402
403--
4041.5.3.3
405
406From 7a929ae7d5a3618f56bf1ccaf8c62df628e820aa Mon Sep 17 00:00:00 2001
407From: Francois Romieu <romieu@fr.zoreil.com>
408Date: Sat, 5 Jul 2008 00:21:15 +0200
409Subject: [PATCH] r8169: use pci_find_capability for the PCI-E features
410
411Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
412Cc: Edward Hsu <edward_hsu@realtek.com.tw>
413---
414 drivers/net/r8169.c | 32 ++++++++++++++++++++++++--------
415 1 files changed, 24 insertions(+), 8 deletions(-)
416
417diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
418index eea96fb..5c00522 100644
419--- a/drivers/net/r8169.c
420+++ b/drivers/net/r8169.c
421@@ -61,6 +61,7 @@ static const int multicast_filter_limit = 32;
422 /* MAC address length */
423 #define MAC_ADDR_LEN 6
424
425+#define MAX_READ_REQUEST_SHIFT 12
426 #define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
427 #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
428 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
429@@ -412,6 +413,7 @@ struct rtl8169_private {
430 void (*hw_start)(struct net_device *);
431 unsigned int (*phy_reset_pending)(void __iomem *);
432 unsigned int (*link_ok)(void __iomem *);
433+ int pcie_cap;
434 struct delayed_work task;
435 unsigned features;
436
437@@ -1663,6 +1665,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
438 goto err_out_free_res_4;
439 }
440
441+ tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
442+ if (!tp->pcie_cap && netif_msg_probe(tp))
443+ dev_info(&pdev->dev, "no PCI Express capability\n");
444+
445 /* Unneeded ? Don't mess with Mrs. Murphy. */
446 rtl8169_irq_mask_and_ack(ioaddr);
447
448@@ -2054,13 +2060,19 @@ static void rtl_hw_start_8169(struct net_device *dev)
449 RTL_W16(IntrMask, tp->intr_event);
450 }
451
452-static void rtl_tx_performance_tweak(struct pci_dev *pdev, u8 force)
453+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
454 {
455- u8 ctl;
456+ struct net_device *dev = pci_get_drvdata(pdev);
457+ struct rtl8169_private *tp = netdev_priv(dev);
458+ int cap = tp->pcie_cap;
459+
460+ if (cap) {
461+ u16 ctl;
462
463- pci_read_config_byte(pdev, 0x69, &ctl);
464- ctl = (ctl & ~0x70) | force;
465- pci_write_config_byte(pdev, 0x69, ctl);
466+ pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
467+ ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
468+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
469+ }
470 }
471
472 static void rtl_hw_start_8168(struct net_device *dev)
473@@ -2081,7 +2093,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
474
475 RTL_W16(CPlusCmd, tp->cp_cmd);
476
477- rtl_tx_performance_tweak(pdev, 0x50);
478+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
479
480 RTL_W16(IntrMitigate, 0x5151);
481
482@@ -2114,8 +2126,12 @@ static void rtl_hw_start_8101(struct net_device *dev)
483
484 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
485 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
486- pci_write_config_word(pdev, 0x68, 0x00);
487- pci_write_config_word(pdev, 0x69, 0x08);
488+ int cap = tp->pcie_cap;
489+
490+ if (cap) {
491+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
492+ PCI_EXP_DEVCTL_NOSNOOP_EN);
493+ }
494 }
495
496 RTL_W8(Cfg9346, Cfg9346_Unlock);
497--
4981.5.3.3
499
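The force value used above is easier to read with the DEVCTL encoding spelled out: bits 14:12 of the PCI Express Device Control register hold Max_Read_Request_Size as a power-of-two multiple of 128 bytes, so 0x5 << MAX_READ_REQUEST_SHIFT requests 128 << 5 = 4096 bytes. A small stand-alone sketch of the decode (the helper name is illustrative):

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_DEVCTL_READRQ	0x7000	/* bits 14:12 of Device Control */
#define MAX_READ_REQUEST_SHIFT	12

/* Decode a DEVCTL value into the max read request size in bytes. */
static unsigned int max_read_request_bytes(uint16_t devctl)
{
	unsigned int field = (devctl & PCI_EXP_DEVCTL_READRQ)
				>> MAX_READ_REQUEST_SHIFT;

	return 128u << field;	/* 0 -> 128B ... 5 -> 4096B */
}

int main(void)
{
	uint16_t devctl = 0x5 << MAX_READ_REQUEST_SHIFT;	/* as forced above */

	printf("%u bytes\n", max_read_request_bytes(devctl));	/* prints 4096 */
	return 0;
}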
500From ba648bdcbca93084360d348eb43dde4b19b2489e Mon Sep 17 00:00:00 2001
501From: Francois Romieu <romieu@fr.zoreil.com>
502Date: Sun, 1 Jun 2008 22:37:49 +0200
503Subject: [PATCH] r8169: add 8168/8101 registers description
504
505Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
506Cc: Edward Hsu <edward_hsu@realtek.com.tw>
507---
508 drivers/net/r8169.c | 47 +++++++++++++++++++++++++++++++++++++++++++----
509 1 files changed, 43 insertions(+), 4 deletions(-)
510
511diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
512index 5c00522..0b8db03 100644
513--- a/drivers/net/r8169.c
514+++ b/drivers/net/r8169.c
515@@ -197,9 +197,6 @@ enum rtl_registers {
516 Config5 = 0x56,
517 MultiIntr = 0x5c,
518 PHYAR = 0x60,
519- TBICSR = 0x64,
520- TBI_ANAR = 0x68,
521- TBI_LPAR = 0x6a,
522 PHYstatus = 0x6c,
523 RxMaxSize = 0xda,
524 CPlusCmd = 0xe0,
525@@ -213,6 +210,32 @@ enum rtl_registers {
526 FuncForceEvent = 0xfc,
527 };
528
529+enum rtl8110_registers {
530+ TBICSR = 0x64,
531+ TBI_ANAR = 0x68,
532+ TBI_LPAR = 0x6a,
533+};
534+
535+enum rtl8168_8101_registers {
536+ CSIDR = 0x64,
537+ CSIAR = 0x68,
538+#define CSIAR_FLAG 0x80000000
539+#define CSIAR_WRITE_CMD 0x80000000
540+#define CSIAR_BYTE_ENABLE 0x0f
541+#define CSIAR_BYTE_ENABLE_SHIFT 12
542+#define CSIAR_ADDR_MASK 0x0fff
543+
544+ EPHYAR = 0x80,
545+#define EPHYAR_FLAG 0x80000000
546+#define EPHYAR_WRITE_CMD 0x80000000
547+#define EPHYAR_REG_MASK 0x1f
548+#define EPHYAR_REG_SHIFT 16
549+#define EPHYAR_DATA_MASK 0xffff
550+ DBG_REG = 0xd1,
551+#define FIX_NAK_1 (1 << 4)
552+#define FIX_NAK_2 (1 << 3)
553+};
554+
555 enum rtl_register_content {
556 /* InterruptStatusBits */
557 SYSErr = 0x8000,
558@@ -266,7 +289,13 @@ enum rtl_register_content {
559 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
560
561 /* Config1 register p.24 */
562+ LEDS1 = (1 << 7),
563+ LEDS0 = (1 << 6),
564 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
565+ Speed_down = (1 << 4),
566+ MEMMAP = (1 << 3),
567+ IOMAP = (1 << 2),
568+ VPD = (1 << 1),
569 PMEnable = (1 << 0), /* Power Management Enable */
570
571 /* Config2 register p. 25 */
572@@ -276,6 +305,7 @@ enum rtl_register_content {
573 /* Config3 register p.25 */
574 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
575 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
576+ Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
577
578 /* Config5 register p.27 */
579 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
580@@ -293,7 +323,16 @@ enum rtl_register_content {
581 TBINwComplete = 0x01000000,
582
583 /* CPlusCmd p.31 */
584- PktCntrDisable = (1 << 7), // 8168
585+ EnableBist = (1 << 15), // 8168 8101
586+ Mac_dbgo_oe = (1 << 14), // 8168 8101
587+ Normal_mode = (1 << 13), // unused
588+ Force_half_dup = (1 << 12), // 8168 8101
589+ Force_rxflow_en = (1 << 11), // 8168 8101
590+ Force_txflow_en = (1 << 10), // 8168 8101
591+ Cxpl_dbg_sel = (1 << 9), // 8168 8101
592+ ASF = (1 << 8), // 8168 8101
593+ PktCntrDisable = (1 << 7), // 8168 8101
594+ Mac_dbgo_sel = 0x001c, // 8168
595 RxVlan = (1 << 6),
596 RxChkSum = (1 << 5),
597 PCIDAC = (1 << 4),
598--
5991.5.3.3
600
601From 61650c9e3d637b0990d9f26b1421ac4b55f5c744 Mon Sep 17 00:00:00 2001
602From: Francois Romieu <romieu@fr.zoreil.com>
603Date: Sat, 2 Aug 2008 20:44:13 +0200
604Subject: [PATCH] r8169: add hw start helpers for the 8168 and the 8101
605
606This commit triggers three 'defined but not used' warnings, but
607I prefer not to tie these helpers to a specific change in
608the hw start sequences of the 8168 or the 8101.
609
610Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
611Cc: Edward Hsu <edward_hsu@realtek.com.tw>
612---
613 drivers/net/r8169.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++
614 1 files changed, 96 insertions(+), 0 deletions(-)
615
616diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
617index 0b8db03..52eba5c 100644
618--- a/drivers/net/r8169.c
619+++ b/drivers/net/r8169.c
620@@ -526,6 +526,11 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
621 return value;
622 }
623
624+static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
625+{
626+ mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
627+}
628+
629 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
630 int val)
631 {
632@@ -543,6 +548,72 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
633 return mdio_read(ioaddr, location);
634 }
635
636+static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
637+{
638+ unsigned int i;
639+
640+ RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
641+ (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
642+
643+ for (i = 0; i < 100; i++) {
644+ if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
645+ break;
646+ udelay(10);
647+ }
648+}
649+
650+static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
651+{
652+ u16 value = 0xffff;
653+ unsigned int i;
654+
655+ RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
656+
657+ for (i = 0; i < 100; i++) {
658+ if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
659+ value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
660+ break;
661+ }
662+ udelay(10);
663+ }
664+
665+ return value;
666+}
667+
668+static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
669+{
670+ unsigned int i;
671+
672+ RTL_W32(CSIDR, value);
673+ RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
674+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
675+
676+ for (i = 0; i < 100; i++) {
677+ if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
678+ break;
679+ udelay(10);
680+ }
681+}
682+
683+static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
684+{
685+ u32 value = ~0x00;
686+ unsigned int i;
687+
688+ RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
689+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
690+
691+ for (i = 0; i < 100; i++) {
692+ if (RTL_R32(CSIAR) & CSIAR_FLAG) {
693+ value = RTL_R32(CSIDR);
694+ break;
695+ }
696+ udelay(10);
697+ }
698+
699+ return value;
700+}
701+
702 static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
703 {
704 RTL_W16(IntrMask, 0x0000);
705@@ -2114,6 +2185,31 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
706 }
707 }
708
709+static void rtl_csi_access_enable(void __iomem *ioaddr)
710+{
711+ u32 csi;
712+
713+ csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
714+ rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
715+}
716+
717+struct ephy_info {
718+ unsigned int offset;
719+ u16 mask;
720+ u16 bits;
721+};
722+
723+static void rtl_ephy_init(void __iomem *ioaddr, struct ephy_info *e, int len)
724+{
725+ u16 w;
726+
727+ while (len-- > 0) {
728+ w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
729+ rtl_ephy_write(ioaddr, e->offset, w);
730+ e++;
731+ }
732+}
733+
734 static void rtl_hw_start_8168(struct net_device *dev)
735 {
736 struct rtl8169_private *tp = netdev_priv(dev);
737--
7381.5.3.3
739
740From 81fbfc404f2a13646bee46fa98545c0023e3a67a Mon Sep 17 00:00:00 2001
741From: Francois Romieu <romieu@fr.zoreil.com>
742Date: Sat, 2 Aug 2008 21:08:49 +0200
743Subject: [PATCH] r8169: additional 8101 and 8102 support
744
745Signed-off-by: Ivan Vecera <ivecera@redhat.com>
746Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
747Cc: Edward Hsu <edward_hsu@realtek.com.tw>
748---
749 drivers/net/r8169.c | 124 ++++++++++++++++++++++++++++++++++++++++++++++++++-
750 1 files changed, 122 insertions(+), 2 deletions(-)
751
752diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
753index 52eba5c..f28c202 100644
754--- a/drivers/net/r8169.c
755+++ b/drivers/net/r8169.c
756@@ -96,6 +96,10 @@ enum mac_version {
757 RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
758 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
759 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
760+ RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
761+ RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
762+ RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
763+ RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
764 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
765 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
766 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
767@@ -122,6 +126,10 @@ static const struct {
768 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
769 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
770 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
771+ _R("RTL8102e", RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
772+ _R("RTL8102e", RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
773+ _R("RTL8102e", RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
774+ _R("RTL8101e", RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
775 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
776 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
777 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
778@@ -837,8 +845,12 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
779 }
780 }
781
782- /* The 8100e/8101e do Fast Ethernet only. */
783- if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
784+ /* The 8100e/8101e/8102e do Fast Ethernet only. */
785+ if ((tp->mac_version == RTL_GIGA_MAC_VER_07) ||
786+ (tp->mac_version == RTL_GIGA_MAC_VER_08) ||
787+ (tp->mac_version == RTL_GIGA_MAC_VER_09) ||
788+ (tp->mac_version == RTL_GIGA_MAC_VER_10) ||
789+ (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
790 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
791 (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
792 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
793@@ -1212,8 +1224,17 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
794 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
795
796 /* 8101 family. */
797+ { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
798+ { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
799+ { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
800+ { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
801+ { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
802+ { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
803 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
804+ { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
805 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
806+ { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
807+ { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
808 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
809 /* FIXME: where did these entries come from ? -- FR */
810 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
811@@ -1375,6 +1396,22 @@ static void rtl8168cx_hw_phy_config(void __iomem *ioaddr)
812 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
813 }
814
815+static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
816+{
817+ struct phy_reg phy_reg_init[] = {
818+ { 0x1f, 0x0003 },
819+ { 0x08, 0x441d },
820+ { 0x01, 0x9100 },
821+ { 0x1f, 0x0000 }
822+ };
823+
824+ mdio_write(ioaddr, 0x1f, 0x0000);
825+ mdio_patch(ioaddr, 0x11, 1 << 12);
826+ mdio_patch(ioaddr, 0x19, 1 << 13);
827+
828+ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
829+}
830+
831 static void rtl_hw_phy_config(struct net_device *dev)
832 {
833 struct rtl8169_private *tp = netdev_priv(dev);
834@@ -1392,6 +1429,11 @@ static void rtl_hw_phy_config(struct net_device *dev)
835 case RTL_GIGA_MAC_VER_04:
836 rtl8169sb_hw_phy_config(ioaddr);
837 break;
838+ case RTL_GIGA_MAC_VER_07:
839+ case RTL_GIGA_MAC_VER_08:
840+ case RTL_GIGA_MAC_VER_09:
841+ rtl8102e_hw_phy_config(ioaddr);
842+ break;
843 case RTL_GIGA_MAC_VER_18:
844 rtl8168cp_hw_phy_config(ioaddr);
845 break;
846@@ -2253,6 +2295,70 @@ static void rtl_hw_start_8168(struct net_device *dev)
847 RTL_W16(IntrMask, tp->intr_event);
848 }
849
850+#define R810X_CPCMD_QUIRK_MASK (\
851+ EnableBist | \
852+ Mac_dbgo_oe | \
853+ Force_half_dup | \
854+	Force_rxflow_en | \
855+ Force_txflow_en | \
856+ Cxpl_dbg_sel | \
857+ ASF | \
858+ PktCntrDisable | \
859+ PCIDAC | \
860+ PCIMulRW)
861+
862+static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
863+{
864+ static struct ephy_info e_info_8102e_1[] = {
865+ { 0x01, 0, 0x6e65 },
866+ { 0x02, 0, 0x091f },
867+ { 0x03, 0, 0xc2f9 },
868+ { 0x06, 0, 0xafb5 },
869+ { 0x07, 0, 0x0e00 },
870+ { 0x19, 0, 0xec80 },
871+ { 0x01, 0, 0x2e65 },
872+ { 0x01, 0, 0x6e65 }
873+ };
874+ u8 cfg1;
875+
876+ rtl_csi_access_enable(ioaddr);
877+
878+ RTL_W8(DBG_REG, FIX_NAK_1);
879+
880+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
881+
882+ RTL_W8(Config1,
883+ LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
884+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
885+
886+ cfg1 = RTL_R8(Config1);
887+ if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
888+ RTL_W8(Config1, cfg1 & ~LEDS0);
889+
890+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
891+
892+ rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
893+}
894+
895+static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
896+{
897+ rtl_csi_access_enable(ioaddr);
898+
899+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
900+
901+ RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
902+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
903+
904+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
905+}
906+
907+static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
908+{
909+ rtl_hw_start_8102e_2(ioaddr, pdev);
910+
911+ rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
912+}
913+
914 static void rtl_hw_start_8101(struct net_device *dev)
915 {
916 struct rtl8169_private *tp = netdev_priv(dev);
917@@ -2269,6 +2375,20 @@ static void rtl_hw_start_8101(struct net_device *dev)
918 }
919 }
920
921+ switch (tp->mac_version) {
922+ case RTL_GIGA_MAC_VER_07:
923+ rtl_hw_start_8102e_1(ioaddr, pdev);
924+ break;
925+
926+ case RTL_GIGA_MAC_VER_08:
927+ rtl_hw_start_8102e_3(ioaddr, pdev);
928+ break;
929+
930+ case RTL_GIGA_MAC_VER_09:
931+ rtl_hw_start_8102e_2(ioaddr, pdev);
932+ break;
933+ }
934+
935 RTL_W8(Cfg9346, Cfg9346_Unlock);
936
937 RTL_W8(EarlyTxThres, EarlyTxThld);
938--
9391.5.3.3
940
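The new 8101-family entries above follow the driver's usual detection scheme: rtl8169_get_mac_version masks the TxConfig value and compares it against a { mask, value, version } table where the first match wins, so the more specific 0x7cf00000 masks must precede the looser 0x7c800000 fallbacks. A minimal sketch of that lookup, with an abbreviated table and illustrative names:

#include <stdio.h>
#include <stdint.h>

struct mac_info {
	uint32_t mask;
	uint32_t val;
	int	 ver;
};

/* Abbreviated version of the table the patch extends; first match wins. */
static const struct mac_info tbl[] = {
	{ 0x7cf00000, 0x34a00000,  9 },	/* RTL_GIGA_MAC_VER_09 */
	{ 0x7cf00000, 0x34300000, 10 },	/* RTL_GIGA_MAC_VER_10 */
	{ 0x7c800000, 0x34800000,  9 },	/* looser fallback     */
	{ 0x00000000, 0x00000000, -1 },	/* catch-all: unknown  */
};

static int mac_version(uint32_t txconfig)
{
	const struct mac_info *p = tbl;

	while ((txconfig & p->mask) != p->val)
		p++;
	return p->ver;
}

int main(void)
{
	printf("ver %d\n", mac_version(0x34a00000));	/* -> 9 */
	return 0;
}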
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0042-intelfb-945gme.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0042-intelfb-945gme.patch
deleted file mode 100644
index 0f74d47bf1..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0042-intelfb-945gme.patch
+++ /dev/null
@@ -1,154 +0,0 @@
1The following patch adds support for Intel's 945GME graphics chip to
2the intelfb driver. I have assumed that the 945GME is identical to the
3already-supported 945GM apart from its PCI IDs; this is based on a quick
4look at the X driver for these chips, which seems to treat them
5identically.
6
7Signed-off-by: Phil Endecott <spam_from_intelfb@chezphil.org>
8
9---
10
11The 945GME is used in the ASUS Eee 901, and I coded this in the hope that
12I'd be able to use it to get a console at the native 1024x600 resolution,
13which is not known to the BIOS. I realised too late that the intelfb
14driver does not support mode changing on laptops, so it won't be of any
15use to me. But rather than throw it away, I will post it here as
16essentially "untested"; maybe someone who knows more about this driver,
17and with more useful hardware to test on, can pick it up.
18
19---
20 Documentation/fb/intelfb.txt | 1 +
21 drivers/video/intelfb/intelfb.h | 7 +++++--
22 drivers/video/intelfb/intelfb_i2c.c | 1 +
23 drivers/video/intelfb/intelfbdrv.c | 7 ++++++-
24 drivers/video/intelfb/intelfbhw.c | 7 +++++++
25 5 files changed, 20 insertions(+), 3 deletions(-)
26
27Index: linux-2.6.27/Documentation/fb/intelfb.txt
28===================================================================
29--- linux-2.6.27.orig/Documentation/fb/intelfb.txt 2008-10-14 16:54:54.000000000 +0200
30+++ linux-2.6.27/Documentation/fb/intelfb.txt 2008-10-14 17:05:36.000000000 +0200
31@@ -14,6 +14,7 @@ graphics devices. These would include:
32 Intel 915GM
33 Intel 945G
34 Intel 945GM
35+ Intel 945GME
36 Intel 965G
37 Intel 965GM
38
39Index: linux-2.6.27/drivers/video/intelfb/intelfb.h
40===================================================================
41--- linux-2.6.27.orig/drivers/video/intelfb/intelfb.h 2008-10-14 16:55:37.000000000 +0200
42+++ linux-2.6.27/drivers/video/intelfb/intelfb.h 2008-10-14 17:05:36.000000000 +0200
43@@ -12,9 +12,9 @@
44 #endif
45
46 /*** Version/name ***/
47-#define INTELFB_VERSION "0.9.5"
48+#define INTELFB_VERSION "0.9.6"
49 #define INTELFB_MODULE_NAME "intelfb"
50-#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM"
51+#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/945GME/965G/965GM"
52
53
54 /*** Debug/feature defines ***/
55@@ -58,6 +58,7 @@
56 #define PCI_DEVICE_ID_INTEL_915GM 0x2592
57 #define PCI_DEVICE_ID_INTEL_945G 0x2772
58 #define PCI_DEVICE_ID_INTEL_945GM 0x27A2
59+#define PCI_DEVICE_ID_INTEL_945GME 0x27AE
60 #define PCI_DEVICE_ID_INTEL_965G 0x29A2
61 #define PCI_DEVICE_ID_INTEL_965GM 0x2A02
62
63@@ -160,6 +161,7 @@ enum intel_chips {
64 INTEL_915GM,
65 INTEL_945G,
66 INTEL_945GM,
67+ INTEL_945GME,
68 INTEL_965G,
69 INTEL_965GM,
70 };
71@@ -363,6 +365,7 @@ struct intelfb_info {
72 ((dinfo)->chipset == INTEL_915GM) || \
73 ((dinfo)->chipset == INTEL_945G) || \
74 ((dinfo)->chipset == INTEL_945GM) || \
75+ ((dinfo)->chipset == INTEL_945GME) || \
76 ((dinfo)->chipset == INTEL_965G) || \
77 ((dinfo)->chipset == INTEL_965GM))
78
79Index: linux-2.6.27/drivers/video/intelfb/intelfb_i2c.c
80===================================================================
81--- linux-2.6.27.orig/drivers/video/intelfb/intelfb_i2c.c 2008-10-14 16:55:37.000000000 +0200
82+++ linux-2.6.27/drivers/video/intelfb/intelfb_i2c.c 2008-10-14 17:05:36.000000000 +0200
83@@ -171,6 +171,7 @@ void intelfb_create_i2c_busses(struct in
84 /* has some LVDS + tv-out */
85 case INTEL_945G:
86 case INTEL_945GM:
87+ case INTEL_945GME:
88 case INTEL_965G:
89 case INTEL_965GM:
90 /* SDVO ports have a single control bus - 2 devices */
91Index: linux-2.6.27/drivers/video/intelfb/intelfbdrv.c
92===================================================================
93--- linux-2.6.27.orig/drivers/video/intelfb/intelfbdrv.c 2008-10-14 16:55:37.000000000 +0200
94+++ linux-2.6.27/drivers/video/intelfb/intelfbdrv.c 2008-10-14 17:05:36.000000000 +0200
95@@ -2,7 +2,7 @@
96 * intelfb
97 *
98 * Linux framebuffer driver for Intel(R) 830M/845G/852GM/855GM/865G/915G/915GM/
99- * 945G/945GM/965G/965GM integrated graphics chips.
100+ * 945G/945GM/945GME/965G/965GM integrated graphics chips.
101 *
102 * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
103 * 2004 Sylvain Meyer
104@@ -102,6 +102,9 @@
105 *
106 * 04/2008 - Version 0.9.5
107 * Add support for 965G/965GM. (Maik Broemme <mbroemme@plusserver.de>)
108+ *
109+ * 08/2008 - Version 0.9.6
110+ * Add support for 945GME. (Phil Endecott <spam_from_intelfb@chezphil.org>)
111 */
112
113 #include <linux/module.h>
114@@ -183,6 +186,7 @@ static struct pci_device_id intelfb_pci_
115 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM },
116 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G },
117 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GM },
118+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GME, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GME },
119 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965G },
120 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965GM },
121 { 0, }
122@@ -555,6 +559,7 @@ static int __devinit intelfb_pci_registe
123 (ent->device == PCI_DEVICE_ID_INTEL_915GM) ||
124 (ent->device == PCI_DEVICE_ID_INTEL_945G) ||
125 (ent->device == PCI_DEVICE_ID_INTEL_945GM) ||
126+ (ent->device == PCI_DEVICE_ID_INTEL_945GME) ||
127 (ent->device == PCI_DEVICE_ID_INTEL_965G) ||
128 (ent->device == PCI_DEVICE_ID_INTEL_965GM)) {
129
130Index: linux-2.6.27/drivers/video/intelfb/intelfbhw.c
131===================================================================
132--- linux-2.6.27.orig/drivers/video/intelfb/intelfbhw.c 2008-10-14 16:55:37.000000000 +0200
133+++ linux-2.6.27/drivers/video/intelfb/intelfbhw.c 2008-10-14 17:05:36.000000000 +0200
134@@ -143,6 +143,12 @@ int intelfbhw_get_chipset(struct pci_dev
135 dinfo->mobile = 1;
136 dinfo->pll_index = PLLS_I9xx;
137 return 0;
138+ case PCI_DEVICE_ID_INTEL_945GME:
139+ dinfo->name = "Intel(R) 945GME";
140+ dinfo->chipset = INTEL_945GME;
141+ dinfo->mobile = 1;
142+ dinfo->pll_index = PLLS_I9xx;
143+ return 0;
144 case PCI_DEVICE_ID_INTEL_965G:
145 dinfo->name = "Intel(R) 965G";
146 dinfo->chipset = INTEL_965G;
147@@ -186,6 +192,7 @@ int intelfbhw_get_memory(struct pci_dev
148 case PCI_DEVICE_ID_INTEL_915GM:
149 case PCI_DEVICE_ID_INTEL_945G:
150 case PCI_DEVICE_ID_INTEL_945GM:
151+ case PCI_DEVICE_ID_INTEL_945GME:
152 case PCI_DEVICE_ID_INTEL_965G:
153 case PCI_DEVICE_ID_INTEL_965GM:
154 /* 915, 945 and 965 chipsets support a 256MB aperture.
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0043-superreadahead-patch.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0043-superreadahead-patch.patch
deleted file mode 100644
index 101c100dd0..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0043-superreadahead-patch.patch
+++ /dev/null
@@ -1,65 +0,0 @@
1From: Arjan van de Ven <arjan@linux.intel.com>
2Date: Sun, 21 Sep 2008 11:58:27 -0700
3Subject: [PATCH] superreadahead patch
4
5---
6 fs/ext3/ioctl.c | 3 +++
7 fs/ext3/super.c | 1 +
8 include/linux/ext3_fs.h | 1 +
9 include/linux/fs.h | 2 ++
10 4 files changed, 7 insertions(+), 0 deletions(-)
11
12diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
13index 0d0c701..7e62d7d 100644
14--- a/fs/ext3/ioctl.c
15+++ b/fs/ext3/ioctl.c
16@@ -286,6 +286,9 @@ group_add_out:
17 mnt_drop_write(filp->f_path.mnt);
18 return err;
19 }
20+ case EXT3_IOC_INODE_JIFFIES: {
21+ return inode->created_when;
22+ }
23
24
25 default:
26diff --git a/fs/ext3/super.c b/fs/ext3/super.c
27index 2845425..6a896a4 100644
28--- a/fs/ext3/super.c
29+++ b/fs/ext3/super.c
30@@ -456,6 +456,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
31 #endif
32 ei->i_block_alloc_info = NULL;
33 ei->vfs_inode.i_version = 1;
34+ ei->vfs_inode.created_when = jiffies;
35 return &ei->vfs_inode;
36 }
37
38diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
39index 36c5403..b409fa7 100644
40--- a/include/linux/ext3_fs.h
41+++ b/include/linux/ext3_fs.h
42@@ -225,6 +225,7 @@ struct ext3_new_group_data {
43 #endif
44 #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
45 #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
46+#define EXT3_IOC_INODE_JIFFIES _IOR('f', 19, long)
47
48 /*
49 * ioctl commands in 32 bit emulation
50diff --git a/include/linux/fs.h b/include/linux/fs.h
51index c6455da..4ac846d 100644
52--- a/include/linux/fs.h
53+++ b/include/linux/fs.h
54@@ -655,6 +655,8 @@ struct inode {
55 void *i_security;
56 #endif
57 void *i_private; /* fs or device private pointer */
58+
59+ unsigned long created_when; /* jiffies of creation time */
60 };
61
62 /*
63--
641.5.5.1
65
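For completeness, the ioctl added above would be exercised from userspace like any other ext3 ioctl; note the patch hands the jiffies value back directly as the ioctl return value rather than through a pointer argument. A hypothetical caller (only the _IOR('f', 19, long) definition comes from the patch; everything else here is illustrative):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* Mirrors the definition the patch adds to ext3_fs.h. */
#define EXT3_IOC_INODE_JIFFIES	_IOR('f', 19, long)

int main(int argc, char **argv)
{
	long when;
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0)
		return 1;
	/* The patch returns the inode's creation jiffies from the ioctl. */
	when = ioctl(fd, EXT3_IOC_INODE_JIFFIES);
	printf("inode created at jiffies %ld\n", when);
	close(fd);
	return 0;
}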
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow
deleted file mode 100644
index 30c1656220..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow
+++ /dev/null
@@ -1,3137 +0,0 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27
4# Wed Jan 14 11:45:36 2009
5#
6# CONFIG_64BIT is not set
7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set
9CONFIG_X86=y
10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11# CONFIG_GENERIC_LOCKBREAK is not set
12CONFIG_GENERIC_TIME=y
13CONFIG_GENERIC_CMOS_UPDATE=y
14CONFIG_CLOCKSOURCE_WATCHDOG=y
15CONFIG_GENERIC_CLOCKEVENTS=y
16CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
17CONFIG_LOCKDEP_SUPPORT=y
18CONFIG_STACKTRACE_SUPPORT=y
19CONFIG_HAVE_LATENCYTOP_SUPPORT=y
20CONFIG_FAST_CMPXCHG_LOCAL=y
21CONFIG_MMU=y
22CONFIG_ZONE_DMA=y
23CONFIG_GENERIC_ISA_DMA=y
24CONFIG_GENERIC_IOMAP=y
25CONFIG_GENERIC_BUG=y
26CONFIG_GENERIC_HWEIGHT=y
27# CONFIG_GENERIC_GPIO is not set
28CONFIG_ARCH_MAY_HAVE_PC_FDC=y
29# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
30CONFIG_RWSEM_XCHGADD_ALGORITHM=y
31# CONFIG_ARCH_HAS_ILOG2_U32 is not set
32# CONFIG_ARCH_HAS_ILOG2_U64 is not set
33CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
34CONFIG_GENERIC_CALIBRATE_DELAY=y
35# CONFIG_GENERIC_TIME_VSYSCALL is not set
36CONFIG_ARCH_HAS_CPU_RELAX=y
37CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
38CONFIG_HAVE_SETUP_PER_CPU_AREA=y
39# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
40CONFIG_ARCH_HIBERNATION_POSSIBLE=y
41CONFIG_ARCH_SUSPEND_POSSIBLE=y
42# CONFIG_ZONE_DMA32 is not set
43CONFIG_ARCH_POPULATES_NODE_MAP=y
44# CONFIG_AUDIT_ARCH is not set
45CONFIG_ARCH_SUPPORTS_AOUT=y
46CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
47CONFIG_GENERIC_HARDIRQS=y
48CONFIG_GENERIC_IRQ_PROBE=y
49CONFIG_GENERIC_PENDING_IRQ=y
50CONFIG_X86_SMP=y
51CONFIG_X86_32_SMP=y
52CONFIG_X86_HT=y
53CONFIG_X86_BIOS_REBOOT=y
54CONFIG_X86_TRAMPOLINE=y
55CONFIG_KTIME_SCALAR=y
56CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
57
58#
59# General setup
60#
61CONFIG_EXPERIMENTAL=y
62CONFIG_LOCK_KERNEL=y
63CONFIG_INIT_ENV_ARG_LIMIT=32
64CONFIG_LOCALVERSION="-default"
65# CONFIG_LOCALVERSION_AUTO is not set
66CONFIG_SWAP=y
67CONFIG_SYSVIPC=y
68CONFIG_SYSVIPC_SYSCTL=y
69CONFIG_POSIX_MQUEUE=y
70CONFIG_BSD_PROCESS_ACCT=y
71CONFIG_BSD_PROCESS_ACCT_V3=y
72CONFIG_TASKSTATS=y
73CONFIG_TASK_DELAY_ACCT=y
74# CONFIG_TASK_XACCT is not set
75CONFIG_AUDIT=y
76CONFIG_AUDITSYSCALL=y
77CONFIG_AUDIT_TREE=y
78CONFIG_IKCONFIG=y
79CONFIG_IKCONFIG_PROC=y
80CONFIG_LOG_BUF_SHIFT=15
81# CONFIG_CGROUPS is not set
82CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
83# CONFIG_GROUP_SCHED is not set
84CONFIG_SYSFS_DEPRECATED=y
85CONFIG_SYSFS_DEPRECATED_V2=y
86CONFIG_RELAY=y
87CONFIG_NAMESPACES=y
88# CONFIG_UTS_NS is not set
89# CONFIG_IPC_NS is not set
90# CONFIG_USER_NS is not set
91# CONFIG_PID_NS is not set
92CONFIG_BLK_DEV_INITRD=y
93CONFIG_INITRAMFS_SOURCE=""
94CONFIG_CC_OPTIMIZE_FOR_SIZE=y
95# CONFIG_FASTBOOT is not set
96CONFIG_SYSCTL=y
97# CONFIG_EMBEDDED is not set
98CONFIG_UID16=y
99CONFIG_SYSCTL_SYSCALL=y
100CONFIG_KALLSYMS=y
101CONFIG_KALLSYMS_ALL=y
102# CONFIG_KALLSYMS_EXTRA_PASS is not set
103CONFIG_HOTPLUG=y
104CONFIG_PRINTK=y
105CONFIG_BUG=y
106CONFIG_ELF_CORE=y
107CONFIG_PCSPKR_PLATFORM=y
108CONFIG_COMPAT_BRK=y
109CONFIG_BASE_FULL=y
110CONFIG_FUTEX=y
111CONFIG_ANON_INODES=y
112CONFIG_EPOLL=y
113CONFIG_SIGNALFD=y
114CONFIG_TIMERFD=y
115CONFIG_EVENTFD=y
116CONFIG_SHMEM=y
117CONFIG_VM_EVENT_COUNTERS=y
118CONFIG_SLAB=y
119# CONFIG_SLUB is not set
120# CONFIG_SLOB is not set
121CONFIG_PROFILING=y
122# CONFIG_MARKERS is not set
123# CONFIG_OPROFILE is not set
124CONFIG_HAVE_OPROFILE=y
125# CONFIG_KPROBES is not set
126CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
127CONFIG_HAVE_IOREMAP_PROT=y
128CONFIG_HAVE_KPROBES=y
129CONFIG_HAVE_KRETPROBES=y
130# CONFIG_HAVE_ARCH_TRACEHOOK is not set
131# CONFIG_HAVE_DMA_ATTRS is not set
132CONFIG_USE_GENERIC_SMP_HELPERS=y
133# CONFIG_HAVE_CLK is not set
134CONFIG_PROC_PAGE_MONITOR=y
135CONFIG_HAVE_GENERIC_DMA_COHERENT=y
136CONFIG_SLABINFO=y
137CONFIG_RT_MUTEXES=y
138# CONFIG_TINY_SHMEM is not set
139CONFIG_BASE_SMALL=0
140CONFIG_MODULES=y
141# CONFIG_MODULE_FORCE_LOAD is not set
142CONFIG_MODULE_UNLOAD=y
143CONFIG_MODULE_FORCE_UNLOAD=y
144CONFIG_MODVERSIONS=y
145CONFIG_MODULE_SRCVERSION_ALL=y
146CONFIG_KMOD=y
147CONFIG_STOP_MACHINE=y
148CONFIG_BLOCK=y
149CONFIG_LBD=y
150CONFIG_BLK_DEV_IO_TRACE=y
151CONFIG_LSF=y
152# CONFIG_BLK_DEV_BSG is not set
153# CONFIG_BLK_DEV_INTEGRITY is not set
154
155#
156# IO Schedulers
157#
158CONFIG_IOSCHED_NOOP=y
159CONFIG_IOSCHED_AS=y
160CONFIG_IOSCHED_DEADLINE=y
161CONFIG_IOSCHED_CFQ=y
162# CONFIG_DEFAULT_AS is not set
163# CONFIG_DEFAULT_DEADLINE is not set
164CONFIG_DEFAULT_CFQ=y
165# CONFIG_DEFAULT_NOOP is not set
166CONFIG_DEFAULT_IOSCHED="cfq"
167CONFIG_CLASSIC_RCU=y
168
169#
170# Processor type and features
171#
172CONFIG_TICK_ONESHOT=y
173CONFIG_NO_HZ=y
174CONFIG_HIGH_RES_TIMERS=y
175CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
176CONFIG_SMP=y
177CONFIG_X86_FIND_SMP_CONFIG=y
178CONFIG_X86_MPPARSE=y
179# CONFIG_X86_PC is not set
180# CONFIG_X86_ELAN is not set
181# CONFIG_X86_VOYAGER is not set
182CONFIG_X86_GENERICARCH=y
183# CONFIG_X86_NUMAQ is not set
184# CONFIG_X86_SUMMIT is not set
185# CONFIG_X86_ES7000 is not set
186# CONFIG_X86_BIGSMP is not set
187# CONFIG_X86_VSMP is not set
188# CONFIG_X86_RDC321X is not set
189CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
190# CONFIG_PARAVIRT_GUEST is not set
191# CONFIG_MEMTEST is not set
192CONFIG_X86_CYCLONE_TIMER=y
193# CONFIG_M386 is not set
194# CONFIG_M486 is not set
195CONFIG_M586=y
196# CONFIG_M586TSC is not set
197# CONFIG_M586MMX is not set
198# CONFIG_M686 is not set
199# CONFIG_MPENTIUMII is not set
200# CONFIG_MPENTIUMIII is not set
201# CONFIG_MPENTIUMM is not set
202# CONFIG_MPENTIUM4 is not set
203# CONFIG_MK6 is not set
204# CONFIG_MK7 is not set
205# CONFIG_MK8 is not set
206# CONFIG_MCRUSOE is not set
207# CONFIG_MEFFICEON is not set
208# CONFIG_MWINCHIPC6 is not set
209# CONFIG_MWINCHIP2 is not set
210# CONFIG_MWINCHIP3D is not set
211# CONFIG_MGEODEGX1 is not set
212# CONFIG_MGEODE_LX is not set
213# CONFIG_MCYRIXIII is not set
214# CONFIG_MVIAC3_2 is not set
215# CONFIG_MVIAC7 is not set
216# CONFIG_MPSC is not set
217# CONFIG_MCORE2 is not set
218# CONFIG_GENERIC_CPU is not set
219CONFIG_X86_GENERIC=y
220CONFIG_X86_CPU=y
221CONFIG_X86_CMPXCHG=y
222CONFIG_X86_L1_CACHE_SHIFT=7
223CONFIG_X86_XADD=y
224CONFIG_X86_PPRO_FENCE=y
225CONFIG_X86_F00F_BUG=y
226CONFIG_X86_WP_WORKS_OK=y
227CONFIG_X86_INVLPG=y
228CONFIG_X86_BSWAP=y
229CONFIG_X86_POPAD_OK=y
230CONFIG_X86_ALIGNMENT_16=y
231CONFIG_X86_INTEL_USERCOPY=y
232CONFIG_X86_MINIMUM_CPU_FAMILY=4
233CONFIG_HPET_TIMER=y
234CONFIG_DMI=y
235# CONFIG_IOMMU_HELPER is not set
236CONFIG_NR_CPUS=8
237# CONFIG_SCHED_SMT is not set
238CONFIG_SCHED_MC=y
239# CONFIG_PREEMPT_NONE is not set
240CONFIG_PREEMPT_VOLUNTARY=y
241# CONFIG_PREEMPT is not set
242CONFIG_X86_LOCAL_APIC=y
243CONFIG_X86_IO_APIC=y
244CONFIG_X86_MCE=y
245CONFIG_X86_MCE_NONFATAL=y
246# CONFIG_X86_MCE_P4THERMAL is not set
247CONFIG_VM86=y
248# CONFIG_TOSHIBA is not set
249# CONFIG_I8K is not set
250CONFIG_X86_REBOOTFIXUPS=y
251CONFIG_MICROCODE=m
252CONFIG_MICROCODE_OLD_INTERFACE=y
253CONFIG_X86_MSR=m
254CONFIG_X86_CPUID=m
255# CONFIG_NOHIGHMEM is not set
256CONFIG_HIGHMEM4G=y
257# CONFIG_HIGHMEM64G is not set
258CONFIG_PAGE_OFFSET=0xC0000000
259CONFIG_HIGHMEM=y
260CONFIG_SELECT_MEMORY_MODEL=y
261CONFIG_FLATMEM_MANUAL=y
262# CONFIG_DISCONTIGMEM_MANUAL is not set
263# CONFIG_SPARSEMEM_MANUAL is not set
264CONFIG_FLATMEM=y
265CONFIG_FLAT_NODE_MEM_MAP=y
266# CONFIG_SPARSEMEM_STATIC is not set
267# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
268CONFIG_PAGEFLAGS_EXTENDED=y
269CONFIG_SPLIT_PTLOCK_CPUS=4
270# CONFIG_RESOURCES_64BIT is not set
271CONFIG_ZONE_DMA_FLAG=1
272CONFIG_BOUNCE=y
273CONFIG_VIRT_TO_BUS=y
274CONFIG_HIGHPTE=y
275# CONFIG_MATH_EMULATION is not set
276CONFIG_MTRR=y
277# CONFIG_MTRR_SANITIZER is not set
278# CONFIG_X86_PAT is not set
279CONFIG_EFI=y
280# CONFIG_IRQBALANCE is not set
281CONFIG_SECCOMP=y
282# CONFIG_HZ_100 is not set
283CONFIG_HZ_250=y
284# CONFIG_HZ_300 is not set
285# CONFIG_HZ_1000 is not set
286CONFIG_HZ=250
287CONFIG_SCHED_HRTICK=y
288CONFIG_KEXEC=y
289# CONFIG_CRASH_DUMP is not set
290# CONFIG_KEXEC_JUMP is not set
291CONFIG_PHYSICAL_START=0x100000
292# CONFIG_RELOCATABLE is not set
293CONFIG_PHYSICAL_ALIGN=0x100000
294CONFIG_HOTPLUG_CPU=y
295CONFIG_COMPAT_VDSO=y
296CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
297
298#
299# Power management options
300#
301CONFIG_PM=y
302# CONFIG_PM_DEBUG is not set
303CONFIG_PM_SLEEP_SMP=y
304CONFIG_PM_SLEEP=y
305CONFIG_SUSPEND=y
306CONFIG_SUSPEND_FREEZER=y
307CONFIG_HIBERNATION=y
308CONFIG_PM_STD_PARTITION=""
309CONFIG_ACPI=y
310CONFIG_ACPI_SLEEP=y
311CONFIG_ACPI_PROCFS=y
312CONFIG_ACPI_PROCFS_POWER=y
313CONFIG_ACPI_SYSFS_POWER=y
314CONFIG_ACPI_PROC_EVENT=y
315CONFIG_ACPI_AC=y
316CONFIG_ACPI_BATTERY=y
317CONFIG_ACPI_BUTTON=y
318CONFIG_ACPI_VIDEO=y
319CONFIG_ACPI_FAN=y
320CONFIG_ACPI_DOCK=y
321# CONFIG_ACPI_BAY is not set
322CONFIG_ACPI_PROCESSOR=y
323CONFIG_ACPI_HOTPLUG_CPU=y
324CONFIG_ACPI_THERMAL=y
325# CONFIG_ACPI_WMI is not set
326# CONFIG_ACPI_ASUS is not set
327# CONFIG_ACPI_TOSHIBA is not set
328CONFIG_ACPI_CUSTOM_DSDT_FILE=""
329# CONFIG_ACPI_CUSTOM_DSDT is not set
330CONFIG_ACPI_BLACKLIST_YEAR=2001
331# CONFIG_ACPI_DEBUG is not set
332CONFIG_ACPI_EC=y
333# CONFIG_ACPI_PCI_SLOT is not set
334CONFIG_ACPI_POWER=y
335CONFIG_ACPI_SYSTEM=y
336CONFIG_X86_PM_TIMER=y
337CONFIG_ACPI_CONTAINER=y
338CONFIG_ACPI_SBS=y
339CONFIG_X86_APM_BOOT=y
340CONFIG_APM=y
341# CONFIG_APM_IGNORE_USER_SUSPEND is not set
342CONFIG_APM_DO_ENABLE=y
343# CONFIG_APM_CPU_IDLE is not set
344CONFIG_APM_DISPLAY_BLANK=y
345CONFIG_APM_ALLOW_INTS=y
346# CONFIG_APM_REAL_MODE_POWER_OFF is not set
347
348#
349# CPU Frequency scaling
350#
351CONFIG_CPU_FREQ=y
352CONFIG_CPU_FREQ_TABLE=y
353# CONFIG_CPU_FREQ_DEBUG is not set
354CONFIG_CPU_FREQ_STAT=m
355CONFIG_CPU_FREQ_STAT_DETAILS=y
356# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
357# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
358CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
359# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
360# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
361CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
362CONFIG_CPU_FREQ_GOV_POWERSAVE=m
363CONFIG_CPU_FREQ_GOV_USERSPACE=y
364CONFIG_CPU_FREQ_GOV_ONDEMAND=m
365CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
366
367#
368# CPUFreq processor drivers
369#
370CONFIG_X86_ACPI_CPUFREQ=y
371# CONFIG_X86_POWERNOW_K6 is not set
372# CONFIG_X86_POWERNOW_K7 is not set
373# CONFIG_X86_POWERNOW_K8 is not set
374# CONFIG_X86_GX_SUSPMOD is not set
375CONFIG_X86_SPEEDSTEP_CENTRINO=m
376CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
377CONFIG_X86_SPEEDSTEP_ICH=m
378CONFIG_X86_SPEEDSTEP_SMI=m
379CONFIG_X86_P4_CLOCKMOD=m
380# CONFIG_X86_CPUFREQ_NFORCE2 is not set
381# CONFIG_X86_LONGRUN is not set
382# CONFIG_X86_LONGHAUL is not set
383# CONFIG_X86_E_POWERSAVER is not set
384
385#
386# shared options
387#
388# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
389CONFIG_X86_SPEEDSTEP_LIB=m
390CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y
391CONFIG_CPU_IDLE=y
392CONFIG_CPU_IDLE_GOV_LADDER=y
393CONFIG_CPU_IDLE_GOV_MENU=y
394
395#
396# Bus options (PCI etc.)
397#
398CONFIG_PCI=y
399# CONFIG_PCI_GOBIOS is not set
400# CONFIG_PCI_GOMMCONFIG is not set
401# CONFIG_PCI_GODIRECT is not set
402# CONFIG_PCI_GOOLPC is not set
403CONFIG_PCI_GOANY=y
404CONFIG_PCI_BIOS=y
405CONFIG_PCI_DIRECT=y
406CONFIG_PCI_MMCONFIG=y
407CONFIG_PCI_DOMAINS=y
408CONFIG_PCIEPORTBUS=y
409CONFIG_HOTPLUG_PCI_PCIE=m
410CONFIG_PCIEAER=y
411# CONFIG_PCIEASPM is not set
412CONFIG_ARCH_SUPPORTS_MSI=y
413CONFIG_PCI_MSI=y
414CONFIG_PCI_LEGACY=y
415# CONFIG_PCI_DEBUG is not set
416CONFIG_HT_IRQ=y
417CONFIG_ISA_DMA_API=y
418CONFIG_ISA=y
419# CONFIG_EISA is not set
420# CONFIG_MCA is not set
421# CONFIG_SCx200 is not set
422# CONFIG_OLPC is not set
423# CONFIG_PCCARD is not set
424CONFIG_HOTPLUG_PCI=m
425CONFIG_HOTPLUG_PCI_FAKE=m
426# CONFIG_HOTPLUG_PCI_COMPAQ is not set
427# CONFIG_HOTPLUG_PCI_IBM is not set
428CONFIG_HOTPLUG_PCI_ACPI=m
429CONFIG_HOTPLUG_PCI_ACPI_IBM=m
430CONFIG_HOTPLUG_PCI_CPCI=y
431CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
432CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
433CONFIG_HOTPLUG_PCI_SHPC=m
434
435#
436# Executable file formats / Emulations
437#
438CONFIG_BINFMT_ELF=y
439CONFIG_BINFMT_AOUT=m
440CONFIG_BINFMT_MISC=m
441CONFIG_NET=y
442
443#
444# Networking options
445#
446CONFIG_PACKET=m
447CONFIG_PACKET_MMAP=y
448CONFIG_UNIX=y
449CONFIG_XFRM=y
450CONFIG_XFRM_USER=m
451# CONFIG_XFRM_SUB_POLICY is not set
452# CONFIG_XFRM_MIGRATE is not set
453# CONFIG_XFRM_STATISTICS is not set
454CONFIG_XFRM_IPCOMP=m
455CONFIG_NET_KEY=m
456# CONFIG_NET_KEY_MIGRATE is not set
457CONFIG_INET=y
458CONFIG_IP_MULTICAST=y
459CONFIG_IP_ADVANCED_ROUTER=y
460CONFIG_ASK_IP_FIB_HASH=y
461# CONFIG_IP_FIB_TRIE is not set
462CONFIG_IP_FIB_HASH=y
463CONFIG_IP_MULTIPLE_TABLES=y
464CONFIG_IP_ROUTE_MULTIPATH=y
465CONFIG_IP_ROUTE_VERBOSE=y
466CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
# CONFIG_ARPD is not set
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_TUNNEL=m
CONFIG_INET_TUNNEL=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=y
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_INET_TCP_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BIC=m
CONFIG_TCP_CONG_CUBIC=m
CONFIG_TCP_CONG_WESTWOOD=m
CONFIG_TCP_CONG_HTCP=m
CONFIG_TCP_CONG_HSTCP=m
CONFIG_TCP_CONG_HYBLA=m
CONFIG_TCP_CONG_VEGAS=m
CONFIG_TCP_CONG_SCALABLE=m
CONFIG_TCP_CONG_LP=m
CONFIG_TCP_CONG_VENO=m
# CONFIG_TCP_CONG_YEAH is not set
# CONFIG_TCP_CONG_ILLINOIS is not set
# CONFIG_DEFAULT_BIC is not set
# CONFIG_DEFAULT_CUBIC is not set
# CONFIG_DEFAULT_HTCP is not set
# CONFIG_DEFAULT_VEGAS is not set
# CONFIG_DEFAULT_WESTWOOD is not set
CONFIG_DEFAULT_RENO=y
CONFIG_DEFAULT_TCP_CONG="reno"
# CONFIG_TCP_MD5SIG is not set
CONFIG_IP_VS=m
# CONFIG_IP_VS_DEBUG is not set
CONFIG_IP_VS_TAB_BITS=12

#
# IPVS transport protocol load balancing support
#
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
CONFIG_IP_VS_PROTO_ESP=y
CONFIG_IP_VS_PROTO_AH=y

#
# IPVS scheduler
#
CONFIG_IP_VS_RR=m
CONFIG_IP_VS_WRR=m
CONFIG_IP_VS_LC=m
CONFIG_IP_VS_WLC=m
CONFIG_IP_VS_LBLC=m
CONFIG_IP_VS_LBLCR=m
CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m

#
# IPVS application helper
#
CONFIG_IP_VS_FTP=m
CONFIG_IPV6=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
# CONFIG_IPV6_OPTIMISTIC_DAD is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
# CONFIG_IPV6_MIP6 is not set
CONFIG_INET6_XFRM_TUNNEL=m
CONFIG_INET6_TUNNEL=m
CONFIG_INET6_XFRM_MODE_TRANSPORT=m
CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
CONFIG_IPV6_SIT=m
CONFIG_IPV6_NDISC_NODETYPE=y
CONFIG_IPV6_TUNNEL=m
# CONFIG_IPV6_MULTIPLE_TABLES is not set
# CONFIG_IPV6_MROUTE is not set
# CONFIG_NETLABEL is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
CONFIG_NETFILTER_ADVANCED=y
CONFIG_BRIDGE_NETFILTER=y

#
# Core Netfilter Configuration
#
CONFIG_NETFILTER_NETLINK=m
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
# CONFIG_NF_CONNTRACK is not set
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
CONFIG_NETFILTER_XT_MATCH_ESP=m
# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
# CONFIG_NETFILTER_XT_MATCH_TIME is not set
# CONFIG_NETFILTER_XT_MATCH_U32 is not set
# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set

#
# IP: Netfilter Configuration
#
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_RECENT=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
# CONFIG_IP_NF_SECURITY is not set
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m

#
# IPv6: Netfilter Configuration
#
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_FRAG=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_AH=m
# CONFIG_IP6_NF_MATCH_MH is not set
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_RAW=m
# CONFIG_IP6_NF_SECURITY is not set

#
# DECnet: Netfilter Configuration
#
CONFIG_DECNET_NF_GRABULATOR=m

#
# Bridge: Netfilter Configuration
#
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
CONFIG_BRIDGE_EBT_T_NAT=m
CONFIG_BRIDGE_EBT_802_3=m
CONFIG_BRIDGE_EBT_AMONG=m
CONFIG_BRIDGE_EBT_ARP=m
CONFIG_BRIDGE_EBT_IP=m
# CONFIG_BRIDGE_EBT_IP6 is not set
CONFIG_BRIDGE_EBT_LIMIT=m
CONFIG_BRIDGE_EBT_MARK=m
CONFIG_BRIDGE_EBT_PKTTYPE=m
CONFIG_BRIDGE_EBT_STP=m
CONFIG_BRIDGE_EBT_VLAN=m
CONFIG_BRIDGE_EBT_ARPREPLY=m
CONFIG_BRIDGE_EBT_DNAT=m
CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_ULOG=m
# CONFIG_BRIDGE_EBT_NFLOG is not set
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
CONFIG_IP_DCCP_ACKVEC=y

#
# DCCP CCIDs Configuration (EXPERIMENTAL)
#
CONFIG_IP_DCCP_CCID2=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
CONFIG_IP_DCCP_CCID3=m
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
CONFIG_IP_DCCP_CCID3_RTO=100
CONFIG_IP_DCCP_TFRC_LIB=m

#
# DCCP Kernel Hacking
#
# CONFIG_IP_DCCP_DEBUG is not set
CONFIG_IP_SCTP=m
# CONFIG_SCTP_DBG_MSG is not set
# CONFIG_SCTP_DBG_OBJCNT is not set
# CONFIG_SCTP_HMAC_NONE is not set
# CONFIG_SCTP_HMAC_SHA1 is not set
CONFIG_SCTP_HMAC_MD5=y
# CONFIG_TIPC is not set
CONFIG_ATM=m
CONFIG_ATM_CLIP=m
CONFIG_ATM_CLIP_NO_ICMP=y
CONFIG_ATM_LANE=m
CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
# CONFIG_ATM_BR2684_IPFILTER is not set
CONFIG_STP=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
# CONFIG_VLAN_8021Q_GVRP is not set
CONFIG_DECNET=m
CONFIG_DECNET_ROUTER=y
CONFIG_LLC=m
CONFIG_LLC2=m
CONFIG_IPX=m
# CONFIG_IPX_INTERN is not set
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_LTPC=m
CONFIG_COPS=m
CONFIG_COPS_DAYNA=y
CONFIG_COPS_TANGENT=y
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_IPDDP_DECAP=y
CONFIG_X25=m
CONFIG_LAPB=m
CONFIG_ECONET=m
# CONFIG_ECONET_AUNUDP is not set
# CONFIG_ECONET_NATIVE is not set
CONFIG_WAN_ROUTER=m
CONFIG_NET_SCHED=y

#
# Queueing/Scheduling
#
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_ATM=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m

#
# Classification
#
CONFIG_NET_CLS=y
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_ROUTE=y
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_PERF=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
# CONFIG_NET_CLS_FLOW is not set
# CONFIG_NET_EMATCH is not set
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
# CONFIG_NET_ACT_NAT is not set
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
# CONFIG_NET_CLS_IND is not set
CONFIG_NET_SCH_FIFO=y

#
# Network testing
#
CONFIG_NET_PKTGEN=m
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
CONFIG_BT=m
CONFIG_BT_L2CAP=m
CONFIG_BT_SCO=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m

#
# Bluetooth device drivers
#
CONFIG_BT_HCIUSB=m
CONFIG_BT_HCIUSB_SCO=y
# CONFIG_BT_HCIBTUSB is not set
# CONFIG_BT_HCIBTSDIO is not set
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
# CONFIG_BT_HCIUART_LL is not set
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
# CONFIG_AF_RXRPC is not set
CONFIG_FIB_RULES=y

#
# Wireless
#
# CONFIG_CFG80211 is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
# CONFIG_MAC80211 is not set
CONFIG_IEEE80211=m
# CONFIG_IEEE80211_DEBUG is not set
CONFIG_IEEE80211_CRYPT_WEP=m
CONFIG_IEEE80211_CRYPT_CCMP=m
CONFIG_IEEE80211_CRYPT_TKIP=m
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set

#
# Device Drivers
#

#
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
CONFIG_FIRMWARE_IN_KERNEL=y
CONFIG_EXTRA_FIRMWARE=""
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_CONNECTOR=y
CONFIG_PROC_EVENTS=y
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
CONFIG_MTD_CONCAT=m
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_REDBOOT_PARTS=m
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
# CONFIG_MTD_AR7_PARTS is not set

#
# User Modules And Translation Layers
#
CONFIG_MTD_CHAR=m
CONFIG_MTD_BLKDEVS=m
CONFIG_MTD_BLOCK=m
# CONFIG_MTD_BLOCK_RO is not set
# CONFIG_FTL is not set
# CONFIG_NFTL is not set
# CONFIG_INFTL is not set
CONFIG_RFD_FTL=m
# CONFIG_SSFDC is not set
# CONFIG_MTD_OOPS is not set

#
# RAM/ROM/Flash chip drivers
#
CONFIG_MTD_CFI=m
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_GEN_PROBE=m
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_NOSWAP=y
# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
# CONFIG_MTD_CFI_GEOMETRY is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
CONFIG_MTD_MAP_BANK_WIDTH_4=y
# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
# CONFIG_MTD_OTP is not set
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
CONFIG_MTD_CFI_UTIL=m
# CONFIG_MTD_RAM is not set
# CONFIG_MTD_ROM is not set
CONFIG_MTD_ABSENT=m

#
# Mapping drivers for chip access
#
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=m
CONFIG_MTD_PHYSMAP_START=0x8000000
CONFIG_MTD_PHYSMAP_LEN=0x4000000
CONFIG_MTD_PHYSMAP_BANKWIDTH=2
CONFIG_MTD_SC520CDP=m
CONFIG_MTD_NETSC520=m
CONFIG_MTD_TS5500=m
CONFIG_MTD_SBC_GXX=m
CONFIG_MTD_AMD76XROM=m
CONFIG_MTD_ICHXROM=m
# CONFIG_MTD_ESB2ROM is not set
# CONFIG_MTD_CK804XROM is not set
CONFIG_MTD_SCB2_FLASH=m
CONFIG_MTD_NETtel=m
CONFIG_MTD_DILNETPC=m
CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
CONFIG_MTD_L440GX=m
CONFIG_MTD_PCI=m
# CONFIG_MTD_INTEL_VR_NOR is not set
# CONFIG_MTD_PLATRAM is not set

#
# Self-contained MTD device drivers
#
CONFIG_MTD_PMC551=m
CONFIG_MTD_PMC551_BUGFIX=y
# CONFIG_MTD_PMC551_DEBUG is not set
# CONFIG_MTD_DATAFLASH is not set
# CONFIG_MTD_M25P80 is not set
CONFIG_MTD_SLRAM=m
CONFIG_MTD_PHRAM=m
CONFIG_MTD_MTDRAM=m
CONFIG_MTDRAM_TOTAL_SIZE=4096
CONFIG_MTDRAM_ERASE_SIZE=128
CONFIG_MTD_BLOCK2MTD=m

#
# Disk-On-Chip Device Drivers
#
CONFIG_MTD_DOC2000=m
CONFIG_MTD_DOC2001=m
CONFIG_MTD_DOC2001PLUS=m
CONFIG_MTD_DOCPROBE=m
CONFIG_MTD_DOCECC=m
CONFIG_MTD_DOCPROBE_ADVANCED=y
CONFIG_MTD_DOCPROBE_ADDRESS=0x0000
CONFIG_MTD_DOCPROBE_HIGH=y
CONFIG_MTD_DOCPROBE_55AA=y
CONFIG_MTD_NAND=m
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
CONFIG_MTD_NAND_ECC_SMC=y
# CONFIG_MTD_NAND_MUSEUM_IDS is not set
CONFIG_MTD_NAND_IDS=m
CONFIG_MTD_NAND_DISKONCHIP=m
# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
# CONFIG_MTD_NAND_CAFE is not set
CONFIG_MTD_NAND_CS553X=m
CONFIG_MTD_NAND_NANDSIM=m
# CONFIG_MTD_NAND_PLATFORM is not set
# CONFIG_MTD_ALAUDA is not set
CONFIG_MTD_ONENAND=m
# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
CONFIG_MTD_ONENAND_OTP=y
# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
# CONFIG_MTD_ONENAND_SIM is not set

#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
# CONFIG_PARPORT is not set
CONFIG_PNP=y
# CONFIG_PNP_DEBUG is not set

#
# Protocols
#
# CONFIG_ISAPNP is not set
CONFIG_PNPBIOS=y
CONFIG_PNPBIOS_PROC_FS=y
CONFIG_PNPACPI=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
CONFIG_BLK_DEV_XD=m
CONFIG_BLK_CPQ_DA=m
CONFIG_BLK_CPQ_CISS_DA=m
CONFIG_CISS_SCSI_TAPE=y
CONFIG_BLK_DEV_DAC960=m
CONFIG_BLK_DEV_UMEM=m
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_SX8=m
# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=64000
# CONFIG_BLK_DEV_XIP is not set
CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
CONFIG_CDROM_PKTCDVD_WCACHE=y
CONFIG_ATA_OVER_ETH=m
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
# CONFIG_IBM_ASM is not set
# CONFIG_PHANTOM is not set
# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
# CONFIG_ACER_WMI is not set
# CONFIG_ASUS_LAPTOP is not set
# CONFIG_FUJITSU_LAPTOP is not set
# CONFIG_TC1100_WMI is not set
# CONFIG_MSI_LAPTOP is not set
# CONFIG_COMPAL_LAPTOP is not set
# CONFIG_SONY_LAPTOP is not set
# CONFIG_THINKPAD_ACPI is not set
# CONFIG_INTEL_MENLOW is not set
# CONFIG_EEEPC_LAPTOP is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set

#
# SCSI device support
#
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
CONFIG_SCSI_NETLINK=y
CONFIG_SCSI_PROC_FS=y

#
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
# CONFIG_BLK_DEV_SR_VENDOR is not set
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m

#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
# CONFIG_SCSI_SCAN_ASYNC is not set
CONFIG_SCSI_WAIT_SCAN=m

#
# SCSI Transports
#
CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_SAS_LIBSAS is not set
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_7000FASST is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AHA152X is not set
# CONFIG_SCSI_AHA1542 is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
# CONFIG_SCSI_DPT_I2O is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_IN2000 is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_DTC3280 is not set
# CONFIG_SCSI_EATA is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
CONFIG_SCSI_GDTH=m
# CONFIG_SCSI_GENERIC_NCR5380 is not set
# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_NCR53C406A is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_QLOGIC_FAS is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_QLA_FC is not set
# CONFIG_SCSI_QLA_ISCSI is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_SYM53C416 is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_T128 is not set
# CONFIG_SCSI_U14_34F is not set
# CONFIG_SCSI_ULTRASTOR is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
CONFIG_ATA_ACPI=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
CONFIG_ATA_SFF=y
# CONFIG_SATA_SVW is not set
CONFIG_ATA_PIIX=y
# CONFIG_SATA_MV is not set
# CONFIG_SATA_NV is not set
# CONFIG_PDC_ADMA is not set
# CONFIG_SATA_QSTOR is not set
# CONFIG_SATA_PROMISE is not set
# CONFIG_SATA_SX4 is not set
# CONFIG_SATA_SIL is not set
# CONFIG_SATA_SIS is not set
# CONFIG_SATA_ULI is not set
# CONFIG_SATA_VIA is not set
# CONFIG_SATA_VITESSE is not set
# CONFIG_SATA_INIC162X is not set
# CONFIG_PATA_ACPI is not set
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
# CONFIG_PATA_CS5520 is not set
# CONFIG_PATA_CS5530 is not set
# CONFIG_PATA_CS5535 is not set
# CONFIG_PATA_CS5536 is not set
# CONFIG_PATA_CYPRESS is not set
# CONFIG_PATA_EFAR is not set
CONFIG_ATA_GENERIC=y
# CONFIG_PATA_HPT366 is not set
# CONFIG_PATA_HPT37X is not set
# CONFIG_PATA_HPT3X2N is not set
# CONFIG_PATA_HPT3X3 is not set
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
CONFIG_PATA_MPIIX=y
# CONFIG_PATA_OLDPIIX is not set
# CONFIG_PATA_NETCELL is not set
# CONFIG_PATA_NINJA32 is not set
# CONFIG_PATA_NS87410 is not set
# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_QDI is not set
# CONFIG_PATA_RADISYS is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_SIL680 is not set
# CONFIG_PATA_SIS is not set
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
# CONFIG_PATA_WINBOND_VLB is not set
# CONFIG_PATA_SCH is not set
# CONFIG_MD is not set
# CONFIG_FUSION is not set

#
# IEEE 1394 (FireWire) support
#

#
# Enable only one of the two stacks, unless you know what you are doing
#
# CONFIG_FIREWIRE is not set
CONFIG_IEEE1394=m
CONFIG_IEEE1394_OHCI1394=m
# CONFIG_IEEE1394_PCILYNX is not set
CONFIG_IEEE1394_SBP2=m
# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
CONFIG_IEEE1394_ETH1394=m
CONFIG_IEEE1394_RAWIO=m
CONFIG_IEEE1394_VIDEO1394=m
CONFIG_IEEE1394_DV1394=m
# CONFIG_IEEE1394_VERBOSEDEBUG is not set
CONFIG_I2O=m
CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
CONFIG_I2O_EXT_ADAPTEC=y
CONFIG_I2O_CONFIG=m
CONFIG_I2O_CONFIG_OLD_IOCTL=y
CONFIG_I2O_BUS=m
CONFIG_I2O_BLOCK=m
CONFIG_I2O_SCSI=m
CONFIG_I2O_PROC=m
# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
CONFIG_IFB=m
CONFIG_DUMMY=m
CONFIG_BONDING=m
# CONFIG_MACVLAN is not set
CONFIG_EQUALIZER=m
CONFIG_TUN=m
# CONFIG_VETH is not set
# CONFIG_NET_SB1000 is not set
# CONFIG_ARCNET is not set
CONFIG_PHYLIB=m

#
# MII PHY device drivers
#
CONFIG_MARVELL_PHY=m
CONFIG_DAVICOM_PHY=m
CONFIG_QSEMI_PHY=m
CONFIG_LXT_PHY=m
CONFIG_CICADA_PHY=m
CONFIG_VITESSE_PHY=m
CONFIG_SMSC_PHY=m
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
CONFIG_NET_VENDOR_3COM=y
CONFIG_EL1=m
CONFIG_EL2=m
CONFIG_ELPLUS=m
CONFIG_EL16=m
CONFIG_EL3=m
CONFIG_3C515=m
CONFIG_VORTEX=m
CONFIG_TYPHOON=m
# CONFIG_LANCE is not set
CONFIG_NET_VENDOR_SMC=y
CONFIG_WD80x3=m
CONFIG_ULTRA=m
CONFIG_SMC9194=m
# CONFIG_ENC28J60 is not set
# CONFIG_NET_VENDOR_RACAL is not set
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
CONFIG_TULIP_NAPI=y
CONFIG_TULIP_NAPI_HW_MITIGATION=y
CONFIG_DE4X5=m
CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
# CONFIG_AT1700 is not set
# CONFIG_DEPCA is not set
# CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_AC3200 is not set
# CONFIG_APRICOT is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_CS89x0 is not set
# CONFIG_EEPRO100 is not set
CONFIG_E100=m
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
CONFIG_NE2K_PCI=m
CONFIG_8139CP=m
CONFIG_8139TOO=m
# CONFIG_8139TOO_PIO is not set
# CONFIG_8139TOO_TUNE_TWISTER is not set
CONFIG_8139TOO_8129=y
# CONFIG_8139_OLD_RX_RESET is not set
# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
CONFIG_EPIC100=m
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
CONFIG_E1000=m
CONFIG_E1000_DISABLE_PACKET_SPLIT=y
# CONFIG_E1000E is not set
# CONFIG_IP1000 is not set
# CONFIG_IGB is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
# CONFIG_SIS190 is not set
CONFIG_SKGE=y
# CONFIG_SKGE_DEBUG is not set
CONFIG_SKY2=y
# CONFIG_SKY2_DEBUG is not set
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
# CONFIG_CHELSIO_T3 is not set
# CONFIG_IXGBE is not set
CONFIG_IXGB=m
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
# CONFIG_NIU is not set
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
# CONFIG_SFC is not set
# CONFIG_TR is not set

#
# Wireless LAN
#
# CONFIG_WLAN_PRE80211 is not set
CONFIG_WLAN_80211=y
CONFIG_IPW2100=m
# CONFIG_IPW2100_MONITOR is not set
# CONFIG_IPW2100_DEBUG is not set
CONFIG_IPW2200=m
# CONFIG_IPW2200_MONITOR is not set
# CONFIG_IPW2200_QOS is not set
# CONFIG_IPW2200_DEBUG is not set
# CONFIG_LIBERTAS is not set
# CONFIG_AIRO is not set
# CONFIG_HERMES is not set
# CONFIG_ATMEL is not set
# CONFIG_PRISM54 is not set
# CONFIG_USB_ZD1201 is not set
# CONFIG_USB_NET_RNDIS_WLAN is not set
# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_HOSTAP is not set

#
# USB Network Adapters
#
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_USBNET=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_CDCETHER=m
# CONFIG_USB_NET_DM9601 is not set
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_NET1080=m
CONFIG_USB_NET_PLUSB=m
# CONFIG_USB_NET_MCS7830 is not set
CONFIG_USB_NET_RNDIS_HOST=m
CONFIG_USB_NET_CDC_SUBSET=m
CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_BELKIN=y
CONFIG_USB_ARMLINUX=y
CONFIG_USB_EPSON2888=y
# CONFIG_USB_KC2190 is not set
CONFIG_USB_NET_ZAURUS=m
# CONFIG_WAN is not set
CONFIG_ATM_DRIVERS=y
# CONFIG_ATM_DUMMY is not set
# CONFIG_ATM_TCP is not set
# CONFIG_ATM_LANAI is not set
# CONFIG_ATM_ENI is not set
# CONFIG_ATM_FIRESTREAM is not set
# CONFIG_ATM_ZATM is not set
# CONFIG_ATM_NICSTAR is not set
# CONFIG_ATM_IDT77252 is not set
# CONFIG_ATM_AMBASSADOR is not set
# CONFIG_ATM_HORIZON is not set
# CONFIG_ATM_IA is not set
# CONFIG_ATM_FORE200E is not set
# CONFIG_ATM_HE is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
CONFIG_PPP=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPP_FILTER=y
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
CONFIG_PPPOATM=m
# CONFIG_PPPOL2TP is not set
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLHC=m
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_NET_FC=y
CONFIG_NETCONSOLE=m
# CONFIG_NETCONSOLE_DYNAMIC is not set
CONFIG_NETPOLL=y
CONFIG_NETPOLL_TRAP=y
CONFIG_NET_POLL_CONTROLLER=y
# CONFIG_ISDN is not set
CONFIG_PHONE=m
# CONFIG_PHONE_IXJ is not set

#
# Input device support
#
CONFIG_INPUT=y
CONFIG_INPUT_FF_MEMLESS=y
CONFIG_INPUT_POLLDEV=m

#
# Userland interfaces
#
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set

#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
CONFIG_KEYBOARD_SUNKBD=m
# CONFIG_KEYBOARD_LKKBD is not set
CONFIG_KEYBOARD_XTKBD=m
CONFIG_KEYBOARD_NEWTON=m
# CONFIG_KEYBOARD_STOWAWAY is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_LIFEBOOK=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
CONFIG_MOUSE_SERIAL=m
# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_BCM5974 is not set
CONFIG_MOUSE_INPORT=m
CONFIG_MOUSE_ATIXL=y
CONFIG_MOUSE_LOGIBM=m
CONFIG_MOUSE_PC110PAD=m
# CONFIG_MOUSE_VSXXXAA is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_ANALOG=m
CONFIG_JOYSTICK_A3D=m
CONFIG_JOYSTICK_ADI=m
CONFIG_JOYSTICK_COBRA=m
CONFIG_JOYSTICK_GF2K=m
CONFIG_JOYSTICK_GRIP=m
CONFIG_JOYSTICK_GRIP_MP=m
CONFIG_JOYSTICK_GUILLEMOT=m
CONFIG_JOYSTICK_INTERACT=m
CONFIG_JOYSTICK_SIDEWINDER=m
CONFIG_JOYSTICK_TMDC=m
CONFIG_JOYSTICK_IFORCE=m
CONFIG_JOYSTICK_IFORCE_USB=y
CONFIG_JOYSTICK_IFORCE_232=y
CONFIG_JOYSTICK_WARRIOR=m
CONFIG_JOYSTICK_MAGELLAN=m
CONFIG_JOYSTICK_SPACEORB=m
CONFIG_JOYSTICK_SPACEBALL=m
CONFIG_JOYSTICK_STINGER=m
CONFIG_JOYSTICK_TWIDJOY=m
# CONFIG_JOYSTICK_ZHENHUA is not set
CONFIG_JOYSTICK_JOYDUMP=m
# CONFIG_JOYSTICK_XPAD is not set
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=m
# CONFIG_TOUCHSCREEN_FUJITSU is not set
CONFIG_TOUCHSCREEN_GUNZE=m
CONFIG_TOUCHSCREEN_ELO=m
CONFIG_TOUCHSCREEN_MTOUCH=m
# CONFIG_TOUCHSCREEN_INEXIO is not set
CONFIG_TOUCHSCREEN_MK712=m
# CONFIG_TOUCHSCREEN_HTCPEN is not set
# CONFIG_TOUCHSCREEN_PENMOUNT is not set
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
# CONFIG_TOUCHSCREEN_UCB1400 is not set
# CONFIG_TOUCHSCREEN_WM97XX is not set
# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=y
# CONFIG_INPUT_APANEL is not set
CONFIG_INPUT_WISTRON_BTNS=m
# CONFIG_INPUT_ATLAS_BTNS is not set
# CONFIG_INPUT_ATI_REMOTE is not set
# CONFIG_INPUT_ATI_REMOTE2 is not set
# CONFIG_INPUT_KEYSPAN_REMOTE is not set
# CONFIG_INPUT_POWERMATE is not set
# CONFIG_INPUT_YEALINK is not set
CONFIG_INPUT_UINPUT=m

#
# Hardware I/O ports
#
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_CT82C710=m
CONFIG_SERIO_PCIPS2=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
CONFIG_GAMEPORT=m
CONFIG_GAMEPORT_NS558=m
CONFIG_GAMEPORT_L4=m
CONFIG_GAMEPORT_EMU10K1=m
CONFIG_GAMEPORT_FM801=m

#
# Character devices
#
CONFIG_VT=y
CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_DEVKMEM=y
CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_COMPUTONE is not set
# CONFIG_ROCKETPORT is not set
# CONFIG_CYCLADES is not set
# CONFIG_DIGIEPCA is not set
# CONFIG_ESPSERIAL is not set
# CONFIG_MOXA_INTELLIO is not set
# CONFIG_MOXA_SMARTIO is not set
# CONFIG_ISI is not set
# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_SYNCLINK_GT is not set
# CONFIG_N_HDLC is not set
# CONFIG_RISCOM8 is not set
# CONFIG_SPECIALIX is not set
# CONFIG_SX is not set
# CONFIG_RIO is not set
# CONFIG_STALDRV is not set
# CONFIG_NOZOMI is not set

#
# Serial drivers
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_FIX_EARLYCON_MEM=y
CONFIG_SERIAL_8250_PCI=y
CONFIG_SERIAL_8250_PNP=y
CONFIG_SERIAL_8250_NR_UARTS=8
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
# CONFIG_SERIAL_8250_MANY_PORTS is not set
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
# CONFIG_SERIAL_8250_RSA is not set

#
# Non-8250 serial port support
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_JSM=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=64
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_PANIC_EVENT=y
CONFIG_IPMI_PANIC_STRING=y
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
CONFIG_IPMI_WATCHDOG=m
CONFIG_IPMI_POWEROFF=m
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_INTEL=m
# CONFIG_HW_RANDOM_AMD is not set
# CONFIG_HW_RANDOM_GEODE is not set
# CONFIG_HW_RANDOM_VIA is not set
CONFIG_NVRAM=m
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_SONYPI is not set
# CONFIG_MWAVE is not set
# CONFIG_PC8736x_GPIO is not set
# CONFIG_NSC_GPIO is not set
# CONFIG_CS5535_GPIO is not set
CONFIG_RAW_DRIVER=m
CONFIG_MAX_RAW_DEVS=4096
CONFIG_HPET=y
CONFIG_HPET_MMAP=y
CONFIG_HANGCHECK_TIMER=m
# CONFIG_TCG_TPM is not set
# CONFIG_TELCLOCK is not set
CONFIG_DEVPORT=y
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=m
CONFIG_I2C_ALGOPCA=m

#
# I2C Hardware Bus support
#

#
# PC SMBus host controller drivers
#
CONFIG_I2C_ALI1535=m
CONFIG_I2C_ALI1563=m
CONFIG_I2C_ALI15X3=m
CONFIG_I2C_AMD756=m
CONFIG_I2C_AMD756_S4882=m
CONFIG_I2C_AMD8111=m
CONFIG_I2C_I801=m
# CONFIG_I2C_ISCH is not set
CONFIG_I2C_PIIX4=m
CONFIG_I2C_NFORCE2=m
# CONFIG_I2C_NFORCE2_S4985 is not set
CONFIG_I2C_SIS5595=m
CONFIG_I2C_SIS630=m
CONFIG_I2C_SIS96X=m
CONFIG_I2C_VIA=m
CONFIG_I2C_VIAPRO=m

#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
CONFIG_I2C_OCORES=m
# CONFIG_I2C_SIMTEC is not set

#
# External I2C/SMBus adapter drivers
#
CONFIG_I2C_PARPORT_LIGHT=m
# CONFIG_I2C_TAOS_EVM is not set
# CONFIG_I2C_TINY_USB is not set

#
# Graphics adapter I2C/DDC channel drivers
#
CONFIG_I2C_VOODOO3=m

#
# Other I2C/SMBus bus drivers
#
CONFIG_I2C_PCA_ISA=m
# CONFIG_I2C_PCA_PLATFORM is not set
CONFIG_I2C_STUB=m
CONFIG_SCx200_ACB=m

#
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
# CONFIG_AT24 is not set
CONFIG_SENSORS_EEPROM=m
CONFIG_SENSORS_PCF8574=m
# CONFIG_PCF8575 is not set
CONFIG_SENSORS_PCA9539=m
CONFIG_SENSORS_PCF8591=m
CONFIG_SENSORS_MAX6875=m
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y

#
# SPI Master Controller Drivers
#
CONFIG_SPI_BITBANG=m

#
# SPI Protocol Masters
#
# CONFIG_SPI_AT25 is not set
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_GPIOLIB is not set
CONFIG_W1=m
CONFIG_W1_CON=y

#
# 1-wire Bus Masters
#
CONFIG_W1_MASTER_MATROX=m
CONFIG_W1_MASTER_DS2490=m
CONFIG_W1_MASTER_DS2482=m

#
# 1-wire Slaves
#
CONFIG_W1_SLAVE_THERM=m
CONFIG_W1_SLAVE_SMEM=m
CONFIG_W1_SLAVE_DS2433=m
CONFIG_W1_SLAVE_DS2433_CRC=y
# CONFIG_W1_SLAVE_DS2760 is not set
CONFIG_POWER_SUPPLY=y
# CONFIG_POWER_SUPPLY_DEBUG is not set
# CONFIG_PDA_POWER is not set
# CONFIG_BATTERY_DS2760 is not set
CONFIG_HWMON=y
CONFIG_HWMON_VID=m
# CONFIG_SENSORS_ABITUGURU is not set
# CONFIG_SENSORS_ABITUGURU3 is not set
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
# CONFIG_SENSORS_ADCXX is not set
# CONFIG_SENSORS_ADM1021 is not set
# CONFIG_SENSORS_ADM1025 is not set
# CONFIG_SENSORS_ADM1026 is not set
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
# CONFIG_SENSORS_ADT7470 is not set
# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_K8TEMP is not set
# CONFIG_SENSORS_ASB100 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_F75375S is not set
# CONFIG_SENSORS_FSCHER is not set
# CONFIG_SENSORS_FSCPOS is not set
# CONFIG_SENSORS_FSCHMD is not set
# CONFIG_SENSORS_GL518SM is not set
# CONFIG_SENSORS_GL520SM is not set
# CONFIG_SENSORS_CORETEMP is not set
# CONFIG_SENSORS_IBMAEM is not set
# CONFIG_SENSORS_IBMPEX is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
# CONFIG_SENSORS_LM70 is not set
# CONFIG_SENSORS_LM75 is not set
# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
# CONFIG_SENSORS_LM80 is not set
# CONFIG_SENSORS_LM83 is not set
CONFIG_SENSORS_LM85=m
# CONFIG_SENSORS_LM87 is not set
# CONFIG_SENSORS_LM90 is not set
# CONFIG_SENSORS_LM92 is not set
# CONFIG_SENSORS_LM93 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_MAX6650 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
# CONFIG_SENSORS_SIS5595 is not set
# CONFIG_SENSORS_DME1737 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83781D is not set
# CONFIG_SENSORS_W83791D is not set
# CONFIG_SENSORS_W83792D is not set
# CONFIG_SENSORS_W83793 is not set
# CONFIG_SENSORS_W83L785TS is not set
# CONFIG_SENSORS_W83L786NG is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_HDAPS is not set
# CONFIG_SENSORS_APPLESMC is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
CONFIG_THERMAL=y
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set

#
# Sonics Silicon Backplane
#
CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set

#
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set

#
# Multimedia devices
#

#
# Multimedia core support
#
CONFIG_VIDEO_DEV=m
CONFIG_VIDEO_V4L2_COMMON=m
CONFIG_VIDEO_ALLOW_V4L1=y
CONFIG_VIDEO_V4L1_COMPAT=y
CONFIG_DVB_CORE=m
CONFIG_VIDEO_MEDIA=m

#
# Multimedia drivers
#
# CONFIG_MEDIA_ATTACH is not set
CONFIG_MEDIA_TUNER=m
# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
CONFIG_MEDIA_TUNER_SIMPLE=m
CONFIG_MEDIA_TUNER_TDA8290=m
CONFIG_MEDIA_TUNER_TDA18271=m
CONFIG_MEDIA_TUNER_TDA9887=m
CONFIG_MEDIA_TUNER_TEA5761=m
CONFIG_MEDIA_TUNER_TEA5767=m
CONFIG_MEDIA_TUNER_MT20XX=m
CONFIG_MEDIA_TUNER_MT2060=m
CONFIG_MEDIA_TUNER_XC2028=m
CONFIG_MEDIA_TUNER_XC5000=m
CONFIG_VIDEO_V4L2=m
CONFIG_VIDEO_V4L1=m
CONFIG_VIDEOBUF_GEN=m
CONFIG_VIDEOBUF_VMALLOC=m
CONFIG_VIDEO_IR=m
CONFIG_VIDEO_TVEEPROM=m
CONFIG_VIDEO_TUNER=m
CONFIG_VIDEO_CAPTURE_DRIVERS=y
# CONFIG_VIDEO_ADV_DEBUG is not set
CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
CONFIG_VIDEO_IR_I2C=m
CONFIG_VIDEO_MSP3400=m
CONFIG_VIDEO_CS53L32A=m
CONFIG_VIDEO_WM8775=m
CONFIG_VIDEO_SAA711X=m
CONFIG_VIDEO_TVP5150=m
CONFIG_VIDEO_CX25840=m
CONFIG_VIDEO_CX2341X=m
# CONFIG_VIDEO_VIVI is not set
# CONFIG_VIDEO_BT848 is not set
# CONFIG_VIDEO_PMS is not set
# CONFIG_VIDEO_CPIA is not set
# CONFIG_VIDEO_CPIA2 is not set
# CONFIG_VIDEO_SAA5246A is not set
# CONFIG_VIDEO_SAA5249 is not set
# CONFIG_TUNER_3036 is not set
# CONFIG_VIDEO_STRADIS is not set
# CONFIG_VIDEO_ZORAN is not set
# CONFIG_VIDEO_SAA7134 is not set
# CONFIG_VIDEO_MXB is not set
# CONFIG_VIDEO_DPC is not set
# CONFIG_VIDEO_HEXIUM_ORION is not set
# CONFIG_VIDEO_HEXIUM_GEMINI is not set
# CONFIG_VIDEO_CX88 is not set
# CONFIG_VIDEO_CX23885 is not set
# CONFIG_VIDEO_AU0828 is not set
# CONFIG_VIDEO_IVTV is not set
# CONFIG_VIDEO_CX18 is not set
# CONFIG_VIDEO_CAFE_CCIC is not set
CONFIG_V4L_USB_DRIVERS=y
# CONFIG_USB_VIDEO_CLASS is not set
# CONFIG_USB_GSPCA is not set
CONFIG_VIDEO_PVRUSB2=m
CONFIG_VIDEO_PVRUSB2_SYSFS=y
CONFIG_VIDEO_PVRUSB2_DVB=y
# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
CONFIG_VIDEO_EM28XX=m
# CONFIG_VIDEO_EM28XX_ALSA is not set
# CONFIG_VIDEO_EM28XX_DVB is not set
# CONFIG_VIDEO_USBVISION is not set
CONFIG_VIDEO_USBVIDEO=m
CONFIG_USB_VICAM=m
CONFIG_USB_IBMCAM=m
CONFIG_USB_KONICAWC=m
CONFIG_USB_QUICKCAM_MESSENGER=m
CONFIG_USB_ET61X251=m
CONFIG_VIDEO_OVCAMCHIP=m
CONFIG_USB_W9968CF=m
CONFIG_USB_OV511=m
CONFIG_USB_SE401=m
CONFIG_USB_SN9C102=m
CONFIG_USB_STV680=m
# CONFIG_USB_ZC0301 is not set
CONFIG_USB_PWC=m
# CONFIG_USB_PWC_DEBUG is not set
# CONFIG_USB_ZR364XX is not set
# CONFIG_USB_STKWEBCAM is not set
# CONFIG_USB_S2255 is not set
# CONFIG_SOC_CAMERA is not set
# CONFIG_VIDEO_SH_MOBILE_CEU is not set
CONFIG_RADIO_ADAPTERS=y
# CONFIG_RADIO_CADET is not set
# CONFIG_RADIO_RTRACK is not set
# CONFIG_RADIO_RTRACK2 is not set
# CONFIG_RADIO_AZTECH is not set
# CONFIG_RADIO_GEMTEK is not set
# CONFIG_RADIO_GEMTEK_PCI is not set
# CONFIG_RADIO_MAXIRADIO is not set
# CONFIG_RADIO_MAESTRO is not set
# CONFIG_RADIO_SF16FMI is not set
# CONFIG_RADIO_SF16FMR2 is not set
# CONFIG_RADIO_TERRATEC is not set
# CONFIG_RADIO_TRUST is not set
# CONFIG_RADIO_TYPHOON is not set
# CONFIG_RADIO_ZOLTRIX is not set
# CONFIG_USB_DSBR is not set
# CONFIG_USB_SI470X is not set
CONFIG_DVB_CAPTURE_DRIVERS=y

#
# Supported SAA7146 based PCI Adapters
#
# CONFIG_TTPCI_EEPROM is not set
# CONFIG_DVB_AV7110 is not set
# CONFIG_DVB_BUDGET_CORE is not set

#
# Supported USB Adapters
#
CONFIG_DVB_USB=m
# CONFIG_DVB_USB_DEBUG is not set
CONFIG_DVB_USB_A800=m
CONFIG_DVB_USB_DIBUSB_MB=m
# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
CONFIG_DVB_USB_DIBUSB_MC=m
# CONFIG_DVB_USB_DIB0700 is not set
CONFIG_DVB_USB_UMT_010=m
# CONFIG_DVB_USB_CXUSB is not set
# CONFIG_DVB_USB_M920X is not set
# CONFIG_DVB_USB_GL861 is not set
# CONFIG_DVB_USB_AU6610 is not set
CONFIG_DVB_USB_DIGITV=m
CONFIG_DVB_USB_VP7045=m
CONFIG_DVB_USB_VP702X=m
CONFIG_DVB_USB_GP8PSK=m
CONFIG_DVB_USB_NOVA_T_USB2=m
# CONFIG_DVB_USB_TTUSB2 is not set
CONFIG_DVB_USB_DTT200U=m
# CONFIG_DVB_USB_OPERA1 is not set
# CONFIG_DVB_USB_AF9005 is not set
# CONFIG_DVB_USB_DW2102 is not set
# CONFIG_DVB_USB_ANYSEE is not set
# CONFIG_DVB_TTUSB_BUDGET is not set
# CONFIG_DVB_TTUSB_DEC is not set
# CONFIG_DVB_CINERGYT2 is not set
# CONFIG_DVB_SIANO_SMS1XXX is not set

#
# Supported FlexCopII (B2C2) Adapters
#
# CONFIG_DVB_B2C2_FLEXCOP is not set

#
# Supported BT878 Adapters
#

#
# Supported Pluto2 Adapters
#
# CONFIG_DVB_PLUTO2 is not set

#
# Supported DVB Frontends
#

#
# Customise DVB Frontends
#
# CONFIG_DVB_FE_CUSTOMISE is not set

#
# DVB-S (satellite) frontends
#
CONFIG_DVB_CX24110=m
CONFIG_DVB_CX24123=m
CONFIG_DVB_MT312=m
CONFIG_DVB_S5H1420=m
CONFIG_DVB_STV0299=m
CONFIG_DVB_TDA8083=m
CONFIG_DVB_TDA10086=m
CONFIG_DVB_VES1X93=m
# CONFIG_DVB_TUNER_ITD1000 is not set
CONFIG_DVB_TDA826X=m
CONFIG_DVB_TUA6100=m

#
# DVB-T (terrestrial) frontends
#
CONFIG_DVB_SP8870=m
CONFIG_DVB_SP887X=m
CONFIG_DVB_CX22700=m
CONFIG_DVB_CX22702=m
# CONFIG_DVB_DRX397XD is not set
CONFIG_DVB_L64781=m
CONFIG_DVB_TDA1004X=m
CONFIG_DVB_NXT6000=m
CONFIG_DVB_MT352=m
CONFIG_DVB_ZL10353=m
CONFIG_DVB_DIB3000MB=m
CONFIG_DVB_DIB3000MC=m
# CONFIG_DVB_DIB7000M is not set
# CONFIG_DVB_DIB7000P is not set
CONFIG_DVB_TDA10048=m

#
# DVB-C (cable) frontends
#
CONFIG_DVB_VES1820=m
CONFIG_DVB_TDA10021=m
# CONFIG_DVB_TDA10023 is not set
CONFIG_DVB_STV0297=m

#
# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
#
CONFIG_DVB_NXT200X=m
CONFIG_DVB_OR51211=m
CONFIG_DVB_OR51132=m
CONFIG_DVB_BCM3510=m
CONFIG_DVB_LGDT330X=m
CONFIG_DVB_S5H1409=m
# CONFIG_DVB_AU8522 is not set
CONFIG_DVB_S5H1411=m

#
# Digital terrestrial only tuners/PLL
#
CONFIG_DVB_PLL=m
# CONFIG_DVB_TUNER_DIB0070 is not set

#
# SEC control devices for DVB-S
#
CONFIG_DVB_LNBP21=m
# CONFIG_DVB_ISL6405 is not set
CONFIG_DVB_ISL6421=m
CONFIG_DAB=y
CONFIG_USB_DABUSB=m

#
# Graphics support
#
CONFIG_AGP=m
# CONFIG_AGP_ALI is not set
# CONFIG_AGP_ATI is not set
# CONFIG_AGP_AMD is not set
# CONFIG_AGP_AMD64 is not set
CONFIG_AGP_INTEL=m
CONFIG_AGP_NVIDIA=m
# CONFIG_AGP_SIS is not set
# CONFIG_AGP_SWORKS is not set
# CONFIG_AGP_VIA is not set
# CONFIG_AGP_EFFICEON is not set
CONFIG_DRM=m
# CONFIG_DRM_TDFX is not set
# CONFIG_DRM_R128 is not set
# CONFIG_DRM_RADEON is not set
# CONFIG_DRM_I810 is not set
# CONFIG_DRM_I830 is not set
# CONFIG_DRM_I915 is not set
# CONFIG_DRM_MGA is not set
# CONFIG_DRM_SIS is not set
# CONFIG_DRM_VIA is not set
# CONFIG_DRM_SAVAGE is not set
CONFIG_DRM_PSB=m
CONFIG_VGASTATE=m
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_DDC=m
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
# CONFIG_FB_FOREIGN_ENDIAN is not set
# CONFIG_FB_SYS_FOPS is not set
# CONFIG_FB_SVGALIB is not set
# CONFIG_FB_MACMODES is not set
CONFIG_FB_BACKLIGHT=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y

#
# Frame buffer hardware drivers
#
# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
# CONFIG_FB_CYBER2000 is not set
# CONFIG_FB_ARC is not set
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_VGA16=m
# CONFIG_FB_UVESA is not set
CONFIG_FB_VESA=y
# CONFIG_FB_EFI is not set
# CONFIG_FB_IMAC is not set
# CONFIG_FB_N411 is not set
# CONFIG_FB_HGA is not set
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_NVIDIA=m
CONFIG_FB_NVIDIA_I2C=y
# CONFIG_FB_NVIDIA_DEBUG is not set
CONFIG_FB_NVIDIA_BACKLIGHT=y
CONFIG_FB_RIVA=m
CONFIG_FB_RIVA_I2C=y
# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_RIVA_BACKLIGHT=y
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_I810_I2C=y
# CONFIG_FB_LE80578 is not set
CONFIG_FB_INTEL=m
# CONFIG_FB_INTEL_DEBUG is not set
CONFIG_FB_INTEL_I2C=y
# CONFIG_FB_MATROX is not set
CONFIG_FB_RADEON=m
CONFIG_FB_RADEON_I2C=y
CONFIG_FB_RADEON_BACKLIGHT=y
# CONFIG_FB_RADEON_DEBUG is not set
# CONFIG_FB_ATY128 is not set
CONFIG_FB_ATY=m
CONFIG_FB_ATY_CT=y
CONFIG_FB_ATY_GENERIC_LCD=y
CONFIG_FB_ATY_GX=y
CONFIG_FB_ATY_BACKLIGHT=y
# CONFIG_FB_S3 is not set
# CONFIG_FB_SAVAGE is not set
# CONFIG_FB_SIS is not set
# CONFIG_FB_NEOMAGIC is not set
# CONFIG_FB_KYRO is not set
# CONFIG_FB_3DFX is not set
# CONFIG_FB_VOODOO1 is not set
# CONFIG_FB_VT8623 is not set
# CONFIG_FB_CYBLA is not set
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_ARK is not set
# CONFIG_FB_PM3 is not set
# CONFIG_FB_CARMINE is not set
# CONFIG_FB_GEODE is not set
# CONFIG_FB_VIRTUAL is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=m
# CONFIG_LCD_LTV350QV is not set
# CONFIG_LCD_ILI9320 is not set
# CONFIG_LCD_VGG2432A4 is not set
# CONFIG_LCD_PLATFORM is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_BACKLIGHT_CORGI is not set
# CONFIG_BACKLIGHT_PROGEAR is not set
# CONFIG_BACKLIGHT_MBP_NVIDIA is not set

#
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set

#
# Console display driver support
#
CONFIG_VGA_CONSOLE=y
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
CONFIG_VIDEO_SELECT=y
CONFIG_MDA_CONSOLE=m
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
# CONFIG_LOGO is not set
CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
CONFIG_SND_HWDEP=m
CONFIG_SND_RAWMIDI=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
# CONFIG_SND_DEBUG_VERBOSE is not set
# CONFIG_SND_PCM_XRUN_DEBUG is not set
CONFIG_SND_VMASTER=y
CONFIG_SND_MPU401_UART=m
CONFIG_SND_AC97_CODEC=m
CONFIG_SND_DRIVERS=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
CONFIG_SND_SERIAL_U16550=m
CONFIG_SND_MPU401=m
CONFIG_SND_AC97_POWER_SAVE=y
CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
CONFIG_SND_ISA=y
# CONFIG_SND_ADLIB is not set
# CONFIG_SND_AD1816A is not set
# CONFIG_SND_AD1848 is not set
# CONFIG_SND_ALS100 is not set
# CONFIG_SND_AZT2320 is not set
# CONFIG_SND_CMI8330 is not set
# CONFIG_SND_CS4231 is not set
# CONFIG_SND_CS4232 is not set
# CONFIG_SND_CS4236 is not set
# CONFIG_SND_DT019X is not set
# CONFIG_SND_ES968 is not set
# CONFIG_SND_ES1688 is not set
# CONFIG_SND_ES18XX is not set
# CONFIG_SND_SC6000 is not set
# CONFIG_SND_GUSCLASSIC is not set
# CONFIG_SND_GUSEXTREME is not set
# CONFIG_SND_GUSMAX is not set
# CONFIG_SND_INTERWAVE is not set
# CONFIG_SND_INTERWAVE_STB is not set
# CONFIG_SND_OPL3SA2 is not set
# CONFIG_SND_OPTI92X_AD1848 is not set
# CONFIG_SND_OPTI92X_CS4231 is not set
# CONFIG_SND_OPTI93X is not set
# CONFIG_SND_MIRO is not set
# CONFIG_SND_SB8 is not set
# CONFIG_SND_SB16 is not set
# CONFIG_SND_SBAWE is not set
# CONFIG_SND_SGALAXY is not set
# CONFIG_SND_SSCAPE is not set
# CONFIG_SND_WAVEFRONT is not set
CONFIG_SND_PCI=y
# CONFIG_SND_AD1889 is not set
# CONFIG_SND_ALS300 is not set
# CONFIG_SND_ALS4000 is not set
# CONFIG_SND_ALI5451 is not set
# CONFIG_SND_ATIIXP is not set
# CONFIG_SND_ATIIXP_MODEM is not set
# CONFIG_SND_AU8810 is not set
# CONFIG_SND_AU8820 is not set
# CONFIG_SND_AU8830 is not set
# CONFIG_SND_AW2 is not set
# CONFIG_SND_AZT3328 is not set
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CA0106 is not set
# CONFIG_SND_CMIPCI is not set
# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
# CONFIG_SND_CS5530 is not set
# CONFIG_SND_CS5535AUDIO is not set
# CONFIG_SND_DARLA20 is not set
# CONFIG_SND_GINA20 is not set
# CONFIG_SND_LAYLA20 is not set
# CONFIG_SND_DARLA24 is not set
# CONFIG_SND_GINA24 is not set
# CONFIG_SND_LAYLA24 is not set
# CONFIG_SND_MONA is not set
# CONFIG_SND_MIA is not set
# CONFIG_SND_ECHO3G is not set
# CONFIG_SND_INDIGO is not set
# CONFIG_SND_INDIGOIO is not set
# CONFIG_SND_INDIGODJ is not set
# CONFIG_SND_EMU10K1 is not set
# CONFIG_SND_EMU10K1X is not set
# CONFIG_SND_ENS1370 is not set
# CONFIG_SND_ENS1371 is not set
# CONFIG_SND_ES1938 is not set
# CONFIG_SND_ES1968 is not set
# CONFIG_SND_FM801 is not set
CONFIG_SND_HDA_INTEL=m
# CONFIG_SND_HDA_HWDEP is not set
CONFIG_SND_HDA_CODEC_REALTEK=y
CONFIG_SND_HDA_CODEC_ANALOG=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_VIA=y
CONFIG_SND_HDA_CODEC_ATIHDMI=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_HDA_CODEC_CMEDIA=y
CONFIG_SND_HDA_CODEC_SI3054=y
CONFIG_SND_HDA_GENERIC=y
# CONFIG_SND_HDA_POWER_SAVE is not set
# CONFIG_SND_HDSP is not set
# CONFIG_SND_HDSPM is not set
# CONFIG_SND_HIFIER is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
CONFIG_SND_INTEL8X0=m
CONFIG_SND_INTEL8X0M=m
# CONFIG_SND_KORG1212 is not set
# CONFIG_SND_MAESTRO3 is not set
# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
# CONFIG_SND_PCXHR is not set
# CONFIG_SND_RIPTIDE is not set
# CONFIG_SND_RME32 is not set
# CONFIG_SND_RME96 is not set
# CONFIG_SND_RME9652 is not set
# CONFIG_SND_SIS7019 is not set
# CONFIG_SND_SONICVIBES is not set
# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VIA82XX_MODEM is not set
# CONFIG_SND_VIRTUOSO is not set
# CONFIG_SND_VX222 is not set
# CONFIG_SND_YMFPCI is not set
CONFIG_SND_SPI=y
CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=m
# CONFIG_SND_USB_USX2Y is not set
# CONFIG_SND_USB_CAIAQ is not set
# CONFIG_SND_SOC is not set
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set

#
# USB Input Devices
#
CONFIG_USB_HID=y
CONFIG_USB_HIDINPUT_POWERBOOK=y
CONFIG_HID_FF=y
CONFIG_HID_PID=y
CONFIG_LOGITECH_FF=y
# CONFIG_LOGIRUMBLEPAD2_FF is not set
# CONFIG_PANTHERLORD_FF is not set
CONFIG_THRUSTMASTER_FF=y
# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_HIDDEV=y
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=y
# CONFIG_USB_DEBUG is not set
# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set

#
# Miscellaneous USB options
#
CONFIG_USB_DEVICEFS=y
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
CONFIG_USB_SUSPEND=y
# CONFIG_USB_OTG is not set
CONFIG_USB_MON=y

#
# USB Host Controller Drivers
#
# CONFIG_USB_C67X00_HCD is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=y
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_GADGET_MUSB_HDRC is not set

#
# USB Device Class drivers
#
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
# CONFIG_USB_WDM is not set

#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#

#
# may also be needed; see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
# CONFIG_USB_STORAGE_ISD200 is not set
CONFIG_USB_STORAGE_DPCM=y
CONFIG_USB_STORAGE_USBAT=y
CONFIG_USB_STORAGE_SDDR09=y
CONFIG_USB_STORAGE_SDDR55=y
CONFIG_USB_STORAGE_JUMPSHOT=y
CONFIG_USB_STORAGE_ALAUDA=y
# CONFIG_USB_STORAGE_ONETOUCH is not set
# CONFIG_USB_STORAGE_KARMA is not set
# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
# CONFIG_USB_LIBUSUAL is not set

#
# USB Imaging devices
#
CONFIG_USB_MDC800=m
CONFIG_USB_MICROTEK=m

#
# USB port drivers
#
CONFIG_USB_SERIAL=m
CONFIG_USB_EZUSB=y
CONFIG_USB_SERIAL_GENERIC=y
# CONFIG_USB_SERIAL_AIRCABLE is not set
CONFIG_USB_SERIAL_ARK3116=m
CONFIG_USB_SERIAL_BELKIN=m
# CONFIG_USB_SERIAL_CH341 is not set
CONFIG_USB_SERIAL_WHITEHEAT=m
CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
CONFIG_USB_SERIAL_CP2101=m
CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
CONFIG_USB_SERIAL_FUNSOFT=m
CONFIG_USB_SERIAL_VISOR=m
CONFIG_USB_SERIAL_IPAQ=m
CONFIG_USB_SERIAL_IR=m
CONFIG_USB_SERIAL_EDGEPORT=m
CONFIG_USB_SERIAL_EDGEPORT_TI=m
CONFIG_USB_SERIAL_GARMIN=m
CONFIG_USB_SERIAL_IPW=m
# CONFIG_USB_SERIAL_IUU is not set
CONFIG_USB_SERIAL_KEYSPAN_PDA=m
CONFIG_USB_SERIAL_KEYSPAN=m
CONFIG_USB_SERIAL_KEYSPAN_MPR=y
CONFIG_USB_SERIAL_KEYSPAN_USA28=y
CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
CONFIG_USB_SERIAL_KEYSPAN_USA19=y
CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_KLSI=m
CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
# CONFIG_USB_SERIAL_MOS7720 is not set
# CONFIG_USB_SERIAL_MOS7840 is not set
# CONFIG_USB_SERIAL_MOTOROLA is not set
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_OTI6858 is not set
# CONFIG_USB_SERIAL_SPCP8X5 is not set
CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
# CONFIG_USB_SERIAL_DEBUG is not set

#
# USB Miscellaneous drivers
#
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
# CONFIG_USB_ADUTUX is not set
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
# CONFIG_USB_BERRY_CHARGE is not set
CONFIG_USB_LED=m
CONFIG_USB_CYPRESS_CY7C63=m
CONFIG_USB_CYTHERM=m
# CONFIG_USB_PHIDGET is not set
CONFIG_USB_IDMOUSE=m
# CONFIG_USB_FTDI_ELAN is not set
CONFIG_USB_APPLEDISPLAY=m
CONFIG_USB_SISUSBVGA=m
CONFIG_USB_SISUSBVGA_CON=y
CONFIG_USB_LD=m
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
CONFIG_USB_ATM=m
CONFIG_USB_SPEEDTOUCH=m
CONFIG_USB_CXACRU=m
CONFIG_USB_UEAGLEATM=m
CONFIG_USB_XUSBATM=m
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
CONFIG_USB_GADGET_DEBUG_FILES=y
2557# CONFIG_USB_GADGET_DEBUG_FS is not set
2558CONFIG_USB_GADGET_SELECTED=y
2559CONFIG_USB_GADGET_AMD5536UDC=y
2560CONFIG_USB_AMD5536UDC=y
2561# CONFIG_USB_GADGET_ATMEL_USBA is not set
2562# CONFIG_USB_GADGET_FSL_USB2 is not set
2563# CONFIG_USB_GADGET_NET2280 is not set
2564# CONFIG_USB_GADGET_PXA25X is not set
2565# CONFIG_USB_GADGET_M66592 is not set
2566# CONFIG_USB_GADGET_PXA27X is not set
2567# CONFIG_USB_GADGET_GOKU is not set
2568# CONFIG_USB_GADGET_LH7A40X is not set
2569# CONFIG_USB_GADGET_OMAP is not set
2570# CONFIG_USB_GADGET_S3C2410 is not set
2571# CONFIG_USB_GADGET_AT91 is not set
2572# CONFIG_USB_GADGET_DUMMY_HCD is not set
2573CONFIG_USB_GADGET_DUALSPEED=y
2574# CONFIG_USB_ZERO is not set
2575CONFIG_USB_ETH=m
2576CONFIG_USB_ETH_RNDIS=y
2577# CONFIG_USB_GADGETFS is not set
2578CONFIG_USB_FILE_STORAGE=m
2579CONFIG_USB_FILE_STORAGE_TEST=y
2580# CONFIG_USB_G_SERIAL is not set
2581# CONFIG_USB_MIDI_GADGET is not set
2582# CONFIG_USB_G_PRINTER is not set
2583# CONFIG_USB_CDC_COMPOSITE is not set
2584CONFIG_MMC=y
2585# CONFIG_MMC_DEBUG is not set
2586CONFIG_MMC_UNSAFE_RESUME=y
2587
2588#
2589# MMC/SD Card Drivers
2590#
2591CONFIG_MMC_BLOCK=y
2592CONFIG_MMC_BLOCK_BOUNCE=y
2593# CONFIG_SDIO_UART is not set
2594# CONFIG_MMC_TEST is not set
2595
2596#
2597# MMC/SD Host Controller Drivers
2598#
2599CONFIG_MMC_SDHCI=y
2600# CONFIG_MMC_SDHCI_PCI is not set
2601# CONFIG_MMC_WBSD is not set
2602# CONFIG_MMC_TIFM_SD is not set
2603# CONFIG_MEMSTICK is not set
2604CONFIG_NEW_LEDS=y
2605CONFIG_LEDS_CLASS=m
2606
2607#
2608# LED drivers
2609#
2610# CONFIG_LEDS_PCA9532 is not set
2611# CONFIG_LEDS_CLEVO_MAIL is not set
2612# CONFIG_LEDS_PCA955X is not set
2613
2614#
2615# LED Triggers
2616#
2617CONFIG_LEDS_TRIGGERS=y
2618CONFIG_LEDS_TRIGGER_TIMER=m
2619CONFIG_LEDS_TRIGGER_HEARTBEAT=m
2620# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
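With CONFIG_LEDS_TRIGGERS=y and the timer/heartbeat triggers built as modules, a trigger can be attached to an LED from user space through sysfs. A minimal sketch, assuming a hypothetical LED name (real names depend on the board's LED drivers):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* The LED name is illustrative; list /sys/class/leds/ for real ones. */
        const char *path = "/sys/class/leds/moblin::power/trigger";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Selecting "heartbeat" needs CONFIG_LEDS_TRIGGER_HEARTBEAT loaded. */
        if (write(fd, "heartbeat", strlen("heartbeat")) < 0)
                perror("write");
        close(fd);
        return 0;
}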
2621# CONFIG_ACCESSIBILITY is not set
2622# CONFIG_INFINIBAND is not set
2623# CONFIG_EDAC is not set
2624CONFIG_RTC_LIB=m
2625CONFIG_RTC_CLASS=m
2626
2627#
2628# RTC interfaces
2629#
2630CONFIG_RTC_INTF_SYSFS=y
2631CONFIG_RTC_INTF_PROC=y
2632CONFIG_RTC_INTF_DEV=y
2633CONFIG_RTC_INTF_DEV_UIE_EMUL=y
2634CONFIG_RTC_DRV_TEST=m
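With CONFIG_RTC_INTF_DEV=y the RTC class exposes /dev/rtcN character devices. A minimal sketch reading the hardware clock, assuming at least one of the RTC drivers below actually binds on the target:

#include <fcntl.h>
#include <linux/rtc.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct rtc_time tm;
        int fd = open("/dev/rtc0", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/rtc0");
                return 1;
        }
        if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
                perror("RTC_RD_TIME");
                close(fd);
                return 1;
        }
        printf("%04d-%02d-%02d %02d:%02d:%02d\n",
               tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
               tm.tm_hour, tm.tm_min, tm.tm_sec);
        close(fd);
        return 0;
}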
2635
2636#
2637# I2C RTC drivers
2638#
2639CONFIG_RTC_DRV_DS1307=m
2640# CONFIG_RTC_DRV_DS1374 is not set
2641CONFIG_RTC_DRV_DS1672=m
2642# CONFIG_RTC_DRV_MAX6900 is not set
2643CONFIG_RTC_DRV_RS5C372=m
2644CONFIG_RTC_DRV_ISL1208=m
2645CONFIG_RTC_DRV_X1205=m
2646CONFIG_RTC_DRV_PCF8563=m
2647CONFIG_RTC_DRV_PCF8583=m
2648# CONFIG_RTC_DRV_M41T80 is not set
2649# CONFIG_RTC_DRV_S35390A is not set
2650# CONFIG_RTC_DRV_FM3130 is not set
2651
2652#
2653# SPI RTC drivers
2654#
2655# CONFIG_RTC_DRV_M41T94 is not set
2656# CONFIG_RTC_DRV_DS1305 is not set
2657CONFIG_RTC_DRV_MAX6902=m
2658# CONFIG_RTC_DRV_R9701 is not set
2659CONFIG_RTC_DRV_RS5C348=m
2660
2661#
2662# Platform RTC drivers
2663#
2664# CONFIG_RTC_DRV_CMOS is not set
2665# CONFIG_RTC_DRV_DS1511 is not set
2666CONFIG_RTC_DRV_DS1553=m
2667CONFIG_RTC_DRV_DS1742=m
2668# CONFIG_RTC_DRV_STK17TA8 is not set
2669CONFIG_RTC_DRV_M48T86=m
2670# CONFIG_RTC_DRV_M48T59 is not set
2671CONFIG_RTC_DRV_V3020=m
2672
2673#
2674# on-CPU RTC drivers
2675#
2676# CONFIG_DMADEVICES is not set
2677# CONFIG_UIO is not set
2678
2679#
2680# Firmware Drivers
2681#
2682CONFIG_EDD=m
2683# CONFIG_EDD_OFF is not set
2684CONFIG_FIRMWARE_MEMMAP=y
2685# CONFIG_EFI_VARS is not set
2686# CONFIG_DELL_RBU is not set
2687# CONFIG_DCDBAS is not set
2688CONFIG_DMIID=y
2689# CONFIG_ISCSI_IBFT_FIND is not set
2690
2691#
2692# File systems
2693#
2694CONFIG_EXT2_FS=y
2695CONFIG_EXT2_FS_XATTR=y
2696CONFIG_EXT2_FS_POSIX_ACL=y
2697CONFIG_EXT2_FS_SECURITY=y
2698# CONFIG_EXT2_FS_XIP is not set
2699CONFIG_EXT3_FS=y
2700CONFIG_EXT3_FS_XATTR=y
2701CONFIG_EXT3_FS_POSIX_ACL=y
2702CONFIG_EXT3_FS_SECURITY=y
2703# CONFIG_EXT4DEV_FS is not set
2704CONFIG_JBD=y
2705# CONFIG_JBD_DEBUG is not set
2706CONFIG_FS_MBCACHE=y
2707CONFIG_REISERFS_FS=m
2708# CONFIG_REISERFS_CHECK is not set
2709# CONFIG_REISERFS_PROC_INFO is not set
2710CONFIG_REISERFS_FS_XATTR=y
2711CONFIG_REISERFS_FS_POSIX_ACL=y
2712CONFIG_REISERFS_FS_SECURITY=y
2713CONFIG_JFS_FS=m
2714CONFIG_JFS_POSIX_ACL=y
2715CONFIG_JFS_SECURITY=y
2716# CONFIG_JFS_DEBUG is not set
2717CONFIG_JFS_STATISTICS=y
2718CONFIG_FS_POSIX_ACL=y
2719# CONFIG_XFS_FS is not set
2720# CONFIG_GFS2_FS is not set
2721# CONFIG_OCFS2_FS is not set
2722CONFIG_DNOTIFY=y
2723CONFIG_INOTIFY=y
2724CONFIG_INOTIFY_USER=y
2725CONFIG_QUOTA=y
2726# CONFIG_QUOTA_NETLINK_INTERFACE is not set
2727CONFIG_PRINT_QUOTA_WARNING=y
2728CONFIG_QFMT_V1=m
2729CONFIG_QFMT_V2=m
2730CONFIG_QUOTACTL=y
2731CONFIG_AUTOFS_FS=m
2732CONFIG_AUTOFS4_FS=m
2733CONFIG_FUSE_FS=m
2734CONFIG_GENERIC_ACL=y
2735
2736#
2737# CD-ROM/DVD Filesystems
2738#
2739CONFIG_ISO9660_FS=y
2740CONFIG_JOLIET=y
2741CONFIG_ZISOFS=y
2742CONFIG_UDF_FS=m
2743CONFIG_UDF_NLS=y
2744
2745#
2746# DOS/FAT/NT Filesystems
2747#
2748CONFIG_FAT_FS=y
2749CONFIG_MSDOS_FS=y
2750CONFIG_VFAT_FS=y
2751CONFIG_FAT_DEFAULT_CODEPAGE=437
2752CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
2753CONFIG_NTFS_FS=m
2754# CONFIG_NTFS_DEBUG is not set
2755CONFIG_NTFS_RW=y
2756
2757#
2758# Pseudo filesystems
2759#
2760CONFIG_PROC_FS=y
2761CONFIG_PROC_KCORE=y
2762CONFIG_PROC_SYSCTL=y
2763CONFIG_SYSFS=y
2764CONFIG_TMPFS=y
2765CONFIG_TMPFS_POSIX_ACL=y
2766CONFIG_HUGETLBFS=y
2767CONFIG_HUGETLB_PAGE=y
2768CONFIG_CONFIGFS_FS=m
2769
2770#
2771# Miscellaneous filesystems
2772#
2773CONFIG_ADFS_FS=m
2774# CONFIG_ADFS_FS_RW is not set
2775CONFIG_AFFS_FS=m
2776# CONFIG_ECRYPT_FS is not set
2777CONFIG_HFS_FS=m
2778CONFIG_HFSPLUS_FS=m
2779CONFIG_BEFS_FS=m
2780# CONFIG_BEFS_DEBUG is not set
2781CONFIG_BFS_FS=m
2782CONFIG_EFS_FS=m
2783CONFIG_JFFS2_FS=m
2784CONFIG_JFFS2_FS_DEBUG=0
2785CONFIG_JFFS2_FS_WRITEBUFFER=y
2786# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
2787CONFIG_JFFS2_SUMMARY=y
2788CONFIG_JFFS2_FS_XATTR=y
2789CONFIG_JFFS2_FS_POSIX_ACL=y
2790CONFIG_JFFS2_FS_SECURITY=y
2791CONFIG_JFFS2_COMPRESSION_OPTIONS=y
2792CONFIG_JFFS2_ZLIB=y
2793# CONFIG_JFFS2_LZO is not set
2794CONFIG_JFFS2_RTIME=y
2795# CONFIG_JFFS2_RUBIN is not set
2796# CONFIG_JFFS2_CMODE_NONE is not set
2797CONFIG_JFFS2_CMODE_PRIORITY=y
2798# CONFIG_JFFS2_CMODE_SIZE is not set
2799# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
2800CONFIG_CRAMFS=y
2801CONFIG_VXFS_FS=m
2802# CONFIG_MINIX_FS is not set
2803# CONFIG_OMFS_FS is not set
2804CONFIG_HPFS_FS=m
2805CONFIG_QNX4FS_FS=m
2806CONFIG_ROMFS_FS=m
2807CONFIG_SYSV_FS=m
2808CONFIG_UFS_FS=m
2809CONFIG_UFS_FS_WRITE=y
2810# CONFIG_UFS_DEBUG is not set
2811CONFIG_NETWORK_FILESYSTEMS=y
2812CONFIG_NFS_FS=m
2813CONFIG_NFS_V3=y
2814CONFIG_NFS_V3_ACL=y
2815CONFIG_NFS_V4=y
2816CONFIG_NFSD=m
2817CONFIG_NFSD_V2_ACL=y
2818CONFIG_NFSD_V3=y
2819CONFIG_NFSD_V3_ACL=y
2820CONFIG_NFSD_V4=y
2821CONFIG_LOCKD=m
2822CONFIG_LOCKD_V4=y
2823CONFIG_EXPORTFS=m
2824CONFIG_NFS_ACL_SUPPORT=m
2825CONFIG_NFS_COMMON=y
2826CONFIG_SUNRPC=m
2827CONFIG_SUNRPC_GSS=m
2828CONFIG_RPCSEC_GSS_KRB5=m
2829CONFIG_RPCSEC_GSS_SPKM3=m
2830CONFIG_SMB_FS=y
2831# CONFIG_SMB_NLS_DEFAULT is not set
2832CONFIG_CIFS=m
2833CONFIG_CIFS_STATS=y
2834CONFIG_CIFS_STATS2=y
2835CONFIG_CIFS_WEAK_PW_HASH=y
2836# CONFIG_CIFS_UPCALL is not set
2837CONFIG_CIFS_XATTR=y
2838CONFIG_CIFS_POSIX=y
2839# CONFIG_CIFS_DEBUG2 is not set
2840# CONFIG_CIFS_EXPERIMENTAL is not set
2841# CONFIG_NCP_FS is not set
2842# CONFIG_CODA_FS is not set
2843# CONFIG_AFS_FS is not set
2844
2845#
2846# Partition Types
2847#
2848CONFIG_PARTITION_ADVANCED=y
2849# CONFIG_ACORN_PARTITION is not set
2850CONFIG_OSF_PARTITION=y
2851# CONFIG_AMIGA_PARTITION is not set
2852CONFIG_ATARI_PARTITION=y
2853CONFIG_MAC_PARTITION=y
2854CONFIG_MSDOS_PARTITION=y
2855CONFIG_BSD_DISKLABEL=y
2856# CONFIG_MINIX_SUBPARTITION is not set
2857CONFIG_SOLARIS_X86_PARTITION=y
2858CONFIG_UNIXWARE_DISKLABEL=y
2859CONFIG_LDM_PARTITION=y
2860# CONFIG_LDM_DEBUG is not set
2861CONFIG_SGI_PARTITION=y
2862CONFIG_ULTRIX_PARTITION=y
2863CONFIG_SUN_PARTITION=y
2864CONFIG_KARMA_PARTITION=y
2865CONFIG_EFI_PARTITION=y
2866# CONFIG_SYSV68_PARTITION is not set
2867CONFIG_NLS=y
2868CONFIG_NLS_DEFAULT="utf8"
2869CONFIG_NLS_CODEPAGE_437=y
2870CONFIG_NLS_CODEPAGE_737=m
2871CONFIG_NLS_CODEPAGE_775=m
2872CONFIG_NLS_CODEPAGE_850=m
2873CONFIG_NLS_CODEPAGE_852=m
2874CONFIG_NLS_CODEPAGE_855=m
2875CONFIG_NLS_CODEPAGE_857=m
2876CONFIG_NLS_CODEPAGE_860=m
2877CONFIG_NLS_CODEPAGE_861=m
2878CONFIG_NLS_CODEPAGE_862=m
2879CONFIG_NLS_CODEPAGE_863=m
2880CONFIG_NLS_CODEPAGE_864=m
2881CONFIG_NLS_CODEPAGE_865=m
2882CONFIG_NLS_CODEPAGE_866=m
2883CONFIG_NLS_CODEPAGE_869=m
2884CONFIG_NLS_CODEPAGE_936=m
2885CONFIG_NLS_CODEPAGE_950=m
2886CONFIG_NLS_CODEPAGE_932=m
2887CONFIG_NLS_CODEPAGE_949=m
2888CONFIG_NLS_CODEPAGE_874=m
2889CONFIG_NLS_ISO8859_8=m
2890CONFIG_NLS_CODEPAGE_1250=m
2891CONFIG_NLS_CODEPAGE_1251=m
2892CONFIG_NLS_ASCII=y
2893CONFIG_NLS_ISO8859_1=y
2894CONFIG_NLS_ISO8859_2=m
2895CONFIG_NLS_ISO8859_3=m
2896CONFIG_NLS_ISO8859_4=m
2897CONFIG_NLS_ISO8859_5=m
2898CONFIG_NLS_ISO8859_6=m
2899CONFIG_NLS_ISO8859_7=m
2900CONFIG_NLS_ISO8859_9=m
2901CONFIG_NLS_ISO8859_13=m
2902CONFIG_NLS_ISO8859_14=m
2903CONFIG_NLS_ISO8859_15=m
2904CONFIG_NLS_KOI8_R=m
2905CONFIG_NLS_KOI8_U=m
2906CONFIG_NLS_UTF8=m
2907# CONFIG_DLM is not set
2908
2909#
2910# Kernel hacking
2911#
2912CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2913# CONFIG_PRINTK_TIME is not set
2914CONFIG_ENABLE_WARN_DEPRECATED=y
2915CONFIG_ENABLE_MUST_CHECK=y
2916CONFIG_FRAME_WARN=1024
2917CONFIG_MAGIC_SYSRQ=y
2918# CONFIG_UNUSED_SYMBOLS is not set
2919CONFIG_DEBUG_FS=y
2920# CONFIG_HEADERS_CHECK is not set
2921CONFIG_DEBUG_KERNEL=y
2922# CONFIG_DEBUG_SHIRQ is not set
2923CONFIG_DETECT_SOFTLOCKUP=y
2924# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2925CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2926CONFIG_SCHED_DEBUG=y
2927# CONFIG_SCHEDSTATS is not set
2928CONFIG_TIMER_STATS=y
2929# CONFIG_DEBUG_OBJECTS is not set
2930# CONFIG_DEBUG_SLAB is not set
2931# CONFIG_DEBUG_RT_MUTEXES is not set
2932# CONFIG_RT_MUTEX_TESTER is not set
2933# CONFIG_DEBUG_SPINLOCK is not set
2934# CONFIG_DEBUG_MUTEXES is not set
2935# CONFIG_DEBUG_LOCK_ALLOC is not set
2936# CONFIG_PROVE_LOCKING is not set
2937# CONFIG_LOCK_STAT is not set
2938# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
2939# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2940# CONFIG_DEBUG_KOBJECT is not set
2941# CONFIG_DEBUG_HIGHMEM is not set
2942CONFIG_DEBUG_BUGVERBOSE=y
2943# CONFIG_DEBUG_INFO is not set
2944# CONFIG_DEBUG_VM is not set
2945# CONFIG_DEBUG_WRITECOUNT is not set
2946CONFIG_DEBUG_MEMORY_INIT=y
2947# CONFIG_DEBUG_LIST is not set
2948# CONFIG_DEBUG_SG is not set
2949# CONFIG_FRAME_POINTER is not set
2950# CONFIG_BOOT_PRINTK_DELAY is not set
2951# CONFIG_RCU_TORTURE_TEST is not set
2952# CONFIG_BACKTRACE_SELF_TEST is not set
2953# CONFIG_FAULT_INJECTION is not set
2954# CONFIG_LATENCYTOP is not set
2955# CONFIG_SYSCTL_SYSCALL_CHECK is not set
2956CONFIG_HAVE_FTRACE=y
2957CONFIG_HAVE_DYNAMIC_FTRACE=y
2958# CONFIG_FTRACE is not set
2959# CONFIG_IRQSOFF_TRACER is not set
2960# CONFIG_SYSPROF_TRACER is not set
2961# CONFIG_SCHED_TRACER is not set
2962# CONFIG_CONTEXT_SWITCH_TRACER is not set
2963# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
2964# CONFIG_SAMPLES is not set
2965CONFIG_HAVE_ARCH_KGDB=y
2966# CONFIG_KGDB is not set
2967# CONFIG_STRICT_DEVMEM is not set
2968CONFIG_X86_VERBOSE_BOOTUP=y
2969CONFIG_EARLY_PRINTK=y
2970# CONFIG_DEBUG_STACKOVERFLOW is not set
2971# CONFIG_DEBUG_STACK_USAGE is not set
2972# CONFIG_DEBUG_PAGEALLOC is not set
2973# CONFIG_DEBUG_PER_CPU_MAPS is not set
2974# CONFIG_X86_PTDUMP is not set
2975# CONFIG_DEBUG_RODATA is not set
2976# CONFIG_DEBUG_NX_TEST is not set
2977# CONFIG_4KSTACKS is not set
2978CONFIG_DOUBLEFAULT=y
2979# CONFIG_MMIOTRACE is not set
2980CONFIG_IO_DELAY_TYPE_0X80=0
2981CONFIG_IO_DELAY_TYPE_0XED=1
2982CONFIG_IO_DELAY_TYPE_UDELAY=2
2983CONFIG_IO_DELAY_TYPE_NONE=3
2984CONFIG_IO_DELAY_0X80=y
2985# CONFIG_IO_DELAY_0XED is not set
2986# CONFIG_IO_DELAY_UDELAY is not set
2987# CONFIG_IO_DELAY_NONE is not set
2988CONFIG_DEFAULT_IO_DELAY_TYPE=0
2989# CONFIG_DEBUG_BOOT_PARAMS is not set
2990# CONFIG_CPA_DEBUG is not set
2991# CONFIG_OPTIMIZE_INLINING is not set
2992
2993#
2994# Security options
2995#
2996CONFIG_KEYS=y
2997CONFIG_KEYS_DEBUG_PROC_KEYS=y
2998CONFIG_SECURITY=y
2999CONFIG_SECURITY_NETWORK=y
3000# CONFIG_SECURITY_NETWORK_XFRM is not set
3001# CONFIG_SECURITY_FILE_CAPABILITIES is not set
3002# CONFIG_SECURITY_ROOTPLUG is not set
3003CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
3004CONFIG_SECURITY_SELINUX=y
3005CONFIG_SECURITY_SELINUX_BOOTPARAM=y
3006CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
3007CONFIG_SECURITY_SELINUX_DISABLE=y
3008CONFIG_SECURITY_SELINUX_DEVELOP=y
3009CONFIG_SECURITY_SELINUX_AVC_STATS=y
3010CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
3011# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
3012# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
3013CONFIG_CRYPTO=y
3014
3015#
3016# Crypto core or helper
3017#
3018CONFIG_CRYPTO_ALGAPI=y
3019CONFIG_CRYPTO_AEAD=m
3020CONFIG_CRYPTO_BLKCIPHER=y
3021CONFIG_CRYPTO_HASH=y
3022CONFIG_CRYPTO_MANAGER=y
3023# CONFIG_CRYPTO_GF128MUL is not set
3024CONFIG_CRYPTO_NULL=m
3025# CONFIG_CRYPTO_CRYPTD is not set
3026CONFIG_CRYPTO_AUTHENC=m
3027CONFIG_CRYPTO_TEST=m
3028
3029#
3030# Authenticated Encryption with Associated Data
3031#
3032# CONFIG_CRYPTO_CCM is not set
3033# CONFIG_CRYPTO_GCM is not set
3034# CONFIG_CRYPTO_SEQIV is not set
3035
3036#
3037# Block modes
3038#
3039CONFIG_CRYPTO_CBC=y
3040# CONFIG_CRYPTO_CTR is not set
3041# CONFIG_CRYPTO_CTS is not set
3042CONFIG_CRYPTO_ECB=m
3043# CONFIG_CRYPTO_LRW is not set
3044CONFIG_CRYPTO_PCBC=m
3045# CONFIG_CRYPTO_XTS is not set
3046
3047#
3048# Hash modes
3049#
3050CONFIG_CRYPTO_HMAC=y
3051# CONFIG_CRYPTO_XCBC is not set
3052
3053#
3054# Digest
3055#
3056CONFIG_CRYPTO_CRC32C=m
3057CONFIG_CRYPTO_MD4=m
3058CONFIG_CRYPTO_MD5=y
3059CONFIG_CRYPTO_MICHAEL_MIC=m
3060# CONFIG_CRYPTO_RMD128 is not set
3061# CONFIG_CRYPTO_RMD160 is not set
3062# CONFIG_CRYPTO_RMD256 is not set
3063# CONFIG_CRYPTO_RMD320 is not set
3064CONFIG_CRYPTO_SHA1=m
3065CONFIG_CRYPTO_SHA256=m
3066CONFIG_CRYPTO_SHA512=m
3067CONFIG_CRYPTO_TGR192=m
3068CONFIG_CRYPTO_WP512=m
3069
3070#
3071# Ciphers
3072#
3073CONFIG_CRYPTO_AES=m
3074CONFIG_CRYPTO_AES_586=m
3075CONFIG_CRYPTO_ANUBIS=m
3076CONFIG_CRYPTO_ARC4=m
3077CONFIG_CRYPTO_BLOWFISH=m
3078# CONFIG_CRYPTO_CAMELLIA is not set
3079CONFIG_CRYPTO_CAST5=y
3080CONFIG_CRYPTO_CAST6=m
3081CONFIG_CRYPTO_DES=y
3082# CONFIG_CRYPTO_FCRYPT is not set
3083CONFIG_CRYPTO_KHAZAD=m
3084# CONFIG_CRYPTO_SALSA20 is not set
3085# CONFIG_CRYPTO_SALSA20_586 is not set
3086# CONFIG_CRYPTO_SEED is not set
3087CONFIG_CRYPTO_SERPENT=m
3088CONFIG_CRYPTO_TEA=m
3089CONFIG_CRYPTO_TWOFISH=m
3090CONFIG_CRYPTO_TWOFISH_COMMON=m
3091# CONFIG_CRYPTO_TWOFISH_586 is not set
3092
3093#
3094# Compression
3095#
3096CONFIG_CRYPTO_DEFLATE=m
3097# CONFIG_CRYPTO_LZO is not set
3098CONFIG_CRYPTO_HW=y
3099CONFIG_CRYPTO_DEV_PADLOCK=m
3100CONFIG_CRYPTO_DEV_PADLOCK_AES=m
3101CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
3102CONFIG_CRYPTO_DEV_GEODE=m
3103# CONFIG_CRYPTO_DEV_HIFN_795X is not set
3104CONFIG_HAVE_KVM=y
3105CONFIG_VIRTUALIZATION=y
3106# CONFIG_KVM is not set
3107# CONFIG_LGUEST is not set
3108# CONFIG_VIRTIO_PCI is not set
3109# CONFIG_VIRTIO_BALLOON is not set
3110
3111#
3112# Library routines
3113#
3114CONFIG_BITREVERSE=y
3115CONFIG_GENERIC_FIND_FIRST_BIT=y
3116CONFIG_GENERIC_FIND_NEXT_BIT=y
3117CONFIG_CRC_CCITT=m
3118CONFIG_CRC16=m
3119# CONFIG_CRC_T10DIF is not set
3120CONFIG_CRC_ITU_T=m
3121CONFIG_CRC32=y
3122# CONFIG_CRC7 is not set
3123CONFIG_LIBCRC32C=m
3124CONFIG_AUDIT_GENERIC=y
3125CONFIG_ZLIB_INFLATE=y
3126CONFIG_ZLIB_DEFLATE=m
3127CONFIG_REED_SOLOMON=m
3128CONFIG_REED_SOLOMON_DEC16=y
3129CONFIG_TEXTSEARCH=y
3130CONFIG_TEXTSEARCH_KMP=m
3131CONFIG_TEXTSEARCH_BM=m
3132CONFIG_TEXTSEARCH_FSM=m
3133CONFIG_PLIST=y
3134CONFIG_HAS_IOMEM=y
3135CONFIG_HAS_IOPORT=y
3136CONFIG_HAS_DMA=y
3137CONFIG_CHECK_SIGNATURE=y
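Several of the library options above back small, widely used helpers; CONFIG_CRC32=y, for instance, builds lib/crc32.c into the kernel. A minimal module sketch using it (a hypothetical demo module, not part of this tree):

#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init crc_demo_init(void)
{
        const char buf[] = "moblin";
        /* crc32() is the little-endian helper from lib/crc32.c. */
        u32 crc = crc32(~0U, buf, sizeof(buf) - 1);

        pr_info("crc32(\"moblin\") = 0x%08x\n", crc);
        return 0;
}

static void __exit crc_demo_exit(void)
{
}

module_init(crc_demo_init);
module_exit(crc_demo_exit);
MODULE_LICENSE("GPL");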
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch
deleted file mode 100644
index 1ef6e378fe..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch
+++ /dev/null
@@ -1,33991 +0,0 @@
1Index: linux-2.6.27/include/drm/drm.h
2===================================================================
3--- linux-2.6.27.orig/include/drm/drm.h 2009-02-05 13:29:29.000000000 +0000
4+++ linux-2.6.27/include/drm/drm.h 2009-02-05 13:29:33.000000000 +0000
5@@ -173,6 +173,7 @@
6 _DRM_AGP = 3, /**< AGP/GART */
7 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
8 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
9+ _DRM_TTM = 7
10 };
11
12 /**
13@@ -598,6 +599,400 @@
14 uint64_t size;
15 };
16
17+#define DRM_FENCE_FLAG_EMIT 0x00000001
18+#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
19+#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
20+#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
21+#define DRM_FENCE_FLAG_NO_USER 0x00000010
22+
23+/* Reserved for driver use */
24+#define DRM_FENCE_MASK_DRIVER 0xFF000000
25+
26+#define DRM_FENCE_TYPE_EXE 0x00000001
27+
28+struct drm_fence_arg {
29+ unsigned int handle;
30+ unsigned int fence_class;
31+ unsigned int type;
32+ unsigned int flags;
33+ unsigned int signaled;
34+ unsigned int error;
35+ unsigned int sequence;
36+ unsigned int pad64;
37+ uint64_t expand_pad[2]; /* Future expansion */
38+};
39+
40+/* Buffer permissions, referring to how the GPU uses the buffers.
41+ * These translate to fence types used for the buffers.
42+ * Typically a texture buffer is read, a destination buffer is write, and
43+ * a command (batch-) buffer is exe. Can be or-ed together.
44+ */
45+
46+#define DRM_BO_FLAG_READ (1ULL << 0)
47+#define DRM_BO_FLAG_WRITE (1ULL << 1)
48+#define DRM_BO_FLAG_EXE (1ULL << 2)
49+
50+/*
51+ * Status flags. Can be read to determine the actual state of a buffer.
52+ * Can also be set in the buffer mask before validation.
53+ */
54+
55+/*
56+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
57+ * available to root and must be manually removed before buffer manager shutdown
58+ * or lock.
59+ * Flags: Acknowledge
60+ */
61+#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
62+
63+/*
64+ * Mask: Require that the buffer is placed in mappable memory when validated.
65+ * If not set the buffer may or may not be in mappable memory when validated.
66+ * Flags: If set, the buffer is in mappable memory.
67+ */
68+#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
69+
70+/* Mask: The buffer should be shareable with other processes.
71+ * Flags: The buffer is shareable with other processes.
72+ */
73+#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
74+
75+/* Mask: If set, place the buffer in cache-coherent memory if available.
76+ * If clear, never place the buffer in cache-coherent memory when validated.
77+ * Flags: The buffer is currently in cache-coherent memory.
78+ */
79+#define DRM_BO_FLAG_CACHED (1ULL << 7)
80+
81+/* Mask: Make sure that every time this buffer is validated,
82+ * it ends up on the same location provided that the memory mask is the same.
83+ * The buffer will also not be evicted when claiming space for
84+ * other buffers. Basically a pinned buffer but it may be thrown out as
85+ * part of buffer manager shutdown or locking.
86+ * Flags: Acknowledge.
87+ */
88+#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
89+
90+/* Mask: Make sure the buffer is in cached memory when mapped
91+ * Flags: Acknowledge.
92+ * Buffers allocated with this flag should not be used for suballocators
93+ * This type may have issues on CPUs with over-aggressive caching
94+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
95+ */
96+#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
97+
98+
99+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
100+ * Flags: Acknowledge.
101+ */
102+#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
103+
104+/*
105+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
106+ * Flags: Acknowledge.
107+ */
108+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
109+#define DRM_BO_FLAG_TILE (1ULL << 15)
110+
111+/*
112+ * Memory type flags that can be or'ed together in the mask, but only
113+ * one appears in flags.
114+ */
115+
116+/* System memory */
117+#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
118+/* Translation table memory */
119+#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
120+/* VRAM memory */
121+#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
122+/* Up to the driver to define. */
123+#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
124+#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
125+#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
126+#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
127+#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
128+/* We can add more of these now with a 64-bit flag type */
129+
130+/* Memory flag mask */
131+#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
132+#define DRM_BO_MASK_MEMTYPE 0x00000000FF0800A0ULL
133+
134+/* Driver-private flags */
135+#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
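Taken together, the permission, status, and memory-type bits form the 64-bit mask a client passes in when validating a buffer; as noted above, several memory-type bits may be or'ed into the mask but only one comes back set in flags. A sketch of composing such a mask (values copied from the defines above; the handle and the validation call itself are out of scope here):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the patched drm.h above. */
#define DRM_BO_FLAG_READ      (1ULL << 0)
#define DRM_BO_FLAG_WRITE     (1ULL << 1)
#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
#define DRM_BO_FLAG_CACHED    (1ULL << 7)
#define DRM_BO_FLAG_MEM_TT    (1ULL << 25)
#define DRM_BO_FLAG_MEM_VRAM  (1ULL << 26)

int main(void)
{
        /* Request: a shareable, cached buffer the GPU may read and write,
         * placed in either TT or VRAM. */
        uint64_t mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                        DRM_BO_FLAG_SHAREABLE | DRM_BO_FLAG_CACHED |
                        DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM;

        /* flags returned after validation carry exactly one memory-type bit,
         * e.g. DRM_BO_FLAG_MEM_TT if the buffer ended up in the GART. */
        printf("mask = 0x%016llx\n", (unsigned long long)mask);
        return 0;
}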
136+
137+/* Don't block on validate and map */
138+#define DRM_BO_HINT_DONT_BLOCK 0x00000002
139+/* Don't place this buffer on the unfenced list.*/
140+#define DRM_BO_HINT_DONT_FENCE 0x00000004
141+#define DRM_BO_HINT_WAIT_LAZY 0x00000008
142+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
143+
144+#define DRM_BO_INIT_MAGIC 0xfe769812
145+#define DRM_BO_INIT_MAJOR 1
146+#define DRM_BO_INIT_MINOR 0
147+#define DRM_BO_INIT_PATCH 0
148+
149+
150+struct drm_bo_info_req {
151+ uint64_t mask;
152+ uint64_t flags;
153+ unsigned int handle;
154+ unsigned int hint;
155+ unsigned int fence_class;
156+ unsigned int desired_tile_stride;
157+ unsigned int tile_info;
158+ unsigned int pad64;
159+ uint64_t presumed_offset;
160+};
161+
162+struct drm_bo_create_req {
163+ uint64_t mask;
164+ uint64_t size;
165+ uint64_t buffer_start;
166+ unsigned int hint;
167+ unsigned int page_alignment;
168+};
169+
170+
171+/*
172+ * Reply flags
173+ */
174+
175+#define DRM_BO_REP_BUSY 0x00000001
176+
177+struct drm_bo_info_rep {
178+ uint64_t flags;
179+ uint64_t mask;
180+ uint64_t size;
181+ uint64_t offset;
182+ uint64_t arg_handle;
183+ uint64_t buffer_start;
184+ unsigned int handle;
185+ unsigned int fence_flags;
186+ unsigned int rep_flags;
187+ unsigned int page_alignment;
188+ unsigned int desired_tile_stride;
189+ unsigned int hw_tile_stride;
190+ unsigned int tile_info;
191+ unsigned int pad64;
192+ uint64_t expand_pad[4]; /* Future expansion */
193+};
194+
195+struct drm_bo_arg_rep {
196+ struct drm_bo_info_rep bo_info;
197+ int ret;
198+ unsigned int pad64;
199+};
200+
201+struct drm_bo_create_arg {
202+ union {
203+ struct drm_bo_create_req req;
204+ struct drm_bo_info_rep rep;
205+ } d;
206+};
207+
208+struct drm_bo_handle_arg {
209+ unsigned int handle;
210+};
211+
212+struct drm_bo_reference_info_arg {
213+ union {
214+ struct drm_bo_handle_arg req;
215+ struct drm_bo_info_rep rep;
216+ } d;
217+};
218+
219+struct drm_bo_map_wait_idle_arg {
220+ union {
221+ struct drm_bo_info_req req;
222+ struct drm_bo_info_rep rep;
223+ } d;
224+};
225+
226+struct drm_bo_op_req {
227+ enum {
228+ drm_bo_validate,
229+ drm_bo_fence,
230+ drm_bo_ref_fence,
231+ } op;
232+ unsigned int arg_handle;
233+ struct drm_bo_info_req bo_req;
234+};
235+
236+
237+struct drm_bo_op_arg {
238+ uint64_t next;
239+ union {
240+ struct drm_bo_op_req req;
241+ struct drm_bo_arg_rep rep;
242+ } d;
243+ int handled;
244+ unsigned int pad64;
245+};
246+
247+
248+#define DRM_BO_MEM_LOCAL 0
249+#define DRM_BO_MEM_TT 1
250+#define DRM_BO_MEM_VRAM 2
251+#define DRM_BO_MEM_PRIV0 3
252+#define DRM_BO_MEM_PRIV1 4
253+#define DRM_BO_MEM_PRIV2 5
254+#define DRM_BO_MEM_PRIV3 6
255+#define DRM_BO_MEM_PRIV4 7
256+
257+#define DRM_BO_MEM_TYPES 8 /* For now. */
258+
259+#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
260+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
261+
262+struct drm_bo_version_arg {
263+ uint32_t major;
264+ uint32_t minor;
265+ uint32_t patchlevel;
266+};
267+
268+struct drm_mm_type_arg {
269+ unsigned int mem_type;
270+ unsigned int lock_flags;
271+};
272+
273+struct drm_mm_init_arg {
274+ unsigned int magic;
275+ unsigned int major;
276+ unsigned int minor;
277+ unsigned int mem_type;
278+ uint64_t p_offset;
279+ uint64_t p_size;
280+};
281+
282+/*
283+ * Drm mode setting
284+ */
285+#define DRM_DISPLAY_INFO_LEN 32
286+#define DRM_OUTPUT_NAME_LEN 32
287+#define DRM_DISPLAY_MODE_LEN 32
288+#define DRM_PROP_NAME_LEN 32
289+
290+#define DRM_MODE_TYPE_BUILTIN (1<<0)
291+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
292+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
293+#define DRM_MODE_TYPE_PREFERRED (1<<3)
294+#define DRM_MODE_TYPE_DEFAULT (1<<4)
295+#define DRM_MODE_TYPE_USERDEF (1<<5)
296+#define DRM_MODE_TYPE_DRIVER (1<<6)
297+#define DRM_MODE_TYPE_USERPREF (1<<7)
298+
299+struct drm_mode_modeinfo {
300+
301+ unsigned int id;
302+
303+ unsigned int clock;
304+ unsigned short hdisplay, hsync_start, hsync_end, htotal, hskew;
305+ unsigned short vdisplay, vsync_start, vsync_end, vtotal, vscan;
306+
307+ unsigned int vrefresh; /* vertical refresh * 1000 */
308+
309+ unsigned int flags;
310+ unsigned int type;
311+ char name[DRM_DISPLAY_MODE_LEN];
312+};
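The vrefresh field stores the vertical refresh rate times 1000. A sketch of deriving it from the raster timings, assuming clock is in kHz as elsewhere in DRM:

/* pixels/sec = clock * 1000; frames/sec = pixels/sec / (htotal * vtotal);
 * vrefresh = frames/sec * 1000, hence the factor of 1,000,000. */
static unsigned int mode_vrefresh(const struct drm_mode_modeinfo *mode)
{
        unsigned long long num = (unsigned long long)mode->clock * 1000 * 1000;
        unsigned long den = (unsigned long)mode->htotal * mode->vtotal;

        return den ? (unsigned int)(num / den) : 0;
}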
313+
314+struct drm_mode_card_res {
315+
316+ int count_fbs;
317+ unsigned int __user *fb_id;
318+
319+ int count_crtcs;
320+ unsigned int __user *crtc_id;
321+
322+ int count_outputs;
323+ unsigned int __user *output_id;
324+
325+ int count_modes;
326+ struct drm_mode_modeinfo __user *modes;
327+
328+};
329+
330+struct drm_mode_crtc {
331+ unsigned int crtc_id; /**< Id */
332+ unsigned int fb_id; /**< Id of framebuffer */
333+
334+ int x, y; /**< Position on the framebuffer */
335+
336+ unsigned int mode; /**< Current mode used */
337+
338+ int count_outputs;
339+ unsigned int outputs; /**< Outputs that are connected */
340+
341+ int count_possibles;
342+ unsigned int possibles; /**< Outputs that can be connected */
343+
344+ unsigned int __user *set_outputs; /**< Outputs to be connected */
345+
346+ int gamma_size;
347+
348+};
349+
350+struct drm_mode_get_output {
351+
352+ unsigned int output; /**< Id */
353+ unsigned int crtc; /**< Id of crtc */
354+ unsigned char name[DRM_OUTPUT_NAME_LEN];
355+
356+ unsigned int connection;
357+ unsigned int mm_width, mm_height; /**< WxH in millimeters */
358+ unsigned int subpixel;
359+
360+ int count_crtcs;
361+ unsigned int crtcs; /**< possible crtc to connect to */
362+
363+ int count_clones;
364+ unsigned int clones; /**< list of clones */
365+
366+ int count_modes;
367+ unsigned int __user *modes; /**< list of modes it supports */
368+
369+ int count_props;
370+ unsigned int __user *props;
371+ unsigned int __user *prop_values;
372+};
373+
374+#define DRM_MODE_PROP_PENDING (1<<0)
375+#define DRM_MODE_PROP_RANGE (1<<1)
376+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
377+#define DRM_MODE_PROP_ENUM (1<<3) // enumerated type with text strings
378+
379+struct drm_mode_property_enum {
380+ uint32_t value;
381+ unsigned char name[DRM_PROP_NAME_LEN];
382+};
383+
384+struct drm_mode_get_property {
385+
386+ unsigned int prop_id;
387+ unsigned int flags;
388+ unsigned char name[DRM_PROP_NAME_LEN];
389+
390+ int count_values;
391+ uint32_t __user *values;
392+
393+ int count_enums;
394+ struct drm_mode_property_enum *enums;
395+};
396+
397+struct drm_mode_fb_cmd {
398+ unsigned int buffer_id;
399+ unsigned int width, height;
400+ unsigned int pitch;
401+ unsigned int bpp;
402+ unsigned int handle;
403+ unsigned int depth;
404+};
405+
406+struct drm_mode_mode_cmd {
407+ unsigned int output_id;
408+ unsigned int mode_id;
409+};
410+
411 #define DRM_IOCTL_BASE 'd'
412 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
413 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
414@@ -664,6 +1059,47 @@
415
416 #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
417
418+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
419+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
420+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
421+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
422+
423+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
424+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
425+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
426+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
427+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
428+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
429+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
430+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
431+
432+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
433+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
434+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
435+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
436+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
437+#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
438+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
439+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
440+#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
441+
442+
443+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
444+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
445+#define DRM_IOCTL_MODE_GETOUTPUT DRM_IOWR(0xA2, struct drm_mode_get_output)
446+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA3, struct drm_mode_crtc)
447+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xA4, struct drm_mode_fb_cmd)
448+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xA5, unsigned int)
449+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xA6, struct drm_mode_fb_cmd)
450+
451+#define DRM_IOCTL_MODE_ADDMODE DRM_IOWR(0xA7, struct drm_mode_modeinfo)
452+#define DRM_IOCTL_MODE_RMMODE DRM_IOWR(0xA8, unsigned int)
453+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
454+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xAA, struct drm_mode_mode_cmd)
455+
456+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAB, struct drm_mode_get_property)
457+/*@}*/
458+
459 /**
460 * Device specific ioctls should only be in their respective headers
461 * The device specific ioctl range is from 0x40 to 0x99.
462@@ -718,6 +1154,11 @@
463 typedef struct drm_agp_info drm_agp_info_t;
464 typedef struct drm_scatter_gather drm_scatter_gather_t;
465 typedef struct drm_set_version drm_set_version_t;
466+
467+typedef struct drm_fence_arg drm_fence_arg_t;
468+typedef struct drm_mm_type_arg drm_mm_type_arg_t;
469+typedef struct drm_mm_init_arg drm_mm_init_arg_t;
470+typedef enum drm_bo_type drm_bo_type_t;
471 #endif
472
473 #endif
474Index: linux-2.6.27/include/drm/drmP.h
475===================================================================
476--- linux-2.6.27.orig/include/drm/drmP.h 2009-02-05 13:29:30.000000000 +0000
477+++ linux-2.6.27/include/drm/drmP.h 2009-02-05 13:29:33.000000000 +0000
478@@ -57,6 +57,7 @@
479 #include <linux/dma-mapping.h>
480 #include <linux/mm.h>
481 #include <linux/cdev.h>
482+#include <linux/i2c.h>
483 #include <linux/mutex.h>
484 #if defined(__alpha__) || defined(__powerpc__)
485 #include <asm/pgtable.h> /* For pte_wrprotect */
486@@ -146,9 +147,24 @@
487 #define DRM_MEM_CTXLIST 21
488 #define DRM_MEM_MM 22
489 #define DRM_MEM_HASHTAB 23
490+#define DRM_MEM_OBJECTS 24
491+#define DRM_MEM_FENCE 25
492+#define DRM_MEM_TTM 26
493+#define DRM_MEM_BUFOBJ 27
494
495 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
496 #define DRM_MAP_HASH_OFFSET 0x10000000
497+#define DRM_MAP_HASH_ORDER 12
498+#define DRM_OBJECT_HASH_ORDER 12
499+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
500+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
501+/*
502+ * This should be small enough to allow the use of kmalloc for hash tables
503+ * instead of vmalloc.
504+ */
505+
506+#define DRM_FILE_HASH_ORDER 8
507+#define DRM_MM_INIT_MAX_PAGES 256
508
509 /*@}*/
510
511@@ -376,6 +392,14 @@
512 struct drm_freelist freelist;
513 };
514
515+
516+enum drm_ref_type {
517+ _DRM_REF_USE = 0,
518+ _DRM_REF_TYPE1,
519+ _DRM_NO_REF_TYPES
520+};
521+
522+
523 /** File private data */
524 struct drm_file {
525 int authenticated;
526@@ -388,12 +412,26 @@
527 struct drm_minor *minor;
528 int remove_auth_on_close;
529 unsigned long lock_count;
530+
531 /** Mapping of mm object handles to object pointers. */
532 struct idr object_idr;
533 /** Lock for synchronization of access to object_idr. */
534 spinlock_t table_lock;
535+
536+ /*
537+ * The user object hash table is global and resides in the
538+ * drm_device structure. We protect the lists and hash tables with the
539+ * device struct_mutex. A bit coarse-grained but probably the best
540+ * option.
541+ */
542+
543+ struct list_head refd_objects;
544+
545+ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
546 struct file *filp;
547 void *driver_priv;
548+
549+ struct list_head fbs;
550 };
551
552 /** Wait queue */
553@@ -523,6 +561,7 @@
554 struct drm_hash_item hash;
555 struct drm_map *map; /**< mapping */
556 uint64_t user_token;
557+ struct drm_mm_node *file_offset_node;
558 };
559
560 typedef struct drm_map drm_local_map_t;
561@@ -612,6 +651,11 @@
562 void *driver_private;
563 };
564
565+
566+#include "drm_objects.h"
567+#include "drm_edid.h"
568+#include "drm_crtc.h"
569+
570 /**
570 * DRM driver structure. This structure represents the common code for
571 * a family of cards. There will be one drm_device for each card present
573@@ -637,50 +681,8 @@
574 void (*kernel_context_switch_unlock) (struct drm_device *dev);
575 int (*dri_library_name) (struct drm_device *dev, char *buf);
576
577- /**
578- * get_vblank_counter - get raw hardware vblank counter
579- * @dev: DRM device
580- * @crtc: counter to fetch
581- *
582- * Driver callback for fetching a raw hardware vblank counter
583- * for @crtc. If a device doesn't have a hardware counter, the
584- * driver can simply return the value of drm_vblank_count and
585- * make the enable_vblank() and disable_vblank() hooks into no-ops,
586- * leaving interrupts enabled at all times.
587- *
588- * Wraparound handling and loss of events due to modesetting is dealt
589- * with in the DRM core code.
590- *
591- * RETURNS
592- * Raw vblank counter value.
593- */
594- u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
595-
596- /**
597- * enable_vblank - enable vblank interrupt events
598- * @dev: DRM device
599- * @crtc: which irq to enable
600- *
601- * Enable vblank interrupts for @crtc. If the device doesn't have
602- * a hardware vblank counter, this routine should be a no-op, since
603- * interrupts will have to stay on to keep the count accurate.
604- *
605- * RETURNS
606- * Zero on success, appropriate errno if the given @crtc's vblank
607- * interrupt cannot be enabled.
608- */
609- int (*enable_vblank) (struct drm_device *dev, int crtc);
610-
611- /**
612- * disable_vblank - disable vblank interrupt events
613- * @dev: DRM device
614- * @crtc: which irq to enable
615- *
616- * Disable vblank interrupts for @crtc. If the device doesn't have
617- * a hardware vblank counter, this routine should be a no-op, since
618- * interrupts will have to stay on to keep the count accurate.
619- */
620- void (*disable_vblank) (struct drm_device *dev, int crtc);
621+ int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
622+ int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
623
624 /**
625 * Called by \c drm_device_is_agp. Typically used to determine if a
626@@ -715,6 +717,13 @@
627 int (*proc_init)(struct drm_minor *minor);
628 void (*proc_cleanup)(struct drm_minor *minor);
629
630+ /* FB routines, if present */
631+ int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
632+ int (*fb_remove)(struct drm_device *dev, struct drm_crtc *crtc);
633+
634+ struct drm_fence_driver *fence_driver;
635+ struct drm_bo_driver *bo_driver;
636+
637 /**
638 * Driver-specific constructor for drm_gem_objects, to set up
639 * obj->driver_private.
640@@ -800,6 +809,10 @@
641 struct list_head maplist; /**< Linked list of regions */
642 int map_count; /**< Number of mappable regions */
643 struct drm_open_hash map_hash; /**< User token hash table for maps */
644+ struct drm_mm offset_manager; /**< User token manager */
645+ struct drm_open_hash object_hash; /**< User token hash table for objects */
646+ struct address_space *dev_mapping; /**< For unmap_mapping_range() */
647+ struct page *ttm_dummy_page;
648
649 /** \name Context handle management */
650 /*@{ */
651@@ -848,20 +861,13 @@
652 */
653 int vblank_disable_allowed;
654
655- wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
656- atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
657+ wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
658+ atomic_t vbl_received;
659+ atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
660 spinlock_t vbl_lock;
661- struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
662- atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
663- atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
664- u32 *last_vblank; /* protected by dev->vbl_lock, used */
665- /* for wraparound handling */
666- int *vblank_enabled; /* so we don't call enable more than
667- once per disable */
668- int *vblank_inmodeset; /* Display driver is setting mode */
669- struct timer_list vblank_disable_timer;
670-
671- u32 max_vblank_count; /**< size of vblank counter register */
672+ struct list_head vbl_sigs; /**< signal list to send on VBLANK */
673+ struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
674+ unsigned int vbl_pending;
675 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
676 void (*locked_tasklet_func)(struct drm_device *dev);
677
678@@ -892,12 +898,18 @@
679 unsigned int agp_buffer_token;
680 struct drm_minor *primary; /**< render type primary screen head */
681
682+ struct drm_fence_manager fm;
683+ struct drm_buffer_manager bm;
684+
685 /** \name Drawable information */
686 /*@{ */
687 spinlock_t drw_lock;
688 struct idr drw_idr;
689 /*@} */
690
691+ /* DRM mode setting */
692+ struct drm_mode_config mode_config;
693+
694 /** \name GEM information */
695 /*@{ */
696 spinlock_t object_name_lock;
697@@ -915,6 +927,27 @@
698
699 };
700
701+#if __OS_HAS_AGP
702+struct drm_agp_ttm_backend {
703+ struct drm_ttm_backend backend;
704+ DRM_AGP_MEM *mem;
705+ struct agp_bridge_data *bridge;
706+ int populated;
707+};
708+#endif
709+
710+typedef struct ati_pcigart_ttm_backend {
711+ struct drm_ttm_backend backend;
712+ int populated;
713+ void (*gart_flush_fn)(struct drm_device *dev);
714+ struct drm_ati_pcigart_info *gart_info;
715+ unsigned long offset;
716+ struct page **pages;
717+ int num_pages;
718+ int bound;
719+ struct drm_device *dev;
720+} ati_pcigart_ttm_backend_t;
721+
722 static __inline__ int drm_core_check_feature(struct drm_device *dev,
723 int feature)
724 {
725@@ -979,8 +1012,12 @@
726 /*@{*/
727
728 /* Driver support (drm_drv.h) */
729-extern int drm_init(struct drm_driver *driver);
730+extern int drm_init(struct drm_driver *driver,
731+ struct pci_device_id *pciidlist);
732 extern void drm_exit(struct drm_driver *driver);
733+extern void drm_cleanup_pci(struct pci_dev *pdev);
734+extern void drm_vbl_send_signals(struct drm_device *dev);
735+extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
736 extern int drm_ioctl(struct inode *inode, struct file *filp,
737 unsigned int cmd, unsigned long arg);
738 extern long drm_compat_ioctl(struct file *filp,
739Index: linux-2.6.27/include/drm/drm_pciids.h
740===================================================================
741--- linux-2.6.27.orig/include/drm/drm_pciids.h 2008-10-09 23:13:53.000000000 +0100
742+++ linux-2.6.27/include/drm/drm_pciids.h 2009-02-05 13:29:33.000000000 +0000
743@@ -413,3 +413,9 @@
744 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
745 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
746 {0, 0, 0}
747+
748+#define psb_PCI_IDS \
749+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
750+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
751+ {0, 0, 0}
752+
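A sketch of how the psb driver would consume this macro, paired with the changed drm_init(driver, pciidlist) entry point from the drmP.h hunk above. The CHIP_PSB_* values are assumptions here; they live in the driver's own headers:

#include <linux/pci.h>
#include "drm_pciids.h"

/* Hypothetical stand-ins for the driver-internal chip enum. */
enum { CHIP_PSB_8108, CHIP_PSB_8109 };

/* The macro expands to the 0x8086:0x8108 / 0x8086:0x8109 entries plus a
 * zero terminator, filling the driver_data field with the chip type. */
static struct pci_device_id psb_pciidlist[] = {
        psb_PCI_IDS
};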
753Index: linux-2.6.27/drivers/gpu/drm/Makefile
754===================================================================
755--- linux-2.6.27.orig/drivers/gpu/drm/Makefile 2009-02-05 13:29:29.000000000 +0000
756+++ linux-2.6.27/drivers/gpu/drm/Makefile 2009-02-05 13:29:33.000000000 +0000
757@@ -9,11 +9,14 @@
758 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
759 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
760 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
761- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
762+ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
763+ drm_fence.o drm_object.o drm_crtc.o drm_ttm.o drm_bo.o \
764+ drm_bo_lock.o drm_bo_move.o drm_edid.o drm_modes.o drm_regman.o
765
766 drm-$(CONFIG_COMPAT) += drm_ioc32.o
767
768 obj-$(CONFIG_DRM) += drm.o
769+obj-$(CONFIG_DRM_PSB) += psb/
770 obj-$(CONFIG_DRM_TDFX) += tdfx/
771 obj-$(CONFIG_DRM_R128) += r128/
772 obj-$(CONFIG_DRM_RADEON)+= radeon/
773@@ -24,4 +27,3 @@
774 obj-$(CONFIG_DRM_SIS) += sis/
775 obj-$(CONFIG_DRM_SAVAGE)+= savage/
776 obj-$(CONFIG_DRM_VIA) +=via/
777-
778Index: linux-2.6.27/drivers/gpu/drm/drm_agpsupport.c
779===================================================================
780--- linux-2.6.27.orig/drivers/gpu/drm/drm_agpsupport.c 2009-02-05 13:29:29.000000000 +0000
781+++ linux-2.6.27/drivers/gpu/drm/drm_agpsupport.c 2009-02-05 13:29:33.000000000 +0000
782@@ -453,47 +453,158 @@
783 return agp_unbind_memory(handle);
784 }
785
786-/**
787- * Binds a collection of pages into AGP memory at the given offset, returning
788- * the AGP memory structure containing them.
789- *
790- * No reference is held on the pages during this time -- it is up to the
791- * caller to handle that.
792+
793+
794+/*
795+ * AGP ttm backend interface.
796 */
797-DRM_AGP_MEM *
798-drm_agp_bind_pages(struct drm_device *dev,
799- struct page **pages,
800- unsigned long num_pages,
801- uint32_t gtt_offset)
802+
803+#ifndef AGP_USER_TYPES
804+#define AGP_USER_TYPES (1 << 16)
805+#define AGP_USER_MEMORY (AGP_USER_TYPES)
806+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
807+#endif
808+#define AGP_REQUIRED_MAJOR 0
809+#define AGP_REQUIRED_MINOR 102
810+
811+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
812 {
813+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
814+}
815+
816+
817+static int drm_agp_populate(struct drm_ttm_backend *backend,
818+ unsigned long num_pages, struct page **pages)
819+{
820+ struct drm_agp_ttm_backend *agp_be =
821+ container_of(backend, struct drm_agp_ttm_backend, backend);
822+ struct page **cur_page, **last_page = pages + num_pages;
823 DRM_AGP_MEM *mem;
824- int ret, i;
825
826- DRM_DEBUG("\n");
827+ DRM_DEBUG("drm_agp_populate_ttm\n");
828+ mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
829+ if (!mem)
830+ return -ENOMEM;
831+
832+ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
833+ mem->page_count = 0;
834+ for (cur_page = pages; cur_page < last_page; ++cur_page)
835+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
836+ agp_be->mem = mem;
837+ return 0;
838+}
839+
840+static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
841+ struct drm_bo_mem_reg *bo_mem)
842+{
843+ struct drm_agp_ttm_backend *agp_be =
844+ container_of(backend, struct drm_agp_ttm_backend, backend);
845+ DRM_AGP_MEM *mem = agp_be->mem;
846+ int ret;
847+ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
848+
849+ DRM_DEBUG("drm_agp_bind_ttm\n");
850+ mem->is_flushed = 1;
851+ mem->type = AGP_USER_MEMORY;
852+ /* CACHED MAPPED implies not snooped memory */
853+ if (snooped)
854+ mem->type = AGP_USER_CACHED_MEMORY;
855+
856+ ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
857+ if (ret)
858+ DRM_ERROR("AGP Bind memory failed\n");
859+
860+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
861+ DRM_BE_FLAG_BOUND_CACHED : 0,
862+ DRM_BE_FLAG_BOUND_CACHED);
863+ return ret;
864+}
865+
866+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
867+{
868+ struct drm_agp_ttm_backend *agp_be =
869+ container_of(backend, struct drm_agp_ttm_backend, backend);
870+
871+ DRM_DEBUG("drm_agp_unbind_ttm\n");
872+ if (agp_be->mem->is_bound)
873+ return drm_agp_unbind_memory(agp_be->mem);
874+ else
875+ return 0;
876+}
877+
878+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
879+{
880+ struct drm_agp_ttm_backend *agp_be =
881+ container_of(backend, struct drm_agp_ttm_backend, backend);
882+ DRM_AGP_MEM *mem = agp_be->mem;
883+
884+ DRM_DEBUG("drm_agp_clear_ttm\n");
885+ if (mem) {
886+ backend->func->unbind(backend);
887+ agp_free_memory(mem);
888+ }
889+ agp_be->mem = NULL;
890+}
891+
892+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
893+{
894+ struct drm_agp_ttm_backend *agp_be;
895+
896+ if (backend) {
897+ DRM_DEBUG("drm_agp_destroy_ttm\n");
898+ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
899+ if (agp_be && agp_be->mem)
900+ backend->func->clear(backend);
901+ }
902+}
903+
904+static struct drm_ttm_backend_func agp_ttm_backend = {
905+ .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
906+ .populate = drm_agp_populate,
907+ .clear = drm_agp_clear_ttm,
908+ .bind = drm_agp_bind_ttm,
909+ .unbind = drm_agp_unbind_ttm,
910+ .destroy = drm_agp_destroy_ttm,
911+};
912
913- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
914- AGP_USER_MEMORY);
915- if (mem == NULL) {
916- DRM_ERROR("Failed to allocate memory for %ld pages\n",
917- num_pages);
918+struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
919+{
920+
921+ struct drm_agp_ttm_backend *agp_be;
922+ struct agp_kern_info *info;
923+
924+ if (!dev->agp) {
925+ DRM_ERROR("AGP is not initialized.\n");
926 return NULL;
927 }
928+ info = &dev->agp->agp_info;
929
930- for (i = 0; i < num_pages; i++)
931- mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
932- mem->page_count = num_pages;
933-
934- mem->is_flushed = true;
935- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
936- if (ret != 0) {
937- DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
938- agp_free_memory(mem);
939+ if (info->version.major != AGP_REQUIRED_MAJOR ||
940+ info->version.minor < AGP_REQUIRED_MINOR) {
941+ DRM_ERROR("Wrong agpgart version %d.%d\n"
942+ "\tYou need at least version %d.%d.\n",
943+ info->version.major,
944+ info->version.minor,
945+ AGP_REQUIRED_MAJOR,
946+ AGP_REQUIRED_MINOR);
947 return NULL;
948 }
949
950- return mem;
951+
952+ agp_be = drm_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
953+ if (!agp_be)
954+ return NULL;
955+
956+ agp_be->mem = NULL;
957+
958+ agp_be->bridge = dev->agp->bridge;
959+ agp_be->populated = 0;
960+ agp_be->backend.func = &agp_ttm_backend;
961+ agp_be->backend.dev = dev;
962+
963+ return &agp_be->backend;
964 }
965-EXPORT_SYMBOL(drm_agp_bind_pages);
966+EXPORT_SYMBOL(drm_agp_init_ttm);
967
968 void drm_agp_chipset_flush(struct drm_device *dev)
969 {
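The new backend replaces drm_agp_bind_pages() with the function table above. A schematic of the lifecycle from driver code, assuming a drm_bo_mem_reg already filled in by the buffer manager (error handling trimmed):

/* populate() wraps the pages in AGP memory, bind() inserts them into the
 * GART at bo_mem->mm_node->start, destroy() clears and unbinds via the
 * same table. */
static int bind_pages_via_agp_ttm(struct drm_device *dev,
                                  struct page **pages,
                                  unsigned long num_pages,
                                  struct drm_bo_mem_reg *bo_mem)
{
        struct drm_ttm_backend *be = drm_agp_init_ttm(dev);
        int ret;

        if (!be)
                return -EINVAL;

        ret = be->func->populate(be, num_pages, pages);
        if (!ret)
                ret = be->func->bind(be, bo_mem);
        if (ret)
                be->func->destroy(be);
        return ret;
}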
970Index: linux-2.6.27/drivers/gpu/drm/drm_bo.c
971===================================================================
972--- /dev/null 1970-01-01 00:00:00.000000000 +0000
973+++ linux-2.6.27/drivers/gpu/drm/drm_bo.c 2009-02-05 13:29:33.000000000 +0000
974@@ -0,0 +1,2660 @@
975+/**************************************************************************
976+ *
977+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
978+ * All Rights Reserved.
979+ *
980+ * Permission is hereby granted, free of charge, to any person obtaining a
981+ * copy of this software and associated documentation files (the
982+ * "Software"), to deal in the Software without restriction, including
983+ * without limitation the rights to use, copy, modify, merge, publish,
984+ * distribute, sub license, and/or sell copies of the Software, and to
985+ * permit persons to whom the Software is furnished to do so, subject to
986+ * the following conditions:
987+ *
988+ * The above copyright notice and this permission notice (including the
989+ * next paragraph) shall be included in all copies or substantial portions
990+ * of the Software.
991+ *
992+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
993+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
994+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
995+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
996+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
997+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
998+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
999+ *
1000+ **************************************************************************/
1001+/*
1002+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
1003+ */
1004+
1005+#include "drmP.h"
1006+
1007+/*
1008+ * Locking may look a bit complicated but isn't really:
1009+ *
1010+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
1011+ * when there is a chance that it can be zero before or after the operation.
1012+ *
1013+ * dev->struct_mutex also protects all lists and list heads,
1014+ * hash tables and hash heads.
1015+ *
1016+ * bo->mutex protects the buffer object itself excluding the usage field.
1017+ * bo->mutex also protects the buffer list heads, so to manipulate those,
1018+ * we need both the bo->mutex and the dev->struct_mutex.
1019+ *
1020+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
1021+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
1022+ * the list traversal will, in general, need to be restarted.
1023+ *
1024+ */
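In miniature, the prescribed order (a sketch; real callers interleave list manipulation and the restart logic the comment describes):

static void example_lock_order(struct drm_buffer_object *bo)
{
        /* bo->mutex first, then dev->struct_mutex; release in reverse.
         * Dropping struct_mutex to take another bo's mutex means any
         * list walk in progress must be restarted. */
        mutex_lock(&bo->mutex);
        mutex_lock(&bo->dev->struct_mutex);

        /* ... manipulate bo's list heads here ... */

        mutex_unlock(&bo->dev->struct_mutex);
        mutex_unlock(&bo->mutex);
}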
1025+
1026+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
1027+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
1028+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
1029+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
1030+
1031+static inline uint64_t drm_bo_type_flags(unsigned type)
1032+{
1033+ return (1ULL << (24 + type));
1034+}
1035+
1036+/*
1037+ * bo locked. dev->struct_mutex locked.
1038+ */
1039+
1040+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
1041+{
1042+ struct drm_mem_type_manager *man;
1043+
1044+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
1045+ DRM_ASSERT_LOCKED(&bo->mutex);
1046+
1047+ man = &bo->dev->bm.man[bo->pinned_mem_type];
1048+ list_add_tail(&bo->pinned_lru, &man->pinned);
1049+}
1050+
1051+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
1052+{
1053+ struct drm_mem_type_manager *man;
1054+
1055+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
1056+
1057+ if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
1058+ || bo->mem.mem_type != bo->pinned_mem_type) {
1059+ man = &bo->dev->bm.man[bo->mem.mem_type];
1060+ list_add_tail(&bo->lru, &man->lru);
1061+ } else {
1062+ INIT_LIST_HEAD(&bo->lru);
1063+ }
1064+}
1065+
1066+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
1067+{
1068+#ifdef DRM_ODD_MM_COMPAT
1069+ int ret;
1070+
1071+ if (!bo->map_list.map)
1072+ return 0;
1073+
1074+ ret = drm_bo_lock_kmm(bo);
1075+ if (ret)
1076+ return ret;
1077+ drm_bo_unmap_virtual(bo);
1078+ if (old_is_pci)
1079+ drm_bo_finish_unmap(bo);
1080+#else
1081+ if (!bo->map_list.map)
1082+ return 0;
1083+
1084+ drm_bo_unmap_virtual(bo);
1085+#endif
1086+ return 0;
1087+}
1088+
1089+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
1090+{
1091+#ifdef DRM_ODD_MM_COMPAT
1092+ int ret;
1093+
1094+ if (!bo->map_list.map)
1095+ return;
1096+
1097+ ret = drm_bo_remap_bound(bo);
1098+ if (ret) {
1099+ DRM_ERROR("Failed to remap a bound buffer object.\n"
1100+ "\tThis might cause a sigbus later.\n");
1101+ }
1102+ drm_bo_unlock_kmm(bo);
1103+#endif
1104+}
1105+
1106+/*
1107+ * Call bo->mutex locked.
1108+ */
1109+
1110+static int drm_bo_add_ttm(struct drm_buffer_object *bo)
1111+{
1112+ struct drm_device *dev = bo->dev;
1113+ int ret = 0;
1114+
1115+ DRM_ASSERT_LOCKED(&bo->mutex);
1116+ bo->ttm = NULL;
1117+
1118+ switch (bo->type) {
1119+ case drm_bo_type_dc:
1120+ case drm_bo_type_kernel:
1121+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
1122+ if (!bo->ttm)
1123+ ret = -ENOMEM;
1124+ break;
1125+ case drm_bo_type_user:
1126+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
1127+ if (!bo->ttm)
1128+ ret = -ENOMEM;
1129+
1130+ ret = drm_ttm_set_user(bo->ttm, current,
1131+ bo->mem.mask & DRM_BO_FLAG_WRITE,
1132+ bo->buffer_start,
1133+ bo->num_pages,
1134+ dev->bm.dummy_read_page);
1135+ if (ret)
1136+ return ret;
1137+
1138+ break;
1139+ default:
1140+ DRM_ERROR("Illegal buffer object type\n");
1141+ ret = -EINVAL;
1142+ break;
1143+ }
1144+
1145+ return ret;
1146+}
1147+
1148+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
1149+ struct drm_bo_mem_reg *mem,
1150+ int evict, int no_wait)
1151+{
1152+ struct drm_device *dev = bo->dev;
1153+ struct drm_buffer_manager *bm = &dev->bm;
1154+ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
1155+ int new_is_pci = drm_mem_reg_is_pci(dev, mem);
1156+ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
1157+ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
1158+ int ret = 0;
1159+
1160+ if (old_is_pci || new_is_pci ||
1161+ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
1162+ ret = drm_bo_vm_pre_move(bo, old_is_pci);
1163+ if (ret)
1164+ return ret;
1165+
1166+ /*
1167+ * Create and bind a ttm if required.
1168+ */
1169+
1170+ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
1171+ ret = drm_bo_add_ttm(bo);
1172+ if (ret)
1173+ goto out_err;
1174+
1175+ if (mem->mem_type != DRM_BO_MEM_LOCAL) {
1176+ ret = drm_bind_ttm(bo->ttm, mem);
1177+ if (ret)
1178+ goto out_err;
1179+ }
1180+
1181+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
1182+
1183+ struct drm_bo_mem_reg *old_mem = &bo->mem;
1184+ uint64_t save_flags = old_mem->flags;
1185+ uint64_t save_mask = old_mem->mask;
1186+
1187+ *old_mem = *mem;
1188+ mem->mm_node = NULL;
1189+ old_mem->mask = save_mask;
1190+ DRM_FLAG_MASKED(save_flags, mem->flags,
1191+ DRM_BO_MASK_MEMTYPE);
1192+ goto moved;
1193+ }
1194+
1195+ }
1196+
1197+ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
1198+ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
1199+
1200+ ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
1201+
1202+ } else if (dev->driver->bo_driver->move) {
1203+ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
1204+
1205+ } else {
1206+
1207+ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
1208+
1209+ }
1210+
1211+ if (ret)
1212+ goto out_err;
1213+
1214+moved:
1215+ if (old_is_pci || new_is_pci)
1216+ drm_bo_vm_post_move(bo);
1217+
1218+ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
1219+ ret =
1220+ dev->driver->bo_driver->invalidate_caches(dev,
1221+ bo->mem.flags);
1222+ if (ret)
1223+ DRM_ERROR("Can not flush read caches\n");
1224+ }
1225+
1226+ DRM_FLAG_MASKED(bo->priv_flags,
1227+ (evict) ? _DRM_BO_FLAG_EVICTED : 0,
1228+ _DRM_BO_FLAG_EVICTED);
1229+
1230+ if (bo->mem.mm_node)
1231+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
1232+ bm->man[bo->mem.mem_type].gpu_offset;
1233+
1234+
1235+ return 0;
1236+
1237+out_err:
1238+ if (old_is_pci || new_is_pci)
1239+ drm_bo_vm_post_move(bo);
1240+
1241+ new_man = &bm->man[bo->mem.mem_type];
1242+ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
1243+ drm_ttm_unbind(bo->ttm);
1244+ drm_destroy_ttm(bo->ttm);
1245+ bo->ttm = NULL;
1246+ }
1247+
1248+ return ret;
1249+}
1250+
1251+/*
1252+ * Call bo->mutex locked.
1253+ * Wait until the buffer is idle.
1254+ */
1255+
1256+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
1257+ int no_wait)
1258+{
1259+ int ret;
1260+
1261+ DRM_ASSERT_LOCKED(&bo->mutex);
1262+
1263+ if (bo->fence) {
1264+ if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
1265+ drm_fence_usage_deref_unlocked(&bo->fence);
1266+ return 0;
1267+ }
1268+ if (no_wait)
1269+ return -EBUSY;
1270+
1271+ ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
1272+ bo->fence_type);
1273+ if (ret)
1274+ return ret;
1275+
1276+ drm_fence_usage_deref_unlocked(&bo->fence);
1277+ }
1278+ return 0;
1279+}
1280+EXPORT_SYMBOL(drm_bo_wait);
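An illustrative caller, holding bo->mutex as the function requires:

static int example_wait_idle(struct drm_buffer_object *bo, int poll)
{
        int ret;

        mutex_lock(&bo->mutex);
        /* lazy=1 sleeps between fence checks; no_wait=1 returns -EBUSY
         * immediately instead of blocking. */
        ret = drm_bo_wait(bo, 1, 0, poll ? 1 : 0);
        mutex_unlock(&bo->mutex);
        return ret;
}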
1281+
1282+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
1283+{
1284+ struct drm_device *dev = bo->dev;
1285+ struct drm_buffer_manager *bm = &dev->bm;
1286+
1287+ if (bo->fence) {
1288+ if (bm->nice_mode) {
1289+ unsigned long _end = jiffies + 3 * DRM_HZ;
1290+ int ret;
1291+ do {
1292+ ret = drm_bo_wait(bo, 0, 1, 0);
1293+ if (ret && allow_errors)
1294+ return ret;
1295+
1296+ } while (ret && !time_after_eq(jiffies, _end));
1297+
1298+ if (bo->fence) {
1299+ bm->nice_mode = 0;
1300+ DRM_ERROR("Detected GPU lockup or "
1301+ "fence driver was taken down. "
1302+ "Evicting buffer.\n");
1303+ }
1304+ }
1305+ if (bo->fence)
1306+ drm_fence_usage_deref_unlocked(&bo->fence);
1307+ }
1308+ return 0;
1309+}
1310+
1311+/*
1312+ * Call dev->struct_mutex locked.
1313+ * Attempts to remove all private references to a buffer by expiring its
1314+ * fence object and removing it from LRU lists and memory managers.
1315+ */
1316+
1317+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
1318+{
1319+ struct drm_device *dev = bo->dev;
1320+ struct drm_buffer_manager *bm = &dev->bm;
1321+
1322+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
1323+
1324+ atomic_inc(&bo->usage);
1325+ mutex_unlock(&dev->struct_mutex);
1326+ mutex_lock(&bo->mutex);
1327+
1328+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1329+
1330+ if (bo->fence && drm_fence_object_signaled(bo->fence,
1331+ bo->fence_type))
1332+ drm_fence_usage_deref_unlocked(&bo->fence);
1333+
1334+ if (bo->fence && remove_all)
1335+ (void)drm_bo_expire_fence(bo, 0);
1336+
1337+ mutex_lock(&dev->struct_mutex);
1338+
1339+ if (!atomic_dec_and_test(&bo->usage))
1340+ goto out;
1341+
1342+ if (!bo->fence) {
1343+ list_del_init(&bo->lru);
1344+ if (bo->mem.mm_node) {
1345+ drm_mm_put_block(bo->mem.mm_node);
1346+ if (bo->pinned_node == bo->mem.mm_node)
1347+ bo->pinned_node = NULL;
1348+ bo->mem.mm_node = NULL;
1349+ }
1350+ list_del_init(&bo->pinned_lru);
1351+ if (bo->pinned_node) {
1352+ drm_mm_put_block(bo->pinned_node);
1353+ bo->pinned_node = NULL;
1354+ }
1355+ list_del_init(&bo->ddestroy);
1356+ mutex_unlock(&bo->mutex);
1357+ drm_bo_destroy_locked(bo);
1358+ return;
1359+ }
1360+
1361+ if (list_empty(&bo->ddestroy)) {
1362+ drm_fence_object_flush(bo->fence, bo->fence_type);
1363+ list_add_tail(&bo->ddestroy, &bm->ddestroy);
1364+ schedule_delayed_work(&bm->wq,
1365+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
1366+ }
1367+
1368+out:
1369+ mutex_unlock(&bo->mutex);
1370+ return;
1371+}
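+
+/*
+ * Delayed-destroy summary: if the buffer still carries an unsignaled
+ * fence, drm_bo_cleanup_refs() parks it on bm->ddestroy and kicks the
+ * delayed workqueue, which retries roughly every DRM_HZ / 100 jiffies
+ * (see drm_bo_delayed_workqueue() below) until the fence signals and
+ * the object can be destroyed for real.
+ */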
1372+
1373+static void drm_bo_unreserve_size(unsigned long size)
1374+{
1375+ /* drm_free_memctl(size); */
1376+}
1377+
1378+/*
1379+ * Verify that refcount is 0 and that there are no internal references
1380+ * to the buffer object. Then destroy it.
1381+ */
1382+
1383+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
1384+{
1385+ struct drm_device *dev = bo->dev;
1386+ struct drm_buffer_manager *bm = &dev->bm;
1387+ unsigned long reserved_size;
1388+
1389+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
1390+
1391+ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
1392+ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
1393+ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
1394+ if (bo->fence != NULL) {
1395+ DRM_ERROR("Fence was non-zero.\n");
1396+ drm_bo_cleanup_refs(bo, 0);
1397+ return;
1398+ }
1399+
1400+#ifdef DRM_ODD_MM_COMPAT
1401+ BUG_ON(!list_empty(&bo->vma_list));
1402+ BUG_ON(!list_empty(&bo->p_mm_list));
1403+#endif
1404+
1405+ if (bo->ttm) {
1406+ drm_ttm_unbind(bo->ttm);
1407+ drm_destroy_ttm(bo->ttm);
1408+ bo->ttm = NULL;
1409+ }
1410+
1411+ atomic_dec(&bm->count);
1412+
1413+ reserved_size = bo->reserved_size;
1414+
1415+ drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
1416+ drm_bo_unreserve_size(reserved_size);
1417+
1418+ return;
1419+ }
1420+
1421+ /*
1422+ * Some stuff is still trying to reference the buffer object.
1423+ * Get rid of those references.
1424+ */
1425+
1426+ drm_bo_cleanup_refs(bo, 0);
1427+
1428+ return;
1429+}
1430+
1431+/*
1432+ * Call dev->struct_mutex locked.
1433+ */
1434+
1435+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
1436+{
1437+ struct drm_buffer_manager *bm = &dev->bm;
1438+
1439+ struct drm_buffer_object *entry, *nentry;
1440+ struct list_head *list, *next;
1441+
1442+ list_for_each_safe(list, next, &bm->ddestroy) {
1443+ entry = list_entry(list, struct drm_buffer_object, ddestroy);
1444+
1445+ nentry = NULL;
1446+ if (next != &bm->ddestroy) {
1447+ nentry = list_entry(next, struct drm_buffer_object,
1448+ ddestroy);
1449+ atomic_inc(&nentry->usage);
1450+ }
1451+
1452+ drm_bo_cleanup_refs(entry, remove_all);
1453+
1454+ if (nentry)
1455+ atomic_dec(&nentry->usage);
1456+ }
1457+}
1458+
1459+static void drm_bo_delayed_workqueue(struct work_struct *work)
1460+{
1461+ struct drm_buffer_manager *bm =
1462+ container_of(work, struct drm_buffer_manager, wq.work);
1463+ struct drm_device *dev = container_of(bm, struct drm_device, bm);
1464+
1465+ DRM_DEBUG("Delayed delete worker\n");
1466+
1467+ mutex_lock(&dev->struct_mutex);
1468+ if (!bm->initialized) {
1469+ mutex_unlock(&dev->struct_mutex);
1470+ return;
1471+ }
1472+ drm_bo_delayed_delete(dev, 0);
1473+ if (bm->initialized && !list_empty(&bm->ddestroy)) {
1474+ schedule_delayed_work(&bm->wq,
1475+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
1476+ }
1477+ mutex_unlock(&dev->struct_mutex);
1478+}
1479+
1480+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
1481+{
1482+ struct drm_buffer_object *tmp_bo = *bo;
1483+ *bo = NULL;
1484+
1485+ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
1486+
1487+ if (atomic_dec_and_test(&tmp_bo->usage))
1488+ drm_bo_destroy_locked(tmp_bo);
1489+}
1490+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
1491+
1492+static void drm_bo_base_deref_locked(struct drm_file *file_priv,
1493+ struct drm_user_object *uo)
1494+{
1495+ struct drm_buffer_object *bo =
1496+ drm_user_object_entry(uo, struct drm_buffer_object, base);
1497+
1498+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
1499+
1500+ drm_bo_takedown_vm_locked(bo);
1501+ drm_bo_usage_deref_locked(&bo);
1502+}
1503+
1504+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
1505+{
1506+ struct drm_buffer_object *tmp_bo = *bo;
1507+ struct drm_device *dev = tmp_bo->dev;
1508+
1509+ *bo = NULL;
1510+ if (atomic_dec_and_test(&tmp_bo->usage)) {
1511+ mutex_lock(&dev->struct_mutex);
1512+ if (atomic_read(&tmp_bo->usage) == 0)
1513+ drm_bo_destroy_locked(tmp_bo);
1514+ mutex_unlock(&dev->struct_mutex);
1515+ }
1516+}
1517+EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
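+
+/*
+ * Note the double-check in drm_bo_usage_deref_unlocked(): the usage
+ * count is re-read under dev->struct_mutex before destruction, since
+ * another thread may have taken a reference between the atomic
+ * decrement and the lock acquisition.
+ */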
1518+
1519+void drm_putback_buffer_objects(struct drm_device *dev)
1520+{
1521+ struct drm_buffer_manager *bm = &dev->bm;
1522+ struct list_head *list = &bm->unfenced;
1523+ struct drm_buffer_object *entry, *next;
1524+
1525+ mutex_lock(&dev->struct_mutex);
1526+ list_for_each_entry_safe(entry, next, list, lru) {
1527+ atomic_inc(&entry->usage);
1528+ mutex_unlock(&dev->struct_mutex);
1529+
1530+ mutex_lock(&entry->mutex);
1531+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
1532+ mutex_lock(&dev->struct_mutex);
1533+
1534+ list_del_init(&entry->lru);
1535+ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1536+ wake_up_all(&entry->event_queue);
1537+
1538+ /*
1539+ * FIXME: Might want to put back on head of list
1540+ * instead of tail here.
1541+ */
1542+
1543+ drm_bo_add_to_lru(entry);
1544+ mutex_unlock(&entry->mutex);
1545+ drm_bo_usage_deref_locked(&entry);
1546+ }
1547+ mutex_unlock(&dev->struct_mutex);
1548+}
1549+EXPORT_SYMBOL(drm_putback_buffer_objects);
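+
+/*
+ * drm_putback_buffer_objects() is the failure-path counterpart of
+ * drm_fence_buffer_objects() below: it clears _DRM_BO_FLAG_UNFENCED
+ * on everything left on bm->unfenced and returns the buffers to the
+ * LRU, presumably after a command submission has been aborted.
+ */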
1550+
1551+
1552+/*
1553+ * Note. The caller has to register (if applicable)
1554+ * and deregister fence object usage.
1555+ */
1556+
1557+int drm_fence_buffer_objects(struct drm_device *dev,
1558+ struct list_head *list,
1559+ uint32_t fence_flags,
1560+ struct drm_fence_object *fence,
1561+ struct drm_fence_object **used_fence)
1562+{
1563+ struct drm_buffer_manager *bm = &dev->bm;
1564+ struct drm_buffer_object *entry;
1565+ uint32_t fence_type = 0;
1566+ uint32_t fence_class = ~0;
1567+ int count = 0;
1568+ int ret = 0;
1569+ struct list_head *l;
1570+
1571+ mutex_lock(&dev->struct_mutex);
1572+
1573+ if (!list)
1574+ list = &bm->unfenced;
1575+
1576+ if (fence)
1577+ fence_class = fence->fence_class;
1578+
1579+ list_for_each_entry(entry, list, lru) {
1580+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
1581+ fence_type |= entry->new_fence_type;
1582+ if (fence_class == ~0)
1583+ fence_class = entry->new_fence_class;
1584+ else if (entry->new_fence_class != fence_class) {
1585+ DRM_ERROR("Unmatching fence classes on unfenced list: "
1586+ "%d and %d.\n",
1587+ fence_class,
1588+ entry->new_fence_class);
1589+ ret = -EINVAL;
1590+ goto out;
1591+ }
1592+ count++;
1593+ }
1594+
1595+ if (!count) {
1596+ ret = -EINVAL;
1597+ goto out;
1598+ }
1599+
1600+ if (fence) {
1601+ if ((fence_type & fence->type) != fence_type ||
1602+ (fence->fence_class != fence_class)) {
1603+ DRM_ERROR("Given fence doesn't match buffers "
1604+ "on unfenced list.\n");
1605+ ret = -EINVAL;
1606+ goto out;
1607+ }
1608+ } else {
1609+ mutex_unlock(&dev->struct_mutex);
1610+ ret = drm_fence_object_create(dev, fence_class, fence_type,
1611+ fence_flags | DRM_FENCE_FLAG_EMIT,
1612+ &fence);
1613+ mutex_lock(&dev->struct_mutex);
1614+ if (ret)
1615+ goto out;
1616+ }
1617+
1618+ count = 0;
1619+ l = list->next;
1620+ while (l != list) {
1621+ prefetch(l->next);
1622+ entry = list_entry(l, struct drm_buffer_object, lru);
1623+ atomic_inc(&entry->usage);
1624+ mutex_unlock(&dev->struct_mutex);
1625+ mutex_lock(&entry->mutex);
1626+ mutex_lock(&dev->struct_mutex);
1627+ list_del_init(l);
1628+ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1629+ count++;
1630+ if (entry->fence)
1631+ drm_fence_usage_deref_locked(&entry->fence);
1632+ entry->fence = drm_fence_reference_locked(fence);
1633+ entry->fence_class = entry->new_fence_class;
1634+ entry->fence_type = entry->new_fence_type;
1635+ DRM_FLAG_MASKED(entry->priv_flags, 0,
1636+ _DRM_BO_FLAG_UNFENCED);
1637+ wake_up_all(&entry->event_queue);
1638+ drm_bo_add_to_lru(entry);
1639+ }
1640+ mutex_unlock(&entry->mutex);
1641+ drm_bo_usage_deref_locked(&entry);
1642+ l = list->next;
1643+ }
1644+ DRM_DEBUG("Fenced %d buffers\n", count);
1645+out:
1646+ mutex_unlock(&dev->struct_mutex);
1647+ *used_fence = fence;
1648+ return ret;
1649+}
1650+EXPORT_SYMBOL(drm_fence_buffer_objects);
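+
+/*
+ * drm_fence_buffer_objects() works in two passes: the first walk over
+ * the unfenced list only validates that all buffers agree on a fence
+ * class and accumulates the combined fence type; the second walk
+ * attaches the fence (creating one with DRM_FENCE_FLAG_EMIT if the
+ * caller passed none), clears _DRM_BO_FLAG_UNFENCED and moves each
+ * buffer back onto the LRU.
+ */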
1651+
1652+/*
1653+ * bo->mutex locked
1654+ */
1655+
1656+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
1657+ int no_wait)
1658+{
1659+ int ret = 0;
1660+ struct drm_device *dev = bo->dev;
1661+ struct drm_bo_mem_reg evict_mem;
1662+
1663+ /*
1664+ * Someone might have modified the buffer before we took the
1665+ * buffer mutex.
1666+ */
1667+
1668+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
1669+ goto out;
1670+ if (bo->mem.mem_type != mem_type)
1671+ goto out;
1672+
1673+ ret = drm_bo_wait(bo, 0, 0, no_wait);
1674+
1675+ if (ret && ret != -EAGAIN) {
1676+ DRM_ERROR("Failed to expire fence before "
1677+ "buffer eviction.\n");
1678+ goto out;
1679+ }
1680+
1681+ evict_mem = bo->mem;
1682+ evict_mem.mm_node = NULL;
1683+
1685+ evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
1686+ ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
1687+
1688+ if (ret) {
1689+ if (ret != -EAGAIN)
1690+ DRM_ERROR("Failed to find memory space for "
1691+ "buffer 0x%p eviction.\n", bo);
1692+ goto out;
1693+ }
1694+
1695+ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
1696+
1697+ if (ret) {
1698+ if (ret != -EAGAIN)
1699+ DRM_ERROR("Buffer eviction failed\n");
1700+ goto out;
1701+ }
1702+
1703+ mutex_lock(&dev->struct_mutex);
1704+ if (evict_mem.mm_node) {
1705+ if (evict_mem.mm_node != bo->pinned_node)
1706+ drm_mm_put_block(evict_mem.mm_node);
1707+ evict_mem.mm_node = NULL;
1708+ }
1709+ list_del(&bo->lru);
1710+ drm_bo_add_to_lru(bo);
1711+ mutex_unlock(&dev->struct_mutex);
1712+
1713+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
1714+ _DRM_BO_FLAG_EVICTED);
1715+
1716+out:
1717+ return ret;
1718+}
1719+
1720+/**
1721+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
1722+ * space, or we've evicted everything and there isn't enough space.
1723+ */
1724+static int drm_bo_mem_force_space(struct drm_device *dev,
1725+ struct drm_bo_mem_reg *mem,
1726+ uint32_t mem_type, int no_wait)
1727+{
1728+ struct drm_mm_node *node;
1729+ struct drm_buffer_manager *bm = &dev->bm;
1730+ struct drm_buffer_object *entry;
1731+ struct drm_mem_type_manager *man = &bm->man[mem_type];
1732+ struct list_head *lru;
1733+ unsigned long num_pages = mem->num_pages;
1734+ int ret;
1735+
1736+ mutex_lock(&dev->struct_mutex);
1737+ do {
1738+ node = drm_mm_search_free(&man->manager, num_pages,
1739+ mem->page_alignment, 1);
1740+ if (node)
1741+ break;
1742+
1743+ lru = &man->lru;
1744+ if (lru->next == lru)
1745+ break;
1746+
1747+ entry = list_entry(lru->next, struct drm_buffer_object, lru);
1748+ atomic_inc(&entry->usage);
1749+ mutex_unlock(&dev->struct_mutex);
1750+ mutex_lock(&entry->mutex);
1751+ BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
1752+
1753+ ret = drm_bo_evict(entry, mem_type, no_wait);
1754+ mutex_unlock(&entry->mutex);
1755+ drm_bo_usage_deref_unlocked(&entry);
1756+ if (ret)
1757+ return ret;
1758+ mutex_lock(&dev->struct_mutex);
1759+ } while (1);
1760+
1761+ if (!node) {
1762+ mutex_unlock(&dev->struct_mutex);
1763+ return -ENOMEM;
1764+ }
1765+
1766+ node = drm_mm_get_block(node, num_pages, mem->page_alignment);
1767+ if (!node) {
1768+ mutex_unlock(&dev->struct_mutex);
1769+ return -ENOMEM;
1770+ }
1771+
1772+ mutex_unlock(&dev->struct_mutex);
1773+ mem->mm_node = node;
1774+ mem->mem_type = mem_type;
1775+ return 0;
1776+}
1777+
1778+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
1779+ int disallow_fixed,
1780+ uint32_t mem_type,
1781+ uint64_t mask, uint32_t *res_mask)
1782+{
1783+ uint64_t cur_flags = drm_bo_type_flags(mem_type);
1784+ uint64_t flag_diff;
1785+
1786+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
1787+ return 0;
1788+ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
1789+ cur_flags |= DRM_BO_FLAG_CACHED;
1790+ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
1791+ cur_flags |= DRM_BO_FLAG_MAPPABLE;
1792+ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
1793+ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
1794+
1795+ if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
1796+ return 0;
1797+
1798+ if (mem_type == DRM_BO_MEM_LOCAL) {
1799+ *res_mask = cur_flags;
1800+ return 1;
1801+ }
1802+
1803+ flag_diff = (mask ^ cur_flags);
1804+ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
1805+ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
1806+
1807+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1808+ (!(mask & DRM_BO_FLAG_CACHED) ||
1809+ (mask & DRM_BO_FLAG_FORCE_CACHING)))
1810+ return 0;
1811+
1812+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1813+ ((mask & DRM_BO_FLAG_MAPPABLE) ||
1814+ (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1815+ return 0;
1816+
1817+ *res_mask = cur_flags;
1818+ return 1;
1819+}
1820+
1821+/**
1822+ * Creates space for memory region @mem according to its type.
1823+ *
1824+ * This function first searches for free space in compatible memory types in
1825+ * the priority order defined by the driver. If free space isn't found, then
1826+ * drm_bo_mem_force_space is attempted in priority order to evict and find
1827+ * space.
1828+ */
1829+int drm_bo_mem_space(struct drm_buffer_object *bo,
1830+ struct drm_bo_mem_reg *mem, int no_wait)
1831+{
1832+ struct drm_device *dev = bo->dev;
1833+ struct drm_buffer_manager *bm = &dev->bm;
1834+ struct drm_mem_type_manager *man;
1835+
1836+ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1837+ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1838+ uint32_t i;
1839+ uint32_t mem_type = DRM_BO_MEM_LOCAL;
1840+ uint32_t cur_flags;
1841+ int type_found = 0;
1842+ int type_ok = 0;
1843+ int has_eagain = 0;
1844+ struct drm_mm_node *node = NULL;
1845+ int ret;
1846+
1847+ mem->mm_node = NULL;
1848+ for (i = 0; i < num_prios; ++i) {
1849+ mem_type = prios[i];
1850+ man = &bm->man[mem_type];
1851+
1852+ type_ok = drm_bo_mt_compatible(man,
1853+ bo->type == drm_bo_type_user,
1854+ mem_type, mem->mask,
1855+ &cur_flags);
1856+
1857+ if (!type_ok)
1858+ continue;
1859+
1860+ if (mem_type == DRM_BO_MEM_LOCAL)
1861+ break;
1862+
1863+ if ((mem_type == bo->pinned_mem_type) &&
1864+ (bo->pinned_node != NULL)) {
1865+ node = bo->pinned_node;
1866+ break;
1867+ }
1868+
1869+ mutex_lock(&dev->struct_mutex);
1870+ if (man->has_type && man->use_type) {
1871+ type_found = 1;
1872+ node = drm_mm_search_free(&man->manager, mem->num_pages,
1873+ mem->page_alignment, 1);
1874+ if (node)
1875+ node = drm_mm_get_block(node, mem->num_pages,
1876+ mem->page_alignment);
1877+ }
1878+ mutex_unlock(&dev->struct_mutex);
1879+ if (node)
1880+ break;
1881+ }
1882+
1883+ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
1884+ mem->mm_node = node;
1885+ mem->mem_type = mem_type;
1886+ mem->flags = cur_flags;
1887+ return 0;
1888+ }
1889+
1890+ if (!type_found)
1891+ return -EINVAL;
1892+
1893+ num_prios = dev->driver->bo_driver->num_mem_busy_prio;
1894+ prios = dev->driver->bo_driver->mem_busy_prio;
1895+
1896+ for (i = 0; i < num_prios; ++i) {
1897+ mem_type = prios[i];
1898+ man = &bm->man[mem_type];
1899+
1900+ if (!man->has_type)
1901+ continue;
1902+
1903+ if (!drm_bo_mt_compatible(man,
1904+ bo->type == drm_bo_type_user,
1905+ mem_type,
1906+ mem->mask,
1907+ &cur_flags))
1908+ continue;
1909+
1910+ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
1911+
1912+ if (ret == 0 && mem->mm_node) {
1913+ mem->flags = cur_flags;
1914+ return 0;
1915+ }
1916+
1917+ if (ret == -EAGAIN)
1918+ has_eagain = 1;
1919+ }
1920+
1921+ ret = (has_eagain) ? -EAGAIN : -ENOMEM;
1922+ return ret;
1923+}
1924+EXPORT_SYMBOL(drm_bo_mem_space);
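+
+/*
+ * Hypothetical driver configuration (sketch, not from this patch)
+ * showing the priority arrays drm_bo_mem_space() consults; a driver
+ * preferring VRAM over TT over system memory might set up:
+ *
+ *	static uint32_t foo_mem_prios[] = {
+ *		DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
+ *	};
+ *
+ *	bo_driver.mem_type_prio = foo_mem_prios;
+ *	bo_driver.num_mem_type_prio = ARRAY_SIZE(foo_mem_prios);
+ *
+ * mem_busy_prio / num_mem_busy_prio define the eviction fallback
+ * order in the same way.
+ */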
1925+
1926+static int drm_bo_new_mask(struct drm_buffer_object *bo,
1927+ uint64_t new_flags, uint64_t used_mask)
1928+{
1929+ uint32_t new_props;
1930+
1931+ if (bo->type == drm_bo_type_user &&
1932+ ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
1933+ (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
1934+ DRM_ERROR("User buffers require cache-coherent memory.\n");
1935+ return -EINVAL;
1936+ }
1937+
1938+ if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1939+ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
1940+ return -EPERM;
1941+ }
1942+
1943+ if (likely(used_mask & DRM_BO_MASK_MEM) &&
1944+ (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
1945+ !DRM_SUSER(DRM_CURPROC)) {
1946+ if (likely(bo->mem.flags & new_flags & used_mask &
1947+ DRM_BO_MASK_MEM))
1948+ new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
1949+ (bo->mem.flags & DRM_BO_MASK_MEM);
1950+ else {
1951+ DRM_ERROR("Incompatible memory type specification "
1952+ "for NO_EVICT buffer.\n");
1953+ return -EPERM;
1954+ }
1955+ }
1956+
1957+ if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1958+ DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1959+ return -EPERM;
1960+ }
1961+
1962+ new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1963+ DRM_BO_FLAG_READ);
1964+
1965+ if (!new_props) {
1966+ DRM_ERROR("Invalid buffer object rwx properties\n");
1967+ return -EINVAL;
1968+ }
1969+
1970+ bo->mem.mask = new_flags;
1971+ return 0;
1972+}
1973+
1974+/*
1975+ * Call dev->struct_mutex locked.
1976+ */
1977+
1978+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1979+ uint32_t handle, int check_owner)
1980+{
1981+ struct drm_user_object *uo;
1982+ struct drm_buffer_object *bo;
1983+
1984+ uo = drm_lookup_user_object(file_priv, handle);
1985+
1986+ if (!uo || (uo->type != drm_buffer_type)) {
1987+ DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1988+ return NULL;
1989+ }
1990+
1991+ if (check_owner && file_priv != uo->owner) {
1992+ if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1993+ return NULL;
1994+ }
1995+
1996+ bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1997+ atomic_inc(&bo->usage);
1998+ return bo;
1999+}
2000+EXPORT_SYMBOL(drm_lookup_buffer_object);
2001+
2002+/*
2003+ * Call bo->mutex locked.
2004+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
2005+ * Unlike drm_bo_busy(), this does not flush fences.
2006+ */
2007+
2008+static int drm_bo_quick_busy(struct drm_buffer_object *bo)
2009+{
2010+ struct drm_fence_object *fence = bo->fence;
2011+
2012+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2013+ if (fence) {
2014+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
2015+ drm_fence_usage_deref_unlocked(&bo->fence);
2016+ return 0;
2017+ }
2018+ return 1;
2019+ }
2020+ return 0;
2021+}
2022+
2023+/*
2024+ * Call bo->mutex locked.
2025+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
2026+ */
2027+
2028+static int drm_bo_busy(struct drm_buffer_object *bo)
2029+{
2030+ struct drm_fence_object *fence = bo->fence;
2031+
2032+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2033+ if (fence) {
2034+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
2035+ drm_fence_usage_deref_unlocked(&bo->fence);
2036+ return 0;
2037+ }
2038+ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
2039+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
2040+ drm_fence_usage_deref_unlocked(&bo->fence);
2041+ return 0;
2042+ }
2043+ return 1;
2044+ }
2045+ return 0;
2046+}
2047+
2048+static int drm_bo_evict_cached(struct drm_buffer_object *bo)
2049+{
2050+ int ret = 0;
2051+
2052+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2053+ if (bo->mem.mm_node)
2054+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
2055+ return ret;
2056+}
2057+
2058+/*
2059+ * Wait until a buffer is unmapped.
2060+ */
2061+
2062+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
2063+{
2064+ int ret = 0;
2065+
2066+ if ((atomic_read(&bo->mapped) >= 0) && no_wait)
2067+ return -EBUSY;
2068+
2069+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
2070+ atomic_read(&bo->mapped) == -1);
2071+
2072+ if (ret == -EINTR)
2073+ ret = -EAGAIN;
2074+
2075+ return ret;
2076+}
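+
+/*
+ * bo->mapped counting convention: the counter starts at -1, meaning
+ * "unmapped"; each mapper increments it (the first one brings it to 0
+ * via atomic_inc_and_test() in drm_buffer_object_map()), so n active
+ * mappings give a value of n - 1, and the wait above completes when
+ * it drops back to -1.
+ */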
2077+
2078+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
2079+{
2080+ int ret;
2081+
2082+ mutex_lock(&bo->mutex);
2083+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2084+ mutex_unlock(&bo->mutex);
2085+ return ret;
2086+}
2087+
2088+/*
2089+ * Wait until a buffer scheduled to be fenced moves off the unfenced list.
2090+ * Until then, we cannot really do anything with it except delete it.
2091+ */
2092+
2093+static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
2094+ int eagain_if_wait)
2095+{
2096+ int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2097+
2098+ if (ret && no_wait)
2099+ return -EBUSY;
2100+ else if (!ret)
2101+ return 0;
2102+
2103+ ret = 0;
2104+ mutex_unlock(&bo->mutex);
2105+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
2106+ !drm_bo_check_unfenced(bo));
2107+ mutex_lock(&bo->mutex);
2108+ if (ret == -EINTR)
2109+ return -EAGAIN;
2110+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2111+ if (ret) {
2112+ DRM_ERROR("Timeout waiting for buffer to become fenced\n");
2113+ return -EBUSY;
2114+ }
2115+ if (eagain_if_wait)
2116+ return -EAGAIN;
2117+
2118+ return 0;
2119+}
2120+
2121+/*
2122+ * Fill in the ioctl reply argument with buffer info.
2123+ * Bo locked.
2124+ */
2125+
2126+void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
2127+ struct drm_bo_info_rep *rep)
2128+{
2129+ if (!rep)
2130+ return;
2131+
2132+ rep->handle = bo->base.hash.key;
2133+ rep->flags = bo->mem.flags;
2134+ rep->size = bo->num_pages * PAGE_SIZE;
2135+ rep->offset = bo->offset;
2136+
2137+ if (bo->type == drm_bo_type_dc)
2138+ rep->arg_handle = bo->map_list.user_token;
2139+ else
2140+ rep->arg_handle = 0;
2141+
2142+ rep->mask = bo->mem.mask;
2143+ rep->buffer_start = bo->buffer_start;
2144+ rep->fence_flags = bo->fence_type;
2145+ rep->rep_flags = 0;
2146+ rep->page_alignment = bo->mem.page_alignment;
2147+
2148+ if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
2149+ DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
2150+ DRM_BO_REP_BUSY);
2151+ }
2152+}
2153+EXPORT_SYMBOL(drm_bo_fill_rep_arg);
2154+
2155+/*
2156+ * Wait for buffer idle and register that we've mapped the buffer.
2157+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
2158+ * so that if the client dies, the mapping is automatically
2159+ * unregistered.
2160+ */
2161+
2162+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
2163+ uint32_t map_flags, unsigned hint,
2164+ struct drm_bo_info_rep *rep)
2165+{
2166+ struct drm_buffer_object *bo;
2167+ struct drm_device *dev = file_priv->minor->dev;
2168+ int ret = 0;
2169+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
2170+
2171+ mutex_lock(&dev->struct_mutex);
2172+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2173+ mutex_unlock(&dev->struct_mutex);
2174+
2175+ if (!bo)
2176+ return -EINVAL;
2177+
2178+ mutex_lock(&bo->mutex);
2179+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
2180+ if (ret)
2181+ goto out;
2182+
2183+ /*
2184+ * If atomic_inc_and_test() returns true, we were unmapped.
2185+ * We need this test because unmapping can be done
2186+ * without the bo->mutex held.
2187+ */
2188+
2189+ while (1) {
2190+ if (atomic_inc_and_test(&bo->mapped)) {
2191+ if (no_wait && drm_bo_busy(bo)) {
2192+ atomic_dec(&bo->mapped);
2193+ ret = -EBUSY;
2194+ goto out;
2195+ }
2196+ ret = drm_bo_wait(bo, 0, 0, no_wait);
2197+ if (ret) {
2198+ atomic_dec(&bo->mapped);
2199+ goto out;
2200+ }
2201+
2202+ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
2203+ drm_bo_evict_cached(bo);
2204+
2205+ break;
2206+ } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
2207+
2208+ /*
2209+ * We are already mapped with different flags.
2210+ * need to wait for unmap.
2211+ */
2212+
2213+ ret = drm_bo_wait_unmapped(bo, no_wait);
2214+ if (ret)
2215+ goto out;
2216+
2217+ continue;
2218+ }
2219+ break;
2220+ }
2221+
2222+ mutex_lock(&dev->struct_mutex);
2223+ ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
2224+ mutex_unlock(&dev->struct_mutex);
2225+ if (ret) {
2226+ if (atomic_add_negative(-1, &bo->mapped))
2227+ wake_up_all(&bo->event_queue);
2228+
2229+ } else
2230+ drm_bo_fill_rep_arg(bo, rep);
2231+out:
2232+ mutex_unlock(&bo->mutex);
2233+ drm_bo_usage_deref_unlocked(&bo);
2234+ return ret;
2235+}
2236+
2237+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
2238+{
2239+ struct drm_device *dev = file_priv->minor->dev;
2240+ struct drm_buffer_object *bo;
2241+ struct drm_ref_object *ro;
2242+ int ret = 0;
2243+
2244+ mutex_lock(&dev->struct_mutex);
2245+
2246+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2247+ if (!bo) {
2248+ ret = -EINVAL;
2249+ goto out;
2250+ }
2251+
2252+ ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
2253+ if (!ro) {
2254+ ret = -EINVAL;
2255+ goto out;
2256+ }
2257+
2258+ drm_remove_ref_object(file_priv, ro);
2259+ drm_bo_usage_deref_locked(&bo);
2260+out:
2261+ mutex_unlock(&dev->struct_mutex);
2262+ return ret;
2263+}
2264+
2265+/*
2266+ * Call dev->struct_mutex locked.
2267+ */
2268+
2269+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
2270+ struct drm_user_object *uo,
2271+ enum drm_ref_type action)
2272+{
2273+ struct drm_buffer_object *bo =
2274+ drm_user_object_entry(uo, struct drm_buffer_object, base);
2275+
2276+ /*
2277+ * We DON'T want to take bo->mutex here, because the waiters
2278+ * for an unmapped buffer hold it while they wait.
2279+ */
2280+
2281+ BUG_ON(action != _DRM_REF_TYPE1);
2282+
2283+ if (atomic_add_negative(-1, &bo->mapped))
2284+ wake_up_all(&bo->event_queue);
2285+}
2286+
2287+/*
2288+ * bo->mutex locked.
2289+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
2290+ */
2291+
2292+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
2293+ int no_wait, int move_unfenced)
2294+{
2295+ struct drm_device *dev = bo->dev;
2296+ struct drm_buffer_manager *bm = &dev->bm;
2297+ int ret = 0;
2298+ struct drm_bo_mem_reg mem;
2299+ /*
2300+ * Flush outstanding fences.
2301+ */
2302+
2303+ drm_bo_busy(bo);
2304+
2305+ /*
2306+ * Wait for outstanding fences.
2307+ */
2308+
2309+ ret = drm_bo_wait(bo, 0, 0, no_wait);
2310+ if (ret)
2311+ return ret;
2312+
2313+ mem.num_pages = bo->num_pages;
2314+ mem.size = mem.num_pages << PAGE_SHIFT;
2315+ mem.mask = new_mem_flags;
2316+ mem.page_alignment = bo->mem.page_alignment;
2317+
2318+ mutex_lock(&bm->evict_mutex);
2319+ mutex_lock(&dev->struct_mutex);
2320+ list_del_init(&bo->lru);
2321+ mutex_unlock(&dev->struct_mutex);
2322+
2323+ /*
2324+ * Determine where to move the buffer.
2325+ */
2326+ ret = drm_bo_mem_space(bo, &mem, no_wait);
2327+ if (ret)
2328+ goto out_unlock;
2329+
2330+ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
2331+
2332+out_unlock:
2333+ mutex_lock(&dev->struct_mutex);
2334+ if (ret || !move_unfenced) {
2335+ if (mem.mm_node) {
2336+ if (mem.mm_node != bo->pinned_node)
2337+ drm_mm_put_block(mem.mm_node);
2338+ mem.mm_node = NULL;
2339+ }
2340+ drm_bo_add_to_lru(bo);
2341+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
2342+ wake_up_all(&bo->event_queue);
2343+ DRM_FLAG_MASKED(bo->priv_flags, 0,
2344+ _DRM_BO_FLAG_UNFENCED);
2345+ }
2346+ } else {
2347+ list_add_tail(&bo->lru, &bm->unfenced);
2348+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
2349+ _DRM_BO_FLAG_UNFENCED);
2350+ }
2351+ mutex_unlock(&dev->struct_mutex);
2352+ mutex_unlock(&bm->evict_mutex);
2353+ return ret;
2354+}
2355+
2356+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
2357+{
2358+ uint32_t flag_diff = (mem->mask ^ mem->flags);
2359+
2360+ if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
2361+ return 0;
2362+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
2363+ (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
2364+ (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
2365+ return 0;
2366+
2367+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
2368+ ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
2369+ (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
2370+ return 0;
2371+ return 1;
2372+}
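+
+/*
+ * Here mem->flags describes the current placement and mem->mask the
+ * requested one. The two are compatible (no move needed) only if they
+ * share a memory type; a caching difference matters only when
+ * DRM_BO_FLAG_FORCE_CACHING is requested, and a mappability
+ * difference matters when the mask asks for MAPPABLE or
+ * FORCE_MAPPABLE.
+ */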
2373+
2374+/*
2375+ * bo locked.
2376+ */
2377+
2378+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
2379+ uint32_t fence_class,
2380+ int move_unfenced, int no_wait)
2381+{
2382+ struct drm_device *dev = bo->dev;
2383+ struct drm_buffer_manager *bm = &dev->bm;
2384+ struct drm_bo_driver *driver = dev->driver->bo_driver;
2385+ uint32_t ftype;
2386+ int ret;
2387+
2388+ DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
2389+ (unsigned long long) bo->mem.mask,
2390+ (unsigned long long) bo->mem.flags);
2391+
2392+ ret = driver->fence_type(bo, &fence_class, &ftype);
2393+
2394+ if (ret) {
2395+ DRM_ERROR("Driver did not support given buffer permissions\n");
2396+ return ret;
2397+ }
2398+
2399+ /*
2400+ * We're switching command submission mechanism,
2401+ * or cannot simply rely on the hardware serializing for us.
2402+ *
2403+ * Insert a driver-dependant barrier or wait for buffer idle.
2404+ */
2405+
2406+ if ((fence_class != bo->fence_class) ||
2407+ ((ftype ^ bo->fence_type) & bo->fence_type)) {
2408+
2409+ ret = -EINVAL;
2410+ if (driver->command_stream_barrier) {
2411+ ret = driver->command_stream_barrier(bo,
2412+ fence_class,
2413+ ftype,
2414+ no_wait);
2415+ }
2416+ if (ret)
2417+ ret = drm_bo_wait(bo, 0, 0, no_wait);
2418+
2419+ if (ret)
2420+ return ret;
2421+
2422+ }
2423+
2424+ bo->new_fence_class = fence_class;
2425+ bo->new_fence_type = ftype;
2426+
2427+ ret = drm_bo_wait_unmapped(bo, no_wait);
2428+ if (ret) {
2429+ DRM_ERROR("Timed out waiting for buffer unmap.\n");
2430+ return ret;
2431+ }
2432+
2433+ /*
2434+ * Check whether we need to move buffer.
2435+ */
2436+
2437+ if (!drm_bo_mem_compat(&bo->mem)) {
2438+ ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
2439+ move_unfenced);
2440+ if (ret) {
2441+ if (ret != -EAGAIN)
2442+ DRM_ERROR("Failed moving buffer.\n");
2443+ if (ret == -ENOMEM)
2444+ DRM_ERROR("Out of aperture space.\n");
2445+ return ret;
2446+ }
2447+ }
2448+
2449+ /*
2450+ * Pinned buffers.
2451+ */
2452+
2453+ if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
2454+ bo->pinned_mem_type = bo->mem.mem_type;
2455+ mutex_lock(&dev->struct_mutex);
2456+ list_del_init(&bo->pinned_lru);
2457+ drm_bo_add_to_pinned_lru(bo);
2458+
2459+ if (bo->pinned_node != bo->mem.mm_node) {
2460+ if (bo->pinned_node != NULL)
2461+ drm_mm_put_block(bo->pinned_node);
2462+ bo->pinned_node = bo->mem.mm_node;
2463+ }
2464+
2465+ mutex_unlock(&dev->struct_mutex);
2466+
2467+ } else if (bo->pinned_node != NULL) {
2468+
2469+ mutex_lock(&dev->struct_mutex);
2470+
2471+ if (bo->pinned_node != bo->mem.mm_node)
2472+ drm_mm_put_block(bo->pinned_node);
2473+
2474+ list_del_init(&bo->pinned_lru);
2475+ bo->pinned_node = NULL;
2476+ mutex_unlock(&dev->struct_mutex);
2477+
2478+ }
2479+
2480+ /*
2481+ * We might need to add a TTM.
2482+ */
2483+
2484+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
2485+ ret = drm_bo_add_ttm(bo);
2486+ if (ret)
2487+ return ret;
2488+ }
2489+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
2490+
2491+ /*
2492+ * Finally, adjust lru to be sure.
2493+ */
2494+
2495+ mutex_lock(&dev->struct_mutex);
2496+ list_del(&bo->lru);
2497+ if (move_unfenced) {
2498+ list_add_tail(&bo->lru, &bm->unfenced);
2499+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
2500+ _DRM_BO_FLAG_UNFENCED);
2501+ } else {
2502+ drm_bo_add_to_lru(bo);
2503+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
2504+ wake_up_all(&bo->event_queue);
2505+ DRM_FLAG_MASKED(bo->priv_flags, 0,
2506+ _DRM_BO_FLAG_UNFENCED);
2507+ }
2508+ }
2509+ mutex_unlock(&dev->struct_mutex);
2510+
2511+ return 0;
2512+}
2513+
2514+int drm_bo_do_validate(struct drm_buffer_object *bo,
2515+ uint64_t flags, uint64_t mask, uint32_t hint,
2516+ uint32_t fence_class,
2517+ int no_wait,
2518+ struct drm_bo_info_rep *rep)
2519+{
2520+ int ret;
2521+
2522+ mutex_lock(&bo->mutex);
2523+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
2524+
2525+ if (ret)
2526+ goto out;
2527+
2528+ DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
2529+ ret = drm_bo_new_mask(bo, flags, mask);
2530+ if (ret)
2531+ goto out;
2532+
2533+ ret = drm_buffer_object_validate(bo,
2534+ fence_class,
2535+ !(hint & DRM_BO_HINT_DONT_FENCE),
2536+ no_wait);
2537+out:
2538+ if (rep)
2539+ drm_bo_fill_rep_arg(bo, rep);
2540+
2541+ mutex_unlock(&bo->mutex);
2542+ return ret;
2543+}
2544+EXPORT_SYMBOL(drm_bo_do_validate);
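+
+/*
+ * Validation call chain: the setstatus ioctl goes through
+ * drm_bo_handle_validate() -> drm_bo_do_validate() ->
+ * drm_buffer_object_validate(), which in turn moves the buffer via
+ * drm_bo_move_buffer() whenever drm_bo_mem_compat() says the current
+ * placement does not satisfy the requested mask.
+ */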
2545+
2546+
2547+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
2548+ uint32_t fence_class,
2549+ uint64_t flags, uint64_t mask,
2550+ uint32_t hint,
2551+ int use_old_fence_class,
2552+ struct drm_bo_info_rep *rep,
2553+ struct drm_buffer_object **bo_rep)
2554+{
2555+ struct drm_device *dev = file_priv->minor->dev;
2556+ struct drm_buffer_object *bo;
2557+ int ret;
2558+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
2559+
2560+ mutex_lock(&dev->struct_mutex);
2561+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2562+ mutex_unlock(&dev->struct_mutex);
2563+
2564+ if (!bo)
2565+ return -EINVAL;
2566+
2567+ if (use_old_fence_class)
2568+ fence_class = bo->fence_class;
2569+
2570+ /*
2571+ * Only allow the creator to change the shared buffer mask.
2572+ */
2573+
2574+ if (bo->base.owner != file_priv)
2575+ mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
2576+
2578+ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
2579+ no_wait, rep);
2580+
2581+ if (!ret && bo_rep)
2582+ *bo_rep = bo;
2583+ else
2584+ drm_bo_usage_deref_unlocked(&bo);
2585+
2586+ return ret;
2587+}
2588+EXPORT_SYMBOL(drm_bo_handle_validate);
2589+
2590+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
2591+ struct drm_bo_info_rep *rep)
2592+{
2593+ struct drm_device *dev = file_priv->minor->dev;
2594+ struct drm_buffer_object *bo;
2595+
2596+ mutex_lock(&dev->struct_mutex);
2597+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2598+ mutex_unlock(&dev->struct_mutex);
2599+
2600+ if (!bo)
2601+ return -EINVAL;
2602+
2603+ mutex_lock(&bo->mutex);
2604+ if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
2605+ (void)drm_bo_busy(bo);
2606+ drm_bo_fill_rep_arg(bo, rep);
2607+ mutex_unlock(&bo->mutex);
2608+ drm_bo_usage_deref_unlocked(&bo);
2609+ return 0;
2610+}
2611+
2612+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
2613+ uint32_t hint,
2614+ struct drm_bo_info_rep *rep)
2615+{
2616+ struct drm_device *dev = file_priv->minor->dev;
2617+ struct drm_buffer_object *bo;
2618+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
2619+ int ret;
2620+
2621+ mutex_lock(&dev->struct_mutex);
2622+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2623+ mutex_unlock(&dev->struct_mutex);
2624+
2625+ if (!bo)
2626+ return -EINVAL;
2627+
2628+ mutex_lock(&bo->mutex);
2629+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
2630+ if (ret)
2631+ goto out;
2632+ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
2633+ if (ret)
2634+ goto out;
2635+
2636+ drm_bo_fill_rep_arg(bo, rep);
2637+
2638+out:
2639+ mutex_unlock(&bo->mutex);
2640+ drm_bo_usage_deref_unlocked(&bo);
2641+ return ret;
2642+}
2643+
2644+static inline size_t drm_size_align(size_t size)
2645+{
2646+ size_t tmp_size = 4;
2647+ if (size > PAGE_SIZE)
2648+ return PAGE_ALIGN(size);
2649+ while (tmp_size < size)
2650+ tmp_size <<= 1;
2651+
2652+ return tmp_size;
2653+}
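+
+/*
+ * Worked examples (assuming 4 KiB pages): drm_size_align(3) == 4,
+ * drm_size_align(100) == 128, drm_size_align(4000) == 4096; anything
+ * larger than PAGE_SIZE is simply page-aligned, e.g.
+ * drm_size_align(PAGE_SIZE + 1) == 2 * PAGE_SIZE.
+ */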
2654+
2655+static int drm_bo_reserve_size(struct drm_device *dev,
2656+ int user_bo,
2657+ unsigned long num_pages,
2658+ unsigned long *size)
2659+{
2660+ struct drm_bo_driver *driver = dev->driver->bo_driver;
2661+
2662+ *size = drm_size_align(sizeof(struct drm_buffer_object)) +
2663+ /* Always account for a TTM, even for fixed memory types */
2664+ drm_ttm_size(dev, num_pages, user_bo) +
2665+ /* user space mapping structure */
2666+ drm_size_align(sizeof(drm_local_map_t)) +
2667+ /* file offset space, aperture space, pinned space */
2668+ 3*drm_size_align(sizeof(struct drm_mm_node *)) +
2669+ /* ttm backend */
2670+ driver->backend_size(dev, num_pages);
2671+
2672+ /* FIXME - ENOMEM? */
2673+ return 0;
2674+}
2675+
2676+int drm_buffer_object_create(struct drm_device *dev,
2677+ unsigned long size,
2678+ enum drm_bo_type type,
2679+ uint64_t mask,
2680+ uint32_t hint,
2681+ uint32_t page_alignment,
2682+ unsigned long buffer_start,
2683+ struct drm_buffer_object **buf_obj)
2684+{
2685+ struct drm_buffer_manager *bm = &dev->bm;
2686+ struct drm_buffer_object *bo;
2687+ int ret = 0;
2688+ unsigned long num_pages;
2689+ unsigned long reserved_size;
2690+
2691+ size += buffer_start & ~PAGE_MASK;
2692+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2693+ if (num_pages == 0) {
2694+ DRM_ERROR("Illegal buffer object size.\n");
2695+ return -EINVAL;
2696+ }
2697+
2698+ ret = drm_bo_reserve_size(dev, type == drm_bo_type_user,
2699+ num_pages, &reserved_size);
2700+
2701+ if (ret) {
2702+ DRM_DEBUG("Failed reserving space for buffer object.\n");
2703+ return ret;
2704+ }
2705+
2706+ bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
2707+
2708+ if (!bo) {
2709+ drm_bo_unreserve_size(reserved_size);
2710+ return -ENOMEM;
2711+ }
2712+
2713+ mutex_init(&bo->mutex);
2714+ mutex_lock(&bo->mutex);
2715+
2716+ bo->reserved_size = reserved_size;
2717+ atomic_set(&bo->usage, 1);
2718+ atomic_set(&bo->mapped, -1);
2719+ DRM_INIT_WAITQUEUE(&bo->event_queue);
2720+ INIT_LIST_HEAD(&bo->lru);
2721+ INIT_LIST_HEAD(&bo->pinned_lru);
2722+ INIT_LIST_HEAD(&bo->ddestroy);
2723+#ifdef DRM_ODD_MM_COMPAT
2724+ INIT_LIST_HEAD(&bo->p_mm_list);
2725+ INIT_LIST_HEAD(&bo->vma_list);
2726+#endif
2727+ bo->dev = dev;
2728+ bo->type = type;
2729+ bo->num_pages = num_pages;
2730+ bo->mem.mem_type = DRM_BO_MEM_LOCAL;
2731+ bo->mem.num_pages = bo->num_pages;
2732+ bo->mem.mm_node = NULL;
2733+ bo->mem.page_alignment = page_alignment;
2734+ bo->buffer_start = buffer_start & PAGE_MASK;
2735+ bo->priv_flags = 0;
2736+ bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
2737+ DRM_BO_FLAG_MAPPABLE;
2738+ bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
2739+ DRM_BO_FLAG_MAPPABLE;
2740+ atomic_inc(&bm->count);
2741+ ret = drm_bo_new_mask(bo, mask, mask);
2742+ if (ret)
2743+ goto out_err;
2744+
2745+ if (bo->type == drm_bo_type_dc) {
2746+ mutex_lock(&dev->struct_mutex);
2747+ ret = drm_bo_setup_vm_locked(bo);
2748+ mutex_unlock(&dev->struct_mutex);
2749+ if (ret)
2750+ goto out_err;
2751+ }
2752+
2753+ ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
2754+ if (ret)
2755+ goto out_err;
2756+
2757+ mutex_unlock(&bo->mutex);
2758+ *buf_obj = bo;
2759+ return 0;
2760+
2761+out_err:
2762+ mutex_unlock(&bo->mutex);
2763+
2764+ drm_bo_usage_deref_unlocked(&bo);
2765+ return ret;
2766+}
2767+EXPORT_SYMBOL(drm_buffer_object_create);
2768+
2769+
2770+static int drm_bo_add_user_object(struct drm_file *file_priv,
2771+ struct drm_buffer_object *bo, int shareable)
2772+{
2773+ struct drm_device *dev = file_priv->minor->dev;
2774+ int ret;
2775+
2776+ mutex_lock(&dev->struct_mutex);
2777+ ret = drm_add_user_object(file_priv, &bo->base, shareable);
2778+ if (ret)
2779+ goto out;
2780+
2781+ bo->base.remove = drm_bo_base_deref_locked;
2782+ bo->base.type = drm_buffer_type;
2783+ bo->base.ref_struct_locked = NULL;
2784+ bo->base.unref = drm_buffer_user_object_unmap;
2785+
2786+out:
2787+ mutex_unlock(&dev->struct_mutex);
2788+ return ret;
2789+}
2790+
2791+int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2792+{
2793+ struct drm_bo_create_arg *arg = data;
2794+ struct drm_bo_create_req *req = &arg->d.req;
2795+ struct drm_bo_info_rep *rep = &arg->d.rep;
2796+ struct drm_buffer_object *entry;
2797+ enum drm_bo_type bo_type;
2798+ int ret = 0;
2799+
2800+ DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
2801+ (int)(req->size / 1024), req->page_alignment * 4);
2802+
2803+ if (!dev->bm.initialized) {
2804+ DRM_ERROR("Buffer object manager is not initialized.\n");
2805+ return -EINVAL;
2806+ }
2807+
2808+ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
2809+
2810+ if (bo_type == drm_bo_type_user)
2811+ req->mask &= ~DRM_BO_FLAG_SHAREABLE;
2812+
2813+ ret = drm_buffer_object_create(dev,
2814+ req->size, bo_type, req->mask,
2815+ req->hint, req->page_alignment,
2816+ req->buffer_start, &entry);
2817+ if (ret)
2818+ goto out;
2819+
2820+ ret = drm_bo_add_user_object(file_priv, entry,
2821+ req->mask & DRM_BO_FLAG_SHAREABLE);
2822+ if (ret) {
2823+ drm_bo_usage_deref_unlocked(&entry);
2824+ goto out;
2825+ }
2826+
2827+ mutex_lock(&entry->mutex);
2828+ drm_bo_fill_rep_arg(entry, rep);
2829+ mutex_unlock(&entry->mutex);
2830+
2831+out:
2832+ return ret;
2833+}
2834+
2835+int drm_bo_setstatus_ioctl(struct drm_device *dev,
2836+ void *data, struct drm_file *file_priv)
2837+{
2838+ struct drm_bo_map_wait_idle_arg *arg = data;
2839+ struct drm_bo_info_req *req = &arg->d.req;
2840+ struct drm_bo_info_rep *rep = &arg->d.rep;
2841+ int ret;
2842+
2843+ if (!dev->bm.initialized) {
2844+ DRM_ERROR("Buffer object manager is not initialized.\n");
2845+ return -EINVAL;
2846+ }
2847+
2848+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
2849+ if (ret)
2850+ return ret;
2851+
2852+ ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
2853+ req->flags,
2854+ req->mask,
2855+ req->hint | DRM_BO_HINT_DONT_FENCE,
2856+ 1,
2857+ rep, NULL);
2858+
2859+ (void) drm_bo_read_unlock(&dev->bm.bm_lock);
2860+ if (ret)
2861+ return ret;
2862+
2863+ return 0;
2864+}
2865+
2866+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2867+{
2868+ struct drm_bo_map_wait_idle_arg *arg = data;
2869+ struct drm_bo_info_req *req = &arg->d.req;
2870+ struct drm_bo_info_rep *rep = &arg->d.rep;
2871+ int ret;
2872+ if (!dev->bm.initialized) {
2873+ DRM_ERROR("Buffer object manager is not initialized.\n");
2874+ return -EINVAL;
2875+ }
2876+
2877+ ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
2878+ req->hint, rep);
2879+ if (ret)
2880+ return ret;
2881+
2882+ return 0;
2883+}
2884+
2885+int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2886+{
2887+ struct drm_bo_handle_arg *arg = data;
2888+ int ret;
2889+ if (!dev->bm.initialized) {
2890+ DRM_ERROR("Buffer object manager is not initialized.\n");
2891+ return -EINVAL;
2892+ }
2893+
2894+ ret = drm_buffer_object_unmap(file_priv, arg->handle);
2895+ return ret;
2896+}
2897+
2898+
2899+int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2900+{
2901+ struct drm_bo_reference_info_arg *arg = data;
2902+ struct drm_bo_handle_arg *req = &arg->d.req;
2903+ struct drm_bo_info_rep *rep = &arg->d.rep;
2904+ struct drm_user_object *uo;
2905+ int ret;
2906+
2907+ if (!dev->bm.initialized) {
2908+ DRM_ERROR("Buffer object manager is not initialized.\n");
2909+ return -EINVAL;
2910+ }
2911+
2912+ ret = drm_user_object_ref(file_priv, req->handle,
2913+ drm_buffer_type, &uo);
2914+ if (ret)
2915+ return ret;
2916+
2917+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
2918+ if (ret)
2919+ return ret;
2920+
2921+ return 0;
2922+}
2923+
2924+int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2925+{
2926+ struct drm_bo_handle_arg *arg = data;
2927+ int ret = 0;
2928+
2929+ if (!dev->bm.initialized) {
2930+ DRM_ERROR("Buffer object manager is not initialized.\n");
2931+ return -EINVAL;
2932+ }
2933+
2934+ ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2935+ return ret;
2936+}
2937+
2938+int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2939+{
2940+ struct drm_bo_reference_info_arg *arg = data;
2941+ struct drm_bo_handle_arg *req = &arg->d.req;
2942+ struct drm_bo_info_rep *rep = &arg->d.rep;
2943+ int ret;
2944+
2945+ if (!dev->bm.initialized) {
2946+ DRM_ERROR("Buffer object manager is not initialized.\n");
2947+ return -EINVAL;
2948+ }
2949+
2950+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
2951+ if (ret)
2952+ return ret;
2953+
2954+ return 0;
2955+}
2956+
2957+int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2958+{
2959+ struct drm_bo_map_wait_idle_arg *arg = data;
2960+ struct drm_bo_info_req *req = &arg->d.req;
2961+ struct drm_bo_info_rep *rep = &arg->d.rep;
2962+ int ret;
2963+ if (!dev->bm.initialized) {
2964+ DRM_ERROR("Buffer object manager is not initialized.\n");
2965+ return -EINVAL;
2966+ }
2967+
2968+ ret = drm_bo_handle_wait(file_priv, req->handle,
2969+ req->hint, rep);
2970+ if (ret)
2971+ return ret;
2972+
2973+ return 0;
2974+}
2975+
2976+static int drm_bo_leave_list(struct drm_buffer_object *bo,
2977+ uint32_t mem_type,
2978+ int free_pinned,
2979+ int allow_errors)
2980+{
2981+ struct drm_device *dev = bo->dev;
2982+ int ret = 0;
2983+
2984+ mutex_lock(&bo->mutex);
2985+
2986+ ret = drm_bo_expire_fence(bo, allow_errors);
2987+ if (ret)
2988+ goto out;
2989+
2990+ if (free_pinned) {
2991+ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2992+ mutex_lock(&dev->struct_mutex);
2993+ list_del_init(&bo->pinned_lru);
2994+ if (bo->pinned_node == bo->mem.mm_node)
2995+ bo->pinned_node = NULL;
2996+ if (bo->pinned_node != NULL) {
2997+ drm_mm_put_block(bo->pinned_node);
2998+ bo->pinned_node = NULL;
2999+ }
3000+ mutex_unlock(&dev->struct_mutex);
3001+ }
3002+
3003+ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
3004+ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
3005+ "cleanup. Removing flag and evicting.\n");
3006+ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
3007+ bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
3008+ }
3009+
3010+ if (bo->mem.mem_type == mem_type)
3011+ ret = drm_bo_evict(bo, mem_type, 0);
3012+
3013+ if (ret) {
3014+ if (allow_errors) {
3015+ goto out;
3016+ } else {
3017+ ret = 0;
3018+ DRM_ERROR("Cleanup eviction failed\n");
3019+ }
3020+ }
3021+
3022+out:
3023+ mutex_unlock(&bo->mutex);
3024+ return ret;
3025+}
3026+
3027+
3028+static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
3029+ int pinned_list)
3030+{
3031+ if (pinned_list)
3032+ return list_entry(list, struct drm_buffer_object, pinned_lru);
3033+ else
3034+ return list_entry(list, struct drm_buffer_object, lru);
3035+}
3036+
3037+/*
3038+ * dev->struct_mutex locked.
3039+ */
3040+
3041+static int drm_bo_force_list_clean(struct drm_device *dev,
3042+ struct list_head *head,
3043+ unsigned mem_type,
3044+ int free_pinned,
3045+ int allow_errors,
3046+ int pinned_list)
3047+{
3048+ struct list_head *list, *next, *prev;
3049+ struct drm_buffer_object *entry, *nentry;
3050+ int ret;
3051+ int do_restart;
3052+
3053+ /*
3054+ * The list traversal is a bit odd here, because an item may
3055+ * disappear from the list when we release the struct_mutex or
3056+ * when we decrease the usage count. Also we're not guaranteed
3057+ * to drain pinned lists, so we can't always restart.
3058+ */
3059+
3060+restart:
3061+ nentry = NULL;
3062+ list_for_each_safe(list, next, head) {
3063+ prev = list->prev;
3064+
3065+ entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
3066+ atomic_inc(&entry->usage);
3067+ if (nentry) {
3068+ atomic_dec(&nentry->usage);
3069+ nentry = NULL;
3070+ }
3071+
3072+ /*
3073+ * Protect the next item from destruction, so we can check
3074+ * its list pointers later on.
3075+ */
3076+
3077+ if (next != head) {
3078+ nentry = drm_bo_entry(next, pinned_list);
3079+ atomic_inc(&nentry->usage);
3080+ }
3081+ mutex_unlock(&dev->struct_mutex);
3082+
3083+ ret = drm_bo_leave_list(entry, mem_type, free_pinned,
3084+ allow_errors);
3085+ mutex_lock(&dev->struct_mutex);
3086+
3087+ drm_bo_usage_deref_locked(&entry);
3088+ if (ret)
3089+ return ret;
3090+
3091+ /*
3092+ * Has the next item disappeared from the list?
3093+ */
3094+
3095+ do_restart = ((next->prev != list) && (next->prev != prev));
3096+
3097+ if (nentry != NULL && do_restart)
3098+ drm_bo_usage_deref_locked(&nentry);
3099+
3100+ if (do_restart)
3101+ goto restart;
3102+ }
3103+ return 0;
3104+}
3105+
3106+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
3107+{
3108+ struct drm_buffer_manager *bm = &dev->bm;
3109+ struct drm_mem_type_manager *man = &bm->man[mem_type];
3110+ int ret = -EINVAL;
3111+
3112+ if (mem_type >= DRM_BO_MEM_TYPES) {
3113+ DRM_ERROR("Illegal memory type %d\n", mem_type);
3114+ return ret;
3115+ }
3116+
3117+ if (!man->has_type) {
3118+ DRM_ERROR("Trying to take down uninitialized "
3119+ "memory manager type %u\n", mem_type);
3120+ return ret;
3121+ }
3122+ man->use_type = 0;
3123+ man->has_type = 0;
3124+
3125+ ret = 0;
3126+ if (mem_type > 0) {
3127+ BUG_ON(!list_empty(&bm->unfenced));
3128+ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
3129+ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
3130+
3131+ if (drm_mm_clean(&man->manager)) {
3132+ drm_mm_takedown(&man->manager);
3133+ } else {
3134+ ret = -EBUSY;
3135+ }
3136+ }
3137+
3138+ return ret;
3139+}
3140+EXPORT_SYMBOL(drm_bo_clean_mm);
3141+
3142+/**
3143+ * Evict all buffers of a particular mem_type, but leave memory manager
3144+ * regions for NO_MOVE buffers intact. New buffers cannot be added at this
3145+ * point since we have the hardware lock.
3146+ */
3147+
3148+static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
3149+{
3150+ int ret;
3151+ struct drm_buffer_manager *bm = &dev->bm;
3152+ struct drm_mem_type_manager *man = &bm->man[mem_type];
3153+
3154+ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
3155+ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
3156+ return -EINVAL;
3157+ }
3158+
3159+ if (!man->has_type) {
3160+ DRM_ERROR("Memory type %u has not been initialized.\n",
3161+ mem_type);
3162+ return 0;
3163+ }
3164+
3165+ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
3166+ if (ret)
3167+ return ret;
3168+ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
3169+
3170+ return ret;
3171+}
3172+
3173+int drm_bo_init_mm(struct drm_device *dev,
3174+ unsigned type,
3175+ unsigned long p_offset, unsigned long p_size)
3176+{
3177+ struct drm_buffer_manager *bm = &dev->bm;
3178+ int ret = -EINVAL;
3179+ struct drm_mem_type_manager *man;
3180+
3181+ if (type >= DRM_BO_MEM_TYPES) {
3182+ DRM_ERROR("Illegal memory type %d\n", type);
3183+ return ret;
3184+ }
3185+
3186+ man = &bm->man[type];
3187+ if (man->has_type) {
3188+ DRM_ERROR("Memory manager already initialized for type %d\n",
3189+ type);
3190+ return ret;
3191+ }
3192+
3193+ ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
3194+ if (ret)
3195+ return ret;
3196+
3197+ ret = 0;
3198+ if (type != DRM_BO_MEM_LOCAL) {
3199+ if (!p_size) {
3200+ DRM_ERROR("Zero size memory manager type %d\n", type);
3201+ return ret;
3202+ }
3203+ ret = drm_mm_init(&man->manager, p_offset, p_size);
3204+ if (ret)
3205+ return ret;
3206+ }
3207+ man->has_type = 1;
3208+ man->use_type = 1;
3209+
3210+ INIT_LIST_HEAD(&man->lru);
3211+ INIT_LIST_HEAD(&man->pinned);
3212+
3213+ return 0;
3214+}
3215+EXPORT_SYMBOL(drm_bo_init_mm);
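+
+/*
+ * Illustrative use (sketch; the size variable is made up): a driver
+ * would bring up an aperture-backed type, with dev->struct_mutex
+ * held as in drm_bo_driver_init() and drm_mm_init_ioctl(), with
+ * something like
+ *
+ *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);
+ *
+ * where tt_pages is the managed size in pages. DRM_BO_MEM_LOCAL is
+ * special-cased above and takes no memory-range arguments.
+ */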
3216+
3217+/*
3218+ * This function is intended to be called on drm driver unload.
3219+ * If you decide to call it from lastclose, you must protect the call
3220+ * from a potentially racing drm_bo_driver_init in firstopen.
3221+ * (This may happen on X server restart).
3222+ */
3223+
3224+int drm_bo_driver_finish(struct drm_device *dev)
3225+{
3226+ struct drm_buffer_manager *bm = &dev->bm;
3227+ int ret = 0;
3228+ unsigned i = DRM_BO_MEM_TYPES;
3229+ struct drm_mem_type_manager *man;
3230+
3231+ mutex_lock(&dev->struct_mutex);
3232+
3233+ if (!bm->initialized)
3234+ goto out;
3235+ bm->initialized = 0;
3236+
3237+ while (i--) {
3238+ man = &bm->man[i];
3239+ if (man->has_type) {
3240+ man->use_type = 0;
3241+ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
3242+ ret = -EBUSY;
3243+ DRM_ERROR("DRM memory manager type %d "
3244+ "is not clean.\n", i);
3245+ }
3246+ man->has_type = 0;
3247+ }
3248+ }
3249+ mutex_unlock(&dev->struct_mutex);
3250+
3251+ if (!cancel_delayed_work(&bm->wq))
3252+ flush_scheduled_work();
3253+
3254+ mutex_lock(&dev->struct_mutex);
3255+ drm_bo_delayed_delete(dev, 1);
3256+ if (list_empty(&bm->ddestroy))
3257+ DRM_DEBUG("Delayed destroy list was clean\n");
3258+
3259+ if (list_empty(&bm->man[0].lru))
3260+ DRM_DEBUG("Swap list was clean\n");
3261+
3262+ if (list_empty(&bm->man[0].pinned))
3263+ DRM_DEBUG("NO_MOVE list was clean\n");
3264+
3265+ if (list_empty(&bm->unfenced))
3266+ DRM_DEBUG("Unfenced list was clean\n");
3267+
3268+ __free_page(bm->dummy_read_page);
3269+
3270+out:
3271+ mutex_unlock(&dev->struct_mutex);
3272+ return ret;
3273+}
3274+EXPORT_SYMBOL(drm_bo_driver_finish);
3275+
3276+/*
3277+ * This function is intended to be called on drm driver load.
3278+ * If you decide to call it from firstopen, you must protect the call
3279+ * from a potentially racing drm_bo_driver_finish in lastclose.
3280+ * (This may happen on X server restart).
3281+ */
3282+
3283+int drm_bo_driver_init(struct drm_device *dev)
3284+{
3285+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3286+ struct drm_buffer_manager *bm = &dev->bm;
3287+ int ret = -EINVAL;
3288+
3289+ bm->dummy_read_page = NULL;
3290+ drm_bo_init_lock(&bm->bm_lock);
3291+ mutex_lock(&dev->struct_mutex);
3292+ if (!driver)
3293+ goto out_unlock;
3294+
3295+ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
3296+ if (!bm->dummy_read_page) {
3297+ ret = -ENOMEM;
3298+ goto out_unlock;
3299+ }
3300+
3301+
3302+ /*
3303+ * Initialize the system memory buffer type.
3304+ * Other types need to be driver / IOCTL initialized.
3305+ */
3306+ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
3307+ if (ret)
3308+ goto out_unlock;
3309+
3310+ INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
3311+
3312+ bm->initialized = 1;
3313+ bm->nice_mode = 1;
3314+ atomic_set(&bm->count, 0);
3315+ bm->cur_pages = 0;
3316+ INIT_LIST_HEAD(&bm->unfenced);
3317+ INIT_LIST_HEAD(&bm->ddestroy);
3318+out_unlock:
3319+ mutex_unlock(&dev->struct_mutex);
3320+ return ret;
3321+}
3322+EXPORT_SYMBOL(drm_bo_driver_init);
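+
+/*
+ * Hypothetical driver load hook (sketch, not from this patch):
+ *
+ *	static int foo_driver_load(struct drm_device *dev, unsigned long flags)
+ *	{
+ *		int ret;
+ *
+ *		ret = drm_bo_driver_init(dev);
+ *		if (ret)
+ *			return ret;
+ *		...
+ *	}
+ *
+ * paired with drm_bo_driver_finish() in the unload hook, as the
+ * comments above describe.
+ */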
3323+
3324+int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3325+{
3326+ struct drm_mm_init_arg *arg = data;
3327+ struct drm_buffer_manager *bm = &dev->bm;
3328+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3329+ int ret;
3330+
3331+ if (!driver) {
3332+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3333+ return -EINVAL;
3334+ }
3335+
3336+ if (arg->magic != DRM_BO_INIT_MAGIC) {
3337+ DRM_ERROR("You are using an old libdrm that is not compatible with\n"
3338+ "\tthe kernel DRM module. Please upgrade your libdrm.\n");
3339+ return -EINVAL;
3340+ }
3341+ if (arg->major != DRM_BO_INIT_MAJOR) {
3342+ DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
3343+ "\tversion don't match. Got %d, expected %d.\n",
3344+ arg->major, DRM_BO_INIT_MAJOR);
3345+ return -EINVAL;
3346+ }
3347+
3348+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
3349+ if (ret)
3350+ return ret;
3351+ ret = -EINVAL;
3352+
3353+ mutex_lock(&dev->struct_mutex);
3354+ if (!bm->initialized) {
3355+ DRM_ERROR("DRM memory manager was not initialized.\n");
3356+ goto out;
3357+ }
3358+ if (arg->mem_type == 0) {
3359+ DRM_ERROR("System memory buffers already initialized.\n");
3360+ goto out;
3361+ }
3362+ ret = drm_bo_init_mm(dev, arg->mem_type,
3363+ arg->p_offset, arg->p_size);
3364+
3365+out:
3366+ mutex_unlock(&dev->struct_mutex);
3367+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
3368+
3369+ if (ret)
3370+ return ret;
3371+
3372+ return 0;
3373+}
3374+
3375+int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3376+{
3377+ struct drm_mm_type_arg *arg = data;
3378+ struct drm_buffer_manager *bm = &dev->bm;
3379+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3380+ int ret;
3381+
3382+ if (!driver) {
3383+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3384+ return -EINVAL;
3385+ }
3386+
3387+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
3388+ if (ret)
3389+ return ret;
3390+
3391+ mutex_lock(&dev->struct_mutex);
3392+ ret = -EINVAL;
3393+ if (!bm->initialized) {
3394+ DRM_ERROR("DRM memory manager was not initialized\n");
3395+ goto out;
3396+ }
3397+ if (arg->mem_type == 0) {
3398+ DRM_ERROR("No takedown for System memory buffers.\n");
3399+ goto out;
3400+ }
3401+ ret = 0;
3402+ if (drm_bo_clean_mm(dev, arg->mem_type)) {
3403+ DRM_ERROR("Memory manager type %d not clean. "
3404+ "Delaying takedown\n", arg->mem_type);
3405+ }
3406+out:
3407+ mutex_unlock(&dev->struct_mutex);
3408+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
3409+
3410+ if (ret)
3411+ return ret;
3412+
3413+ return 0;
3414+}
3415+
3416+int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3417+{
3418+ struct drm_mm_type_arg *arg = data;
3419+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3420+ int ret;
3421+
3422+ if (!driver) {
3423+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3424+ return -EINVAL;
3425+ }
3426+
3427+ if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
3428+ DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
3429+ return -EINVAL;
3430+ }
3431+
3432+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
3433+ ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
3434+ if (ret)
3435+ return ret;
3436+ }
3437+
3438+ mutex_lock(&dev->struct_mutex);
3439+ ret = drm_bo_lock_mm(dev, arg->mem_type);
3440+ mutex_unlock(&dev->struct_mutex);
3441+ if (ret) {
3442+ (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
3443+ return ret;
3444+ }
3445+
3446+ return 0;
3447+}
3448+
3449+int drm_mm_unlock_ioctl(struct drm_device *dev,
3450+ void *data,
3451+ struct drm_file *file_priv)
3452+{
3453+ struct drm_mm_type_arg *arg = data;
3454+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3455+ int ret;
3456+
3457+ if (!driver) {
3458+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3459+ return -EINVAL;
3460+ }
3461+
3462+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
3463+ ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
3464+ if (ret)
3465+ return ret;
3466+ }
3467+
3468+ return 0;
3469+}
3470+
3471+/*
3472+ * buffer object vm functions.
3473+ */
3474+
3475+int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
3476+{
3477+ struct drm_buffer_manager *bm = &dev->bm;
3478+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3479+
3480+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
3481+ if (mem->mem_type == DRM_BO_MEM_LOCAL)
3482+ return 0;
3483+
3484+ if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
3485+ return 0;
3486+
3487+ if (mem->flags & DRM_BO_FLAG_CACHED)
3488+ return 0;
3489+ }
3490+ return 1;
3491+}
3492+EXPORT_SYMBOL(drm_mem_reg_is_pci);
3493+
3494+/**
3495+ * Get the PCI offset for the buffer object memory.
3496+ *
3497+ * \param mem The buffer object memory region.
3498+ * \param bus_base On return the base of the PCI region
3499+ * \param bus_offset On return the byte offset into the PCI region
3500+ * \param bus_size On return the byte size of the buffer object or zero if
3501+ * the buffer object memory is not accessible through a PCI region.
3502+ * \return Failure indication.
3503+ *
3504+ * Returns -EINVAL if the buffer object is currently not mappable.
3505+ * Otherwise returns zero.
3506+ */
3507+
3508+int drm_bo_pci_offset(struct drm_device *dev,
3509+ struct drm_bo_mem_reg *mem,
3510+ unsigned long *bus_base,
3511+ unsigned long *bus_offset, unsigned long *bus_size)
3512+{
3513+ struct drm_buffer_manager *bm = &dev->bm;
3514+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3515+
3516+ *bus_size = 0;
3517+ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
3518+ return -EINVAL;
3519+
3520+ if (drm_mem_reg_is_pci(dev, mem)) {
3521+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
3522+ *bus_size = mem->num_pages << PAGE_SHIFT;
3523+ *bus_base = man->io_offset;
3524+ }
3525+
3526+ return 0;
3527+}
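Callers distinguish PCI-backed from system-backed memory by the returned bus_size; a short sketch of that convention, mirroring what drm_mem_reg_ioremap() below actually does (illustrative only, not part of the patch):

	unsigned long base, offset, size;

	if (drm_bo_pci_offset(dev, mem, &base, &offset, &size))
		return -EINVAL;		/* memory type not mappable */
	if (size == 0) {
		/* Not in a PCI aperture: go through the ttm pages instead. */
	} else {
		/* PCI aperture at physical base + offset, size bytes. */
	}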
3528+
3529+/**
3530+ * Kill all user-space virtual mappings of this buffer object.
3531+ *
3532+ * \param bo The buffer object.
3533+ *
3534+ * Call bo->mutex locked.
3535+ */
3536+
3537+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
3538+{
3539+ struct drm_device *dev = bo->dev;
3540+ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
3541+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
3542+
3543+ if (!dev->dev_mapping)
3544+ return;
3545+
3546+ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
3547+}
3548+
3549+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
3550+{
3551+ struct drm_map_list *list;
3552+ drm_local_map_t *map;
3553+ struct drm_device *dev = bo->dev;
3554+
3555+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
3556+ if (bo->type != drm_bo_type_dc)
3557+ return;
3558+
3559+ list = &bo->map_list;
3560+ if (list->user_token) {
3561+ drm_ht_remove_item(&dev->map_hash, &list->hash);
3562+ list->user_token = 0;
3563+ }
3564+ if (list->file_offset_node) {
3565+ drm_mm_put_block(list->file_offset_node);
3566+ list->file_offset_node = NULL;
3567+ }
3568+
3569+ map = list->map;
3570+ if (!map)
3571+ return;
3572+
3573+ drm_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
3574+ list->map = NULL;
3575+ list->user_token = 0ULL;
3576+ drm_bo_usage_deref_locked(&bo);
3577+}
3578+
3579+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
3580+{
3581+ struct drm_map_list *list = &bo->map_list;
3582+ drm_local_map_t *map;
3583+ struct drm_device *dev = bo->dev;
3584+
3585+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
3586+ list->map = drm_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
3587+ if (!list->map)
3588+ return -ENOMEM;
3589+
3590+ map = list->map;
3591+ map->offset = 0;
3592+ map->type = _DRM_TTM;
3593+ map->flags = _DRM_REMOVABLE;
3594+ map->size = bo->mem.num_pages * PAGE_SIZE;
3595+ atomic_inc(&bo->usage);
3596+ map->handle = (void *)bo;
3597+
3598+ list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
3599+ bo->mem.num_pages, 0, 0);
3600+
3601+ if (!list->file_offset_node) {
3602+ drm_bo_takedown_vm_locked(bo);
3603+ return -ENOMEM;
3604+ }
3605+
3606+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
3607+ bo->mem.num_pages, 0);
3608+ if (!list->file_offset_node) {
3609+ drm_bo_takedown_vm_locked(bo);
3610+ return -ENOMEM;
3611+ }
3612+
3613+ list->hash.key = list->file_offset_node->start;
3614+ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
3615+ drm_bo_takedown_vm_locked(bo);
3616+ return -ENOMEM;
3617+ }
3618+
3619+ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
3620+
3621+ return 0;
3622+}
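The user_token computed above is the fake file offset that user space later hands to mmap() to map the object. A sketch of the user-side half (illustrative only; drm_fd, bo_size and user_token are placeholders, with user_token returned by a buffer-object ioctl):

	/* Hypothetical user-space code. */
	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, drm_fd, user_token);

The fault path resolves that offset through dev->map_hash back to this buffer object.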
3623+
3624+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
3625+ struct drm_file *file_priv)
3626+{
3627+ struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
3628+
3629+ arg->major = DRM_BO_INIT_MAJOR;
3630+ arg->minor = DRM_BO_INIT_MINOR;
3631+ arg->patchlevel = DRM_BO_INIT_PATCH;
3632+
3633+ return 0;
3634+}
3635Index: linux-2.6.27/drivers/gpu/drm/drm_bo_lock.c
3636===================================================================
3637--- /dev/null 1970-01-01 00:00:00.000000000 +0000
3638+++ linux-2.6.27/drivers/gpu/drm/drm_bo_lock.c 2009-02-05 13:29:33.000000000 +0000
3639@@ -0,0 +1,175 @@
3640+/**************************************************************************
3641+ *
3642+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
3643+ * All Rights Reserved.
3644+ *
3645+ * Permission is hereby granted, free of charge, to any person obtaining a
3646+ * copy of this software and associated documentation files (the
3647+ * "Software"), to deal in the Software without restriction, including
3648+ * without limitation the rights to use, copy, modify, merge, publish,
3649+ * distribute, sub license, and/or sell copies of the Software, and to
3650+ * permit persons to whom the Software is furnished to do so, subject to
3651+ * the following conditions:
3652+ *
3653+ * The above copyright notice and this permission notice (including the
3654+ * next paragraph) shall be included in all copies or substantial portions
3655+ * of the Software.
3656+ *
3657+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3658+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3659+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
3660+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
3661+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
3662+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
3663+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
3664+ *
3665+ **************************************************************************/
3666+/*
3667+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
3668+ */
3669+
3670+/*
3671+ * This file implements a simple replacement for the buffer manager use
3672+ * of the heavyweight hardware lock.
3673+ * The lock is a read-write lock. Taking it in read mode is fast, and
3674+ * intended for in-kernel use only.
3675+ * Taking it in write mode is slow.
3676+ *
3677+ * The write mode is used only when there is a need to block all
3678+ * user-space processes from allocating a
3679+ * new memory area.
3680+ * Typical use in write mode is X server VT switching, and it's allowed
3681+ * to leave kernel space with the write lock held. If a user-space process
3682+ * dies while holding the write lock, the lock is released as part of its
3683+ * file descriptor release.
3684+ *
3685+ * The read lock is typically placed at the start of an IOCTL- or
3686+ * user-space callable function that may end up allocating a memory area.
3687+ * This includes setstatus, super-ioctls and no_pfn; the latter may move
3688+ * unmappable regions to mappable. It's a bug to leave kernel space with the
3689+ * read lock held.
3690+ *
3691+ * Both read- and write lock taking is interruptible for low signal-delivery
3692+ * latency. The locking functions will return -EAGAIN if interrupted by a
3693+ * signal.
3694+ *
3695+ * Locking order: The lock should be taken BEFORE any kernel mutexes
3696+ * or spinlocks.
3697+ */
3698+
3699+#include "drmP.h"
3700+
3701+void drm_bo_init_lock(struct drm_bo_lock *lock)
3702+{
3703+ DRM_INIT_WAITQUEUE(&lock->queue);
3704+ atomic_set(&lock->write_lock_pending, 0);
3705+ atomic_set(&lock->readers, 0);
3706+}
3707+
3708+void drm_bo_read_unlock(struct drm_bo_lock *lock)
3709+{
3710+ if (unlikely(atomic_add_negative(-1, &lock->readers)))
3711+ BUG();
3712+ if (atomic_read(&lock->readers) == 0)
3713+ wake_up_interruptible(&lock->queue);
3714+}
3715+EXPORT_SYMBOL(drm_bo_read_unlock);
3716+
3717+int drm_bo_read_lock(struct drm_bo_lock *lock)
3718+{
3719+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
3720+ int ret;
3721+ ret = wait_event_interruptible
3722+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
3723+ if (ret)
3724+ return -EAGAIN;
3725+ }
3726+
3727+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
3728+ int ret;
3729+ ret = wait_event_interruptible
3730+ (lock->queue, atomic_add_unless(&lock->readers, 1, -1));
3731+ if (ret)
3732+ return -EAGAIN;
3733+ }
3734+ return 0;
3735+}
3736+EXPORT_SYMBOL(drm_bo_read_lock);
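Tying this back to the usage rules in the header comment, a buffer-allocating ioctl would take the read lock roughly like this (an illustrative sketch, not part of the patch; foo_alloc_ioctl is hypothetical and error handling is abbreviated):

	static int foo_alloc_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
	{
		int ret;

		/* Returns -EAGAIN if interrupted by a signal. */
		ret = drm_bo_read_lock(&dev->bm.bm_lock);
		if (ret)
			return ret;

		/* ... allocate memory areas here ... */

		/* Never leave kernel space with the read lock held. */
		drm_bo_read_unlock(&dev->bm.bm_lock);
		return 0;
	}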
3737+
3738+static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
3739+{
3740+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
3741+ return -EINVAL;
3742+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1))
3743+ return -EINVAL;
3744+ wake_up_interruptible(&lock->queue);
3745+ return 0;
3746+}
3747+
3748+static void drm_bo_write_lock_remove(struct drm_file *file_priv,
3749+ struct drm_user_object *item)
3750+{
3751+ struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
3752+ int ret;
3753+
3754+ ret = __drm_bo_write_unlock(lock);
3755+ BUG_ON(ret);
3756+}
3757+
3758+int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
3759+{
3760+ int ret = 0;
3761+ struct drm_device *dev;
3762+
3763+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
3764+ return -EINVAL;
3765+
3766+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
3767+ ret = wait_event_interruptible
3768+ (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0);
3769+
3770+ if (ret) {
3771+ atomic_set(&lock->write_lock_pending, 0);
3772+ wake_up_interruptible(&lock->queue);
3773+ return -EAGAIN;
3774+ }
3775+ }
3776+
3777+ /*
3778+ * Add a dummy user-object, the destructor of which will
3779+ * make sure the lock is released if the client dies
3780+ * while holding it.
3781+ */
3782+
3783+ dev = file_priv->minor->dev;
3784+ mutex_lock(&dev->struct_mutex);
3785+ ret = drm_add_user_object(file_priv, &lock->base, 0);
3786+ lock->base.remove = &drm_bo_write_lock_remove;
3787+ lock->base.type = drm_lock_type;
3788+ if (ret)
3789+ (void)__drm_bo_write_unlock(lock);
3790+
3791+ mutex_unlock(&dev->struct_mutex);
3792+
3793+ return ret;
3794+}
3795+
3796+int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
3797+{
3798+ struct drm_device *dev = file_priv->minor->dev;
3799+ struct drm_ref_object *ro;
3800+
3801+ mutex_lock(&dev->struct_mutex);
3802+
3803+ if (lock->base.owner != file_priv) {
3804+ mutex_unlock(&dev->struct_mutex);
3805+ return -EINVAL;
3806+ }
3807+ ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
3808+ BUG_ON(!ro);
3809+ drm_remove_ref_object(file_priv, ro);
3810+ lock->base.owner = NULL;
3811+
3812+ mutex_unlock(&dev->struct_mutex);
3813+ return 0;
3814+}
3815Index: linux-2.6.27/drivers/gpu/drm/drm_bo_move.c
3816===================================================================
3817--- /dev/null 1970-01-01 00:00:00.000000000 +0000
3818+++ linux-2.6.27/drivers/gpu/drm/drm_bo_move.c 2009-02-05 13:29:33.000000000 +0000
3819@@ -0,0 +1,597 @@
3820+/**************************************************************************
3821+ *
3822+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
3823+ * All Rights Reserved.
3824+ *
3825+ * Permission is hereby granted, free of charge, to any person obtaining a
3826+ * copy of this software and associated documentation files (the
3827+ * "Software"), to deal in the Software without restriction, including
3828+ * without limitation the rights to use, copy, modify, merge, publish,
3829+ * distribute, sub license, and/or sell copies of the Software, and to
3830+ * permit persons to whom the Software is furnished to do so, subject to
3831+ * the following conditions:
3832+ *
3833+ * The above copyright notice and this permission notice (including the
3834+ * next paragraph) shall be included in all copies or substantial portions
3835+ * of the Software.
3836+ *
3837+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3838+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3839+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
3840+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
3841+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
3842+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
3843+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
3844+ *
3845+ **************************************************************************/
3846+/*
3847+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
3848+ */
3849+
3850+#include "drmP.h"
3851+
3852+/**
3853+ * Free the old memory node unless it's a pinned region and we
3854+ * have not been requested to free also pinned regions.
3855+ */
3856+
3857+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
3858+{
3859+ struct drm_bo_mem_reg *old_mem = &bo->mem;
3860+
3861+ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
3862+ mutex_lock(&bo->dev->struct_mutex);
3863+ drm_mm_put_block(old_mem->mm_node);
3864+ mutex_unlock(&bo->dev->struct_mutex);
3865+ }
3866+ old_mem->mm_node = NULL;
3867+}
3868+
3869+int drm_bo_move_ttm(struct drm_buffer_object *bo,
3870+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
3871+{
3872+ struct drm_ttm *ttm = bo->ttm;
3873+ struct drm_bo_mem_reg *old_mem = &bo->mem;
3874+ uint64_t save_flags = old_mem->flags;
3875+ uint64_t save_mask = old_mem->mask;
3876+ int ret;
3877+
3878+ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
3879+ if (evict)
3880+ drm_ttm_evict(ttm);
3881+ else
3882+ drm_ttm_unbind(ttm);
3883+
3884+ drm_bo_free_old_node(bo);
3885+ DRM_FLAG_MASKED(old_mem->flags,
3886+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
3887+ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
3888+ old_mem->mem_type = DRM_BO_MEM_LOCAL;
3889+ save_flags = old_mem->flags;
3890+ }
3891+ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
3892+ ret = drm_bind_ttm(ttm, new_mem);
3893+ if (ret)
3894+ return ret;
3895+ }
3896+
3897+ *old_mem = *new_mem;
3898+ new_mem->mm_node = NULL;
3899+ old_mem->mask = save_mask;
3900+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
3901+ return 0;
3902+}
3903+EXPORT_SYMBOL(drm_bo_move_ttm);
3904+
3905+/**
3906+ * Return a kernel virtual address for the buffer object's PCI memory.
3907+ *
3908+ * \param mem The buffer object memory region.
3909+ * \return Failure indication.
3910+ *
3911+ * Returns -EINVAL if the buffer object is currently not mappable.
3912+ * Returns -ENOMEM if the ioremap operation failed.
3913+ * Otherwise returns zero.
3914+ *
3915+ * After a successful call, *virtual contains the virtual address, or NULL
3916+ * if the buffer object content is not accessible through PCI space.
3917+ * Call bo->mutex locked.
3918+ */
3919+
3920+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
3921+ void **virtual)
3922+{
3923+ struct drm_buffer_manager *bm = &dev->bm;
3924+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3925+ unsigned long bus_offset;
3926+ unsigned long bus_size;
3927+ unsigned long bus_base;
3928+ int ret;
3929+ void *addr;
3930+
3931+ *virtual = NULL;
3932+ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
3933+ if (ret || bus_size == 0)
3934+ return ret;
3935+
3936+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
3937+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
3938+ else {
3939+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
3940+ if (!addr)
3941+ return -ENOMEM;
3942+ }
3943+ *virtual = addr;
3944+ return 0;
3945+}
3946+EXPORT_SYMBOL(drm_mem_reg_ioremap);
3947+
3948+/**
3949+ * Unmap a mapping obtained with drm_mem_reg_ioremap.
3950+ *
3951+ * \param mem The buffer object memory region.
3952+ *
3953+ * Call bo->mutex locked.
3954+ */
3955+
3956+void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
3957+ void *virtual)
3958+{
3959+ struct drm_buffer_manager *bm;
3960+ struct drm_mem_type_manager *man;
3961+
3962+ bm = &dev->bm;
3963+ man = &bm->man[mem->mem_type];
3964+
3965+ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
3966+ iounmap(virtual);
3967+}
3968+EXPORT_SYMBOL(drm_mem_reg_iounmap);
3969+
3970+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
3971+{
3972+ uint32_t *dstP =
3973+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
3974+ uint32_t *srcP =
3975+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
3976+
3977+ int i;
3978+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
3979+ iowrite32(ioread32(srcP++), dstP++);
3980+ return 0;
3981+}
3982+
3983+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
3984+ unsigned long page)
3985+{
3986+ struct page *d = drm_ttm_get_page(ttm, page);
3987+ void *dst;
3988+
3989+ if (!d)
3990+ return -ENOMEM;
3991+
3992+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
3993+ dst = kmap(d);
3994+ if (!dst)
3995+ return -ENOMEM;
3996+
3997+ memcpy_fromio(dst, src, PAGE_SIZE);
3998+ kunmap(d);
3999+ return 0;
4000+}
4001+
4002+static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
4003+{
4004+ struct page *s = drm_ttm_get_page(ttm, page);
4005+ void *src;
4006+
4007+ if (!s)
4008+ return -ENOMEM;
4009+
4010+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
4011+ src = kmap(s);
4012+ if (!src)
4013+ return -ENOMEM;
4014+
4015+ memcpy_toio(dst, src, PAGE_SIZE);
4016+ kunmap(s);
4017+ return 0;
4018+}
4019+
4020+int drm_bo_move_memcpy(struct drm_buffer_object *bo,
4021+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
4022+{
4023+ struct drm_device *dev = bo->dev;
4024+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
4025+ struct drm_ttm *ttm = bo->ttm;
4026+ struct drm_bo_mem_reg *old_mem = &bo->mem;
4027+ struct drm_bo_mem_reg old_copy = *old_mem;
4028+ void *old_iomap;
4029+ void *new_iomap;
4030+ int ret;
4031+ uint64_t save_flags = old_mem->flags;
4032+ uint64_t save_mask = old_mem->mask;
4033+ unsigned long i;
4034+ unsigned long page;
4035+ unsigned long add = 0;
4036+ int dir;
4037+
4038+ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
4039+ if (ret)
4040+ return ret;
4041+ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
4042+ if (ret)
4043+ goto out;
4044+
4045+ if (old_iomap == NULL && new_iomap == NULL)
4046+ goto out2;
4047+ if (old_iomap == NULL && ttm == NULL)
4048+ goto out2;
4049+
4050+ add = 0;
4051+ dir = 1;
4052+
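+	/* If both regions are in the same memory type and the destination
+	 * block starts below the end of the source block, the ranges may
+	 * overlap: copy backwards so no page is overwritten before it has
+	 * been read. */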
4053+ if ((old_mem->mem_type == new_mem->mem_type) &&
4054+ (new_mem->mm_node->start <
4055+ old_mem->mm_node->start + old_mem->mm_node->size)) {
4056+ dir = -1;
4057+ add = new_mem->num_pages - 1;
4058+ }
4059+
4060+ for (i = 0; i < new_mem->num_pages; ++i) {
4061+ page = i * dir + add;
4062+ if (old_iomap == NULL)
4063+ ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
4064+ else if (new_iomap == NULL)
4065+ ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
4066+ else
4067+ ret = drm_copy_io_page(new_iomap, old_iomap, page);
4068+ if (ret)
4069+ goto out1;
4070+ }
4071+ mb();
4072+out2:
4073+ drm_bo_free_old_node(bo);
4074+
4075+ *old_mem = *new_mem;
4076+ new_mem->mm_node = NULL;
4077+ old_mem->mask = save_mask;
4078+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
4079+
4080+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
4081+ drm_ttm_unbind(ttm);
4082+ drm_destroy_ttm(ttm);
4083+ bo->ttm = NULL;
4084+ }
4085+
4086+out1:
4087+ drm_mem_reg_iounmap(dev, new_mem, new_iomap);
4088+out:
4089+ drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
4090+ return ret;
4091+}
4092+EXPORT_SYMBOL(drm_bo_move_memcpy);
4093+
4094+/*
4095+ * Transfer a buffer object's memory and LRU status to a newly
4096+ * created object. User-space references remain with the old
4097+ * object. Call bo->mutex locked.
4098+ */
4099+
4100+int drm_buffer_object_transfer(struct drm_buffer_object *bo,
4101+ struct drm_buffer_object **new_obj)
4102+{
4103+ struct drm_buffer_object *fbo;
4104+ struct drm_device *dev = bo->dev;
4105+ struct drm_buffer_manager *bm = &dev->bm;
4106+
4107+ fbo = drm_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
4108+ if (!fbo)
4109+ return -ENOMEM;
4110+
4111+ *fbo = *bo;
4112+ mutex_init(&fbo->mutex);
4113+ mutex_lock(&fbo->mutex);
4114+ mutex_lock(&dev->struct_mutex);
4115+
4116+ DRM_INIT_WAITQUEUE(&bo->event_queue);
4117+ INIT_LIST_HEAD(&fbo->ddestroy);
4118+ INIT_LIST_HEAD(&fbo->lru);
4119+ INIT_LIST_HEAD(&fbo->pinned_lru);
4120+#ifdef DRM_ODD_MM_COMPAT
4121+ INIT_LIST_HEAD(&fbo->vma_list);
4122+ INIT_LIST_HEAD(&fbo->p_mm_list);
4123+#endif
4124+
4125+ fbo->fence = drm_fence_reference_locked(bo->fence);
4126+ fbo->pinned_node = NULL;
4127+ fbo->mem.mm_node->private = (void *)fbo;
4128+ atomic_set(&fbo->usage, 1);
4129+ atomic_inc(&bm->count);
4130+ mutex_unlock(&dev->struct_mutex);
4131+ mutex_unlock(&fbo->mutex);
4132+ bo->reserved_size = 0;
4133+ *new_obj = fbo;
4134+ return 0;
4135+}
4136+
4137+/*
4138+ * Since a move is underway, we need to block signals in this function;
4139+ * it cannot be restarted until the move has finished.
4140+ */
4141+
4142+int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
4143+ int evict, int no_wait, uint32_t fence_class,
4144+ uint32_t fence_type, uint32_t fence_flags,
4145+ struct drm_bo_mem_reg *new_mem)
4146+{
4147+ struct drm_device *dev = bo->dev;
4148+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
4149+ struct drm_bo_mem_reg *old_mem = &bo->mem;
4150+ int ret;
4151+ uint64_t save_flags = old_mem->flags;
4152+ uint64_t save_mask = old_mem->mask;
4153+ struct drm_buffer_object *old_obj;
4154+
4155+ if (bo->fence)
4156+ drm_fence_usage_deref_unlocked(&bo->fence);
4157+ ret = drm_fence_object_create(dev, fence_class, fence_type,
4158+ fence_flags | DRM_FENCE_FLAG_EMIT,
4159+ &bo->fence);
4160+ bo->fence_type = fence_type;
4161+ if (ret)
4162+ return ret;
4163+
4164+#ifdef DRM_ODD_MM_COMPAT
4165+ /*
4166+ * In this mode, we don't allow pipelining a copy blit,
4167+ * since the buffer will be accessible from user space
4168+ * the moment we return and rebuild the page tables.
4169+ *
4170+ * With normal vm operation, page tables are rebuilt
4171+ * on demand using fault(), which waits for buffer idle.
4172+ */
4173+ if (1)
4174+#else
4175+ if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
4176+ bo->mem.mm_node != NULL))
4177+#endif
4178+ {
4179+ ret = drm_bo_wait(bo, 0, 1, 0);
4180+ if (ret)
4181+ return ret;
4182+
4183+ drm_bo_free_old_node(bo);
4184+
4185+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
4186+ drm_ttm_unbind(bo->ttm);
4187+ drm_destroy_ttm(bo->ttm);
4188+ bo->ttm = NULL;
4189+ }
4190+ } else {
4191+
4192+ /* This should help pipeline ordinary buffer moves.
4193+ *
4194+ * Hang old buffer memory on a new buffer object,
4195+ * and leave it to be released when the GPU
4196+ * operation has completed.
4197+ */
4198+
4199+ ret = drm_buffer_object_transfer(bo, &old_obj);
4200+
4201+ if (ret)
4202+ return ret;
4203+
4204+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
4205+ old_obj->ttm = NULL;
4206+ else
4207+ bo->ttm = NULL;
4208+
4209+ mutex_lock(&dev->struct_mutex);
4210+ list_del_init(&old_obj->lru);
4211+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
4212+ drm_bo_add_to_lru(old_obj);
4213+
4214+ drm_bo_usage_deref_locked(&old_obj);
4215+ mutex_unlock(&dev->struct_mutex);
4216+
4217+ }
4218+
4219+ *old_mem = *new_mem;
4220+ new_mem->mm_node = NULL;
4221+ old_mem->mask = save_mask;
4222+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
4223+ return 0;
4224+}
4225+EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
4226+
4227+int drm_bo_same_page(unsigned long offset,
4228+ unsigned long offset2)
4229+{
4230+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
4231+}
4232+EXPORT_SYMBOL(drm_bo_same_page);
4233+
4234+unsigned long drm_bo_offset_end(unsigned long offset,
4235+ unsigned long end)
4236+{
4237+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
4238+ return (end < offset) ? end : offset;
4239+}
4240+EXPORT_SYMBOL(drm_bo_offset_end);
4241+
4242+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
4243+{
4244+ pgprot_t tmp = PAGE_KERNEL;
4245+
4246+#if defined(__i386__) || defined(__x86_64__)
4247+#ifdef USE_PAT_WC
4248+#warning using pat
4249+ if (drm_use_pat() && map_type == _DRM_TTM) {
4250+ pgprot_val(tmp) |= _PAGE_PAT;
4251+ return tmp;
4252+ }
4253+#endif
4254+ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
4255+ pgprot_val(tmp) |= _PAGE_PCD;
4256+ pgprot_val(tmp) &= ~_PAGE_PWT;
4257+ }
4258+#elif defined(__powerpc__)
4259+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
4260+ if (map_type == _DRM_REGISTERS)
4261+ pgprot_val(tmp) |= _PAGE_GUARDED;
4262+#endif
4263+#if defined(__ia64__)
4264+ if (map_type == _DRM_TTM)
4265+ tmp = pgprot_writecombine(tmp);
4266+ else
4267+ tmp = pgprot_noncached(tmp);
4268+#endif
4269+ return tmp;
4270+}
4271+
4272+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
4273+ unsigned long bus_offset, unsigned long bus_size,
4274+ struct drm_bo_kmap_obj *map)
4275+{
4276+ struct drm_device *dev = bo->dev;
4277+ struct drm_bo_mem_reg *mem = &bo->mem;
4278+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
4279+
4280+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
4281+ map->bo_kmap_type = bo_map_premapped;
4282+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
4283+ } else {
4284+ map->bo_kmap_type = bo_map_iomap;
4285+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
4286+ }
4287+ return (!map->virtual) ? -ENOMEM : 0;
4288+}
4289+
4290+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
4291+ unsigned long start_page, unsigned long num_pages,
4292+ struct drm_bo_kmap_obj *map)
4293+{
4294+ struct drm_device *dev = bo->dev;
4295+ struct drm_bo_mem_reg *mem = &bo->mem;
4296+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
4297+ pgprot_t prot;
4298+ struct drm_ttm *ttm = bo->ttm;
4299+ struct page *d;
4300+ int i;
4301+
4302+ BUG_ON(!ttm);
4303+
4304+ if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
4305+
4306+ /*
4307+ * We're mapping a single page, and the desired
4308+ * page protection is consistent with the bo.
4309+ */
4310+
4311+ map->bo_kmap_type = bo_map_kmap;
4312+ map->page = drm_ttm_get_page(ttm, start_page);
4313+ map->virtual = kmap(map->page);
4314+ } else {
4315+ /*
4316+		 * Populate the part we're mapping.
4317+ */
4318+
4319+ for (i = start_page; i < start_page + num_pages; ++i) {
4320+ d = drm_ttm_get_page(ttm, i);
4321+ if (!d)
4322+ return -ENOMEM;
4323+ }
4324+
4325+ /*
4326+ * We need to use vmap to get the desired page protection
4327+		 * or to make the buffer object look contiguous.
4328+ */
4329+
4330+ prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
4331+ PAGE_KERNEL :
4332+ drm_kernel_io_prot(man->drm_bus_maptype);
4333+ map->bo_kmap_type = bo_map_vmap;
4334+ map->virtual = vmap(ttm->pages + start_page,
4335+ num_pages, 0, prot);
4336+ }
4337+ return (!map->virtual) ? -ENOMEM : 0;
4338+}
4339+
4340+/*
4341+ * This function is to be used for kernel mapping of buffer objects.
4342+ * It chooses the appropriate mapping method depending on the memory type
4343+ * and caching policy the buffer currently has.
4344+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
4345+ * consumes vmalloc space. Be restrictive with such mappings.
4346+ * Mapping single pages usually returns the logical kernel address,
4347+ * which is fast, but it may fall back to slower temporary mappings
4348+ * for high memory pages or for uncached / write-combined
4349+ * pages.
4350+ *
4351+ * The function fills in a drm_bo_kmap_obj which can be used to return the
4352+ * kernel virtual address of the buffer.
4353+ *
4354+ * Code servicing a non-privileged user request is only allowed to map one
4355+ * page at a time. We might need to implement a better scheme to stop such
4356+ * processes from consuming all vmalloc space.
4357+ */
4358+
4359+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
4360+ unsigned long num_pages, struct drm_bo_kmap_obj *map)
4361+{
4362+ int ret;
4363+ unsigned long bus_base;
4364+ unsigned long bus_offset;
4365+ unsigned long bus_size;
4366+
4367+ map->virtual = NULL;
4368+
4369+ if (num_pages > bo->num_pages)
4370+ return -EINVAL;
4371+ if (start_page > bo->num_pages)
4372+ return -EINVAL;
4373+#if 0
4374+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
4375+ return -EPERM;
4376+#endif
4377+ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
4378+ &bus_offset, &bus_size);
4379+
4380+ if (ret)
4381+ return ret;
4382+
4383+ if (bus_size == 0) {
4384+ return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
4385+ } else {
4386+ bus_offset += start_page << PAGE_SHIFT;
4387+ bus_size = num_pages << PAGE_SHIFT;
4388+ return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
4389+ }
4390+}
4391+EXPORT_SYMBOL(drm_bo_kmap);
4392+
4393+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
4394+{
4395+ if (!map->virtual)
4396+ return;
4397+
4398+ switch (map->bo_kmap_type) {
4399+ case bo_map_iomap:
4400+ iounmap(map->virtual);
4401+ break;
4402+ case bo_map_vmap:
4403+ vunmap(map->virtual);
4404+ break;
4405+ case bo_map_kmap:
4406+ kunmap(map->page);
4407+ break;
4408+ case bo_map_premapped:
4409+ break;
4410+ default:
4411+ BUG();
4412+ }
4413+ map->virtual = NULL;
4414+ map->page = NULL;
4415+}
4416+EXPORT_SYMBOL(drm_bo_kunmap);
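Putting drm_bo_kmap()/drm_bo_kunmap() together, an in-kernel user that wants to peek at the first page of a buffer object might look like this (an illustrative sketch, not part of the patch; foo_peek is hypothetical, error handling is abbreviated, and appropriate locking of bo is assumed):

	static int foo_peek(struct drm_buffer_object *bo, uint32_t *out)
	{
		struct drm_bo_kmap_obj map;
		int ret;

		/* A single-page mapping is the cheap case described above. */
		ret = drm_bo_kmap(bo, 0, 1, &map);
		if (ret)
			return ret;

		*out = *(uint32_t *)map.virtual;
		drm_bo_kunmap(&map);
		return 0;
	}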
4417Index: linux-2.6.27/drivers/gpu/drm/drm_bufs.c
4418===================================================================
4419--- linux-2.6.27.orig/drivers/gpu/drm/drm_bufs.c 2008-10-09 23:13:53.000000000 +0100
4420+++ linux-2.6.27/drivers/gpu/drm/drm_bufs.c 2009-02-05 13:29:33.000000000 +0000
4421@@ -409,6 +409,7 @@
4422 break;
4423 case _DRM_SHM:
4424 vfree(map->handle);
4425+ dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
4426 break;
4427 case _DRM_AGP:
4428 case _DRM_SCATTER_GATHER:
4429@@ -419,6 +420,8 @@
4430 dmah.size = map->size;
4431 __drm_pci_free(dev, &dmah);
4432 break;
4433+ case _DRM_TTM:
4434+ BUG_ON(1);
4435 }
4436 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
4437
4438Index: linux-2.6.27/drivers/gpu/drm/drm_crtc.c
4439===================================================================
4440--- /dev/null 1970-01-01 00:00:00.000000000 +0000
4441+++ linux-2.6.27/drivers/gpu/drm/drm_crtc.c 2009-02-05 13:29:33.000000000 +0000
4442@@ -0,0 +1,2170 @@
4443+/*
4444+ * Copyright (c) 2006-2007 Intel Corporation
4445+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
4446+ *
4447+ * DRM core CRTC related functions
4448+ *
4449+ * Permission to use, copy, modify, distribute, and sell this software and its
4450+ * documentation for any purpose is hereby granted without fee, provided that
4451+ * the above copyright notice appear in all copies and that both that copyright
4452+ * notice and this permission notice appear in supporting documentation, and
4453+ * that the name of the copyright holders not be used in advertising or
4454+ * publicity pertaining to distribution of the software without specific,
4455+ * written prior permission. The copyright holders make no representations
4456+ * about the suitability of this software for any purpose. It is provided "as
4457+ * is" without express or implied warranty.
4458+ *
4459+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
4460+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
4461+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
4462+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
4463+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
4464+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
4465+ * OF THIS SOFTWARE.
4466+ *
4467+ * Authors:
4468+ * Keith Packard
4469+ * Eric Anholt <eric@anholt.net>
4470+ * Dave Airlie <airlied@linux.ie>
4471+ * Jesse Barnes <jesse.barnes@intel.com>
4472+ */
4473+#include <linux/list.h>
4474+#include "drm.h"
4475+#include "drmP.h"
4476+#include "drm_crtc.h"
4477+
4478+/**
4479+ * drm_idr_get - allocate a new identifier
4480+ * @dev: DRM device
4481+ * @ptr: object pointer, used to generate unique ID
4482+ *
4483+ * LOCKING:
4484+ * Caller must hold DRM mode_config lock.
4485+ *
4486+ * Create a unique identifier based on @ptr in @dev's identifier space. Used
4487+ * for tracking modes, CRTCs and outputs.
4488+ *
4489+ * RETURNS:
4490+ * New unique (relative to other objects in @dev) integer identifier for the
4491+ * object.
4492+ */
4493+int drm_idr_get(struct drm_device *dev, void *ptr)
4494+{
4495+ int new_id = 0;
4496+ int ret;
4497+again:
4498+ if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
4499+		DRM_ERROR("Ran out of memory getting a mode number\n");
4500+ return 0;
4501+ }
4502+
4503+ ret = idr_get_new_above(&dev->mode_config.crtc_idr, ptr, 1, &new_id);
4504+ if (ret == -EAGAIN)
4505+ goto again;
4506+
4507+ return new_id;
4508+}
4509+
4510+/**
4511+ * drm_idr_put - free an identifier
4512+ * @dev: DRM device
4513+ * @id: ID to free
4514+ *
4515+ * LOCKING:
4516+ * Caller must hold DRM mode_config lock.
4517+ *
4518+ * Free @id from @dev's unique identifier pool.
4519+ */
4520+void drm_idr_put(struct drm_device *dev, int id)
4521+{
4522+ idr_remove(&dev->mode_config.crtc_idr, id);
4523+}
4524+
4525+/**
4526+ * drm_crtc_from_fb - find the CRTC structure associated with an fb
4527+ * @dev: DRM device
4528+ * @fb: framebuffer in question
4529+ *
4530+ * LOCKING:
4531+ * Caller must hold mode_config lock.
4532+ *
4533+ * Find CRTC in the mode_config structure that matches @fb.
4534+ *
4535+ * RETURNS:
4536+ * Pointer to the CRTC or NULL if it wasn't found.
4537+ */
4538+struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
4539+ struct drm_framebuffer *fb)
4540+{
4541+ struct drm_crtc *crtc;
4542+
4543+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4544+ if (crtc->fb == fb)
4545+ return crtc;
4546+ }
4547+ return NULL;
4548+}
4549+
4550+/**
4551+ * drm_framebuffer_create - create a new framebuffer object
4552+ * @dev: DRM device
4553+ *
4554+ * LOCKING:
4555+ * Caller must hold mode config lock.
4556+ *
4557+ * Creates a new framebuffer object and adds it to @dev's DRM mode_config.
4558+ *
4559+ * RETURNS:
4560+ * Pointer to new framebuffer or NULL on error.
4561+ */
4562+struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev)
4563+{
4564+ struct drm_framebuffer *fb;
4565+
4566+ /* Limit to single framebuffer for now */
4567+ if (dev->mode_config.num_fb > 1) {
4568+		/* Caller holds the mode_config lock; don't drop it here. */
4569+ DRM_ERROR("Attempt to add multiple framebuffers failed\n");
4570+ return NULL;
4571+ }
4572+
4573+ fb = kzalloc(sizeof(struct drm_framebuffer), GFP_KERNEL);
4574+ if (!fb)
4575+ return NULL;
4576+
4577+ fb->id = drm_idr_get(dev, fb);
4578+ fb->dev = dev;
4579+ dev->mode_config.num_fb++;
4580+ list_add(&fb->head, &dev->mode_config.fb_list);
4581+
4582+ return fb;
4583+}
4584+EXPORT_SYMBOL(drm_framebuffer_create);
4585+
4586+/**
4587+ * drm_framebuffer_destroy - remove a framebuffer object
4588+ * @fb: framebuffer to remove
4589+ *
4590+ * LOCKING:
4591+ * Caller must hold mode config lock.
4592+ *
4593+ * Scans all the CRTCs in @dev's mode_config; any CRTC using @fb gets its
4594+ * fb pointer cleared. @fb is then removed from the list and freed.
4595+ */
4596+void drm_framebuffer_destroy(struct drm_framebuffer *fb)
4597+{
4598+ struct drm_device *dev = fb->dev;
4599+ struct drm_crtc *crtc;
4600+
4601+ /* remove from any CRTC */
4602+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4603+ if (crtc->fb == fb)
4604+ crtc->fb = NULL;
4605+ }
4606+
4607+ drm_idr_put(dev, fb->id);
4608+ list_del(&fb->head);
4609+ dev->mode_config.num_fb--;
4610+
4611+ kfree(fb);
4612+}
4613+EXPORT_SYMBOL(drm_framebuffer_destroy);
4614+
4615+/**
4616+ * drm_crtc_create - create a new CRTC object
4617+ * @dev: DRM device
4618+ * @funcs: callbacks for the new CRTC
4619+ *
4620+ * LOCKING:
4621+ * Caller must hold mode config lock.
4622+ *
4623+ * Creates a new CRTC object and adds it to @dev's mode_config structure.
4624+ *
4625+ * RETURNS:
4626+ * Pointer to new CRTC object or NULL on error.
4627+ */
4628+struct drm_crtc *drm_crtc_create(struct drm_device *dev,
4629+ const struct drm_crtc_funcs *funcs)
4630+{
4631+ struct drm_crtc *crtc;
4632+
4633+ crtc = kzalloc(sizeof(struct drm_crtc), GFP_KERNEL);
4634+ if (!crtc)
4635+ return NULL;
4636+
4637+ crtc->dev = dev;
4638+ crtc->funcs = funcs;
4639+
4640+ crtc->id = drm_idr_get(dev, crtc);
4641+
4642+ list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
4643+ dev->mode_config.num_crtc++;
4644+
4645+ return crtc;
4646+}
4647+EXPORT_SYMBOL(drm_crtc_create);
4648+
4649+/**
4650+ * drm_crtc_destroy - remove a CRTC object
4651+ * @crtc: CRTC to remove
4652+ *
4653+ * LOCKING:
4654+ * Caller must hold mode config lock.
4655+ *
4656+ * Cleanup @crtc. Calls @crtc's cleanup function, then removes @crtc from
4657+ * its associated DRM device's mode_config. Frees it afterwards.
4658+ */
4659+void drm_crtc_destroy(struct drm_crtc *crtc)
4660+{
4661+ struct drm_device *dev = crtc->dev;
4662+
4663+ if (crtc->funcs->cleanup)
4664+ (*crtc->funcs->cleanup)(crtc);
4665+
4666+ drm_idr_put(dev, crtc->id);
4667+ list_del(&crtc->head);
4668+ dev->mode_config.num_crtc--;
4669+ kfree(crtc);
4670+}
4671+EXPORT_SYMBOL(drm_crtc_destroy);
4672+
4673+/**
4674+ * drm_crtc_in_use - check if a given CRTC is in a mode_config
4675+ * @crtc: CRTC to check
4676+ *
4677+ * LOCKING:
4678+ * Caller must hold mode config lock.
4679+ *
4680+ * Walk @crtc's DRM device's mode_config and see if it's in use.
4681+ *
4682+ * RETURNS:
4683+ * True if @crtc is part of the mode_config, false otherwise.
4684+ */
4685+bool drm_crtc_in_use(struct drm_crtc *crtc)
4686+{
4687+ struct drm_output *output;
4688+ struct drm_device *dev = crtc->dev;
4689+ /* FIXME: Locking around list access? */
4690+ list_for_each_entry(output, &dev->mode_config.output_list, head)
4691+ if (output->crtc == crtc)
4692+ return true;
4693+ return false;
4694+}
4695+EXPORT_SYMBOL(drm_crtc_in_use);
4696+
4697+/*
4698+ * Detailed mode info for a standard 640x480@60Hz monitor
4699+ */
4700+static struct drm_display_mode std_mode[] = {
4701+ { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 25200, 640, 656,
4702+ 752, 800, 0, 480, 490, 492, 525, 0,
4703+ V_NHSYNC | V_NVSYNC) }, /* 640x480@60Hz */
4704+};
4705+
4706+/**
4707+ * drm_crtc_probe_output_modes - get complete set of display modes
4708+ * @dev: DRM device
4709+ * @maxX: max width for modes
4710+ * @maxY: max height for modes
4711+ *
4712+ * LOCKING:
4713+ * Caller must hold mode config lock.
4714+ *
4715+ * Based on @dev's mode_config layout, scan all the outputs and try to detect
4716+ * modes on them. Modes will first be added to the output's probed_modes
4717+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
4718+ * put into the normal modes list.
4719+ *
4720+ * Intended to be used either at bootup time or when major configuration
4721+ * changes have occurred.
4722+ *
4723+ * FIXME: take into account monitor limits
4724+ */
4725+void drm_crtc_probe_output_modes(struct drm_device *dev, int maxX, int maxY)
4726+{
4727+ struct drm_output *output;
4728+ struct drm_display_mode *mode, *t;
4729+ int ret;
4730+ //if (maxX == 0 || maxY == 0)
4731+ // TODO
4732+
4733+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4734+
4735+ /* set all modes to the unverified state */
4736+ list_for_each_entry_safe(mode, t, &output->modes, head)
4737+ mode->status = MODE_UNVERIFIED;
4738+
4739+ output->status = (*output->funcs->detect)(output);
4740+
4741+ if (output->status == output_status_disconnected) {
4742+ DRM_DEBUG("%s is disconnected\n", output->name);
4743+ /* TODO set EDID to NULL */
4744+ continue;
4745+ }
4746+
4747+ ret = (*output->funcs->get_modes)(output);
4748+
4749+ if (ret) {
4750+ drm_mode_output_list_update(output);
4751+ }
4752+
4753+ if (maxX && maxY)
4754+ drm_mode_validate_size(dev, &output->modes, maxX,
4755+ maxY, 0);
4756+ list_for_each_entry_safe(mode, t, &output->modes, head) {
4757+ if (mode->status == MODE_OK)
4758+ mode->status = (*output->funcs->mode_valid)(output,mode);
4759+ }
4760+
4761+
4762+ drm_mode_prune_invalid(dev, &output->modes, 1);
4763+
4764+ if (list_empty(&output->modes)) {
4765+ struct drm_display_mode *stdmode;
4766+
4767+ DRM_DEBUG("No valid modes on %s\n", output->name);
4768+
4769+			/* Should we do this here?
4770+			 * When no valid EDID modes are available we used to
4771+			 * bail out here; now we add a standard 640x480@60Hz
4772+			 * mode and carry on.
4773+			 */
4774+ stdmode = drm_mode_duplicate(dev, &std_mode[0]);
4775+ drm_mode_probed_add(output, stdmode);
4776+ drm_mode_list_concat(&output->probed_modes,
4777+ &output->modes);
4778+
4779+ DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n",
4780+ output->name);
4781+ }
4782+
4783+ drm_mode_sort(&output->modes);
4784+
4785+ DRM_DEBUG("Probed modes for %s\n", output->name);
4786+ list_for_each_entry_safe(mode, t, &output->modes, head) {
4787+ mode->vrefresh = drm_mode_vrefresh(mode);
4788+
4789+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
4790+ drm_mode_debug_printmodeline(dev, mode);
4791+ }
4792+ }
4793+}
4794+EXPORT_SYMBOL(drm_crtc_probe_output_modes);
4795+
4796+/**
4797+ * drm_crtc_set_mode - set a mode
4798+ * @crtc: CRTC to program
4799+ * @mode: mode to use
4800+ * @x: width of mode
4801+ * @y: height of mode
4802+ *
4803+ * LOCKING:
4804+ * Caller must hold mode config lock.
4805+ *
4806+ * Try to set @mode on @crtc. Give @crtc and its associated outputs a chance
4807+ * to fixup or reject the mode prior to trying to set it.
4808+ *
4809+ * RETURNS:
4810+ * True if the mode was set successfully, or false otherwise.
4811+ */
4812+bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
4813+ int x, int y)
4814+{
4815+ struct drm_device *dev = crtc->dev;
4816+ struct drm_display_mode *adjusted_mode, saved_mode;
4817+ int saved_x, saved_y;
4818+ bool didLock = false;
4819+ bool ret = false;
4820+ struct drm_output *output;
4821+
4822+	crtc->enabled = drm_crtc_in_use(crtc);
4823+	if (!crtc->enabled)
4824+		return true;
4825+
4826+	/* Duplicate the mode only after the early return above, so the
4827+	 * copy cannot leak when the CRTC is unused. */
4828+	adjusted_mode = drm_mode_duplicate(dev, mode);
4829+
4830+ didLock = crtc->funcs->lock(crtc);
4831+
4832+ saved_mode = crtc->mode;
4833+ saved_x = crtc->x;
4834+ saved_y = crtc->y;
4835+
4836+ /* Update crtc values up front so the driver can rely on them for mode
4837+ * setting.
4838+ */
4839+ crtc->mode = *mode;
4840+ crtc->x = x;
4841+ crtc->y = y;
4842+
4843+ /* XXX short-circuit changes to base location only */
4844+
4845+ /* Pass our mode to the outputs and the CRTC to give them a chance to
4846+ * adjust it according to limitations or output properties, and also
4847+ * a chance to reject the mode entirely.
4848+ */
4849+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4850+
4851+ if (output->crtc != crtc)
4852+ continue;
4853+
4854+ if (!output->funcs->mode_fixup(output, mode, adjusted_mode)) {
4855+ goto done;
4856+ }
4857+ }
4858+
4859+ if (!crtc->funcs->mode_fixup(crtc, mode, adjusted_mode)) {
4860+ goto done;
4861+ }
4862+
4863+ /* Prepare the outputs and CRTCs before setting the mode. */
4864+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4865+
4866+ if (output->crtc != crtc)
4867+ continue;
4868+
4869+ /* Disable the output as the first thing we do. */
4870+ output->funcs->prepare(output);
4871+ }
4872+
4873+ crtc->funcs->prepare(crtc);
4874+
4875+ /* Set up the DPLL and any output state that needs to adjust or depend
4876+ * on the DPLL.
4877+ */
4878+ crtc->funcs->mode_set(crtc, mode, adjusted_mode, x, y);
4879+
4880+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4881+
4882+ if (output->crtc != crtc)
4883+ continue;
4884+
4885+ DRM_INFO("%s: set mode %s %x\n", output->name, mode->name, mode->mode_id);
4886+
4887+ output->funcs->mode_set(output, mode, adjusted_mode);
4888+ }
4889+
4890+ /* Now, enable the clocks, plane, pipe, and outputs that we set up. */
4891+ crtc->funcs->commit(crtc);
4892+
4893+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4894+
4895+ if (output->crtc != crtc)
4896+ continue;
4897+
4898+ output->funcs->commit(output);
4899+
4900+#if 0 // TODO def RANDR_12_INTERFACE
4901+ if (output->randr_output)
4902+ RRPostPendingProperties (output->randr_output);
4903+#endif
4904+ }
4905+
4906+	/* Done with the adjusted copy of the mode. */
4907+	drm_mode_destroy(dev, adjusted_mode);
4908+	ret = true;
4909+ /* TODO */
4910+// if (scrn->pScreen)
4911+// drm_crtc_set_screen_sub_pixel_order(dev);
4912+
4913+done:
4914+ if (!ret) {
4915+ crtc->x = saved_x;
4916+ crtc->y = saved_y;
4917+ crtc->mode = saved_mode;
4918+ }
4919+
4920+ if (didLock)
4921+ crtc->funcs->unlock (crtc);
4922+
4923+ return ret;
4924+}
4925+EXPORT_SYMBOL(drm_crtc_set_mode);
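For driver writers, the hook ordering that drm_crtc_set_mode() guarantees can be summarized as follows (a restatement of the code above, not an addition to it):

	/*
	 * 1. output->funcs->mode_fixup()  - each output may adjust/veto
	 * 2. crtc->funcs->mode_fixup()    - the CRTC may adjust/veto
	 * 3. output->funcs->prepare()     - outputs are disabled first
	 * 4. crtc->funcs->prepare()
	 * 5. crtc->funcs->mode_set()      - program DPLL, plane, pipe
	 * 6. output->funcs->mode_set()
	 * 7. crtc->funcs->commit()        - re-enable clocks and pipe
	 * 8. output->funcs->commit()      - finally re-enable outputs
	 */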
4926+
4927+/**
4928+ * drm_disable_unused_functions - disable unused objects
4929+ * @dev: DRM device
4930+ *
4931+ * LOCKING:
4932+ * Caller must hold mode config lock.
4933+ *
4934+ * If an output or CRTC isn't part of @dev's mode_config, it can be disabled
4935+ * by calling its dpms function, which should power it off.
4936+ */
4937+void drm_disable_unused_functions(struct drm_device *dev)
4938+{
4939+ struct drm_output *output;
4940+ struct drm_crtc *crtc;
4941+
4942+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4943+ if (!output->crtc)
4944+ (*output->funcs->dpms)(output, DPMSModeOff);
4945+ }
4946+
4947+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4948+ if (!crtc->enabled)
4949+ crtc->funcs->dpms(crtc, DPMSModeOff);
4950+ }
4951+}
4952+EXPORT_SYMBOL(drm_disable_unused_functions);
4953+
4954+/**
4955+ * drm_mode_probed_add - add a mode to the specified output's probed mode list
4956+ * @output: output that will receive the new mode
4957+ * @mode: mode data
4958+ *
4959+ * LOCKING:
4960+ * Caller must hold mode config lock.
4961+ *
4962+ * Add @mode to @output's mode list for later use.
4963+ */
4964+void drm_mode_probed_add(struct drm_output *output,
4965+ struct drm_display_mode *mode)
4966+{
4967+ list_add(&mode->head, &output->probed_modes);
4968+}
4969+EXPORT_SYMBOL(drm_mode_probed_add);
4970+
4971+/**
4972+ * drm_mode_remove - remove and free a mode
4973+ * @output: output list to modify
4974+ * @mode: mode to remove
4975+ *
4976+ * LOCKING:
4977+ * Caller must hold mode config lock.
4978+ *
4979+ * Remove @mode from @output's mode list, then free it.
4980+ */
4981+void drm_mode_remove(struct drm_output *output, struct drm_display_mode *mode)
4982+{
4983+ list_del(&mode->head);
4984+ kfree(mode);
4985+}
4986+EXPORT_SYMBOL(drm_mode_remove);
4987+
4988+/**
4989+ * drm_output_create - create a new output
4990+ * @dev: DRM device
4991+ * @funcs: callbacks for this output
4992+ * @name: user visible name of the output
4993+ *
4994+ * LOCKING:
4995+ * Takes @dev's mode_config lock; the caller must not already hold it.
4996+ *
4997+ * Creates a new drm_output structure and adds it to @dev's mode_config
4998+ * structure.
4999+ *
5000+ * RETURNS:
5001+ * Pointer to the new output or NULL on error.
5002+ */
5003+struct drm_output *drm_output_create(struct drm_device *dev,
5004+ const struct drm_output_funcs *funcs,
5005+ const char *name)
5006+{
5007+ struct drm_output *output = NULL;
5008+
5009+ output = kzalloc(sizeof(struct drm_output), GFP_KERNEL);
5010+ if (!output)
5011+ return NULL;
5012+
5013+ output->dev = dev;
5014+ output->funcs = funcs;
5015+ output->id = drm_idr_get(dev, output);
5016+ if (name)
5017+ strncpy(output->name, name, DRM_OUTPUT_LEN);
5018+ output->name[DRM_OUTPUT_LEN - 1] = 0;
5019+ output->subpixel_order = SubPixelUnknown;
5020+ INIT_LIST_HEAD(&output->probed_modes);
5021+ INIT_LIST_HEAD(&output->modes);
5022+ /* randr_output? */
5023+ /* output_set_monitor(output)? */
5024+ /* check for output_ignored(output)? */
5025+
5026+ mutex_lock(&dev->mode_config.mutex);
5027+ list_add_tail(&output->head, &dev->mode_config.output_list);
5028+ dev->mode_config.num_output++;
5029+
5030+ mutex_unlock(&dev->mode_config.mutex);
5031+
5032+ return output;
5033+
5034+}
5035+EXPORT_SYMBOL(drm_output_create);
5036+
5037+/**
5038+ * drm_output_destroy - remove an output
5039+ * @output: output to remove
5040+ *
5041+ * LOCKING:
5042+ * Takes @dev's mode_config lock internally; the caller must not hold it.
5043+ *
5044+ * Call @output's cleanup function, then remove the output from the DRM
5045+ * mode_config after freeing @output's modes.
5046+ */
5047+void drm_output_destroy(struct drm_output *output)
5048+{
5049+ struct drm_device *dev = output->dev;
5050+ struct drm_display_mode *mode, *t;
5051+
5052+	if (output->funcs->cleanup)
5053+ (*output->funcs->cleanup)(output);
5054+
5055+ list_for_each_entry_safe(mode, t, &output->probed_modes, head)
5056+ drm_mode_remove(output, mode);
5057+
5058+ list_for_each_entry_safe(mode, t, &output->modes, head)
5059+ drm_mode_remove(output, mode);
5060+
5061+ mutex_lock(&dev->mode_config.mutex);
5062+ drm_idr_put(dev, output->id);
5063+ list_del(&output->head);
5064+ mutex_unlock(&dev->mode_config.mutex);
5065+ kfree(output);
5066+}
5067+EXPORT_SYMBOL(drm_output_destroy);
5068+
5069+/**
5070+ * drm_output_rename - rename an output
5071+ * @output: output to rename
5072+ * @name: new user visible name
5073+ *
5074+ * LOCKING:
5075+ * None.
5076+ *
5077+ * Simply stuff a new name into @output's name field, based on @name.
5078+ *
5079+ * RETURNS:
5080+ * True if the name was changed, false otherwise.
5081+ */
5082+bool drm_output_rename(struct drm_output *output, const char *name)
5083+{
5084+ if (!name)
5085+ return false;
5086+
5087+ strncpy(output->name, name, DRM_OUTPUT_LEN);
5088+ output->name[DRM_OUTPUT_LEN - 1] = 0;
5089+
5090+ DRM_DEBUG("Changed name to %s\n", output->name);
5091+// drm_output_set_monitor(output);
5092+// if (drm_output_ignored(output))
5093+// return FALSE;
5094+
5095+	return true;
5096+}
5097+EXPORT_SYMBOL(drm_output_rename);
5098+
5099+/**
5100+ * drm_mode_create - create a new display mode
5101+ * @dev: DRM device
5102+ *
5103+ * LOCKING:
5104+ * None.
5105+ *
5106+ * Create a new drm_display_mode, give it an ID, and return it.
5107+ *
5108+ * RETURNS:
5109+ * Pointer to new mode on success, NULL on error.
5110+ */
5111+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
5112+{
5113+ struct drm_display_mode *nmode;
5114+
5115+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
5116+ if (!nmode)
5117+ return NULL;
5118+
5119+ nmode->mode_id = drm_idr_get(dev, nmode);
5120+ return nmode;
5121+}
5122+EXPORT_SYMBOL(drm_mode_create);
5123+
5124+/**
5125+ * drm_mode_destroy - remove a mode
5126+ * @dev: DRM device
5127+ * @mode: mode to remove
5128+ *
5129+ * LOCKING:
5130+ * Caller must hold mode config lock.
5131+ *
5132+ * Free @mode's unique identifier, then free it.
5133+ */
5134+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
5135+{
5136+ drm_idr_put(dev, mode->mode_id);
5137+
5138+ kfree(mode);
5139+}
5140+EXPORT_SYMBOL(drm_mode_destroy);
5141+
5142+/**
5143+ * drm_mode_config_init - initialize DRM mode_configuration structure
5144+ * @dev: DRM device
5145+ *
5146+ * LOCKING:
5147+ * None, should happen single threaded at init time.
5148+ *
5149+ * Initialize @dev's mode_config structure, used for tracking the graphics
5150+ * configuration of @dev.
5151+ */
5152+void drm_mode_config_init(struct drm_device *dev)
5153+{
5154+ mutex_init(&dev->mode_config.mutex);
5155+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
5156+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
5157+ INIT_LIST_HEAD(&dev->mode_config.output_list);
5158+ INIT_LIST_HEAD(&dev->mode_config.property_list);
5159+ INIT_LIST_HEAD(&dev->mode_config.usermode_list);
5160+ idr_init(&dev->mode_config.crtc_idr);
5161+}
5162+EXPORT_SYMBOL(drm_mode_config_init);
5163+
5164+/**
5165+ * drm_get_buffer_object - find the buffer object for a given handle
5166+ * @dev: DRM device
5167+ * @bo: pointer to caller's buffer_object pointer
5168+ * @handle: handle to lookup
5169+ *
5170+ * LOCKING:
5171+ * Must take @dev's struct_mutex to protect buffer object lookup.
5172+ *
5173+ * Given @handle, lookup the buffer object in @dev and put it in the caller's
5174+ * @bo pointer.
5175+ *
5176+ * RETURNS:
5177+ * Zero on success, -EINVAL if the handle couldn't be found.
5178+ */
5179+static int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
5180+{
5181+ struct drm_user_object *uo;
5182+ struct drm_hash_item *hash;
5183+ int ret;
5184+
5185+ *bo = NULL;
5186+
5187+ mutex_lock(&dev->struct_mutex);
5188+ ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
5189+ if (ret) {
5190+ DRM_ERROR("Couldn't find handle.\n");
5191+ ret = -EINVAL;
5192+ goto out_err;
5193+ }
5194+
5195+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
5196+ if (uo->type != drm_buffer_type) {
5197+ ret = -EINVAL;
5198+ goto out_err;
5199+ }
5200+
5201+ *bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
5202+ ret = 0;
5203+out_err:
5204+ mutex_unlock(&dev->struct_mutex);
5205+ return ret;
5206+}
5207+
5208+char drm_init_mode[32];
5209+int drm_init_xres;
5210+int drm_init_yres;
5211+EXPORT_SYMBOL(drm_init_mode);
5212+EXPORT_SYMBOL(drm_init_xres);
5213+EXPORT_SYMBOL(drm_init_yres);
5214+
5215+/**
5216+ * drm_pick_crtcs - pick crtcs for output devices
5217+ * @dev: DRM device
5218+ *
5219+ * LOCKING:
5220+ * Caller must hold mode config lock.
5221+ */
5222+static void drm_pick_crtcs (struct drm_device *dev)
5223+{
5224+ int c, o, assigned;
5225+ struct drm_output *output, *output_equal;
5226+ struct drm_crtc *crtc;
5227+ struct drm_display_mode *des_mode = NULL, *modes, *modes_equal;
5228+
5229+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5230+ output->crtc = NULL;
5231+
5232+		/* Don't hook up outputs that are disconnected ??
5233+		 *
5234+		 * This is debatable. Do we want a fixed /dev/fbX or
5235+		 * dynamic on hotplug (we'd need mode code for that though)?
5236+		 *
5237+		 * If we don't hook up outputs now, then we only create
5238+		 * /dev/fbX for the output that's enabled, which is good as
5239+		 * the user's console will be on that output.
5240+		 *
5241+		 * If we do hook up outputs that are disconnected now, then
5242+		 * the user may end up having to muck about with the fbcon
5243+		 * map flags to assign their console to the enabled output. Ugh.
5244+		 */
5245+ if (output->status != output_status_connected)
5246+ continue;
5247+
5248+ des_mode = NULL;
5249+ list_for_each_entry(des_mode, &output->modes, head) {
5250+ if (/* !strcmp(des_mode->name, drm_init_mode) || */
5251+			    des_mode->hdisplay == drm_init_xres
5252+			    && des_mode->vdisplay == drm_init_yres) {
5253+ des_mode->type |= DRM_MODE_TYPE_USERPREF;
5254+ break;
5255+ }
5256+
5257+ }
5258+ /* No userdef mode (initial mode set from module parameter) */
5259+ if (!des_mode || !(des_mode->type & DRM_MODE_TYPE_USERPREF)) {
5260+ list_for_each_entry(des_mode, &output->modes, head) {
5261+ if (des_mode->type & DRM_MODE_TYPE_PREFERRED)
5262+ break;
5263+ }
5264+ }
5265+
5266+ /* No preferred mode, and no default mode, let's just
5267+ select the first available */
5268+ if (!des_mode || (!(des_mode->type & DRM_MODE_TYPE_PREFERRED)
5269+ && !(des_mode->type & DRM_MODE_TYPE_USERPREF))) {
5270+			list_for_each_entry(des_mode, &output->modes, head)
5271+				break;	/* just take the first mode on the list */
5274+ }
5275+
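+		/* Walk the CRTC list; bit c of output->possible_crtcs set
+		 * means CRTC number c is able to drive this output. */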
5276+ c = -1;
5277+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5278+ assigned = 0;
5279+
5280+ c++;
5281+ if ((output->possible_crtcs & (1 << c)) == 0)
5282+ continue;
5283+
5284+ list_for_each_entry(output_equal, &dev->mode_config.output_list, head) {
5285+ if (output->id == output_equal->id)
5286+ continue;
5287+
5288+ /* Find out if crtc has been assigned before */
5289+ if (output_equal->crtc == crtc)
5290+ assigned = 1;
5291+ }
5292+
5293+#if 1 /* continue for now */
5294+ if (assigned)
5295+ continue;
5296+#endif
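+			/* See whether this output can instead clone a CRTC
+			 * already claimed by another output: they must share
+			 * an identical mode and compatible possible_clones. */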
5297+
5298+ o = -1;
5299+ list_for_each_entry(output_equal, &dev->mode_config.output_list, head) {
5300+ o++;
5301+ if (output->id == output_equal->id)
5302+ continue;
5303+
5304+ list_for_each_entry(modes, &output->modes, head) {
5305+ list_for_each_entry(modes_equal, &output_equal->modes, head) {
5306+ if (drm_mode_equal (modes, modes_equal)) {
5307+ if ((output->possible_clones & output_equal->possible_clones) && (output_equal->crtc == crtc)) {
5308+						printk(KERN_DEBUG "Cloning %s (0x%lx) to %s (0x%lx)\n", output->name, output->possible_clones, output_equal->name, output_equal->possible_clones);
5309+ assigned = 0;
5310+ goto clone;
5311+ }
5312+ }
5313+ }
5314+ }
5315+ }
5316+
5317+clone:
5318+			/* crtc has already been assigned, skip it */
5319+ if (assigned)
5320+ continue;
5321+
5322+ /* Found a CRTC to attach to, do it ! */
5323+ output->crtc = crtc;
5324+ output->crtc->desired_mode = des_mode;
5325+ output->initial_x = 0;
5326+ output->initial_y = 0;
5327+			DRM_DEBUG("Desired mode for CRTC %d is 0x%x:%s\n", c, des_mode->mode_id, des_mode->name);
5328+ break;
5329+ }
5330+ }
5331+}
5332+EXPORT_SYMBOL(drm_pick_crtcs);
5333+
5334+/**
5335+ * drm_initial_config - setup a sane initial output configuration
5336+ * @dev: DRM device
5337+ * @can_grow: this configuration is growable
5338+ *
5339+ * LOCKING:
5340+ * Called at init time, must take mode config lock.
5341+ *
5342+ * Scan the CRTCs and outputs and try to put together an initial setup.
5343+ * At the moment, this is a cloned configuration across all heads with
5344+ * a new framebuffer object as the backing store.
5345+ *
5346+ * RETURNS:
5347+ * Zero if everything went ok, nonzero otherwise.
5348+ */
5349+bool drm_initial_config(struct drm_device *dev, bool can_grow)
5350+{
5351+ struct drm_output *output;
5352+ struct drm_crtc *crtc;
5353+	bool ret = false;
5354+
5355+ mutex_lock(&dev->mode_config.mutex);
5356+
5357+ drm_crtc_probe_output_modes(dev, 2048, 2048);
5358+
5359+ drm_pick_crtcs(dev);
5360+
5361+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5362+
5363+ /* can't setup the crtc if there's no assigned mode */
5364+ if (!crtc->desired_mode)
5365+ continue;
5366+
5367+ /* Now setup the fbdev for attached crtcs */
5368+ dev->driver->fb_probe(dev, crtc);
5369+ }
5370+
5371+	/* This is a little screwy, as we've already walked the outputs
5372+	 * above, but it's also a bit of magic. Things can fail to get
5373+	 * set up above if an existing device gets re-assigned, which
5374+	 * confuses the hardware. Walking the outputs here fixes up
5375+	 * their CRTCs.
5376+	 */
5377+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5378+
5379+ /* can't setup the output if there's no assigned mode */
5380+ if (!output->crtc || !output->crtc->desired_mode)
5381+ continue;
5382+
5383+ /* and needs an attached fb */
5384+ if (output->crtc->fb)
5385+ drm_crtc_set_mode(output->crtc, output->crtc->desired_mode, 0, 0);
5386+ }
5387+
5388+ drm_disable_unused_functions(dev);
5389+
5390+ mutex_unlock(&dev->mode_config.mutex);
5391+ return ret;
5392+}
5393+EXPORT_SYMBOL(drm_initial_config);
5394+
5395+/**
5396+ * drm_mode_config_cleanup - free up DRM mode_config info
5397+ * @dev: DRM device
5398+ *
5399+ * LOCKING:
5400+ * Caller must hold mode config lock.
5401+ *
5402+ * Free up all the outputs and CRTCs associated with this DRM device, then
5403+ * free up the framebuffers and associated buffer objects.
5404+ *
5405+ * FIXME: cleanup any dangling user buffer objects too
5406+ */
5407+void drm_mode_config_cleanup(struct drm_device *dev)
5408+{
5409+ struct drm_output *output, *ot;
5410+ struct drm_crtc *crtc, *ct;
5411+ struct drm_framebuffer *fb, *fbt;
5412+ struct drm_display_mode *mode, *mt;
5413+ struct drm_property *property, *pt;
5414+
5415+ list_for_each_entry_safe(output, ot, &dev->mode_config.output_list, head) {
5416+ drm_output_destroy(output);
5417+ }
5418+
5419+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) {
5420+ drm_property_destroy(dev, property);
5421+ }
5422+
5423+ list_for_each_entry_safe(mode, mt, &dev->mode_config.usermode_list, head) {
5424+ drm_mode_destroy(dev, mode);
5425+ }
5426+
5427+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
5428+ if (fb->bo->type != drm_bo_type_kernel)
5429+ drm_framebuffer_destroy(fb);
5430+ else
5431+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
5432+ }
5433+
5434+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
5435+ drm_crtc_destroy(crtc);
5436+ }
5437+
5438+}
5439+EXPORT_SYMBOL(drm_mode_config_cleanup);
5440+
5441+/**
5442+ * drm_crtc_set_config - set a new config from userspace
5443+ * @crtc: CRTC to setup
5444+ * @crtc_info: user provided configuration
5445+ * @new_mode: new mode to set
5446+ * @output_set: set of outputs for the new config
5447+ * @fb: new framebuffer
5448+ *
5449+ * LOCKING:
5450+ * Caller must hold mode config lock.
5451+ *
5452+ * Setup a new configuration, provided by the user in @crtc_info, and enable
5453+ * it.
5454+ *
5455+ * RETURNS:
5456+ * Zero. (FIXME)
5457+ */
5458+int drm_crtc_set_config(struct drm_crtc *crtc, struct drm_mode_crtc *crtc_info, struct drm_display_mode *new_mode, struct drm_output **output_set, struct drm_framebuffer *fb)
5459+{
5460+ struct drm_device *dev = crtc->dev;
5461+ struct drm_crtc **save_crtcs, *new_crtc;
5462+ bool save_enabled = crtc->enabled;
5463+	bool changed = false;
5464+ struct drm_output *output;
5465+ int count = 0, ro;
5466+
5467+ save_crtcs = kzalloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc *), GFP_KERNEL);
5468+ if (!save_crtcs)
5469+ return -ENOMEM;
5470+
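+	/* Any change of framebuffer, scanout origin or mode forces a full
+	 * modeset below. */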
5471+ if (crtc->fb != fb)
5472+ changed = true;
5473+
5474+ if (crtc_info->x != crtc->x || crtc_info->y != crtc->y)
5475+ changed = true;
5476+
5477+ if (new_mode && (crtc->mode.mode_id != new_mode->mode_id))
5478+ changed = true;
5479+
5480+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5481+ save_crtcs[count++] = output->crtc;
5482+
5483+ if (output->crtc == crtc)
5484+ new_crtc = NULL;
5485+ else
5486+ new_crtc = output->crtc;
5487+
5488+ for (ro = 0; ro < crtc_info->count_outputs; ro++) {
5489+ if (output_set[ro] == output)
5490+ new_crtc = crtc;
5491+ }
5492+ if (new_crtc != output->crtc) {
5493+ changed = true;
5494+ output->crtc = new_crtc;
5495+ }
5496+ }
5497+
5498+ if (changed) {
5499+ crtc->fb = fb;
5500+ crtc->enabled = (new_mode != NULL);
5501+ if (new_mode != NULL) {
5502+ DRM_DEBUG("attempting to set mode from userspace\n");
5503+ drm_mode_debug_printmodeline(dev, new_mode);
5504+ if (!drm_crtc_set_mode(crtc, new_mode, crtc_info->x,
5505+ crtc_info->y)) {
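+				/* Modeset failed: restore the saved
+				 * output->crtc assignments before bailing. */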
5506+ crtc->enabled = save_enabled;
5507+ count = 0;
5508+ list_for_each_entry(output, &dev->mode_config.output_list, head)
5509+ output->crtc = save_crtcs[count++];
5510+ kfree(save_crtcs);
5511+ return -EINVAL;
5512+ }
5513+ crtc->desired_x = crtc_info->x;
5514+ crtc->desired_y = crtc_info->y;
5515+ crtc->desired_mode = new_mode;
5516+ }
5517+ drm_disable_unused_functions(dev);
5518+ }
5519+ kfree(save_crtcs);
5520+ return 0;
5521+}
5522+
5523+/**
5524+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
5525+ * @out: drm_mode_modeinfo struct to return to the user
5526+ * @in: drm_display_mode to use
5527+ *
5528+ * LOCKING:
5529+ * None.
5530+ *
5531+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
5532+ * the user.
5533+ */
5534+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, struct drm_display_mode *in)
5535+{
5536+
5537+ out->id = in->mode_id;
5538+ out->clock = in->clock;
5539+ out->hdisplay = in->hdisplay;
5540+ out->hsync_start = in->hsync_start;
5541+ out->hsync_end = in->hsync_end;
5542+ out->htotal = in->htotal;
5543+ out->hskew = in->hskew;
5544+ out->vdisplay = in->vdisplay;
5545+ out->vsync_start = in->vsync_start;
5546+ out->vsync_end = in->vsync_end;
5547+ out->vtotal = in->vtotal;
5548+ out->vscan = in->vscan;
5549+ out->vrefresh = in->vrefresh;
5550+ out->flags = in->flags;
5551+ out->type = in->type;
5552+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
5553+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
5554+}
5555+
5556+/**
5557+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
5558+ * @out: drm_display_mode to return to the user
5559+ * @in: drm_mode_modeinfo to use
5560+ *
5561+ * LOCKING:
5562+ * None.
5563+ *
5564+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
5565+ * the caller.
5566+ */
5567+void drm_crtc_convert_umode(struct drm_display_mode *out, struct drm_mode_modeinfo *in)
5568+{
5569+ out->clock = in->clock;
5570+ out->hdisplay = in->hdisplay;
5571+ out->hsync_start = in->hsync_start;
5572+ out->hsync_end = in->hsync_end;
5573+ out->htotal = in->htotal;
5574+ out->hskew = in->hskew;
5575+ out->vdisplay = in->vdisplay;
5576+ out->vsync_start = in->vsync_start;
5577+ out->vsync_end = in->vsync_end;
5578+ out->vtotal = in->vtotal;
5579+ out->vscan = in->vscan;
5580+ out->vrefresh = in->vrefresh;
5581+ out->flags = in->flags;
5582+ out->type = in->type;
5583+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
5584+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
5585+}
5586+
5587+/**
5588+ * drm_mode_getresources - get graphics configuration
5589+ * @inode: inode from the ioctl
5590+ * @filp: file * from the ioctl
5591+ * @cmd: cmd from ioctl
5592+ * @arg: arg from ioctl
5593+ *
5594+ * LOCKING:
5595+ * Takes mode config lock.
5596+ *
5597+ * Construct a set of configuration description structures and return
5598+ * them to the user, including CRTC, output and framebuffer configuration.
5599+ *
5600+ * Called by the user via ioctl.
5601+ *
5602+ * RETURNS:
5603+ * Zero on success, errno on failure.
5604+ */
5605+int drm_mode_getresources(struct drm_device *dev,
5606+ void *data, struct drm_file *file_priv)
5607+{
5608+ struct drm_mode_card_res *card_res = data;
5609+ struct list_head *lh;
5610+ struct drm_framebuffer *fb;
5611+ struct drm_output *output;
5612+ struct drm_crtc *crtc;
5613+ struct drm_mode_modeinfo u_mode;
5614+ struct drm_display_mode *mode;
5615+ int ret = 0;
5616+	int mode_count = 0;
5617+ int output_count = 0;
5618+ int crtc_count = 0;
5619+ int fb_count = 0;
5620+ int copied = 0;
5621+
5622+ memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
5623+
5624+ mutex_lock(&dev->mode_config.mutex);
5625+
5626+ list_for_each(lh, &dev->mode_config.fb_list)
5627+ fb_count++;
5628+
5629+ list_for_each(lh, &dev->mode_config.crtc_list)
5630+ crtc_count++;
5631+
5632+ list_for_each_entry(output, &dev->mode_config.output_list,
5633+ head) {
5634+ output_count++;
5635+ list_for_each(lh, &output->modes)
5636+ mode_count++;
5637+ }
5638+ list_for_each(lh, &dev->mode_config.usermode_list)
5639+ mode_count++;
5640+
5641+ if (card_res->count_modes == 0) {
5642+ DRM_DEBUG("probing modes %dx%d\n", dev->mode_config.max_width, dev->mode_config.max_height);
5643+ drm_crtc_probe_output_modes(dev, dev->mode_config.max_width, dev->mode_config.max_height);
5644+ mode_count = 0;
5645+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5646+ list_for_each(lh, &output->modes)
5647+ mode_count++;
5648+ }
5649+ list_for_each(lh, &dev->mode_config.usermode_list)
5650+ mode_count++;
5651+ }
5652+
5653+ /* handle this in 4 parts */
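+	/* For each object list the IDs are copied out only when the
+	 * caller's array is big enough; the true count is always written
+	 * back so userspace can reallocate and retry. */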
5654+ /* FBs */
5655+ if (card_res->count_fbs >= fb_count) {
5656+ copied = 0;
5657+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
5658+			if (put_user(fb->id, card_res->fb_id + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
5660+ copied++;
5661+ }
5662+ }
5663+ card_res->count_fbs = fb_count;
5664+
5665+ /* CRTCs */
5666+ if (card_res->count_crtcs >= crtc_count) {
5667+ copied = 0;
5668+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5669+ DRM_DEBUG("CRTC ID is %d\n", crtc->id);
5670+			if (put_user(crtc->id, card_res->crtc_id + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
5672+ copied++;
5673+ }
5674+ }
5675+ card_res->count_crtcs = crtc_count;
5676+
5677+
5678+ /* Outputs */
5679+ if (card_res->count_outputs >= output_count) {
5680+ copied = 0;
5681+ list_for_each_entry(output, &dev->mode_config.output_list,
5682+ head) {
5683+ DRM_DEBUG("OUTPUT ID is %d\n", output->id);
5684+			if (put_user(output->id, card_res->output_id + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
5686+ copied++;
5687+ }
5688+ }
5689+ card_res->count_outputs = output_count;
5690+
5691+ /* Modes */
5692+ if (card_res->count_modes >= mode_count) {
5693+ copied = 0;
5694+ list_for_each_entry(output, &dev->mode_config.output_list,
5695+ head) {
5696+ list_for_each_entry(mode, &output->modes, head) {
5697+ drm_crtc_convert_to_umode(&u_mode, mode);
5698+				if (copy_to_user(card_res->modes + copied,
5699+						 &u_mode, sizeof(u_mode))) {
+					ret = -EFAULT;
+					goto out;
+				}
5701+ copied++;
5702+ }
5703+ }
5704+ /* add in user modes */
5705+ list_for_each_entry(mode, &dev->mode_config.usermode_list, head) {
5706+ drm_crtc_convert_to_umode(&u_mode, mode);
5707+			if (copy_to_user(card_res->modes + copied, &u_mode,
5708+					 sizeof(u_mode))) {
+				ret = -EFAULT;
+				goto out;
+			}
5710+ copied++;
5711+ }
5712+ }
5713+ card_res->count_modes = mode_count;
5714+
5715+ DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
5716+ card_res->count_outputs,
5717+ card_res->count_modes);
5718+
+out:
5719+	mutex_unlock(&dev->mode_config.mutex);
5720+ return ret;
5721+}
5722+
5723+/**
5724+ * drm_mode_getcrtc - get CRTC configuration
5725+ * @inode: inode from the ioctl
5726+ * @filp: file * from the ioctl
5727+ * @cmd: cmd from ioctl
5728+ * @arg: arg from ioctl
5729+ *
5730+ * LOCKING:
5731+ * Caller? (FIXME)
5732+ *
5733+ * Construct a CRTC configuration structure to return to the user.
5734+ *
5735+ * Called by the user via ioctl.
5736+ *
5737+ * RETURNS:
5738+ * Zero on success, errno on failure.
5739+ */
5740+int drm_mode_getcrtc(struct drm_device *dev,
5741+ void *data, struct drm_file *file_priv)
5742+{
5743+ struct drm_mode_crtc *crtc_resp = data;
5744+ struct drm_crtc *crtc;
5745+ struct drm_output *output;
5746+ int ocount;
5747+ int ret = 0;
5748+
5749+ mutex_lock(&dev->mode_config.mutex);
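+	/* All mode objects live in the one crtc_idr, so double-check that
+	 * the object found really is the CRTC with the requested id. */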
5750+ crtc = idr_find(&dev->mode_config.crtc_idr, crtc_resp->crtc_id);
5751+ if (!crtc || (crtc->id != crtc_resp->crtc_id)) {
5752+ ret = -EINVAL;
5753+ goto out;
5754+ }
5755+
5756+ crtc_resp->x = crtc->x;
5757+ crtc_resp->y = crtc->y;
5758+
5759+ if (crtc->fb)
5760+ crtc_resp->fb_id = crtc->fb->id;
5761+ else
5762+ crtc_resp->fb_id = 0;
5763+
5764+ crtc_resp->outputs = 0;
5765+ if (crtc->enabled) {
5766+
5767+ crtc_resp->mode = crtc->mode.mode_id;
5768+ ocount = 0;
5769+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5770+ if (output->crtc == crtc)
5771+ crtc_resp->outputs |= 1 << (ocount++);
5772+ }
5773+ } else {
5774+ crtc_resp->mode = 0;
5775+ }
5776+
5777+out:
5778+ mutex_unlock(&dev->mode_config.mutex);
5779+ return ret;
5780+}
5781+
5782+/**
5783+ * drm_mode_getoutput - get output configuration
5784+ * @inode: inode from the ioctl
5785+ * @filp: file * from the ioctl
5786+ * @cmd: cmd from ioctl
5787+ * @arg: arg from ioctl
5788+ *
5789+ * LOCKING:
5790+ * Caller? (FIXME)
5791+ *
5792+ * Construct an output configuration structure to return to the user.
5793+ *
5794+ * Called by the user via ioctl.
5795+ *
5796+ * RETURNS:
5797+ * Zero on success, errno on failure.
5798+ */
5799+int drm_mode_getoutput(struct drm_device *dev,
5800+ void *data, struct drm_file *file_priv)
5801+{
5802+ struct drm_mode_get_output *out_resp = data;
5803+ struct drm_output *output;
5804+ struct drm_display_mode *mode;
5805+ int mode_count = 0;
5806+ int props_count = 0;
5807+ int ret = 0;
5808+ int copied = 0;
5809+ int i;
5810+
5811+ DRM_DEBUG("output id %d:\n", out_resp->output);
5812+
5813+ mutex_lock(&dev->mode_config.mutex);
5814+	output = idr_find(&dev->mode_config.crtc_idr, out_resp->output);
5815+ if (!output || (output->id != out_resp->output)) {
5816+ ret = -EINVAL;
5817+ goto out;
5818+ }
5819+
5820+ list_for_each_entry(mode, &output->modes, head)
5821+ mode_count++;
5822+
5823+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++)
5824+ if (output->user_mode_ids[i] != 0)
5825+ mode_count++;
5826+
5827+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
5828+ if (output->property_ids[i] != 0) {
5829+ props_count++;
5830+ }
5831+ }
5832+
5833+ strncpy(out_resp->name, output->name, DRM_OUTPUT_NAME_LEN);
5834+ out_resp->name[DRM_OUTPUT_NAME_LEN-1] = 0;
5835+
5836+ out_resp->mm_width = output->mm_width;
5837+ out_resp->mm_height = output->mm_height;
5838+ out_resp->subpixel = output->subpixel_order;
5839+ out_resp->connection = output->status;
5840+ if (output->crtc)
5841+ out_resp->crtc = output->crtc->id;
5842+ else
5843+ out_resp->crtc = 0;
5844+
5845+ out_resp->crtcs = output->possible_crtcs;
5846+ out_resp->clones = output->possible_clones;
5847+
5848+ if ((out_resp->count_modes >= mode_count) && mode_count) {
5849+ copied = 0;
5850+ list_for_each_entry(mode, &output->modes, head) {
5851+ out_resp->modes[copied++] = mode->mode_id;
5852+ }
5853+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
5854+ if (output->user_mode_ids[i] != 0) {
5855+				if (put_user(output->user_mode_ids[i], out_resp->modes + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
5857+ copied++;
5858+ }
5859+ }
5860+ }
5861+ out_resp->count_modes = mode_count;
5862+
5863+ if ((out_resp->count_props >= props_count) && props_count) {
5864+ copied = 0;
5865+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
5866+ if (output->property_ids[i] != 0) {
5867+ if (put_user(output->property_ids[i], out_resp->props + copied)) {
5868+ ret = -EFAULT;
5869+ goto out;
5870+ }
5871+
5872+ if (put_user(output->property_values[i], out_resp->prop_values + copied)) {
5873+ ret = -EFAULT;
5874+ goto out;
5875+ }
5876+ copied++;
5877+ }
5878+ }
5879+ }
5880+ out_resp->count_props = props_count;
5881+
5882+out:
5883+ mutex_unlock(&dev->mode_config.mutex);
5884+ return ret;
5885+}
5886+
5887+/**
5888+ * drm_mode_setcrtc - set CRTC configuration
5889+ * @inode: inode from the ioctl
5890+ * @filp: file * from the ioctl
5891+ * @cmd: cmd from ioctl
5892+ * @arg: arg from ioctl
5893+ *
5894+ * LOCKING:
5895+ * Caller? (FIXME)
5896+ *
5897+ * Build a new CRTC configuration based on user request.
5898+ *
5899+ * Called by the user via ioctl.
5900+ *
5901+ * RETURNS:
5902+ * Zero on success, errno on failure.
5903+ */
5904+int drm_mode_setcrtc(struct drm_device *dev,
5905+ void *data, struct drm_file *file_priv)
5906+{
5907+ struct drm_mode_crtc *crtc_req = data;
5908+ struct drm_crtc *crtc;
5909+ struct drm_output **output_set = NULL, *output;
5910+ struct drm_display_mode *mode;
5911+ struct drm_framebuffer *fb = NULL;
5912+ int ret = 0;
5913+ int i;
5914+
5915+ mutex_lock(&dev->mode_config.mutex);
5916+ crtc = idr_find(&dev->mode_config.crtc_idr, crtc_req->crtc_id);
5917+ if (!crtc || (crtc->id != crtc_req->crtc_id)) {
5918+ DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
5919+ ret = -EINVAL;
5920+ goto out;
5921+ }
5922+
5923+ if (crtc_req->mode) {
5924+ /* if we have a mode we need a framebuffer */
5925+ if (crtc_req->fb_id) {
5926+ fb = idr_find(&dev->mode_config.crtc_idr, crtc_req->fb_id);
5927+ if (!fb || (fb->id != crtc_req->fb_id)) {
5928+				DRM_DEBUG("Unknown FB ID %d\n", crtc_req->fb_id);
5929+ ret = -EINVAL;
5930+ goto out;
5931+ }
5932+ }
5933+ mode = idr_find(&dev->mode_config.crtc_idr, crtc_req->mode);
5934+ if (!mode || (mode->mode_id != crtc_req->mode)) {
5935+ struct drm_output *output;
5936+
5937+ list_for_each_entry(output,
5938+ &dev->mode_config.output_list,
5939+ head) {
5940+ list_for_each_entry(mode, &output->modes,
5941+ head) {
5942+ drm_mode_debug_printmodeline(dev,
5943+ mode);
5944+ }
5945+ }
5946+
5947+ DRM_DEBUG("Unknown mode id %d, %p\n", crtc_req->mode, mode);
5948+ ret = -EINVAL;
5949+ goto out;
5950+ }
5951+ } else
5952+ mode = NULL;
5953+
5954+ if (crtc_req->count_outputs == 0 && mode) {
5955+ DRM_DEBUG("Count outputs is 0 but mode set\n");
5956+ ret = -EINVAL;
5957+ goto out;
5958+ }
5959+
5960+ if (crtc_req->count_outputs > 0 && !mode && !fb) {
5961+ DRM_DEBUG("Count outputs is %d but no mode or fb set\n", crtc_req->count_outputs);
5962+ ret = -EINVAL;
5963+ goto out;
5964+ }
5965+
5966+ if (crtc_req->count_outputs > 0) {
5967+ u32 out_id;
5968+ output_set = kmalloc(crtc_req->count_outputs *
5969+ sizeof(struct drm_output *), GFP_KERNEL);
5970+ if (!output_set) {
5971+ ret = -ENOMEM;
5972+ goto out;
5973+ }
5974+
5975+ for (i = 0; i < crtc_req->count_outputs; i++) {
5976+ if (get_user(out_id, &crtc_req->set_outputs[i])) {
5977+ ret = -EFAULT;
5978+ goto out;
5979+ }
5980+
5981+ output = idr_find(&dev->mode_config.crtc_idr, out_id);
5982+ if (!output || (out_id != output->id)) {
5983+ DRM_DEBUG("Output id %d unknown\n", out_id);
5984+ ret = -EINVAL;
5985+ goto out;
5986+ }
5987+
5988+ output_set[i] = output;
5989+ }
5990+ }
5991+
5992+ ret = drm_crtc_set_config(crtc, crtc_req, mode, output_set, fb);
5993+
5994+out:
+	kfree(output_set);
5995+	mutex_unlock(&dev->mode_config.mutex);
5996+ return ret;
5997+}
5998+
5999+/**
6000+ * drm_mode_addfb - add an FB to the graphics configuration
6001+ * @inode: inode from the ioctl
6002+ * @filp: file * from the ioctl
6003+ * @cmd: cmd from ioctl
6004+ * @arg: arg from ioctl
6005+ *
6006+ * LOCKING:
6007+ * Takes mode config lock.
6008+ *
6009+ * Add a new FB to the specified CRTC, given a user request.
6010+ *
6011+ * Called by the user via ioctl.
6012+ *
6013+ * RETURNS:
6014+ * Zero on success, errno on failure.
6015+ */
6016+int drm_mode_addfb(struct drm_device *dev,
6017+ void *data, struct drm_file *file_priv)
6018+{
6019+ struct drm_mode_fb_cmd *r = data;
6020+ struct drm_mode_config *config = &dev->mode_config;
6021+ struct drm_framebuffer *fb;
6022+ struct drm_buffer_object *bo;
6023+ struct drm_crtc *crtc;
6024+ int ret = 0;
6025+
6026+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
6027+		DRM_ERROR("new framebuffer width not within limits\n");
6028+ return -EINVAL;
6029+ }
6030+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
6031+		DRM_ERROR("new framebuffer height not within limits\n");
6032+ return -EINVAL;
6033+ }
6034+
6035+ mutex_lock(&dev->mode_config.mutex);
6036+ /* TODO check limits are okay */
6037+ ret = drm_get_buffer_object(dev, &bo, r->handle);
6038+ if (ret || !bo) {
6039+ ret = -EINVAL;
6040+ goto out;
6041+ }
6042+
6043+	/* TODO check buffer is sufficiently large */
6044+ /* TODO setup destructor callback */
6045+
6046+ fb = drm_framebuffer_create(dev);
6047+ if (!fb) {
6048+ ret = -EINVAL;
6049+ goto out;
6050+ }
6051+
6052+ fb->width = r->width;
6053+ fb->height = r->height;
6054+ fb->pitch = r->pitch;
6055+ fb->bits_per_pixel = r->bpp;
6056+ fb->depth = r->depth;
6057+ fb->offset = bo->offset;
6058+ fb->bo = bo;
6059+
6060+ r->buffer_id = fb->id;
6061+
6062+ list_add(&fb->filp_head, &file_priv->fbs);
6063+
6064+ /* FIXME: bind the fb to the right crtc */
6065+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6066+ crtc->fb = fb;
6067+ dev->driver->fb_probe(dev, crtc);
6068+ }
6069+
6070+out:
6071+ mutex_unlock(&dev->mode_config.mutex);
6072+ return ret;
6073+}
6074+
6075+/**
6076+ * drm_mode_rmfb - remove an FB from the configuration
6077+ * @inode: inode from the ioctl
6078+ * @filp: file * from the ioctl
6079+ * @cmd: cmd from ioctl
6080+ * @arg: arg from ioctl
6081+ *
6082+ * LOCKING:
6083+ * Takes mode config lock.
6084+ *
6085+ * Remove the FB specified by the user.
6086+ *
6087+ * Called by the user via ioctl.
6088+ *
6089+ * RETURNS:
6090+ * Zero on success, errno on failure.
6091+ */
6092+int drm_mode_rmfb(struct drm_device *dev,
6093+ void *data, struct drm_file *file_priv)
6094+{
6095+	struct drm_framebuffer *fb = NULL;
6096+ uint32_t *id = data;
6097+ int ret = 0;
6098+
6099+ mutex_lock(&dev->mode_config.mutex);
6100+ fb = idr_find(&dev->mode_config.crtc_idr, *id);
6101+	/* TODO check that we really get a framebuffer back. */
6102+ if (!fb || (*id != fb->id)) {
6103+ DRM_ERROR("mode invalid framebuffer id\n");
6104+ ret = -EINVAL;
6105+ goto out;
6106+ }
6107+
6108+	/* TODO check if we own the buffer */
6109+	/* TODO release all CRTCs connected to the framebuffer */
6110+	/* TODO unhook the destructor from the buffer object */
6112+
6113+ if (fb->bo->type != drm_bo_type_kernel)
6114+ drm_framebuffer_destroy(fb);
6115+ else
6116+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
6117+
6118+out:
6119+ mutex_unlock(&dev->mode_config.mutex);
6120+ return ret;
6121+}
6122+
6123+/**
6124+ * drm_mode_getfb - get FB info
6125+ * @inode: inode from the ioctl
6126+ * @filp: file * from the ioctl
6127+ * @cmd: cmd from ioctl
6128+ * @arg: arg from ioctl
6129+ *
6130+ * LOCKING:
6131+ * Caller? (FIXME)
6132+ *
6133+ * Lookup the FB given its ID and return info about it.
6134+ *
6135+ * Called by the user via ioctl.
6136+ *
6137+ * RETURNS:
6138+ * Zero on success, errno on failure.
6139+ */
6140+int drm_mode_getfb(struct drm_device *dev,
6141+ void *data, struct drm_file *file_priv)
6142+{
6143+ struct drm_mode_fb_cmd *r = data;
6144+ struct drm_framebuffer *fb;
6145+ int ret = 0;
6146+
6147+ mutex_lock(&dev->mode_config.mutex);
6148+ fb = idr_find(&dev->mode_config.crtc_idr, r->buffer_id);
6149+ if (!fb || (r->buffer_id != fb->id)) {
6150+ DRM_ERROR("invalid framebuffer id\n");
6151+ ret = -EINVAL;
6152+ goto out;
6153+ }
6154+
6155+ r->height = fb->height;
6156+ r->width = fb->width;
6157+ r->depth = fb->depth;
6158+ r->bpp = fb->bits_per_pixel;
6159+ r->handle = fb->bo->base.hash.key;
6160+ r->pitch = fb->pitch;
6161+
6162+out:
6163+ mutex_unlock(&dev->mode_config.mutex);
6164+ return ret;
6165+}
6166+
6167+/**
6168+ * drm_fb_release - remove and free the FBs on this file
6169+ * @filp: file * being released
6170+ *
6171+ * LOCKING:
6172+ * Takes mode config lock.
6173+ *
6174+ * Destroy all the FBs associated with @filp.
6175+ *
6176+ * Called at file release time, not from an ioctl, and returns nothing.
6180+ */
6181+void drm_fb_release(struct file *filp)
6182+{
6183+ struct drm_file *priv = filp->private_data;
6184+ struct drm_device *dev = priv->minor->dev;
6185+ struct drm_framebuffer *fb, *tfb;
6186+
6187+ mutex_lock(&dev->mode_config.mutex);
6188+ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
6189+ list_del(&fb->filp_head);
6190+ if (fb->bo->type != drm_bo_type_kernel)
6191+ drm_framebuffer_destroy(fb);
6192+ else
6193+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
6194+ }
6195+ mutex_unlock(&dev->mode_config.mutex);
6196+}
6197+
6198+/*
6199+ * drm_mode_addmode - flag @user_mode as user-defined and add it to @dev's usermode list.
6200+ */
6201+void drm_mode_addmode(struct drm_device *dev, struct drm_display_mode *user_mode)
6202+{
6203+ user_mode->type |= DRM_MODE_TYPE_USERDEF;
6204+
6205+ user_mode->output_count = 0;
6206+ list_add(&user_mode->head, &dev->mode_config.usermode_list);
6207+}
6208+EXPORT_SYMBOL(drm_mode_addmode);
6209+
6210+int drm_mode_rmmode(struct drm_device *dev, struct drm_display_mode *mode)
6211+{
6212+ struct drm_display_mode *t;
6213+ int ret = -EINVAL;
6214+ list_for_each_entry(t, &dev->mode_config.usermode_list, head) {
6215+ if (t == mode) {
6216+ list_del(&mode->head);
6217+ drm_mode_destroy(dev, mode);
6218+ ret = 0;
6219+ break;
6220+ }
6221+ }
6222+ return ret;
6223+}
6224+EXPORT_SYMBOL(drm_mode_rmmode);
6225+
6226+static int drm_mode_attachmode(struct drm_device *dev,
6227+ struct drm_output *output,
6228+ struct drm_display_mode *mode)
6229+{
6230+ int ret = 0;
6231+ int i;
6232+
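+	/* user_mode_ids is a fixed-size table: take the first free slot. */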
6233+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
6234+ if (output->user_mode_ids[i] == 0) {
6235+ output->user_mode_ids[i] = mode->mode_id;
6236+ mode->output_count++;
6237+ break;
6238+ }
6239+ }
6240+
6241+ if (i == DRM_OUTPUT_MAX_UMODES)
6242+ ret = -ENOSPC;
6243+
6244+ return ret;
6245+}
6246+
6247+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
6248+ struct drm_display_mode *mode)
6249+{
6250+ struct drm_output *output;
6251+
6252+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
6253+ if (output->crtc == crtc)
6254+ drm_mode_attachmode(dev, output, mode);
6255+ }
6256+ return 0;
6257+}
6258+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
6259+
6260+static int drm_mode_detachmode(struct drm_device *dev,
6261+ struct drm_output *output,
6262+ struct drm_display_mode *mode)
6263+{
6264+ int found = 0;
6265+ int ret = 0, i;
6266+
6267+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
6268+ if (output->user_mode_ids[i] == mode->mode_id) {
6269+ output->user_mode_ids[i] = 0;
6270+ mode->output_count--;
6271+ found = 1;
6272+ }
6273+ }
6274+
6275+ if (!found)
6276+ ret = -EINVAL;
6277+
6278+ return ret;
6279+}
6280+
6281+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
6282+{
6283+ struct drm_output *output;
6284+
6285+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
6286+ drm_mode_detachmode(dev, output, mode);
6287+ }
6288+ return 0;
6289+}
6290+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
6291+
6292+/**
6293+ * drm_mode_addmode_ioctl - add a user-defined mode
6294+ * @inode: inode from the ioctl
6295+ * @filp: file * from the ioctl
6296+ * @cmd: cmd from ioctl
6297+ * @arg: arg from ioctl
6298+ *
6299+ * Adds a user specified mode to the kernel.
6300+ *
6301+ * Called by the user via ioctl.
6302+ *
6303+ * RETURNS:
6304+ * Writes the new mode id into arg.
6305+ * Zero on success, errno on failure.
6306+ */
6307+int drm_mode_addmode_ioctl(struct drm_device *dev,
6308+ void *data, struct drm_file *file_priv)
6309+{
6310+ struct drm_mode_modeinfo *new_mode = data;
6311+ struct drm_display_mode *user_mode;
6312+ int ret = 0;
6313+
6314+ mutex_lock(&dev->mode_config.mutex);
6315+ user_mode = drm_mode_create(dev);
6316+ if (!user_mode) {
6317+ ret = -ENOMEM;
6318+ goto out;
6319+ }
6320+
6321+ drm_crtc_convert_umode(user_mode, new_mode);
6322+
6323+ drm_mode_addmode(dev, user_mode);
6324+ new_mode->id = user_mode->mode_id;
6325+
6326+out:
6327+ mutex_unlock(&dev->mode_config.mutex);
6328+ return ret;
6329+}
6330+
6331+/**
6332+ * drm_mode_rmmode_ioctl - remove a user-defined mode
6333+ * @inode: inode from the ioctl
6334+ * @filp: file * from the ioctl
6335+ * @cmd: cmd from ioctl
6336+ * @arg: arg from ioctl
6337+ *
6338+ * Remove the user-defined mode specified by the user.
6339+ *
6340+ * Called by the user via ioctl.
6341+ *
6342+ * RETURNS:
6343+ * Zero on success, errno on failure.
6344+ */
6345+int drm_mode_rmmode_ioctl(struct drm_device *dev,
6346+ void *data, struct drm_file *file_priv)
6347+{
6348+ uint32_t *id = data;
6349+ struct drm_display_mode *mode;
6350+ int ret = -EINVAL;
6351+
6352+ mutex_lock(&dev->mode_config.mutex);
6353+ mode = idr_find(&dev->mode_config.crtc_idr, *id);
6354+ if (!mode || (*id != mode->mode_id)) {
6355+ goto out;
6356+ }
6357+
6358+ if (!(mode->type & DRM_MODE_TYPE_USERDEF)) {
6359+ goto out;
6360+ }
6361+
6362+ if (mode->output_count) {
6363+ goto out;
6364+ }
6365+
6366+ ret = drm_mode_rmmode(dev, mode);
6367+
6368+out:
6369+ mutex_unlock(&dev->mode_config.mutex);
6370+ return ret;
6371+}
6372+
6373+/**
6374+ * drm_mode_attachmode_ioctl - attach a user mode to an output
6375+ * @inode: inode from the ioctl
6376+ * @filp: file * from the ioctl
6377+ * @cmd: cmd from ioctl
6378+ * @arg: arg from ioctl
6379+ *
6380+ * This attaches a user specified mode to an output.
6381+ * Called by the user via ioctl.
6382+ *
6383+ * RETURNS:
6384+ * Zero on success, errno on failure.
6385+ */
6386+int drm_mode_attachmode_ioctl(struct drm_device *dev,
6387+ void *data, struct drm_file *file_priv)
6388+{
6389+ struct drm_mode_mode_cmd *mode_cmd = data;
6390+ struct drm_output *output;
6391+ struct drm_display_mode *mode;
6392+ int ret = 0;
6393+
6394+ mutex_lock(&dev->mode_config.mutex);
6395+
6396+ mode = idr_find(&dev->mode_config.crtc_idr, mode_cmd->mode_id);
6397+ if (!mode || (mode->mode_id != mode_cmd->mode_id)) {
6398+ ret = -EINVAL;
6399+ goto out;
6400+ }
6401+
6402+ output = idr_find(&dev->mode_config.crtc_idr, mode_cmd->output_id);
6403+ if (!output || (output->id != mode_cmd->output_id)) {
6404+ ret = -EINVAL;
6405+ goto out;
6406+ }
6407+
6408+ ret = drm_mode_attachmode(dev, output, mode);
6409+out:
6410+ mutex_unlock(&dev->mode_config.mutex);
6411+ return ret;
6412+}
6413+
6414+
6415+/**
6416+ * drm_mode_detachmode_ioctl - detach a user-specified mode from an output
6417+ * @inode: inode from the ioctl
6418+ * @filp: file * from the ioctl
6419+ * @cmd: cmd from ioctl
6420+ * @arg: arg from ioctl
6421+ *
6422+ * Called by the user via ioctl.
6423+ *
6424+ * RETURNS:
6425+ * Zero on success, errno on failure.
6426+ */
6427+int drm_mode_detachmode_ioctl(struct drm_device *dev,
6428+ void *data, struct drm_file *file_priv)
6429+{
6430+ struct drm_mode_mode_cmd *mode_cmd = data;
6431+ struct drm_output *output;
6432+ struct drm_display_mode *mode;
6433+ int ret = 0;
6434+
6435+ mutex_lock(&dev->mode_config.mutex);
6436+
6437+ mode = idr_find(&dev->mode_config.crtc_idr, mode_cmd->mode_id);
6438+ if (!mode || (mode->mode_id != mode_cmd->mode_id)) {
6439+ ret = -EINVAL;
6440+ goto out;
6441+ }
6442+
6443+ output = idr_find(&dev->mode_config.crtc_idr, mode_cmd->output_id);
6444+ if (!output || (output->id != mode_cmd->output_id)) {
6445+ ret = -EINVAL;
6446+ goto out;
6447+ }
6448+
6449+
6450+ ret = drm_mode_detachmode(dev, output, mode);
6451+out:
6452+ mutex_unlock(&dev->mode_config.mutex);
6453+ return ret;
6454+}
6455+
6456+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
6457+ const char *name, int num_values)
6458+{
6459+ struct drm_property *property = NULL;
6460+
6461+	property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
6462+ if (!property)
6463+ return NULL;
6464+
6465+ property->values = kzalloc(sizeof(uint32_t)*num_values, GFP_KERNEL);
6466+ if (!property->values)
6467+ goto fail;
6468+
6469+ property->id = drm_idr_get(dev, property);
6470+ property->flags = flags;
6471+ property->num_values = num_values;
6472+ INIT_LIST_HEAD(&property->enum_list);
6473+
6474+ if (name)
6475+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
6476+
6477+ list_add_tail(&property->head, &dev->mode_config.property_list);
6478+ return property;
6479+fail:
6480+ kfree(property);
6481+ return NULL;
6482+}
6483+EXPORT_SYMBOL(drm_property_create);
6484+
6485+int drm_property_add_enum(struct drm_property *property, int index,
6486+ uint32_t value, const char *name)
6487+{
6488+ struct drm_property_enum *prop_enum;
6489+
6490+ if (!(property->flags & DRM_MODE_PROP_ENUM))
6491+ return -EINVAL;
6492+
6493+ if (!list_empty(&property->enum_list)) {
6494+ list_for_each_entry(prop_enum, &property->enum_list, head) {
6495+ if (prop_enum->value == value) {
6496+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
6497+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
6498+ return 0;
6499+ }
6500+ }
6501+ }
6502+
6503+ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
6504+ if (!prop_enum)
6505+ return -ENOMEM;
6506+
6507+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
6508+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
6509+ prop_enum->value = value;
6510+
6511+ property->values[index] = value;
6512+ list_add_tail(&prop_enum->head, &property->enum_list);
6513+ return 0;
6514+}
6515+EXPORT_SYMBOL(drm_property_add_enum);
6516+
6517+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
6518+{
6519+ struct drm_property_enum *prop_enum, *pt;
6520+
6521+ list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
6522+ list_del(&prop_enum->head);
6523+ kfree(prop_enum);
6524+ }
6525+
6526+ kfree(property->values);
6527+ drm_idr_put(dev, property->id);
6528+ list_del(&property->head);
6529+ kfree(property);
6530+}
6531+EXPORT_SYMBOL(drm_property_destroy);
6532+
6533+
6534+int drm_output_attach_property(struct drm_output *output,
6535+ struct drm_property *property, int init_val)
6536+{
6537+ int i;
6538+
6539+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
6540+ if (output->property_ids[i] == 0) {
6541+ output->property_ids[i] = property->id;
6542+ output->property_values[i] = init_val;
6543+ break;
6544+ }
6545+ }
6546+
6547+ if (i == DRM_OUTPUT_MAX_PROPERTY)
6548+ return -EINVAL;
6549+ return 0;
6550+}
6551+EXPORT_SYMBOL(drm_output_attach_property);
6552+
6553+int drm_mode_getproperty_ioctl(struct drm_device *dev,
6554+ void *data, struct drm_file *file_priv)
6555+{
6556+ struct drm_mode_get_property *out_resp = data;
6557+ struct drm_property *property;
6558+ int enum_count = 0;
6559+ int value_count = 0;
6560+ int ret = 0, i;
6561+ int copied;
6562+ struct drm_property_enum *prop_enum;
6563+
6564+ mutex_lock(&dev->mode_config.mutex);
6565+ property = idr_find(&dev->mode_config.crtc_idr, out_resp->prop_id);
6566+ if (!property || (property->id != out_resp->prop_id)) {
6567+ ret = -EINVAL;
6568+ goto done;
6569+ }
6570+
6571+
6572+ list_for_each_entry(prop_enum, &property->enum_list, head)
6573+ enum_count++;
6574+
6575+ value_count = property->num_values;
6576+
6577+ strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
6578+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
6579+ out_resp->flags = property->flags;
6580+
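+	/* Same count-then-copy convention as the other ioctls: only fill
+	 * the arrays when they fit, always report the true counts. */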
6581+ if ((out_resp->count_values >= value_count) && value_count) {
6582+ for (i = 0; i < value_count; i++) {
6583+ if (put_user(property->values[i], out_resp->values + i)) {
6584+ ret = -EFAULT;
6585+ goto done;
6586+ }
6587+ }
6588+ }
6589+ out_resp->count_values = value_count;
6590+
6591+ if ((out_resp->count_enums >= enum_count) && enum_count) {
6592+ copied = 0;
6593+ list_for_each_entry(prop_enum, &property->enum_list, head) {
6594+ if (put_user(prop_enum->value, &out_resp->enums[copied].value)) {
6595+ ret = -EFAULT;
6596+ goto done;
6597+ }
6598+
6599+ if (copy_to_user(&out_resp->enums[copied].name,
6600+ prop_enum->name, DRM_PROP_NAME_LEN)) {
6601+ ret = -EFAULT;
6602+ goto done;
6603+ }
6604+ copied++;
6605+ }
6606+ }
6607+ out_resp->count_enums = enum_count;
6608+
6609+done:
6610+ mutex_unlock(&dev->mode_config.mutex);
6611+ return ret;
6612+}
6613Index: linux-2.6.27/drivers/gpu/drm/drm_drv.c
6614===================================================================
6615--- linux-2.6.27.orig/drivers/gpu/drm/drm_drv.c 2009-02-05 13:29:29.000000000 +0000
6616+++ linux-2.6.27/drivers/gpu/drm/drm_drv.c 2009-02-05 13:29:33.000000000 +0000
6617@@ -49,6 +49,9 @@
6618 #include "drmP.h"
6619 #include "drm_core.h"
6620
6621+static void drm_cleanup(struct drm_device * dev);
6622+int drm_fb_loaded = 0;
6623+
6624 static int drm_version(struct drm_device *dev, void *data,
6625 struct drm_file *file_priv);
6626
6627@@ -113,16 +116,48 @@
6628
6629 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6630 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6631-
6632 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
6633-
6634- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
6635-
6636 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6637-
6638- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
6639- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
6640- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
6641+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY),
6642+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY),
6643+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETOUTPUT, drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY),
6644+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY),
6645+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY),
6646+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY),
6647+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY),
6648+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDMODE, drm_mode_addmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
6649+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMMODE, drm_mode_rmmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
6650+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
6651+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
6652+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_ROOT_ONLY),
6653+
6654+ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
6655+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6656+ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
6657+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6658+ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
6659+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6660+ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
6661+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6662+
6663+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
6664+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
6665+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
6666+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
6667+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
6668+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
6669+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
6670+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
6671+
6672+ DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
6673+ DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
6674+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
6675+ DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
6676+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
6677+ DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
6678+ DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
6679+ DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
6680+ DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
6681 };
6682
6683 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
6684@@ -164,7 +199,12 @@
6685 drm_drawable_free_all(dev);
6686 del_timer(&dev->timer);
6687
6688- /* Clear pid list */
6689+ if (dev->unique) {
6690+ drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
6691+ dev->unique = NULL;
6692+ dev->unique_len = 0;
6693+ }
6694+
6695 if (dev->magicfree.next) {
6696 list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
6697 list_del(&pt->head);
6698@@ -236,12 +276,24 @@
6699 dev->lock.file_priv = NULL;
6700 wake_up_interruptible(&dev->lock.lock_queue);
6701 }
6702+ dev->dev_mapping = NULL;
6703 mutex_unlock(&dev->struct_mutex);
6704
6705 DRM_DEBUG("lastclose completed\n");
6706 return 0;
6707 }
6708
6709+void drm_cleanup_pci(struct pci_dev *pdev)
6710+{
6711+ struct drm_device *dev = pci_get_drvdata(pdev);
6712+
6713+ pci_set_drvdata(pdev, NULL);
6714+ pci_release_regions(pdev);
6715+ if (dev)
6716+ drm_cleanup(dev);
6717+}
6718+EXPORT_SYMBOL(drm_cleanup_pci);
6719+
6720 /**
6721 * Module initialization. Called via init_module at module load time, or via
6722 * linux/init/main.c (this is not currently supported).
6723@@ -255,26 +307,68 @@
6724 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
6725 * after the initialization for driver customization.
6726 */
6727-int drm_init(struct drm_driver *driver)
6728+int drm_init(struct drm_driver *driver,
6729+ struct pci_device_id *pciidlist)
6730 {
6731- struct pci_dev *pdev = NULL;
6732+ struct pci_dev *pdev;
6733 struct pci_device_id *pid;
6734- int i;
6735+ int rc, i;
6736
6737 DRM_DEBUG("\n");
6738
6739- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
6740- pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
6741+ for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
6742+ pid = &pciidlist[i];
6743
6744 pdev = NULL;
6745 /* pass back in pdev to account for multiple identical cards */
6746 while ((pdev =
6747 pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
6748- pid->subdevice, pdev)) != NULL) {
6749- /* stealth mode requires a manual probe */
6750- pci_dev_get(pdev);
6751- drm_get_dev(pdev, pid, driver);
6752+ pid->subdevice, pdev))) {
6753+ /* Are there device class requirements? */
6754+ if ((pid->class != 0)
6755+ && ((pdev->class & pid->class_mask) != pid->class)) {
6756+ continue;
6757+			/* Is there already a driver loaded, or does something like
6758+			 * VesaFB control the memory region? (short circuit saves work) */
6759+ /* does something like VesaFB have control of the memory region? */
6760+ if (pci_dev_driver(pdev)
6761+ || pci_request_regions(pdev, "DRM scan")) {
6762+ /* go into stealth mode */
6763+ drm_fb_loaded = 1;
6764+ pci_dev_put(pdev);
6765+ break;
6766+ }
6767+ /* no fbdev or vesadev, put things back and wait for normal probe */
6768+ pci_release_regions(pdev);
6769+ }
6770+ }
6771+
6772+ if (!drm_fb_loaded)
6773+ return pci_register_driver(&driver->pci_driver);
6774+ else {
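+		/* A framebuffer driver already owns the device, so the PCI
+		 * core won't probe us; bind each matching device by hand. */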
6775+ for (i = 0; pciidlist[i].vendor != 0; i++) {
6776+ pid = &pciidlist[i];
6777+
6778+ pdev = NULL;
6779+ /* pass back in pdev to account for multiple identical cards */
6780+ while ((pdev =
6781+ pci_get_subsys(pid->vendor, pid->device,
6782+ pid->subvendor, pid->subdevice,
6783+ pdev))) {
6784+ /* Are there device class requirements? */
6785+ if ((pid->class != 0)
6786+ && ((pdev->class & pid->class_mask) != pid->class)) {
6787+ continue;
6788+ }
6789+ /* stealth mode requires a manual probe */
6790+ pci_dev_get(pdev);
6791+ if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
6792+ pci_dev_put(pdev);
6793+ return rc;
6794+ }
6795+ }
6796 }
6797+ DRM_INFO("Used old pci detect: framebuffer loaded\n");
6798 }
6799 return 0;
6800 }
6801@@ -298,6 +392,7 @@
6802 }
6803
6804 drm_lastclose(dev);
6805+ drm_ctxbitmap_cleanup(dev);
6806
6807 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
6808 dev->agp && dev->agp->agp_mtrr >= 0) {
6809@@ -308,6 +403,9 @@
6810 DRM_DEBUG("mtrr_del=%d\n", retval);
6811 }
6812
6813+ drm_bo_driver_finish(dev);
6814+ drm_fence_manager_takedown(dev);
6815+
6816 if (drm_core_has_AGP(dev) && dev->agp) {
6817 drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
6818 dev->agp = NULL;
6819@@ -317,7 +415,12 @@
6820 dev->driver->unload(dev);
6821
6822 drm_ht_remove(&dev->map_hash);
6823- drm_ctxbitmap_cleanup(dev);
6824+ drm_mm_takedown(&dev->offset_manager);
6825+ drm_ht_remove(&dev->object_hash);
6826+
6827+
6828+ if (!drm_fb_loaded)
6829+ pci_disable_device(dev->pdev);
6830
6831 drm_put_minor(&dev->primary);
6832 if (drm_put_dev(dev))
6833Index: linux-2.6.27/drivers/gpu/drm/drm_edid.c
6834===================================================================
6835--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6836+++ linux-2.6.27/drivers/gpu/drm/drm_edid.c 2009-02-05 13:29:33.000000000 +0000
6837@@ -0,0 +1,519 @@
6838+/*
6839+ * Copyright (c) 2007 Intel Corporation
6840+ * Jesse Barnes <jesse.barnes@intel.com>
6841+ *
6842+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
6843+ * FB layer.
6844+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
6845+ */
6846+#include "drmP.h"
6847+#include <linux/i2c-algo-bit.h>
6848+#include "drm_edid.h"
6849+
6850+#include <acpi/acpi_drivers.h>
6851+
6852+/* Valid EDID header has these bytes */
6853+static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6854+
6855+int drm_get_acpi_edid(char *method, char *edid, ssize_t length)
6856+{
6857+ int status;
6858+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
6859+ union acpi_object *obj;
6860+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
6861+ struct acpi_object_list args = { 1, &arg0 };
6862+
6863+ if (length == 128)
6864+ arg0.integer.value = 1;
6865+ else if (length == 256)
6866+ arg0.integer.value = 2;
6867+ else
6868+ return -EINVAL;
6869+
6870+ status = acpi_evaluate_object(NULL, method, &args, &buffer);
6871+ if (ACPI_FAILURE(status))
6872+ return -ENODEV;
6873+
6874+ obj = buffer.pointer;
6875+
6876+	if (obj && obj->type == ACPI_TYPE_BUFFER)
6877+		memcpy(edid, obj->buffer.pointer, obj->buffer.length);
6878+	else {
6879+		printk(KERN_ERR PREFIX "Invalid _DDC data\n");
6880+		status = -EFAULT;
6881+	}
6882+	kfree(obj);
6883+
6884+ return status;
6885+}
6886+EXPORT_SYMBOL(drm_get_acpi_edid);
6887+
6888+/**
6889+ * edid_valid - sanity check EDID data
6890+ * @edid: EDID data
6891+ *
6892+ * Sanity check the EDID block by looking at the header, the version number
6893+ * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
6894+ * valid.
6895+ */
6896+static bool edid_valid(struct edid *edid)
6897+{
6898+ int i;
6899+ u8 csum = 0;
6900+ u8 *raw_edid = (u8 *)edid;
6901+
6902+ if (memcmp(edid->header, edid_header, sizeof(edid_header)))
6903+ goto bad;
6904+ if (edid->version != 1)
6905+ goto bad;
6906+ if (edid->revision <= 0 || edid->revision > 3)
6907+ goto bad;
6908+
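+	/* All EDID_LENGTH bytes of the block must sum to zero (mod 256). */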
6909+ for (i = 0; i < EDID_LENGTH; i++)
6910+ csum += raw_edid[i];
6911+ if (csum)
6912+ goto bad;
6913+
6914+ return 1;
6915+
6916+bad:
6917+ return 0;
6918+}
6919+
6920+/**
6921+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
6922+ * @t: standard timing params
6923+ *
6924+ * Take the standard timing params (in this case width, aspect, and refresh)
6925+ * and convert them into a real mode using CVT.
6926+ *
6927+ * Punts for now, but should eventually use the FB layer's CVT based mode
6928+ * generation code.
6929+ */
6930+struct drm_display_mode *drm_mode_std(struct drm_device *dev,
6931+ struct std_timing *t)
6932+{
6933+// struct fb_videomode mode;
6934+
6935+// fb_find_mode_cvt(&mode, 0, 0);
6936+ /* JJJ: convert to drm_display_mode */
6937+ struct drm_display_mode *mode;
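+	/* EDID stores (hsize / 8) - 31, so decode with v * 8 + 248; the
+	 * two aspect bits then select 16:10, 4:3, 5:4 or 16:9. */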
6938+ int hsize = t->hsize * 8 + 248, vsize;
6939+
6940+ mode = drm_mode_create(dev);
6941+ if (!mode)
6942+ return NULL;
6943+
6944+ if (t->aspect_ratio == 0)
6945+ vsize = (hsize * 10) / 16;
6946+ else if (t->aspect_ratio == 1)
6947+ vsize = (hsize * 3) / 4;
6948+ else if (t->aspect_ratio == 2)
6949+ vsize = (hsize * 4) / 5;
6950+ else
6951+		vsize = (hsize * 9) / 16;
6952+
+	/* Store the decoded size so the mode and its name (set below)
+	 * reflect the standard timing instead of being left zero. */
+	mode->hdisplay = hsize;
+	mode->vdisplay = vsize;
+
6953+	drm_mode_set_name(mode);
6954+
6955+ return mode;
6956+}
6957+
6958+/**
6959+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
6960+ * @timing: EDID detailed timing info
6961+ * @preferred: is this a preferred mode?
6962+ *
6963+ * An EDID detailed timing block contains enough info for us to create and
6964+ * return a new struct drm_display_mode. The @preferred flag will be set
6965+ * if this is the display's preferred timing, and we'll use it to indicate
6966+ * to the other layers that this mode is desired.
6967+ */
6968+struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
6969+ struct detailed_timing *timing)
6970+{
6971+ struct drm_display_mode *mode;
6972+ struct detailed_pixel_timing *pt = &timing->data.pixel_data;
6973+
6974+ if (pt->stereo) {
6975+ printk(KERN_WARNING "stereo mode not supported\n");
6976+ return NULL;
6977+ }
6978+ if (!pt->separate_sync) {
6979+ printk(KERN_WARNING "integrated sync not supported\n");
6980+ return NULL;
6981+ }
6982+
6983+ mode = drm_mode_create(dev);
6984+ if (!mode)
6985+ return NULL;
6986+
6987+ mode->type = DRM_MODE_TYPE_DRIVER;
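+	/* EDID keeps the pixel clock in 10 kHz units, mode->clock in kHz;
+	 * each timing value is split into a low byte plus high bits that
+	 * are stitched back together below. */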
6988+ mode->clock = timing->pixel_clock * 10;
6989+
6990+ mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
6991+ mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
6992+ pt->hsync_offset_lo);
6993+ mode->hsync_end = mode->hsync_start +
6994+ ((pt->hsync_pulse_width_hi << 8) |
6995+ pt->hsync_pulse_width_lo);
6996+ mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
6997+
6998+ mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
6999+ mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
7000+ pt->vsync_offset_lo);
7001+ mode->vsync_end = mode->vsync_start +
7002+ ((pt->vsync_pulse_width_hi << 8) |
7003+ pt->vsync_pulse_width_lo);
7004+ mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
7005+
7006+ drm_mode_set_name(mode);
7007+
7008+ if (pt->interlaced)
7009+ mode->flags |= V_INTERLACE;
7010+
7011+ mode->flags |= pt->hsync_positive ? V_PHSYNC : V_NHSYNC;
7012+ mode->flags |= pt->vsync_positive ? V_PVSYNC : V_NVSYNC;
7013+
7014+ return mode;
7015+}
7016+
7017+/*
7018+ * Detailed mode info for the EDID "established modes" data to use.
7019+ */
7020+static struct drm_display_mode edid_est_modes[] = {
7021+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
7022+ 968, 1056, 0, 600, 601, 605, 628, 0,
7023+ V_PHSYNC | V_PVSYNC) }, /* 800x600@60Hz */
7024+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
7025+ 896, 1024, 0, 600, 601, 603, 625, 0,
7026+ V_PHSYNC | V_PVSYNC) }, /* 800x600@56Hz */
7027+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
7028+ 720, 840, 0, 480, 481, 484, 500, 0,
7029+ V_NHSYNC | V_NVSYNC) }, /* 640x480@75Hz */
7030+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
7031+ 704, 832, 0, 480, 489, 491, 520, 0,
7032+ V_NHSYNC | V_NVSYNC) }, /* 640x480@72Hz */
7033+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
7034+ 768, 864, 0, 480, 483, 486, 525, 0,
7035+ V_NHSYNC | V_NVSYNC) }, /* 640x480@67Hz */
7036+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
7037+ 752, 800, 0, 480, 490, 492, 525, 0,
7038+ V_NHSYNC | V_NVSYNC) }, /* 640x480@60Hz */
7039+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
7040+ 846, 900, 0, 400, 421, 423, 449, 0,
7041+ V_NHSYNC | V_NVSYNC) }, /* 720x400@88Hz */
7042+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
7043+ 846, 900, 0, 400, 412, 414, 449, 0,
7044+ V_NHSYNC | V_PVSYNC) }, /* 720x400@70Hz */
7045+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
7046+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
7047+ V_PHSYNC | V_PVSYNC) }, /* 1280x1024@75Hz */
7048+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
7049+ 1136, 1312, 0, 768, 769, 772, 800, 0,
7050+ V_PHSYNC | V_PVSYNC) }, /* 1024x768@75Hz */
7051+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
7052+ 1184, 1328, 0, 768, 771, 777, 806, 0,
7053+ V_NHSYNC | V_NVSYNC) }, /* 1024x768@70Hz */
7054+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
7055+ 1184, 1344, 0, 768, 771, 777, 806, 0,
7056+ V_NHSYNC | V_NVSYNC) }, /* 1024x768@60Hz */
7057+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
7058+ 1208, 1264, 0, 768, 768, 776, 817, 0,
7059+ V_PHSYNC | V_PVSYNC | V_INTERLACE) }, /* 1024x768@43Hz */
7060+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
7061+ 928, 1152, 0, 624, 625, 628, 667, 0,
7062+ V_NHSYNC | V_NVSYNC) }, /* 832x624@75Hz */
7063+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
7064+ 896, 1056, 0, 600, 601, 604, 625, 0,
7065+ V_PHSYNC | V_PVSYNC) }, /* 800x600@75Hz */
7066+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
7067+ 976, 1040, 0, 600, 637, 643, 666, 0,
7068+ V_PHSYNC | V_PVSYNC) }, /* 800x600@72Hz */
7069+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
7070+ 1344, 1600, 0, 864, 865, 868, 900, 0,
7071+ V_PHSYNC | V_PVSYNC) }, /* 1152x864@75Hz */
7072+};
7073+
7074+#define EDID_EST_TIMINGS 16
7075+#define EDID_STD_TIMINGS 8
7076+#define EDID_DETAILED_TIMINGS 4
7077+
7078+/**
7079+ * add_established_modes - get est. modes from EDID and add them
7080+ * @edid: EDID block to scan
7081+ *
7082+ * Each EDID block contains a bitmap of the supported "established modes" list
7083+ * (defined above). Tease them out and add them to the global modes list.
7084+ */
7085+static int add_established_modes(struct drm_output *output, struct edid *edid)
7086+{
7087+ struct drm_device *dev = output->dev;
7088+ unsigned long est_bits = edid->established_timings.t1 |
7089+ (edid->established_timings.t2 << 8) |
7090+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
7091+ int i, modes = 0;
7092+
7093+ for (i = 0; i <= EDID_EST_TIMINGS; i++)
7094+ if (est_bits & (1<<i)) {
7095+ struct drm_display_mode *newmode;
7096+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
7097+ drm_mode_probed_add(output, newmode);
7098+ modes++;
7099+ }
7100+
7101+ return modes;
7102+}
7103+
7104+/**
7105+ * add_standard_modes - get std. modes from EDID and add them
7106+ * @edid: EDID block to scan
7107+ *
7108+ * Standard modes can be calculated using the CVT standard. Grab them from
7109+ * @edid, calculate them, and add them to the list.
7110+ */
7111+static int add_standard_modes(struct drm_output *output, struct edid *edid)
7112+{
7113+ struct drm_device *dev = output->dev;
7114+ int i, modes = 0;
7115+
7116+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
7117+ struct std_timing *t = &edid->standard_timings[i];
7118+ struct drm_display_mode *newmode;
7119+
7120+ /* An unused std timing slot is coded as bytes 0x01, 0x01 */
7121+ if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
7122+ continue;
7123+
7124+ newmode = drm_mode_std(dev, &edid->standard_timings[i]);
7125+ drm_mode_probed_add(output, newmode);
7126+ modes++;
7127+ }
7128+
7129+ return modes;
7130+}
7131+
7132+/**
7133+ * add_detailed_info - get detailed mode info from EDID data
7134+ * @edid: EDID block to scan
7135+ *
7136+ * Some of the detailed timing sections may contain mode information. Grab
7137+ * it and add it to the list.
7138+ */
7139+static int add_detailed_info(struct drm_output *output, struct edid *edid)
7140+{
7141+ struct drm_device *dev = output->dev;
7142+ int i, j, modes = 0;
7143+
7144+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
7145+ struct detailed_timing *timing = &edid->detailed_timings[i];
7146+ struct detailed_non_pixel *data = &timing->data.other_data;
7147+ struct drm_display_mode *newmode;
7148+
7149+ /* EDID up to and including 1.2 may put monitor info here */
7150+ if (edid->version == 1 && edid->revision < 3)
7151+ continue;
7152+
7153+ /* Detailed mode timing */
7154+ if (timing->pixel_clock) {
7155+ newmode = drm_mode_detailed(dev, timing);
7156+ if (!newmode)
7157+ continue;
7158+ /* First detailed mode is preferred */
7159+ if (i == 0 && edid->preferred_timing)
7160+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
7161+ drm_mode_probed_add(output, newmode);
7162+ modes++;
7163+ continue;
7164+ }
7164+
7165+ /* Other timing or info */
7166+ switch (data->type) {
7167+ case EDID_DETAIL_MONITOR_SERIAL:
7168+ break;
7169+ case EDID_DETAIL_MONITOR_STRING:
7170+ break;
7171+ case EDID_DETAIL_MONITOR_RANGE:
7172+ /* Get monitor range data */
7173+ break;
7174+ case EDID_DETAIL_MONITOR_NAME:
7175+ break;
7176+ case EDID_DETAIL_MONITOR_CPDATA:
7177+ break;
7178+ case EDID_DETAIL_STD_MODES:
7179+ /* Five modes per detailed section */
7180+ for (j = 0; j < 5; j++) {
7181+ struct std_timing *std;
7182+ struct drm_display_mode *newmode;
7183+
7184+ std = &data->data.timings[j];
7185+ newmode = drm_mode_std(dev, std);
7186+ drm_mode_probed_add(output, newmode);
7187+ modes++;
7188+ }
7189+ break;
7190+ default:
7191+ break;
7192+ }
7193+ }
7194+
7195+ return modes;
7196+}
7197+
7198+#define DDC_ADDR 0x50
7199+
7200+static unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
7201+{
7202+ unsigned char start = 0x0;
7203+ unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
7204+ struct i2c_msg msgs[] = {
7205+ {
7206+ .addr = DDC_ADDR,
7207+ .flags = 0,
7208+ .len = 1,
7209+ .buf = &start,
7210+ }, {
7211+ .addr = DDC_ADDR,
7212+ .flags = I2C_M_RD,
7213+ .len = EDID_LENGTH,
7214+ .buf = buf,
7215+ }
7216+ };
7217+
7218+ if (!buf) {
7219+ DRM_ERROR("unable to allocate memory for EDID block.\n");
7220+ return NULL;
7221+ }
7222+
7223+ if (i2c_transfer(adapter, msgs, 2) == 2)
7224+ return buf;
7225+
7226+ DRM_INFO("unable to read EDID block.\n");
7227+ kfree(buf);
7228+ return NULL;
7229+}
7230+
7231+unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
7232+{
7233+ struct i2c_algo_bit_data *algo_data = adapter->algo_data;
7234+ unsigned char *edid = NULL;
7235+ int i, j;
7236+
7237+ /*
7238+ * Startup the bus:
7239+ * Set clock line high (but give it time to come up)
7240+ * Then set clock & data low
7241+ */
7242+ algo_data->setscl(algo_data->data, 1);
7243+ udelay(550); /* startup delay */
7244+ algo_data->setscl(algo_data->data, 0);
7245+ algo_data->setsda(algo_data->data, 0);
7246+
7247+ for (i = 0; i < 3; i++) {
7248+ /* For some old monitors we need the
7249+ * following process to initialize/stop DDC
7250+ */
7251+ algo_data->setsda(algo_data->data, 0);
7252+ msleep(13);
7253+
7254+ algo_data->setscl(algo_data->data, 1);
7255+ for (j = 0; j < 5; j++) {
7256+ msleep(10);
7257+ if (algo_data->getscl(algo_data->data))
7258+ break;
7259+ }
7260+ if (j == 5)
7261+ continue;
7262+
7263+ algo_data->setsda(algo_data->data, 0);
7264+ msleep(15);
7265+ algo_data->setscl(algo_data->data, 0);
7266+ msleep(15);
7267+ algo_data->setsda(algo_data->data, 1);
7268+ msleep(15);
7269+
7270+ /* Do the real work */
7271+ edid = drm_do_probe_ddc_edid(adapter);
7272+ algo_data->setsda(algo_data->data, 0);
7273+ algo_data->setscl(algo_data->data, 0);
7274+ msleep(15);
7275+
7276+ algo_data->setscl(algo_data->data, 1);
7277+ for (j = 0; j < 10; j++) {
7278+ msleep(10);
7279+ if (algo_data->getscl(algo_data->data))
7280+ break;
7281+ }
7282+
7283+ algo_data->setsda(algo_data->data, 1);
7284+ msleep(15);
7285+ algo_data->setscl(algo_data->data, 0);
7286+ if (edid)
7287+ break;
7288+ }
7289+ /* Release the DDC lines when done or the Apple Cinema HD display
7290+ * will switch off
7291+ */
7292+ algo_data->setsda(algo_data->data, 0);
7293+ algo_data->setscl(algo_data->data, 0);
7294+ algo_data->setscl(algo_data->data, 1);
7295+
7296+ return edid;
7297+}
7298+EXPORT_SYMBOL(drm_ddc_read);
7299+
7300+/**
7301+ * drm_get_edid - get EDID data, if available
7302+ * @output: output we're probing
7303+ * @adapter: i2c adapter to use for DDC
7304+ *
7305+ * Poke the given output's i2c channel to grab EDID data if possible.
7306+ *
7307+ * Return edid data or NULL if we couldn't find any.
7308+ */
7309+struct edid *drm_get_edid(struct drm_output *output,
7310+ struct i2c_adapter *adapter)
7311+{
7312+ struct edid *edid;
7313+
7314+ edid = (struct edid *)drm_ddc_read(adapter);
7315+ if (!edid) {
7316+ dev_warn(&output->dev->pdev->dev, "%s: no EDID data\n",
7317+ output->name);
7318+ return NULL;
7319+ }
7320+ if (!edid_valid(edid)) {
7321+ dev_warn(&output->dev->pdev->dev, "%s: EDID invalid.\n",
7322+ output->name);
7323+ kfree(edid);
7324+ return NULL;
7325+ }
7326+ return edid;
7327+}
7328+EXPORT_SYMBOL(drm_get_edid);
7329+
7330+/**
7331+ * drm_add_edid_modes - add modes from EDID data, if available
7332+ * @output: output we're probing
7333+ * @edid: edid data
7334+ *
7335+ * Add the specified modes to the output's mode list.
7336+ *
7337+ * Return number of modes added or 0 if we couldn't find any.
7338+ */
7339+int drm_add_edid_modes(struct drm_output *output, struct edid *edid)
7340+{
7341+ int num_modes = 0;
7342+
7343+ if (edid == NULL) {
7344+ return 0;
7345+ }
7346+ if (!edid_valid(edid)) {
7347+ dev_warn(&output->dev->pdev->dev, "%s: EDID invalid.\n",
7348+ output->name);
7349+ return 0;
7350+ }
7351+ num_modes += add_established_modes(output, edid);
7352+ num_modes += add_standard_modes(output, edid);
7353+ num_modes += add_detailed_info(output, edid);
7354+ return num_modes;
7355+}
7356+EXPORT_SYMBOL(drm_add_edid_modes);
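Chained together, the helpers above give a driver's mode-probing path: drm_get_edid() reads and validates the block over DDC, drm_add_edid_modes() turns it into probed modes, and the caller frees the buffer. A hedged sketch (the hook name is hypothetical; only the two drm_* calls are the APIs defined above):

    /* Hypothetical output ->get_modes() implementation. */
    static int example_output_get_modes(struct drm_output *output,
                                        struct i2c_adapter *ddc)
    {
        struct edid *edid;
        int count;

        edid = drm_get_edid(output, ddc);
        if (!edid)
            return 0;   /* nothing readable on the bus */

        count = drm_add_edid_modes(output, edid);
        kfree(edid);    /* drm_get_edid() kmallocs the block */
        return count;
    }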
7357Index: linux-2.6.27/drivers/gpu/drm/drm_fence.c
7358===================================================================
7359--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7360+++ linux-2.6.27/drivers/gpu/drm/drm_fence.c 2009-02-05 13:29:33.000000000 +0000
7361@@ -0,0 +1,829 @@
7362+/**************************************************************************
7363+ *
7364+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
7365+ * All Rights Reserved.
7366+ *
7367+ * Permission is hereby granted, free of charge, to any person obtaining a
7368+ * copy of this software and associated documentation files (the
7369+ * "Software"), to deal in the Software without restriction, including
7370+ * without limitation the rights to use, copy, modify, merge, publish,
7371+ * distribute, sub license, and/or sell copies of the Software, and to
7372+ * permit persons to whom the Software is furnished to do so, subject to
7373+ * the following conditions:
7374+ *
7375+ * The above copyright notice and this permission notice (including the
7376+ * next paragraph) shall be included in all copies or substantial portions
7377+ * of the Software.
7378+ *
7379+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
7380+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
7381+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
7382+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
7383+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
7384+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
7385+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
7386+ *
7387+ **************************************************************************/
7388+/*
7389+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
7390+ */
7391+
7392+#include "drmP.h"
7393+
7394+
7395+/*
7396+ * Convenience function to be called by fence::wait methods that
7397+ * need polling.
7398+ */
7399+
7400+int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
7401+ int interruptible, uint32_t mask,
7402+ unsigned long end_jiffies)
7403+{
7404+ struct drm_device *dev = fence->dev;
7405+ struct drm_fence_manager *fm = &dev->fm;
7406+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
7407+ uint32_t count = 0;
7408+ int ret;
7409+
7410+ DECLARE_WAITQUEUE(entry, current);
7411+ add_wait_queue(&fc->fence_queue, &entry);
7412+
7413+ ret = 0;
7414+
7415+ for (;;) {
7416+ __set_current_state((interruptible) ?
7417+ TASK_INTERRUPTIBLE :
7418+ TASK_UNINTERRUPTIBLE);
7419+ if (drm_fence_object_signaled(fence, mask))
7420+ break;
7421+ if (time_after_eq(jiffies, end_jiffies)) {
7422+ ret = -EBUSY;
7423+ break;
7424+ }
7425+ if (lazy)
7426+ schedule_timeout(1);
7427+ else if ((++count & 0x0F) == 0) {
7428+ __set_current_state(TASK_RUNNING);
7429+ schedule();
7430+ __set_current_state((interruptible) ?
7431+ TASK_INTERRUPTIBLE :
7432+ TASK_UNINTERRUPTIBLE);
7433+ }
7434+ if (interruptible && signal_pending(current)) {
7435+ ret = -EAGAIN;
7436+ break;
7437+ }
7438+ }
7439+ __set_current_state(TASK_RUNNING);
7440+ remove_wait_queue(&fc->fence_queue, &entry);
7441+ return ret;
7442+}
7443+EXPORT_SYMBOL(drm_fence_wait_polling);
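A fence driver that lacks a reliable completion interrupt can simply point its wait hook at this helper. A minimal sketch, assuming a three-second deadline (the hook name and timeout are illustrative, not part of this patch):

    /* Hypothetical fence_driver ->wait hook that just polls. */
    static int example_fence_wait(struct drm_fence_object *fence, int lazy,
                                  int interruptible, uint32_t mask)
    {
        unsigned long end = jiffies + 3 * DRM_HZ; /* arbitrary deadline */

        return drm_fence_wait_polling(fence, lazy, interruptible, mask, end);
    }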
7444+
7445+/*
7446+ * Typically called by the IRQ handler.
7447+ */
7448+
7449+void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
7450+ uint32_t sequence, uint32_t type, uint32_t error)
7451+{
7452+ int wake = 0;
7453+ uint32_t diff;
7454+ uint32_t relevant_type;
7455+ uint32_t new_type;
7456+ struct drm_fence_manager *fm = &dev->fm;
7457+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
7458+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7459+ struct list_head *head;
7460+ struct drm_fence_object *fence, *next;
7461+ int found = 0;
7462+
7463+ if (list_empty(&fc->ring))
7464+ return;
7465+
7466+ list_for_each_entry(fence, &fc->ring, ring) {
7467+ diff = (sequence - fence->sequence) & driver->sequence_mask;
7468+ if (diff > driver->wrap_diff) {
7469+ found = 1;
7470+ break;
7471+ }
7472+ }
7473+
7474+ fc->waiting_types &= ~type;
7475+ head = (found) ? &fence->ring : &fc->ring;
7476+
7477+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
7478+ if (&fence->ring == &fc->ring)
7479+ break;
7480+
7481+ if (error) {
7482+ fence->error = error;
7483+ fence->signaled_types = fence->type;
7484+ list_del_init(&fence->ring);
7485+ wake = 1;
7486+ break;
7487+ }
7488+
7489+ if (type & DRM_FENCE_TYPE_EXE)
7490+ type |= fence->native_types;
7491+
7492+ relevant_type = type & fence->type;
7493+ new_type = (fence->signaled_types | relevant_type) ^
7494+ fence->signaled_types;
7495+
7496+ if (new_type) {
7497+ fence->signaled_types |= new_type;
7498+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
7499+ fence->base.hash.key, fence->signaled_types);
7500+
7501+ if (driver->needed_flush)
7502+ fc->pending_flush |= driver->needed_flush(fence);
7503+
7504+ if (new_type & fence->waiting_types)
7505+ wake = 1;
7506+ }
7507+
7508+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
7509+
7510+ if (!(fence->type & ~fence->signaled_types)) {
7511+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
7512+ fence->base.hash.key);
7513+ list_del_init(&fence->ring);
7514+ }
7515+ }
7516+
7517+ /*
7518+ * Reinstate lost waiting types.
7519+ */
7520+
7521+ if ((fc->waiting_types & type) != type) {
7522+ head = head->prev;
7523+ list_for_each_entry(fence, head, ring) {
7524+ if (&fence->ring == &fc->ring)
7525+ break;
7526+ diff = (fc->highest_waiting_sequence - fence->sequence) &
7527+ driver->sequence_mask;
7528+ if (diff > driver->wrap_diff)
7529+ break;
7530+
7531+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
7532+ }
7533+ }
7534+
7535+ if (wake)
7536+ wake_up_all(&fc->fence_queue);
7537+}
7538+EXPORT_SYMBOL(drm_fence_handler);
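A driver feeds this from its interrupt path once it has read the last-retired sequence number from hardware, holding the fence manager lock for write since drm_fence_handler() walks and edits fc->ring without locking of its own. A hedged sketch (the helper name is hypothetical; how the sequence is obtained is driver-specific):

    /* Hypothetical IRQ-path helper: report a completed sequence number. */
    static void example_report_fence(struct drm_device *dev, uint32_t sequence)
    {
        struct drm_fence_manager *fm = &dev->fm;
        unsigned long flags;

        write_lock_irqsave(&fm->lock, flags);
        /* class 0, EXE type signaled, no error */
        drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE, 0);
        write_unlock_irqrestore(&fm->lock, flags);
    }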
7539+
7540+static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
7541+{
7542+ struct drm_fence_manager *fm = &dev->fm;
7543+ unsigned long flags;
7544+
7545+ write_lock_irqsave(&fm->lock, flags);
7546+ list_del_init(ring);
7547+ write_unlock_irqrestore(&fm->lock, flags);
7548+}
7549+
7550+void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
7551+{
7552+ struct drm_fence_object *tmp_fence = *fence;
7553+ struct drm_device *dev = tmp_fence->dev;
7554+ struct drm_fence_manager *fm = &dev->fm;
7555+
7556+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
7557+ *fence = NULL;
7558+ if (atomic_dec_and_test(&tmp_fence->usage)) {
7559+ drm_fence_unring(dev, &tmp_fence->ring);
7560+ DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
7561+ tmp_fence->base.hash.key);
7562+ atomic_dec(&fm->count);
7563+ BUG_ON(!list_empty(&tmp_fence->base.list));
7564+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
7565+ }
7566+}
7567+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
7568+
7569+void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
7570+{
7571+ struct drm_fence_object *tmp_fence = *fence;
7572+ struct drm_device *dev = tmp_fence->dev;
7573+ struct drm_fence_manager *fm = &dev->fm;
7574+
7575+ *fence = NULL;
7576+ if (atomic_dec_and_test(&tmp_fence->usage)) {
7577+ mutex_lock(&dev->struct_mutex);
7578+ if (atomic_read(&tmp_fence->usage) == 0) {
7579+ drm_fence_unring(dev, &tmp_fence->ring);
7580+ atomic_dec(&fm->count);
7581+ BUG_ON(!list_empty(&tmp_fence->base.list));
7582+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
7583+ }
7584+ mutex_unlock(&dev->struct_mutex);
7585+ }
7586+}
7587+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
7588+
7589+struct drm_fence_object
7590+*drm_fence_reference_locked(struct drm_fence_object *src)
7591+{
7592+ DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
7593+
7594+ atomic_inc(&src->usage);
7595+ return src;
7596+}
7597+
7598+void drm_fence_reference_unlocked(struct drm_fence_object **dst,
7599+ struct drm_fence_object *src)
7600+{
7601+ mutex_lock(&src->dev->struct_mutex);
7602+ *dst = src;
7603+ atomic_inc(&src->usage);
7604+ mutex_unlock(&src->dev->struct_mutex);
7605+}
7606+EXPORT_SYMBOL(drm_fence_reference_unlocked);
7607+
7608+static void drm_fence_object_destroy(struct drm_file *priv,
7609+ struct drm_user_object *base)
7610+{
7611+ struct drm_fence_object *fence =
7612+ drm_user_object_entry(base, struct drm_fence_object, base);
7613+
7614+ drm_fence_usage_deref_locked(&fence);
7615+}
7616+
7617+int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
7618+{
7619+ unsigned long flags;
7620+ int signaled;
7621+ struct drm_device *dev = fence->dev;
7622+ struct drm_fence_manager *fm = &dev->fm;
7623+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7624+
7625+ mask &= fence->type;
7626+ read_lock_irqsave(&fm->lock, flags);
7627+ signaled = (mask & fence->signaled_types) == mask;
7628+ read_unlock_irqrestore(&fm->lock, flags);
7629+ if (!signaled && driver->poll) {
7630+ write_lock_irqsave(&fm->lock, flags);
7631+ driver->poll(dev, fence->fence_class, mask);
7632+ signaled = (mask & fence->signaled_types) == mask;
7633+ write_unlock_irqrestore(&fm->lock, flags);
7634+ }
7635+ return signaled;
7636+}
7637+EXPORT_SYMBOL(drm_fence_object_signaled);
7638+
7639+
7640+int drm_fence_object_flush(struct drm_fence_object *fence,
7641+ uint32_t type)
7642+{
7643+ struct drm_device *dev = fence->dev;
7644+ struct drm_fence_manager *fm = &dev->fm;
7645+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
7646+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7647+ unsigned long irq_flags;
7648+ uint32_t saved_pending_flush;
7649+ uint32_t diff;
7650+ int call_flush;
7651+
7652+ if (type & ~fence->type) {
7653+ DRM_ERROR("Flush trying to extend fence type, "
7654+ "0x%x, 0x%x\n", type, fence->type);
7655+ return -EINVAL;
7656+ }
7657+
7658+ write_lock_irqsave(&fm->lock, irq_flags);
7659+ fence->waiting_types |= type;
7660+ fc->waiting_types |= fence->waiting_types;
7661+ diff = (fence->sequence - fc->highest_waiting_sequence) &
7662+ driver->sequence_mask;
7663+
7664+ if (diff < driver->wrap_diff)
7665+ fc->highest_waiting_sequence = fence->sequence;
7666+
7667+ /*
7668+ * fence->waiting_types has changed. Determine whether
7669+ * we need to initiate some kind of flush as a result of this.
7670+ */
7671+
7672+ saved_pending_flush = fc->pending_flush;
7673+ if (driver->needed_flush)
7674+ fc->pending_flush |= driver->needed_flush(fence);
7675+
7676+ if (driver->poll)
7677+ driver->poll(dev, fence->fence_class, fence->waiting_types);
7678+
7679+ call_flush = fc->pending_flush;
7680+ write_unlock_irqrestore(&fm->lock, irq_flags);
7681+
7682+ if (call_flush && driver->flush)
7683+ driver->flush(dev, fence->fence_class);
7684+
7685+ return 0;
7686+}
7687+EXPORT_SYMBOL(drm_fence_object_flush);
7688+
7689+/*
7690+ * Make sure old fence objects are signaled before their fence sequences are
7691+ * wrapped around and reused.
7692+ */
7693+
7694+void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
7695+ uint32_t sequence)
7696+{
7697+ struct drm_fence_manager *fm = &dev->fm;
7698+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
7699+ struct drm_fence_object *fence;
7700+ unsigned long irq_flags;
7701+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7702+ int call_flush;
7703+
7704+ uint32_t diff;
7705+
7706+ write_lock_irqsave(&fm->lock, irq_flags);
7707+
7708+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
7709+ diff = (sequence - fence->sequence) & driver->sequence_mask;
7710+ if (diff <= driver->flush_diff)
7711+ break;
7712+
7713+ fence->waiting_types = fence->type;
7714+ fc->waiting_types |= fence->type;
7715+
7716+ if (driver->needed_flush)
7717+ fc->pending_flush |= driver->needed_flush(fence);
7718+ }
7719+
7720+ if (driver->poll)
7721+ driver->poll(dev, fence_class, fc->waiting_types);
7722+
7723+ call_flush = fc->pending_flush;
7724+ write_unlock_irqrestore(&fm->lock, irq_flags);
7725+
7726+ if (call_flush && driver->flush)
7727+ driver->flush(dev, fence_class);
7728+
7729+ /*
7730+ * FIXME: Should we implement a wait here for really old fences?
7731+ */
7732+
7733+}
7734+EXPORT_SYMBOL(drm_fence_flush_old);
7735+
7736+int drm_fence_object_wait(struct drm_fence_object *fence,
7737+ int lazy, int ignore_signals, uint32_t mask)
7738+{
7739+ struct drm_device *dev = fence->dev;
7740+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7741+ struct drm_fence_manager *fm = &dev->fm;
7742+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
7743+ int ret = 0;
7744+ unsigned long _end = 3 * DRM_HZ;
7745+
7746+ if (mask & ~fence->type) {
7747+ DRM_ERROR("Wait trying to extend fence type"
7748+ " 0x%08x 0x%08x\n", mask, fence->type);
7749+ BUG();
7750+ return -EINVAL;
7751+ }
7752+
7753+ if (driver->wait)
7754+ return driver->wait(fence, lazy, !ignore_signals, mask);
7755+
7756+
7757+ drm_fence_object_flush(fence, mask);
7758+ if (driver->has_irq(dev, fence->fence_class, mask)) {
7759+ if (!ignore_signals)
7760+ ret = wait_event_interruptible_timeout
7761+ (fc->fence_queue,
7762+ drm_fence_object_signaled(fence, mask),
7763+ 3 * DRM_HZ);
7764+ else
7765+ ret = wait_event_timeout
7766+ (fc->fence_queue,
7767+ drm_fence_object_signaled(fence, mask),
7768+ 3 * DRM_HZ);
7769+
7770+ if (unlikely(ret == -ERESTARTSYS))
7771+ return -EAGAIN;
7772+
7773+ if (unlikely(ret == 0))
7774+ return -EBUSY;
7775+
7776+ return 0;
7777+ }
7778+
7779+ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
7780+ _end);
7781+}
7782+EXPORT_SYMBOL(drm_fence_object_wait);
7783+
7784+
7785+
7786+int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
7787+ uint32_t fence_class, uint32_t type)
7788+{
7789+ struct drm_device *dev = fence->dev;
7790+ struct drm_fence_manager *fm = &dev->fm;
7791+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7792+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
7793+ unsigned long flags;
7794+ uint32_t sequence;
7795+ uint32_t native_types;
7796+ int ret;
7797+
7798+ drm_fence_unring(dev, &fence->ring);
7799+ ret = driver->emit(dev, fence_class, fence_flags, &sequence,
7800+ &native_types);
7801+ if (ret)
7802+ return ret;
7803+
7804+ write_lock_irqsave(&fm->lock, flags);
7805+ fence->fence_class = fence_class;
7806+ fence->type = type;
7807+ fence->waiting_types = 0;
7808+ fence->signaled_types = 0;
7809+ fence->error = 0;
7810+ fence->sequence = sequence;
7811+ fence->native_types = native_types;
7812+ if (list_empty(&fc->ring))
7813+ fc->highest_waiting_sequence = sequence - 1;
7814+ list_add_tail(&fence->ring, &fc->ring);
7815+ fc->latest_queued_sequence = sequence;
7816+ write_unlock_irqrestore(&fm->lock, flags);
7817+ return 0;
7818+}
7819+EXPORT_SYMBOL(drm_fence_object_emit);
7820+
7821+static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
7822+ uint32_t type,
7823+ uint32_t fence_flags,
7824+ struct drm_fence_object *fence)
7825+{
7826+ int ret = 0;
7827+ unsigned long flags;
7828+ struct drm_fence_manager *fm = &dev->fm;
7829+
7830+ mutex_lock(&dev->struct_mutex);
7831+ atomic_set(&fence->usage, 1);
7832+ mutex_unlock(&dev->struct_mutex);
7833+
7834+ write_lock_irqsave(&fm->lock, flags);
7835+ INIT_LIST_HEAD(&fence->ring);
7836+
7837+ /*
7838+ * Avoid hitting BUG() for kernel-only fence objects.
7839+ */
7840+
7841+ INIT_LIST_HEAD(&fence->base.list);
7842+ fence->fence_class = fence_class;
7843+ fence->type = type;
7844+ fence->signaled_types = 0;
7845+ fence->waiting_types = 0;
7846+ fence->sequence = 0;
7847+ fence->error = 0;
7848+ fence->dev = dev;
7849+ write_unlock_irqrestore(&fm->lock, flags);
7850+ if (fence_flags & DRM_FENCE_FLAG_EMIT) {
7851+ ret = drm_fence_object_emit(fence, fence_flags,
7852+ fence->fence_class, type);
7853+ }
7854+ return ret;
7855+}
7856+
7857+int drm_fence_add_user_object(struct drm_file *priv,
7858+ struct drm_fence_object *fence, int shareable)
7859+{
7860+ struct drm_device *dev = priv->minor->dev;
7861+ int ret;
7862+
7863+ mutex_lock(&dev->struct_mutex);
7864+ ret = drm_add_user_object(priv, &fence->base, shareable);
7865+ if (ret)
7866+ goto out;
7867+ atomic_inc(&fence->usage);
7868+ fence->base.type = drm_fence_type;
7869+ fence->base.remove = &drm_fence_object_destroy;
7870+ DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
7871+out:
7872+ mutex_unlock(&dev->struct_mutex);
7873+ return ret;
7874+}
7875+EXPORT_SYMBOL(drm_fence_add_user_object);
7876+
7877+int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
7878+ uint32_t type, unsigned flags,
7879+ struct drm_fence_object **c_fence)
7880+{
7881+ struct drm_fence_object *fence;
7882+ int ret;
7883+ struct drm_fence_manager *fm = &dev->fm;
7884+
7885+ fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
7886+ if (!fence) {
7887+ DRM_INFO("Out of memory creating fence object.\n");
7888+ return -ENOMEM;
7889+ }
7890+ ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
7891+ if (ret) {
7892+ drm_fence_usage_deref_unlocked(&fence);
7893+ return ret;
7894+ }
7895+ *c_fence = fence;
7896+ atomic_inc(&fm->count);
7897+
7898+ return 0;
7899+}
7900+EXPORT_SYMBOL(drm_fence_object_create);
7901+
7902+void drm_fence_manager_init(struct drm_device *dev)
7903+{
7904+ struct drm_fence_manager *fm = &dev->fm;
7905+ struct drm_fence_class_manager *fence_class;
7906+ struct drm_fence_driver *fed = dev->driver->fence_driver;
7907+ int i;
7908+ unsigned long flags;
7909+
7910+ rwlock_init(&fm->lock);
7911+ write_lock_irqsave(&fm->lock, flags);
7912+ fm->initialized = 0;
7913+ if (!fed)
7914+ goto out_unlock;
7915+
7916+ fm->initialized = 1;
7917+ fm->num_classes = fed->num_classes;
7918+ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
7919+
7920+ for (i = 0; i < fm->num_classes; ++i) {
7921+ fence_class = &fm->fence_class[i];
7922+
7923+ memset(fence_class, 0, sizeof(*fence_class));
7924+ INIT_LIST_HEAD(&fence_class->ring);
7925+ DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
7926+ }
7927+
7928+ atomic_set(&fm->count, 0);
7929+ out_unlock:
7930+ write_unlock_irqrestore(&fm->lock, flags);
7931+}
7932+
7933+void drm_fence_fill_arg(struct drm_fence_object *fence,
7934+ struct drm_fence_arg *arg)
7935+{
7936+ struct drm_device *dev = fence->dev;
7937+ struct drm_fence_manager *fm = &dev->fm;
7938+ unsigned long irq_flags;
7939+
7940+ read_lock_irqsave(&fm->lock, irq_flags);
7941+ arg->handle = fence->base.hash.key;
7942+ arg->fence_class = fence->fence_class;
7943+ arg->type = fence->type;
7944+ arg->signaled = fence->signaled_types;
7945+ arg->error = fence->error;
7946+ arg->sequence = fence->sequence;
7947+ read_unlock_irqrestore(&fm->lock, irq_flags);
7948+}
7949+EXPORT_SYMBOL(drm_fence_fill_arg);
7950+
7951+void drm_fence_manager_takedown(struct drm_device *dev)
7952+{
7953+}
7954+
7955+struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
7956+ uint32_t handle)
7957+{
7958+ struct drm_device *dev = priv->minor->dev;
7959+ struct drm_user_object *uo;
7960+ struct drm_fence_object *fence;
7961+
7962+ mutex_lock(&dev->struct_mutex);
7963+ uo = drm_lookup_user_object(priv, handle);
7964+ if (!uo || (uo->type != drm_fence_type)) {
7965+ mutex_unlock(&dev->struct_mutex);
7966+ return NULL;
7967+ }
7968+ fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
7969+ mutex_unlock(&dev->struct_mutex);
7970+ return fence;
7971+}
7972+
7973+int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
7974+{
7975+ int ret;
7976+ struct drm_fence_manager *fm = &dev->fm;
7977+ struct drm_fence_arg *arg = data;
7978+ struct drm_fence_object *fence;
7979+ ret = 0;
7980+
7981+ if (!fm->initialized) {
7982+ DRM_ERROR("The DRM driver does not support fencing.\n");
7983+ return -EINVAL;
7984+ }
7985+
7986+ if (arg->flags & DRM_FENCE_FLAG_EMIT)
7987+ LOCK_TEST_WITH_RETURN(dev, file_priv);
7988+ ret = drm_fence_object_create(dev, arg->fence_class,
7989+ arg->type, arg->flags, &fence);
7990+ if (ret)
7991+ return ret;
7992+ ret = drm_fence_add_user_object(file_priv, fence,
7993+ arg->flags &
7994+ DRM_FENCE_FLAG_SHAREABLE);
7995+ if (ret) {
7996+ drm_fence_usage_deref_unlocked(&fence);
7997+ return ret;
7998+ }
7999+
8000+ /*
8001+ * usage > 0. No need to lock dev->struct_mutex.
8002+ */
8003+
8004+ arg->handle = fence->base.hash.key;
8005+
8006+ drm_fence_fill_arg(fence, arg);
8007+ drm_fence_usage_deref_unlocked(&fence);
8008+
8009+ return ret;
8010+}
8011+
8012+int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8013+{
8014+ int ret;
8015+ struct drm_fence_manager *fm = &dev->fm;
8016+ struct drm_fence_arg *arg = data;
8017+ struct drm_fence_object *fence;
8018+ struct drm_user_object *uo;
8019+ ret = 0;
8020+
8021+ if (!fm->initialized) {
8022+ DRM_ERROR("The DRM driver does not support fencing.\n");
8023+ return -EINVAL;
8024+ }
8025+
8026+ ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
8027+ if (ret)
8028+ return ret;
8029+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8030+ drm_fence_fill_arg(fence, arg);
8031+ drm_fence_usage_deref_unlocked(&fence);
8032+
8033+ return ret;
8034+}
8035+
8036+
8037+int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8038+{
8039+ int ret;
8040+ struct drm_fence_manager *fm = &dev->fm;
8041+ struct drm_fence_arg *arg = data;
8042+ ret = 0;
8043+
8044+ if (!fm->initialized) {
8045+ DRM_ERROR("The DRM driver does not support fencing.\n");
8046+ return -EINVAL;
8047+ }
8048+
8049+ return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
8050+}
8051+
8052+int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8053+{
8054+ int ret;
8055+ struct drm_fence_manager *fm = &dev->fm;
8056+ struct drm_fence_arg *arg = data;
8057+ struct drm_fence_object *fence;
8058+ ret = 0;
8059+
8060+ if (!fm->initialized) {
8061+ DRM_ERROR("The DRM driver does not support fencing.\n");
8062+ return -EINVAL;
8063+ }
8064+
8065+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8066+ if (!fence)
8067+ return -EINVAL;
8068+
8069+ drm_fence_fill_arg(fence, arg);
8070+ drm_fence_usage_deref_unlocked(&fence);
8071+
8072+ return ret;
8073+}
8074+
8075+int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8076+{
8077+ int ret;
8078+ struct drm_fence_manager *fm = &dev->fm;
8079+ struct drm_fence_arg *arg = data;
8080+ struct drm_fence_object *fence;
8081+ ret = 0;
8082+
8083+ if (!fm->initialized) {
8084+ DRM_ERROR("The DRM driver does not support fencing.\n");
8085+ return -EINVAL;
8086+ }
8087+
8088+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8089+ if (!fence)
8090+ return -EINVAL;
8091+ ret = drm_fence_object_flush(fence, arg->type);
8092+
8093+ drm_fence_fill_arg(fence, arg);
8094+ drm_fence_usage_deref_unlocked(&fence);
8095+
8096+ return ret;
8097+}
8098+
8099+
8100+int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8101+{
8102+ int ret;
8103+ struct drm_fence_manager *fm = &dev->fm;
8104+ struct drm_fence_arg *arg = data;
8105+ struct drm_fence_object *fence;
8106+ ret = 0;
8107+
8108+ if (!fm->initialized) {
8109+ DRM_ERROR("The DRM driver does not support fencing.\n");
8110+ return -EINVAL;
8111+ }
8112+
8113+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8114+ if (!fence)
8115+ return -EINVAL;
8116+ ret = drm_fence_object_wait(fence,
8117+ arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
8118+ 0, arg->type);
8119+
8120+ drm_fence_fill_arg(fence, arg);
8121+ drm_fence_usage_deref_unlocked(&fence);
8122+
8123+ return ret;
8124+}
8125+
8126+
8127+int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8128+{
8129+ int ret;
8130+ struct drm_fence_manager *fm = &dev->fm;
8131+ struct drm_fence_arg *arg = data;
8132+ struct drm_fence_object *fence;
8133+ ret = 0;
8134+
8135+ if (!fm->initialized) {
8136+ DRM_ERROR("The DRM driver does not support fencing.\n");
8137+ return -EINVAL;
8138+ }
8139+
8140+ LOCK_TEST_WITH_RETURN(dev, file_priv);
8141+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8142+ if (!fence)
8143+ return -EINVAL;
8144+ ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
8145+ arg->type);
8146+
8147+ drm_fence_fill_arg(fence, arg);
8148+ drm_fence_usage_deref_unlocked(&fence);
8149+
8150+ return ret;
8151+}
8152+
8153+int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8154+{
8155+ int ret;
8156+ struct drm_fence_manager *fm = &dev->fm;
8157+ struct drm_fence_arg *arg = data;
8158+ struct drm_fence_object *fence;
8159+ ret = 0;
8160+
8161+ if (!fm->initialized) {
8162+ DRM_ERROR("The DRM driver does not support fencing.\n");
8163+ return -EINVAL;
8164+ }
8165+
8166+ if (!dev->bm.initialized) {
8167+ DRM_ERROR("Buffer object manager is not initialized\n");
8168+ return -EINVAL;
8169+ }
8170+ LOCK_TEST_WITH_RETURN(dev, file_priv);
8171+ ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
8172+ NULL, &fence);
8173+ if (ret)
8174+ return ret;
8175+
8176+ if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
8177+ ret = drm_fence_add_user_object(file_priv, fence,
8178+ arg->flags &
8179+ DRM_FENCE_FLAG_SHAREABLE);
8180+ if (ret)
8181+ return ret;
8182+ }
8183+
8184+ arg->handle = fence->base.hash.key;
8185+
8186+ drm_fence_fill_arg(fence, arg);
8187+ drm_fence_usage_deref_unlocked(&fence);
8188+
8189+ return ret;
8190+}
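End to end, the kernel-side lifecycle built from the exports above is: create a fence, emit it onto the ring, wait, then drop the reference. A minimal sketch assuming a single fence class and EXE-type fencing (error handling trimmed to the essentials):

    /* Hypothetical in-kernel round trip over the fence API above. */
    static int example_fence_roundtrip(struct drm_device *dev)
    {
        struct drm_fence_object *fence;
        int ret;

        ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE,
                                      DRM_FENCE_FLAG_EMIT, &fence);
        if (ret)
            return ret;

        /* Lazy wait; returns -EBUSY on timeout, -EAGAIN on a signal. */
        ret = drm_fence_object_wait(fence, 1, 0, DRM_FENCE_TYPE_EXE);

        drm_fence_usage_deref_unlocked(&fence);
        return ret;
    }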
8191Index: linux-2.6.27/drivers/gpu/drm/drm_fops.c
8192===================================================================
8193--- linux-2.6.27.orig/drivers/gpu/drm/drm_fops.c 2009-02-05 13:29:29.000000000 +0000
8194+++ linux-2.6.27/drivers/gpu/drm/drm_fops.c 2009-02-05 13:29:33.000000000 +0000
8195@@ -231,6 +231,7 @@
8196 int minor_id = iminor(inode);
8197 struct drm_file *priv;
8198 int ret;
8199+ int i, j;
8200
8201 if (filp->f_flags & O_EXCL)
8202 return -EBUSY; /* No exclusive opens */
8203@@ -255,9 +256,21 @@
8204 priv->lock_count = 0;
8205
8206 INIT_LIST_HEAD(&priv->lhead);
8207+ INIT_LIST_HEAD(&priv->refd_objects);
8208+ INIT_LIST_HEAD(&priv->fbs);
8209
8210- if (dev->driver->driver_features & DRIVER_GEM)
8211- drm_gem_open(dev, priv);
8212+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
8213+ ret = drm_ht_create(&priv->refd_object_hash[i],
8214+ DRM_FILE_HASH_ORDER);
8215+ if (ret)
8216+ break;
8217+ }
8218+
8219+ if (ret) {
8220+ for (j = 0; j < i; ++j)
8221+ drm_ht_remove(&priv->refd_object_hash[j]);
8222+ goto out_free;
8223+ }
8224
8225 if (dev->driver->open) {
8226 ret = dev->driver->open(dev, priv);
8227@@ -314,6 +327,33 @@
8228 }
8229 EXPORT_SYMBOL(drm_fasync);
8230
8231+static void drm_object_release(struct file *filp)
8232+{
8233+ struct drm_file *priv = filp->private_data;
8234+ struct list_head *head;
8235+ struct drm_ref_object *ref_object;
8236+ int i;
8237+
8238+ /*
8239+ * Free leftover ref objects created by me. Note that we cannot use
8240+ * list_for_each() here, as the struct_mutex may be temporarily
8241+ * released by the remove_() functions, and thus the lists may be
8242+ * altered.
8243+ * Also, a drm_remove_ref_object() will not remove it
8244+ * from the list unless its refcount is 1.
8245+ */
8246+
8247+ head = &priv->refd_objects;
8248+ while (head->next != head) {
8249+ ref_object = list_entry(head->next, struct drm_ref_object, list);
8250+ drm_remove_ref_object(priv, ref_object);
8251+ head = &priv->refd_objects;
8252+ }
8253+
8254+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
8255+ drm_ht_remove(&priv->refd_object_hash[i]);
8256+}
8257+
8258 /**
8259 * Release file.
8260 *
8261@@ -403,9 +443,6 @@
8262 dev->driver->reclaim_buffers(dev, file_priv);
8263 }
8264
8265- if (dev->driver->driver_features & DRIVER_GEM)
8266- drm_gem_release(dev, file_priv);
8267-
8268 drm_fasync(-1, filp, 0);
8269
8270 mutex_lock(&dev->ctxlist_mutex);
8271@@ -430,6 +467,8 @@
8272 mutex_unlock(&dev->ctxlist_mutex);
8273
8274 mutex_lock(&dev->struct_mutex);
8275+ drm_fb_release(filp);
8276+ drm_object_release(filp);
8277 if (file_priv->remove_auth_on_close == 1) {
8278 struct drm_file *temp;
8279
8280Index: linux-2.6.27/drivers/gpu/drm/drm_hashtab.c
8281===================================================================
8282--- linux-2.6.27.orig/drivers/gpu/drm/drm_hashtab.c 2008-10-09 23:13:53.000000000 +0100
8283+++ linux-2.6.27/drivers/gpu/drm/drm_hashtab.c 2009-02-05 13:29:33.000000000 +0000
8284@@ -29,7 +29,7 @@
8285 * Simple open hash tab implementation.
8286 *
8287 * Authors:
8288- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
8289+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
8290 */
8291
8292 #include "drmP.h"
8293Index: linux-2.6.27/drivers/gpu/drm/drm_irq.c
8294===================================================================
8295--- linux-2.6.27.orig/drivers/gpu/drm/drm_irq.c 2009-02-05 13:29:29.000000000 +0000
8296+++ linux-2.6.27/drivers/gpu/drm/drm_irq.c 2009-02-05 13:29:33.000000000 +0000
8297@@ -70,6 +70,7 @@
8298
8299 return 0;
8300 }
8301+#if 0
8302
8303 static void vblank_disable_fn(unsigned long arg)
8304 {
8305@@ -184,6 +185,8 @@
8306 }
8307 EXPORT_SYMBOL(drm_vblank_init);
8308
8309+#endif
8310+
8311 /**
8312 * Install IRQ handler.
8313 *
8314@@ -221,6 +224,17 @@
8315
8316 DRM_DEBUG("irq=%d\n", dev->pdev->irq);
8317
8318+ if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
8319+ init_waitqueue_head(&dev->vbl_queue);
8320+
8321+ spin_lock_init(&dev->vbl_lock);
8322+
8323+ INIT_LIST_HEAD(&dev->vbl_sigs);
8324+ INIT_LIST_HEAD(&dev->vbl_sigs2);
8325+
8326+ dev->vbl_pending = 0;
8327+ }
8328+
8329 /* Before installing handler */
8330 dev->driver->irq_preinstall(dev);
8331
8332@@ -281,8 +295,6 @@
8333
8334 free_irq(dev->pdev->irq, dev);
8335
8336- drm_vblank_cleanup(dev);
8337-
8338 dev->locked_tasklet_func = NULL;
8339
8340 return 0;
8341@@ -326,174 +338,6 @@
8342 }
8343
8344 /**
8345- * drm_vblank_count - retrieve "cooked" vblank counter value
8346- * @dev: DRM device
8347- * @crtc: which counter to retrieve
8348- *
8349- * Fetches the "cooked" vblank count value that represents the number of
8350- * vblank events since the system was booted, including lost events due to
8351- * modesetting activity.
8352- */
8353-u32 drm_vblank_count(struct drm_device *dev, int crtc)
8354-{
8355- return atomic_read(&dev->_vblank_count[crtc]);
8356-}
8357-EXPORT_SYMBOL(drm_vblank_count);
8358-
8359-/**
8360- * drm_update_vblank_count - update the master vblank counter
8361- * @dev: DRM device
8362- * @crtc: counter to update
8363- *
8364- * Call back into the driver to update the appropriate vblank counter
8365- * (specified by @crtc). Deal with wraparound, if it occurred, and
8366- * update the last read value so we can deal with wraparound on the next
8367- * call if necessary.
8368- *
8369- * Only necessary when going from off->on, to account for frames we
8370- * didn't get an interrupt for.
8371- *
8372- * Note: caller must hold dev->vbl_lock since this reads & writes
8373- * device vblank fields.
8374- */
8375-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
8376-{
8377- u32 cur_vblank, diff;
8378-
8379- /*
8380- * Interrupts were disabled prior to this call, so deal with counter
8381- * wrap if needed.
8382- * NOTE! It's possible we lost a full dev->max_vblank_count events
8383- * here if the register is small or we had vblank interrupts off for
8384- * a long time.
8385- */
8386- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
8387- diff = cur_vblank - dev->last_vblank[crtc];
8388- if (cur_vblank < dev->last_vblank[crtc]) {
8389- diff += dev->max_vblank_count;
8390-
8391- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
8392- crtc, dev->last_vblank[crtc], cur_vblank, diff);
8393- }
8394-
8395- DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
8396- crtc, diff);
8397-
8398- atomic_add(diff, &dev->_vblank_count[crtc]);
8399-}
8400-
8401-/**
8402- * drm_vblank_get - get a reference count on vblank events
8403- * @dev: DRM device
8404- * @crtc: which CRTC to own
8405- *
8406- * Acquire a reference count on vblank events to avoid having them disabled
8407- * while in use.
8408- *
8409- * RETURNS
8410- * Zero on success, nonzero on failure.
8411- */
8412-int drm_vblank_get(struct drm_device *dev, int crtc)
8413-{
8414- unsigned long irqflags;
8415- int ret = 0;
8416-
8417- spin_lock_irqsave(&dev->vbl_lock, irqflags);
8418- /* Going from 0->1 means we have to enable interrupts again */
8419- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
8420- !dev->vblank_enabled[crtc]) {
8421- ret = dev->driver->enable_vblank(dev, crtc);
8422- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
8423- if (ret)
8424- atomic_dec(&dev->vblank_refcount[crtc]);
8425- else {
8426- dev->vblank_enabled[crtc] = 1;
8427- drm_update_vblank_count(dev, crtc);
8428- }
8429- }
8430- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8431-
8432- return ret;
8433-}
8434-EXPORT_SYMBOL(drm_vblank_get);
8435-
8436-/**
8437- * drm_vblank_put - give up ownership of vblank events
8438- * @dev: DRM device
8439- * @crtc: which counter to give up
8440- *
8441- * Release ownership of a given vblank counter, turning off interrupts
8442- * if possible.
8443- */
8444-void drm_vblank_put(struct drm_device *dev, int crtc)
8445-{
8446- /* Last user schedules interrupt disable */
8447- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
8448- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
8449-}
8450-EXPORT_SYMBOL(drm_vblank_put);
8451-
8452-/**
8453- * drm_modeset_ctl - handle vblank event counter changes across mode switch
8454- * @DRM_IOCTL_ARGS: standard ioctl arguments
8455- *
8456- * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
8457- * ioctls around modesetting so that any lost vblank events are accounted for.
8458- *
8459- * Generally the counter will reset across mode sets. If interrupts are
8460- * enabled around this call, we don't have to do anything since the counter
8461- * will have already been incremented.
8462- */
8463-int drm_modeset_ctl(struct drm_device *dev, void *data,
8464- struct drm_file *file_priv)
8465-{
8466- struct drm_modeset_ctl *modeset = data;
8467- unsigned long irqflags;
8468- int crtc, ret = 0;
8469-
8470- /* If drm_vblank_init() hasn't been called yet, just no-op */
8471- if (!dev->num_crtcs)
8472- goto out;
8473-
8474- crtc = modeset->crtc;
8475- if (crtc >= dev->num_crtcs) {
8476- ret = -EINVAL;
8477- goto out;
8478- }
8479-
8480- /*
8481- * To avoid all the problems that might happen if interrupts
8482- * were enabled/disabled around or between these calls, we just
8483- * have the kernel take a reference on the CRTC (just once though
8484- * to avoid corrupting the count if multiple, mismatch calls occur),
8485- * so that interrupts remain enabled in the interim.
8486- */
8487- switch (modeset->cmd) {
8488- case _DRM_PRE_MODESET:
8489- if (!dev->vblank_inmodeset[crtc]) {
8490- dev->vblank_inmodeset[crtc] = 1;
8491- drm_vblank_get(dev, crtc);
8492- }
8493- break;
8494- case _DRM_POST_MODESET:
8495- if (dev->vblank_inmodeset[crtc]) {
8496- spin_lock_irqsave(&dev->vbl_lock, irqflags);
8497- dev->vblank_disable_allowed = 1;
8498- dev->vblank_inmodeset[crtc] = 0;
8499- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8500- drm_vblank_put(dev, crtc);
8501- }
8502- break;
8503- default:
8504- ret = -EINVAL;
8505- break;
8506- }
8507-
8508-out:
8509- return ret;
8510-}
8511-
8512-/**
8513 * Wait for VBLANK.
8514 *
8515 * \param inode device inode.
8516@@ -512,14 +356,14 @@
8517 *
8518 * If a signal is not requested, then calls vblank_wait().
8519 */
8520-int drm_wait_vblank(struct drm_device *dev, void *data,
8521- struct drm_file *file_priv)
8522+int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
8523 {
8524 union drm_wait_vblank *vblwait = data;
8525+ struct timeval now;
8526 int ret = 0;
8527- unsigned int flags, seq, crtc;
8528+ unsigned int flags, seq;
8529
8530- if ((!dev->pdev->irq) || (!dev->irq_enabled))
8531+ if ((!dev->irq) || (!dev->irq_enabled))
8532 return -EINVAL;
8533
8534 if (vblwait->request.type &
8535@@ -531,17 +375,13 @@
8536 }
8537
8538 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
8539- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
8540
8541- if (crtc >= dev->num_crtcs)
8542+ if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
8543+ DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
8544 return -EINVAL;
8545
8546- ret = drm_vblank_get(dev, crtc);
8547- if (ret) {
8548- DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
8549- return ret;
8550- }
8551- seq = drm_vblank_count(dev, crtc);
8552+ seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
8553+ : &dev->vbl_received);
8554
8555 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
8556 case _DRM_VBLANK_RELATIVE:
8557@@ -550,8 +390,7 @@
8558 case _DRM_VBLANK_ABSOLUTE:
8559 break;
8560 default:
8561- ret = -EINVAL;
8562- goto done;
8563+ return -EINVAL;
8564 }
8565
8566 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
8567@@ -561,7 +400,8 @@
8568
8569 if (flags & _DRM_VBLANK_SIGNAL) {
8570 unsigned long irqflags;
8571- struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
8572+ struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
8573+ ? &dev->vbl_sigs2 : &dev->vbl_sigs;
8574 struct drm_vbl_sig *vbl_sig;
8575
8576 spin_lock_irqsave(&dev->vbl_lock, irqflags);
8577@@ -582,29 +422,22 @@
8578 }
8579 }
8580
8581- if (atomic_read(&dev->vbl_signal_pending) >= 100) {
8582+ if (dev->vbl_pending >= 100) {
8583 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8584- ret = -EBUSY;
8585- goto done;
8586+ return -EBUSY;
8587 }
8588
8589- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8590+ dev->vbl_pending++;
8591
8592- vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
8593- DRM_MEM_DRIVER);
8594- if (!vbl_sig) {
8595- ret = -ENOMEM;
8596- goto done;
8597- }
8598+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8599
8600- ret = drm_vblank_get(dev, crtc);
8601- if (ret) {
8602- drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
8603- DRM_MEM_DRIVER);
8604- return ret;
8605+ vbl_sig = drm_alloc(sizeof(struct drm_vbl_sig),
8606+ DRM_MEM_DRIVER);
8607+ if (!vbl_sig) {
8608+ return -ENOMEM;
8609 }
8610
8611- atomic_inc(&dev->vbl_signal_pending);
8612+ memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
8613
8614 vbl_sig->sequence = vblwait->request.sequence;
8615 vbl_sig->info.si_signo = vblwait->request.signal;
8616@@ -618,29 +451,20 @@
8617
8618 vblwait->reply.sequence = seq;
8619 } else {
8620- DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
8621- vblwait->request.sequence, crtc);
8622- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
8623- ((drm_vblank_count(dev, crtc)
8624- - vblwait->request.sequence) <= (1 << 23)));
8625-
8626- if (ret != -EINTR) {
8627- struct timeval now;
8628-
8629- do_gettimeofday(&now);
8630-
8631- vblwait->reply.tval_sec = now.tv_sec;
8632- vblwait->reply.tval_usec = now.tv_usec;
8633- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
8634- DRM_DEBUG("returning %d to client\n",
8635- vblwait->reply.sequence);
8636- } else {
8637- DRM_DEBUG("vblank wait interrupted by signal\n");
8638- }
8639+ if (flags & _DRM_VBLANK_SECONDARY) {
8640+ if (dev->driver->vblank_wait2)
8641+ ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
8642+ } else if (dev->driver->vblank_wait)
8643+ ret =
8644+ dev->driver->vblank_wait(dev,
8645+ &vblwait->request.sequence);
8646+
8647+ do_gettimeofday(&now);
8648+ vblwait->reply.tval_sec = now.tv_sec;
8649+ vblwait->reply.tval_usec = now.tv_usec;
8650 }
8651
8652-done:
8653- drm_vblank_put(dev, crtc);
8654+ done:
8655 return ret;
8656 }
8657
8658@@ -648,57 +472,43 @@
8659 * Send the VBLANK signals.
8660 *
8661 * \param dev DRM device.
8662- * \param crtc CRTC where the vblank event occurred
8663 *
8664 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
8665 *
8666 * If a signal is not requested, then calls vblank_wait().
8667 */
8668-static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
8669+void drm_vbl_send_signals(struct drm_device * dev)
8670 {
8671- struct drm_vbl_sig *vbl_sig, *tmp;
8672- struct list_head *vbl_sigs;
8673- unsigned int vbl_seq;
8674 unsigned long flags;
8675+ int i;
8676
8677 spin_lock_irqsave(&dev->vbl_lock, flags);
8678
8679- vbl_sigs = &dev->vbl_sigs[crtc];
8680- vbl_seq = drm_vblank_count(dev, crtc);
8681+ for (i = 0; i < 2; i++) {
8682+ struct drm_vbl_sig *vbl_sig, *tmp;
8683+ struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
8684+ unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
8685+ &dev->vbl_received);
8686+
8687+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
8688+ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
8689+ vbl_sig->info.si_code = vbl_seq;
8690+ send_sig_info(vbl_sig->info.si_signo,
8691+ &vbl_sig->info, vbl_sig->task);
8692+
8693+ list_del(&vbl_sig->head);
8694
8695- list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
8696- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
8697- vbl_sig->info.si_code = vbl_seq;
8698- send_sig_info(vbl_sig->info.si_signo,
8699- &vbl_sig->info, vbl_sig->task);
8700-
8701- list_del(&vbl_sig->head);
8702-
8703- drm_free(vbl_sig, sizeof(*vbl_sig),
8704- DRM_MEM_DRIVER);
8705- atomic_dec(&dev->vbl_signal_pending);
8706- drm_vblank_put(dev, crtc);
8707- }
8708+ drm_free(vbl_sig, sizeof(*vbl_sig),
8709+ DRM_MEM_DRIVER);
8710+
8711+ dev->vbl_pending--;
8712+ }
8713+ }
8714 }
8715
8716 spin_unlock_irqrestore(&dev->vbl_lock, flags);
8717 }
8718-
8719-/**
8720- * drm_handle_vblank - handle a vblank event
8721- * @dev: DRM device
8722- * @crtc: where this event occurred
8723- *
8724- * Drivers should call this routine in their vblank interrupt handlers to
8725- * update the vblank counter and send any signals that may be pending.
8726- */
8727-void drm_handle_vblank(struct drm_device *dev, int crtc)
8728-{
8729- atomic_inc(&dev->_vblank_count[crtc]);
8730- DRM_WAKEUP(&dev->vbl_queue[crtc]);
8731- drm_vbl_send_signals(dev, crtc);
8732-}
8733-EXPORT_SYMBOL(drm_handle_vblank);
8734+EXPORT_SYMBOL(drm_vbl_send_signals);
8735
8736 /**
8737 * Tasklet wrapper function.
8738Index: linux-2.6.27/drivers/gpu/drm/drm_mm.c
8739===================================================================
8740--- linux-2.6.27.orig/drivers/gpu/drm/drm_mm.c 2009-02-05 13:29:29.000000000 +0000
8741+++ linux-2.6.27/drivers/gpu/drm/drm_mm.c 2009-02-05 13:29:33.000000000 +0000
8742@@ -38,7 +38,7 @@
8743 * Aligned allocations can also see improvement.
8744 *
8745 * Authors:
8746- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
8747+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
8748 */
8749
8750 #include "drmP.h"
8751Index: linux-2.6.27/drivers/gpu/drm/drm_modes.c
8752===================================================================
8753--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8754+++ linux-2.6.27/drivers/gpu/drm/drm_modes.c 2009-02-05 13:29:33.000000000 +0000
8755@@ -0,0 +1,560 @@
8756+/*
8757+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
8758+ *
8759+ * Permission is hereby granted, free of charge, to any person obtaining a
8760+ * copy of this software and associated documentation files (the "Software"),
8761+ * to deal in the Software without restriction, including without limitation
8762+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8763+ * and/or sell copies of the Software, and to permit persons to whom the
8764+ * Software is furnished to do so, subject to the following conditions:
8765+ *
8766+ * The above copyright notice and this permission notice shall be included in
8767+ * all copies or substantial portions of the Software.
8768+ *
8769+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
8770+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
8771+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
8772+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
8773+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
8774+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
8775+ * OTHER DEALINGS IN THE SOFTWARE.
8776+ *
8777+ * Except as contained in this notice, the name of the copyright holder(s)
8778+ * and author(s) shall not be used in advertising or otherwise to promote
8779+ * the sale, use or other dealings in this Software without prior written
8780+ * authorization from the copyright holder(s) and author(s).
8781+ */
8782+/*
8783+ * Copyright © 2007 Dave Airlie
8784+ */
8785+
8786+#include <linux/list.h>
8787+#include "drmP.h"
8788+#include "drm.h"
8789+#include "drm_crtc.h"
8790+
8791+/**
8792+ * drm_mode_debug_printmodeline - debug print a mode
8793+ * @dev: DRM device
8794+ * @mode: mode to print
8795+ *
8796+ * LOCKING:
8797+ * None.
8798+ *
8799+ * Describe @mode using DRM_DEBUG.
8800+ */
8801+void drm_mode_debug_printmodeline(struct drm_device *dev,
8802+ struct drm_display_mode *mode)
8803+{
8804+ DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x\n",
8805+ mode->mode_id, mode->name, mode->vrefresh, mode->clock,
8806+ mode->hdisplay, mode->hsync_start,
8807+ mode->hsync_end, mode->htotal,
8808+ mode->vdisplay, mode->vsync_start,
8809+ mode->vsync_end, mode->vtotal, mode->type);
8810+}
8811+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
8812+
8813+/**
8814+ * drm_mode_set_name - set the name on a mode
8815+ * @mode: name will be set in this mode
8816+ *
8817+ * LOCKING:
8818+ * None.
8819+ *
8820+ * Set the name of @mode to a standard format.
8821+ */
8822+void drm_mode_set_name(struct drm_display_mode *mode)
8823+{
8824+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
8825+ mode->vdisplay);
8826+}
8827+EXPORT_SYMBOL(drm_mode_set_name);
8828+
8829+/**
8830+ * drm_mode_list_concat - move modes from one list to another
8831+ * @head: source list
8832+ * @new: dst list
8833+ *
8834+ * LOCKING:
8835+ * Caller must ensure both lists are locked.
8836+ *
8837+ * Move all the modes from @head to @new.
8838+ */
8839+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
8840+{
8841+
8842+ struct list_head *entry, *tmp;
8843+
8844+ list_for_each_safe(entry, tmp, head) {
8845+ list_move_tail(entry, new);
8846+ }
8847+}
8848+
8849+/**
8850+ * drm_mode_width - get the width of a mode
8851+ * @mode: mode
8852+ *
8853+ * LOCKING:
8854+ * None.
8855+ *
8856+ * Return @mode's width (hdisplay) value.
8857+ *
8858+ * FIXME: is this needed?
8859+ *
8860+ * RETURNS:
8861+ * @mode->hdisplay
8862+ */
8863+int drm_mode_width(struct drm_display_mode *mode)
8864+{
8865+ return mode->hdisplay;
8866+
8867+}
8868+EXPORT_SYMBOL(drm_mode_width);
8869+
8870+/**
8871+ * drm_mode_height - get the height of a mode
8872+ * @mode: mode
8873+ *
8874+ * LOCKING:
8875+ * None.
8876+ *
8877+ * Return @mode's height (vdisplay) value.
8878+ *
8879+ * FIXME: is this needed?
8880+ *
8881+ * RETURNS:
8882+ * @mode->vdisplay
8883+ */
8884+int drm_mode_height(struct drm_display_mode *mode)
8885+{
8886+ return mode->vdisplay;
8887+}
8888+EXPORT_SYMBOL(drm_mode_height);
8889+
8890+/**
8891+ * drm_mode_vrefresh - get the vrefresh of a mode
8892+ * @mode: mode
8893+ *
8894+ * LOCKING:
8895+ * None.
8896+ *
8897+ * Return @mode's vrefresh rate or calculate it if necessary.
8898+ *
8899+ * FIXME: why is this needed? shouldn't vrefresh be set already?
8900+ *
8901+ * RETURNS:
8902+ * Vertical refresh rate of @mode x 1000, kept scaled for precision.
8903+ */
8904+int drm_mode_vrefresh(struct drm_display_mode *mode)
8905+{
8906+ int refresh = 0;
8907+ unsigned int calc_val;
8908+
8909+ if (mode->vrefresh > 0)
8910+ refresh = mode->vrefresh;
8911+ else if (mode->htotal > 0 && mode->vtotal > 0) {
 8912+		/* work out vrefresh; the value will be x1000 */
8913+ calc_val = (mode->clock * 1000);
8914+
8915+ calc_val /= mode->htotal;
8916+ calc_val *= 1000;
8917+ calc_val /= mode->vtotal;
8918+
8919+ refresh = calc_val;
8920+ if (mode->flags & V_INTERLACE)
8921+ refresh *= 2;
8922+ if (mode->flags & V_DBLSCAN)
8923+ refresh /= 2;
8924+ if (mode->vscan > 1)
8925+ refresh /= mode->vscan;
8926+ }
8927+ return refresh;
8928+}
8929+EXPORT_SYMBOL(drm_mode_vrefresh);
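
A worked example of the fixed-point arithmetic above, using the same illustrative 640x480 timings (a sketch; only the fields read by drm_mode_vrefresh are set):

	struct drm_display_mode vga = {
		.clock  = 25175,	/* kHz */
		.htotal = 800,
		.vtotal = 525,
	};
	/* (25175 * 1000 / 800) * 1000 / 525 = 59938, i.e. ~59.94 Hz x 1000 */
	int refresh = drm_mode_vrefresh(&vga);
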
8930+
8931+/**
8932+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
8933+ * @p: mode
 8934+ * @adjust_flags: adjustment flags; CRTC_INTERLACE_HALVE_V halves vertical timings
8935+ *
8936+ * LOCKING:
8937+ * None.
8938+ *
8939+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
8940+ */
8941+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
8942+{
8943+ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
8944+ return;
8945+
8946+ p->crtc_hdisplay = p->hdisplay;
8947+ p->crtc_hsync_start = p->hsync_start;
8948+ p->crtc_hsync_end = p->hsync_end;
8949+ p->crtc_htotal = p->htotal;
8950+ p->crtc_hskew = p->hskew;
8951+ p->crtc_vdisplay = p->vdisplay;
8952+ p->crtc_vsync_start = p->vsync_start;
8953+ p->crtc_vsync_end = p->vsync_end;
8954+ p->crtc_vtotal = p->vtotal;
8955+
8956+ if (p->flags & V_INTERLACE) {
8957+ if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
8958+ p->crtc_vdisplay /= 2;
8959+ p->crtc_vsync_start /= 2;
8960+ p->crtc_vsync_end /= 2;
8961+ p->crtc_vtotal /= 2;
8962+ }
8963+
8964+ p->crtc_vtotal |= 1;
8965+ }
8966+
8967+ if (p->flags & V_DBLSCAN) {
8968+ p->crtc_vdisplay *= 2;
8969+ p->crtc_vsync_start *= 2;
8970+ p->crtc_vsync_end *= 2;
8971+ p->crtc_vtotal *= 2;
8972+ }
8973+
8974+ if (p->vscan > 1) {
8975+ p->crtc_vdisplay *= p->vscan;
8976+ p->crtc_vsync_start *= p->vscan;
8977+ p->crtc_vsync_end *= p->vscan;
8978+ p->crtc_vtotal *= p->vscan;
8979+ }
8980+
8981+ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
8982+ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
8983+ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
8984+ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
8985+
8986+ p->crtc_hadjusted = false;
8987+ p->crtc_vadjusted = false;
8988+}
8989+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
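
To make the interlace handling above concrete, consider a hypothetical 1080i mode (vdisplay 1080, vtotal 1125, V_INTERLACE set in mode->flags):

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
	/* crtc_vdisplay = 1080 / 2 = 540;
	 * crtc_vtotal   = 1125 / 2 = 562, then |= 1 -> 563 (kept odd);
	 * the horizontal timings are copied through unchanged. */
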
8990+
8991+
8992+/**
8993+ * drm_mode_duplicate - allocate and duplicate an existing mode
 8994+ * @mode: mode to duplicate
8995+ *
8996+ * LOCKING:
8997+ * None.
8998+ *
8999+ * Just allocate a new mode, copy the existing mode into it, and return
9000+ * a pointer to it. Used to create new instances of established modes.
9001+ */
9002+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
9003+ struct drm_display_mode *mode)
9004+{
9005+ struct drm_display_mode *nmode;
9006+ int new_id;
9007+
9008+ nmode = drm_mode_create(dev);
9009+ if (!nmode)
9010+ return NULL;
9011+
9012+ new_id = nmode->mode_id;
9013+ *nmode = *mode;
9014+ nmode->mode_id = new_id;
9015+ INIT_LIST_HEAD(&nmode->head);
9016+ return nmode;
9017+}
9018+EXPORT_SYMBOL(drm_mode_duplicate);
9019+
9020+/**
9021+ * drm_mode_equal - test modes for equality
9022+ * @mode1: first mode
9023+ * @mode2: second mode
9024+ *
9025+ * LOCKING:
9026+ * None.
9027+ *
9028+ * Check to see if @mode1 and @mode2 are equivalent.
9029+ *
9030+ * RETURNS:
9031+ * True if the modes are equal, false otherwise.
9032+ */
9033+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
9034+{
9035+ if (mode1->clock == mode2->clock &&
9036+ mode1->hdisplay == mode2->hdisplay &&
9037+ mode1->hsync_start == mode2->hsync_start &&
9038+ mode1->hsync_end == mode2->hsync_end &&
9039+ mode1->htotal == mode2->htotal &&
9040+ mode1->hskew == mode2->hskew &&
9041+ mode1->vdisplay == mode2->vdisplay &&
9042+ mode1->vsync_start == mode2->vsync_start &&
9043+ mode1->vsync_end == mode2->vsync_end &&
9044+ mode1->vtotal == mode2->vtotal &&
9045+ mode1->vscan == mode2->vscan &&
9046+ mode1->flags == mode2->flags)
9047+ return true;
9048+
9049+ return false;
9050+}
9051+EXPORT_SYMBOL(drm_mode_equal);
9052+
9053+/**
9054+ * drm_mode_validate_size - make sure modes adhere to size constraints
9055+ * @dev: DRM device
9056+ * @mode_list: list of modes to check
9057+ * @maxX: maximum width
9058+ * @maxY: maximum height
9059+ * @maxPitch: max pitch
9060+ *
9061+ * LOCKING:
9062+ * Caller must hold a lock protecting @mode_list.
9063+ *
9064+ * The DRM device (@dev) has size and pitch limits. Here we validate the
9065+ * modes we probed for @dev against those limits and set their status as
9066+ * necessary.
9067+ */
9068+void drm_mode_validate_size(struct drm_device *dev,
9069+ struct list_head *mode_list,
9070+ int maxX, int maxY, int maxPitch)
9071+{
9072+ struct drm_display_mode *mode;
9073+
9074+ list_for_each_entry(mode, mode_list, head) {
9075+ if (maxPitch > 0 && mode->hdisplay > maxPitch)
9076+ mode->status = MODE_BAD_WIDTH;
9077+
9078+ if (maxX > 0 && mode->hdisplay > maxX)
9079+ mode->status = MODE_VIRTUAL_X;
9080+
9081+ if (maxY > 0 && mode->vdisplay > maxY)
9082+ mode->status = MODE_VIRTUAL_Y;
9083+ }
9084+}
9085+EXPORT_SYMBOL(drm_mode_validate_size);
9086+
9087+/**
9088+ * drm_mode_validate_clocks - validate modes against clock limits
9089+ * @dev: DRM device
9090+ * @mode_list: list of modes to check
9091+ * @min: minimum clock rate array
9092+ * @max: maximum clock rate array
9093+ * @n_ranges: number of clock ranges (size of arrays)
9094+ *
9095+ * LOCKING:
9096+ * Caller must hold a lock protecting @mode_list.
9097+ *
9098+ * Some code may need to check a mode list against the clock limits of the
9099+ * device in question. This function walks the mode list, testing to make
9100+ * sure each mode falls within a given range (defined by @min and @max
9101+ * arrays) and sets @mode->status as needed.
9102+ */
9103+void drm_mode_validate_clocks(struct drm_device *dev,
9104+ struct list_head *mode_list,
9105+ int *min, int *max, int n_ranges)
9106+{
9107+ struct drm_display_mode *mode;
9108+ int i;
9109+
9110+ list_for_each_entry(mode, mode_list, head) {
9111+ bool good = false;
9112+ for (i = 0; i < n_ranges; i++) {
9113+ if (mode->clock >= min[i] && mode->clock <= max[i]) {
9114+ good = true;
9115+ break;
9116+ }
9117+ }
9118+ if (!good)
9119+ mode->status = MODE_CLOCK_RANGE;
9120+ }
9121+}
9122+EXPORT_SYMBOL(drm_mode_validate_clocks);
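
A minimal usage sketch, assuming a hypothetical display engine whose PLL covers a single 25-200 MHz range (mode clocks are in kHz):

	int min[] = { 25000 };
	int max[] = { 200000 };

	drm_mode_validate_clocks(dev, &output->modes, min, max, 1);
	/* any mode outside 25-200 MHz now has status MODE_CLOCK_RANGE */
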
9123+
9124+/**
9125+ * drm_mode_prune_invalid - remove invalid modes from mode list
9126+ * @dev: DRM device
9127+ * @mode_list: list of modes to check
9128+ * @verbose: be verbose about it
9129+ *
9130+ * LOCKING:
9131+ * Caller must hold a lock protecting @mode_list.
9132+ *
9133+ * Once mode list generation is complete, a caller can use this routine to
9134+ * remove invalid modes from a mode list. If any of the modes have a
9135+ * status other than %MODE_OK, they are removed from @mode_list and freed.
9136+ */
9137+void drm_mode_prune_invalid(struct drm_device *dev,
9138+ struct list_head *mode_list, bool verbose)
9139+{
9140+ struct drm_display_mode *mode, *t;
9141+
9142+ list_for_each_entry_safe(mode, t, mode_list, head) {
9143+ if (mode->status != MODE_OK) {
9144+ list_del(&mode->head);
9145+ if (verbose) {
9146+ drm_mode_debug_printmodeline(dev, mode);
9147+ DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
9148+ }
9149+ kfree(mode);
9150+ }
9151+ }
9152+}
9153+
9154+/**
9155+ * drm_mode_compare - compare modes for favorability
9156+ * @lh_a: list_head for first mode
9157+ * @lh_b: list_head for second mode
9158+ *
9159+ * LOCKING:
9160+ * None.
9161+ *
9162+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
9163+ * which is better.
9164+ *
9165+ * RETURNS:
9166+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
9167+ * positive if @lh_b is better than @lh_a.
9168+ */
9169+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
9170+{
9171+ struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
9172+ struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
9173+ int diff;
9174+
9175+ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
9176+ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
9177+ if (diff)
9178+ return diff;
9179+ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
9180+ if (diff)
9181+ return diff;
9182+ diff = b->clock - a->clock;
9183+ return diff;
9184+}
9185+
 9186+/* FIXME: why don't we have a generic list sort function? */
9187+/* list sort from Mark J Roberts (mjr@znex.org) */
9188+void list_sort(struct list_head *head, int (*cmp)(struct list_head *a, struct list_head *b))
9189+{
9190+ struct list_head *p, *q, *e, *list, *tail, *oldhead;
9191+ int insize, nmerges, psize, qsize, i;
9192+
9193+ list = head->next;
9194+ list_del(head);
9195+ insize = 1;
9196+ for (;;) {
9197+ p = oldhead = list;
9198+ list = tail = NULL;
9199+ nmerges = 0;
9200+
9201+ while (p) {
9202+ nmerges++;
9203+ q = p;
9204+ psize = 0;
9205+ for (i = 0; i < insize; i++) {
9206+ psize++;
9207+ q = q->next == oldhead ? NULL : q->next;
9208+ if (!q)
9209+ break;
9210+ }
9211+
9212+ qsize = insize;
9213+ while (psize > 0 || (qsize > 0 && q)) {
9214+ if (!psize) {
9215+ e = q;
9216+ q = q->next;
9217+ qsize--;
9218+ if (q == oldhead)
9219+ q = NULL;
9220+ } else if (!qsize || !q) {
9221+ e = p;
9222+ p = p->next;
9223+ psize--;
9224+ if (p == oldhead)
9225+ p = NULL;
9226+ } else if (cmp(p, q) <= 0) {
9227+ e = p;
9228+ p = p->next;
9229+ psize--;
9230+ if (p == oldhead)
9231+ p = NULL;
9232+ } else {
9233+ e = q;
9234+ q = q->next;
9235+ qsize--;
9236+ if (q == oldhead)
9237+ q = NULL;
9238+ }
9239+ if (tail)
9240+ tail->next = e;
9241+ else
9242+ list = e;
9243+ e->prev = tail;
9244+ tail = e;
9245+ }
9246+ p = q;
9247+ }
9248+
9249+ tail->next = list;
9250+ list->prev = tail;
9251+
9252+ if (nmerges <= 1)
9253+ break;
9254+
9255+ insize *= 2;
9256+ }
9257+
9258+ head->next = list;
9259+ head->prev = list->prev;
9260+ list->prev->next = head;
9261+ list->prev = head;
9262+}
9263+
9264+/**
9265+ * drm_mode_sort - sort mode list
9266+ * @mode_list: list to sort
9267+ *
9268+ * LOCKING:
9269+ * Caller must hold a lock protecting @mode_list.
9270+ *
9271+ * Sort @mode_list by favorability, putting good modes first.
9272+ */
9273+void drm_mode_sort(struct list_head *mode_list)
9274+{
9275+ list_sort(mode_list, drm_mode_compare);
9276+}
9277+
9278+
9279+/**
9280+ * drm_mode_output_list_update - update the mode list for the output
9281+ * @output: the output to update
9282+ *
9283+ * LOCKING:
 9284+ * Caller must hold a lock protecting the mode lists of @output.
 9285+ *
 9286+ * This moves the modes from the @output probed_modes list
 9287+ * to the actual mode list. Each probed mode is compared against the
 9288+ * current list and only new modes are added. Any mode left unverified
 9289+ * after this point will be removed by the invalid-mode pruning pass.
9290+ */
9291+void drm_mode_output_list_update(struct drm_output *output)
9292+{
9293+ struct drm_display_mode *mode;
9294+ struct drm_display_mode *pmode, *pt;
9295+ int found_it;
9296+ list_for_each_entry_safe(pmode, pt, &output->probed_modes,
9297+ head) {
9298+ found_it = 0;
9299+ /* go through current modes checking for the new probed mode */
9300+ list_for_each_entry(mode, &output->modes, head) {
9301+ if (drm_mode_equal(pmode, mode)) {
9302+ found_it = 1;
9303+ /* if equal delete the probed mode */
9304+ mode->status = pmode->status;
9305+ list_del(&pmode->head);
9306+ kfree(pmode);
9307+ break;
9308+ }
9309+ }
9310+
9311+ if (!found_it) {
9312+ list_move_tail(&pmode->head, &output->modes);
9313+ }
9314+ }
9315+}
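
Taken together, the helpers in this file form a probe pipeline. A hedged sketch of how a driver could chain them once output->probed_modes has been filled in (the size limits, clock ranges and range count are illustrative):

	drm_mode_output_list_update(output);
	drm_mode_validate_size(dev, &output->modes, 2048, 2048, 2048);
	drm_mode_validate_clocks(dev, &output->modes, min, max, n_ranges);
	drm_mode_prune_invalid(dev, &output->modes, true);
	drm_mode_sort(&output->modes);
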
9316Index: linux-2.6.27/drivers/gpu/drm/drm_object.c
9317===================================================================
9318--- /dev/null 1970-01-01 00:00:00.000000000 +0000
9319+++ linux-2.6.27/drivers/gpu/drm/drm_object.c 2009-02-05 13:29:33.000000000 +0000
9320@@ -0,0 +1,294 @@
9321+/**************************************************************************
9322+ *
9323+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
9324+ * All Rights Reserved.
9325+ *
9326+ * Permission is hereby granted, free of charge, to any person obtaining a
9327+ * copy of this software and associated documentation files (the
9328+ * "Software"), to deal in the Software without restriction, including
9329+ * without limitation the rights to use, copy, modify, merge, publish,
9330+ * distribute, sub license, and/or sell copies of the Software, and to
9331+ * permit persons to whom the Software is furnished to do so, subject to
9332+ * the following conditions:
9333+ *
9334+ * The above copyright notice and this permission notice (including the
9335+ * next paragraph) shall be included in all copies or substantial portions
9336+ * of the Software.
9337+ *
9338+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
9339+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9340+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
9341+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
9342+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
9343+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
9344+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
9345+ *
9346+ **************************************************************************/
9347+/*
9348+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
9349+ */
9350+
9351+#include "drmP.h"
9352+
9353+int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
9354+ int shareable)
9355+{
9356+ struct drm_device *dev = priv->minor->dev;
9357+ int ret;
9358+
9359+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
9360+
9361+ /* The refcount will be bumped to 1 when we add the ref object below. */
9362+ atomic_set(&item->refcount, 0);
9363+ item->shareable = shareable;
9364+ item->owner = priv;
9365+
9366+ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
9367+ (unsigned long)item, 32, 0, 0);
9368+ if (ret)
9369+ return ret;
9370+
9371+ ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
9372+ if (ret)
9373+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
9374+
9375+ return ret;
9376+}
9377+EXPORT_SYMBOL(drm_add_user_object);
9378+
9379+struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
9380+{
9381+ struct drm_device *dev = priv->minor->dev;
9382+ struct drm_hash_item *hash;
9383+ int ret;
9384+ struct drm_user_object *item;
9385+
9386+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
9387+
9388+ ret = drm_ht_find_item(&dev->object_hash, key, &hash);
9389+ if (ret)
9390+ return NULL;
9391+
9392+ item = drm_hash_entry(hash, struct drm_user_object, hash);
9393+
9394+ if (priv != item->owner) {
9395+ struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
9396+ ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
9397+ if (ret) {
9398+ DRM_ERROR("Object not registered for usage\n");
9399+ return NULL;
9400+ }
9401+ }
9402+ return item;
9403+}
9404+EXPORT_SYMBOL(drm_lookup_user_object);
9405+
9406+static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
9407+{
9408+ struct drm_device *dev = priv->minor->dev;
9409+ int ret;
9410+
9411+ if (atomic_dec_and_test(&item->refcount)) {
9412+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
9413+ BUG_ON(ret);
9414+ item->remove(priv, item);
9415+ }
9416+}
9417+
9418+static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
9419+ enum drm_ref_type action)
9420+{
9421+ int ret = 0;
9422+
9423+ switch (action) {
9424+ case _DRM_REF_USE:
9425+ atomic_inc(&ro->refcount);
9426+ break;
9427+ default:
 9428+		if (ro->ref_struct_locked)
 9429+			ro->ref_struct_locked(priv, ro, action);
 9430+		break;
9433+ }
9434+ return ret;
9435+}
9436+
9437+int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
9438+ enum drm_ref_type ref_action)
9439+{
9440+ int ret = 0;
9441+ struct drm_ref_object *item;
9442+ struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
9443+
9444+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
9445+ if (!referenced_object->shareable && priv != referenced_object->owner) {
9446+ DRM_ERROR("Not allowed to reference this object\n");
9447+ return -EINVAL;
9448+ }
9449+
9450+ /*
 9451+	 * If this is not a usage reference, check that a usage reference has
 9452+	 * been registered first. Otherwise strange things may happen on destruction.
9453+ */
9454+
9455+ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
9456+ item =
9457+ drm_lookup_ref_object(priv, referenced_object,
9458+ _DRM_REF_USE);
9459+ if (!item) {
9460+ DRM_ERROR
9461+ ("Object not registered for usage by this client\n");
9462+ return -EINVAL;
9463+ }
9464+ }
9465+
 9466+	item = drm_lookup_ref_object(priv, referenced_object,
 9467+				     ref_action);
 9468+	if (item) {
9469+ atomic_inc(&item->refcount);
9470+ return drm_object_ref_action(priv, referenced_object,
9471+ ref_action);
9472+ }
9473+
9474+ item = drm_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
9475+ if (item == NULL) {
9476+ DRM_ERROR("Could not allocate reference object\n");
9477+ return -ENOMEM;
9478+ }
9479+
9480+ atomic_set(&item->refcount, 1);
9481+ item->hash.key = (unsigned long)referenced_object;
 9482+	item->unref_action = ref_action;
 9483+	ret = drm_ht_insert_item(ht, &item->hash);
 9484+	if (ret) {
 9485+		drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
 9486+		goto out;
 9487+	}
9488+ list_add(&item->list, &priv->refd_objects);
9489+ ret = drm_object_ref_action(priv, referenced_object, ref_action);
9490+out:
9491+ return ret;
9492+}
9493+
9494+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
9495+ struct drm_user_object *referenced_object,
9496+ enum drm_ref_type ref_action)
9497+{
9498+ struct drm_hash_item *hash;
9499+ int ret;
9500+
9501+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
9502+ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
9503+ (unsigned long)referenced_object, &hash);
9504+ if (ret)
9505+ return NULL;
9506+
9507+ return drm_hash_entry(hash, struct drm_ref_object, hash);
9508+}
9509+EXPORT_SYMBOL(drm_lookup_ref_object);
9510+
9511+static void drm_remove_other_references(struct drm_file *priv,
9512+ struct drm_user_object *ro)
9513+{
9514+ int i;
9515+ struct drm_open_hash *ht;
9516+ struct drm_hash_item *hash;
9517+ struct drm_ref_object *item;
9518+
9519+ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
9520+ ht = &priv->refd_object_hash[i];
9521+ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
9522+ item = drm_hash_entry(hash, struct drm_ref_object, hash);
9523+ drm_remove_ref_object(priv, item);
9524+ }
9525+ }
9526+}
9527+
9528+void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
9529+{
9530+ int ret;
9531+ struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
9532+ struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
9533+ enum drm_ref_type unref_action;
9534+
9535+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
9536+ unref_action = item->unref_action;
9537+ if (atomic_dec_and_test(&item->refcount)) {
9538+ ret = drm_ht_remove_item(ht, &item->hash);
9539+ BUG_ON(ret);
9540+ list_del_init(&item->list);
9541+ if (unref_action == _DRM_REF_USE)
9542+ drm_remove_other_references(priv, user_object);
9543+ drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
9544+ }
9545+
9546+ switch (unref_action) {
9547+ case _DRM_REF_USE:
9548+ drm_deref_user_object(priv, user_object);
9549+ break;
9550+ default:
9551+ BUG_ON(!user_object->unref);
9552+ user_object->unref(priv, user_object, unref_action);
9553+ break;
9554+ }
9555+
9556+}
9557+EXPORT_SYMBOL(drm_remove_ref_object);
9558+
9559+int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
9560+ enum drm_object_type type, struct drm_user_object **object)
9561+{
9562+ struct drm_device *dev = priv->minor->dev;
9563+ struct drm_user_object *uo;
9564+ struct drm_hash_item *hash;
9565+ int ret;
9566+
9567+ mutex_lock(&dev->struct_mutex);
9568+ ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
9569+ if (ret) {
9570+ DRM_ERROR("Could not find user object to reference.\n");
9571+ goto out_err;
9572+ }
9573+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
9574+ if (uo->type != type) {
9575+ ret = -EINVAL;
9576+ goto out_err;
9577+ }
9578+ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
9579+ if (ret)
9580+ goto out_err;
9581+ mutex_unlock(&dev->struct_mutex);
9582+ *object = uo;
9583+ return 0;
9584+out_err:
9585+ mutex_unlock(&dev->struct_mutex);
9586+ return ret;
9587+}
9588+
9589+int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
9590+ enum drm_object_type type)
9591+{
9592+ struct drm_device *dev = priv->minor->dev;
9593+ struct drm_user_object *uo;
9594+ struct drm_ref_object *ro;
9595+ int ret;
9596+
9597+ mutex_lock(&dev->struct_mutex);
9598+ uo = drm_lookup_user_object(priv, user_token);
9599+ if (!uo || (uo->type != type)) {
9600+ ret = -EINVAL;
9601+ goto out_err;
9602+ }
9603+ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
9604+ if (!ro) {
9605+ ret = -EINVAL;
9606+ goto out_err;
9607+ }
9608+ drm_remove_ref_object(priv, ro);
9609+ mutex_unlock(&dev->struct_mutex);
9610+ return 0;
9611+out_err:
9612+ mutex_unlock(&dev->struct_mutex);
9613+ return ret;
9614+}
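
A condensed sketch of the user-object lifecycle defined in this file, as seen from an ioctl handler (the drm_fence_type enumerator and the arg->handle token are assumptions; error handling trimmed):

	struct drm_user_object *uo;
	int ret;

	/* take a usage reference on the object named by a user-space token */
	ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
	if (ret)
		return ret;

	/* ... operate on the object ... */

	/* drop the reference; the last deref destroys the object */
	ret = drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
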
9615Index: linux-2.6.27/drivers/gpu/drm/drm_regman.c
9616===================================================================
9617--- /dev/null 1970-01-01 00:00:00.000000000 +0000
9618+++ linux-2.6.27/drivers/gpu/drm/drm_regman.c 2009-02-05 13:29:33.000000000 +0000
9619@@ -0,0 +1,200 @@
9620+/**************************************************************************
9621+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
9622+ * All Rights Reserved.
9623+ *
9624+ * Permission is hereby granted, free of charge, to any person obtaining a
9625+ * copy of this software and associated documentation files (the
9626+ * "Software"), to deal in the Software without restriction, including
9627+ * without limitation the rights to use, copy, modify, merge, publish,
9628+ * distribute, sub license, and/or sell copies of the Software, and to
9629+ * permit persons to whom the Software is furnished to do so, subject to
9630+ * the following conditions:
9631+ *
9632+ * The above copyright notice and this permission notice (including the
9633+ * next paragraph) shall be included in all copies or substantial portions
9634+ * of the Software.
9635+ *
9636+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
9637+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9638+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
9639+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
9640+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
9641+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
9642+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
9643+ *
9644+ **************************************************************************/
9645+/*
9646+ * An allocate-fence manager implementation intended for sets of base-registers
9647+ * or tiling-registers.
9648+ */
9649+
9650+#include "drmP.h"
9651+
9652+/*
9653+ * Allocate a compatible register and put it on the unfenced list.
9654+ */
9655+
9656+int drm_regs_alloc(struct drm_reg_manager *manager,
9657+ const void *data,
9658+ uint32_t fence_class,
9659+ uint32_t fence_type,
9660+ int interruptible, int no_wait, struct drm_reg **reg)
9661+{
9662+ struct drm_reg *entry, *next_entry;
9663+ int ret;
9664+
9665+ *reg = NULL;
9666+
9667+ /*
9668+ * Search the unfenced list.
9669+ */
9670+
9671+ list_for_each_entry(entry, &manager->unfenced, head) {
9672+ if (manager->reg_reusable(entry, data)) {
9673+ entry->new_fence_type |= fence_type;
9674+ goto out;
9675+ }
9676+ }
9677+
9678+ /*
9679+ * Search the lru list.
9680+ */
9681+
9682+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
9683+ struct drm_fence_object *fence = entry->fence;
9684+ if (fence->fence_class == fence_class &&
9685+ (entry->fence_type & fence_type) == entry->fence_type &&
9686+ manager->reg_reusable(entry, data)) {
9687+ list_del(&entry->head);
9688+ entry->new_fence_type = fence_type;
9689+ list_add_tail(&entry->head, &manager->unfenced);
9690+ goto out;
9691+ }
9692+ }
9693+
9694+ /*
9695+ * Search the free list.
9696+ */
9697+
9698+ list_for_each_entry(entry, &manager->free, head) {
9699+ list_del(&entry->head);
9700+ entry->new_fence_type = fence_type;
9701+ list_add_tail(&entry->head, &manager->unfenced);
9702+ goto out;
9703+ }
9704+
9705+ if (no_wait)
9706+ return -EBUSY;
9707+
9708+ /*
9709+ * Go back to the lru list and try to expire fences.
9710+ */
9711+
9712+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
9713+ BUG_ON(!entry->fence);
9714+ ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
9715+ entry->fence_type);
9716+ if (ret)
9717+ return ret;
9718+
9719+ drm_fence_usage_deref_unlocked(&entry->fence);
9720+ list_del(&entry->head);
9721+ entry->new_fence_type = fence_type;
9722+ list_add_tail(&entry->head, &manager->unfenced);
9723+ goto out;
9724+ }
9725+
9726+ /*
9727+ * Oops. All registers are used up :(.
9728+ */
9729+
9730+ return -EBUSY;
9731+out:
9732+ *reg = entry;
9733+ return 0;
9734+}
9735+EXPORT_SYMBOL(drm_regs_alloc);
9736+
9737+void drm_regs_fence(struct drm_reg_manager *manager,
9738+ struct drm_fence_object *fence)
9739+{
9740+ struct drm_reg *entry;
9741+ struct drm_reg *next_entry;
9742+
9743+ if (!fence) {
9744+
9745+ /*
9746+ * Old fence (if any) is still valid.
9747+ * Put back on free and lru lists.
9748+ */
9749+
9750+ list_for_each_entry_safe_reverse(entry, next_entry,
9751+ &manager->unfenced, head) {
9752+ list_del(&entry->head);
9753+ list_add(&entry->head, (entry->fence) ?
9754+ &manager->lru : &manager->free);
9755+ }
9756+ } else {
9757+
9758+ /*
 9759+		 * Fence the entries with the new fence and put them on the lru list.
9760+ */
9761+
9762+ list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
9763+ head) {
9764+ list_del(&entry->head);
9765+ if (entry->fence)
9766+ drm_fence_usage_deref_unlocked(&entry->fence);
9767+ drm_fence_reference_unlocked(&entry->fence, fence);
9768+
9769+ entry->fence_type = entry->new_fence_type;
9770+ BUG_ON((entry->fence_type & fence->type) !=
9771+ entry->fence_type);
9772+
9773+ list_add_tail(&entry->head, &manager->lru);
9774+ }
9775+ }
9776+}
9777+EXPORT_SYMBOL(drm_regs_fence);
9778+
9779+void drm_regs_free(struct drm_reg_manager *manager)
9780+{
9781+ struct drm_reg *entry;
9782+ struct drm_reg *next_entry;
9783+
9784+ drm_regs_fence(manager, NULL);
9785+
9786+ list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
9787+ list_del(&entry->head);
9788+ manager->reg_destroy(entry);
9789+ }
9790+
9791+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
9792+
9793+ (void)drm_fence_object_wait(entry->fence, 1, 1,
9794+ entry->fence_type);
9795+ list_del(&entry->head);
9796+ drm_fence_usage_deref_unlocked(&entry->fence);
9797+ manager->reg_destroy(entry);
9798+ }
9799+}
9800+EXPORT_SYMBOL(drm_regs_free);
9801+
9802+void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
9803+{
9804+ reg->fence = NULL;
9805+ list_add_tail(&reg->head, &manager->free);
9806+}
9807+EXPORT_SYMBOL(drm_regs_add);
9808+
9809+void drm_regs_init(struct drm_reg_manager *manager,
9810+ int (*reg_reusable) (const struct drm_reg *, const void *),
9811+ void (*reg_destroy) (struct drm_reg *))
9812+{
9813+ INIT_LIST_HEAD(&manager->free);
9814+ INIT_LIST_HEAD(&manager->lru);
9815+ INIT_LIST_HEAD(&manager->unfenced);
9816+ manager->reg_reusable = reg_reusable;
9817+ manager->reg_destroy = reg_destroy;
9818+}
9819+EXPORT_SYMBOL(drm_regs_init);
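
A hedged sketch of driving this manager for a small bank of tiling registers; my_regs, NUM_REGS, my_reusable and my_destroy are hypothetical driver-side names:

	struct drm_reg_manager man;
	struct drm_reg *reg;
	int i, ret;

	drm_regs_init(&man, my_reusable, my_destroy);
	for (i = 0; i < NUM_REGS; i++)
		drm_regs_add(&man, &my_regs[i]);	/* seed the free list */

	ret = drm_regs_alloc(&man, data, fence_class, fence_type,
			     1 /* interruptible */, 0 /* no_wait */, &reg);
	/* ... emit commands that use reg ... */
	drm_regs_fence(&man, fence);	/* move unfenced regs onto the lru list */
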
9820Index: linux-2.6.27/drivers/gpu/drm/drm_sman.c
9821===================================================================
9822--- linux-2.6.27.orig/drivers/gpu/drm/drm_sman.c 2008-10-09 23:13:53.000000000 +0100
9823+++ linux-2.6.27/drivers/gpu/drm/drm_sman.c 2009-02-05 13:29:33.000000000 +0000
9824@@ -33,7 +33,7 @@
9825 * struct or a context identifier.
9826 *
9827 * Authors:
 9828- * Thomas HellstrÃ¶m <thomas-at-tungstengraphics-dot-com>
9829+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
9830 */
9831
9832 #include "drm_sman.h"
9833Index: linux-2.6.27/drivers/gpu/drm/drm_stub.c
9834===================================================================
9835--- linux-2.6.27.orig/drivers/gpu/drm/drm_stub.c 2009-02-05 13:29:29.000000000 +0000
9836+++ linux-2.6.27/drivers/gpu/drm/drm_stub.c 2009-02-05 13:29:33.000000000 +0000
9837@@ -97,6 +97,7 @@
9838 init_timer(&dev->timer);
9839 mutex_init(&dev->struct_mutex);
9840 mutex_init(&dev->ctxlist_mutex);
9841+ mutex_init(&dev->bm.evict_mutex);
9842
9843 idr_init(&dev->drw_idr);
9844
9845@@ -113,6 +114,18 @@
9846 return -ENOMEM;
9847 }
9848
9849+ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
9850+ DRM_FILE_PAGE_OFFSET_SIZE)) {
9851+ drm_ht_remove(&dev->map_hash);
9852+ return -ENOMEM;
9853+ }
9854+
9855+ if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
9856+ drm_ht_remove(&dev->map_hash);
9857+ drm_mm_takedown(&dev->offset_manager);
9858+ return -ENOMEM;
9859+ }
9860+
9861 /* the DRM has 6 basic counters */
9862 dev->counters = 6;
9863 dev->types[0] = _DRM_STAT_LOCK;
9864@@ -152,15 +165,7 @@
9865 goto error_out_unreg;
9866 }
9867
9868- if (driver->driver_features & DRIVER_GEM) {
9869- retcode = drm_gem_init(dev);
9870- if (retcode) {
9871- DRM_ERROR("Cannot initialize graphics execution "
9872- "manager (GEM)\n");
9873- goto error_out_unreg;
9874- }
9875- }
9876-
9877+ drm_fence_manager_init(dev);
9878 return 0;
9879
9880 error_out_unreg:
9881@@ -284,6 +289,8 @@
9882 drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
9883 return ret;
9884 }
9885+EXPORT_SYMBOL(drm_get_dev);
9886+
9887
9888 /**
9889 * Put a device minor number.
9890Index: linux-2.6.27/drivers/gpu/drm/drm_ttm.c
9891===================================================================
9892--- /dev/null 1970-01-01 00:00:00.000000000 +0000
9893+++ linux-2.6.27/drivers/gpu/drm/drm_ttm.c 2009-02-05 13:29:33.000000000 +0000
9894@@ -0,0 +1,430 @@
9895+/**************************************************************************
9896+ *
9897+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
9898+ * All Rights Reserved.
9899+ *
9900+ * Permission is hereby granted, free of charge, to any person obtaining a
9901+ * copy of this software and associated documentation files (the
9902+ * "Software"), to deal in the Software without restriction, including
9903+ * without limitation the rights to use, copy, modify, merge, publish,
9904+ * distribute, sub license, and/or sell copies of the Software, and to
9905+ * permit persons to whom the Software is furnished to do so, subject to
9906+ * the following conditions:
9907+ *
9908+ * The above copyright notice and this permission notice (including the
9909+ * next paragraph) shall be included in all copies or substantial portions
9910+ * of the Software.
9911+ *
9912+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
9913+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9914+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
9915+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
9916+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
9917+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
9918+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
9919+ *
9920+ **************************************************************************/
9921+/*
9922+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
9923+ */
9924+
9925+#include "drmP.h"
9926+#include <asm/agp.h>
9927+
9928+static void drm_ttm_ipi_handler(void *null)
9929+{
9930+ flush_agp_cache();
9931+}
9932+
9933+void drm_ttm_cache_flush(void)
9934+{
9935+ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
9936+ DRM_ERROR("Timed out waiting for drm cache flush.\n");
9937+}
9938+EXPORT_SYMBOL(drm_ttm_cache_flush);
9939+
9940+/*
9941+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
9942+ */
9943+
9944+static void ttm_alloc_pages(struct drm_ttm *ttm)
9945+{
9946+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
9947+ ttm->pages = NULL;
9948+
9949+ if (size <= PAGE_SIZE)
9950+ ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
9951+
9952+ if (!ttm->pages) {
9953+ ttm->pages = vmalloc_user(size);
9954+ if (ttm->pages)
9955+ ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
9956+ }
9957+}
9958+
9959+static void ttm_free_pages(struct drm_ttm *ttm)
9960+{
9961+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
9962+
9963+ if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
9964+ vfree(ttm->pages);
9965+ ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
9966+ } else {
9967+ drm_free(ttm->pages, size, DRM_MEM_TTM);
9968+ }
9969+ ttm->pages = NULL;
9970+}
9971+
9972+static struct page *drm_ttm_alloc_page(void)
9973+{
9974+ struct page *page;
9975+
9976+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
9977+ if (!page)
9978+ return NULL;
9979+ return page;
9980+}
9981+
9982+/*
9983+ * Change caching policy for the linear kernel map
9984+ * for range of pages in a ttm.
9985+ */
9986+
9987+static int drm_set_caching(struct drm_ttm *ttm, int noncached)
9988+{
9989+ int i;
9990+ struct page **cur_page;
9991+ int do_tlbflush = 0;
9992+
9993+ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
9994+ return 0;
9995+
9996+ if (noncached)
9997+ drm_ttm_cache_flush();
9998+
9999+ for (i = 0; i < ttm->num_pages; ++i) {
10000+ cur_page = ttm->pages + i;
10001+ if (*cur_page) {
10002+ if (!PageHighMem(*cur_page)) {
10003+ if (noncached) {
10004+ map_page_into_agp(*cur_page);
10005+ } else {
10006+ unmap_page_from_agp(*cur_page);
10007+ }
10008+ do_tlbflush = 1;
10009+ }
10010+ }
10011+ }
 10012+	/* if (do_tlbflush)
 10013+	 *	flush_agp_mappings(); */
10014+
10015+ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
10016+
10017+ return 0;
10018+}
10019+
10020+
10021+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
10022+{
10023+ int write;
10024+ int dirty;
10025+ struct page *page;
10026+ int i;
10027+
10028+ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
10029+ write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
10030+ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
10031+
10032+ for (i = 0; i < ttm->num_pages; ++i) {
10033+ page = ttm->pages[i];
10034+ if (page == NULL)
10035+ continue;
10036+
10037+ if (page == ttm->dummy_read_page) {
10038+ BUG_ON(write);
10039+ continue;
10040+ }
10041+
10042+ if (write && dirty && !PageReserved(page))
10043+ set_page_dirty_lock(page);
10044+
10045+ ttm->pages[i] = NULL;
10046+ put_page(page);
10047+ }
10048+}
10049+
10050+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
10051+{
10052+ int i;
10053+ struct drm_buffer_manager *bm = &ttm->dev->bm;
10054+ struct page **cur_page;
10055+
10056+ for (i = 0; i < ttm->num_pages; ++i) {
10057+ cur_page = ttm->pages + i;
10058+ if (*cur_page) {
10059+ if (page_count(*cur_page) != 1)
10060+ DRM_ERROR("Erroneous page count. Leaking pages.\n");
10061+ if (page_mapped(*cur_page))
10062+ DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
10063+ __free_page(*cur_page);
10064+ --bm->cur_pages;
10065+ }
10066+ }
10067+}
10068+
10069+/*
10070+ * Free all resources associated with a ttm.
10071+ */
10072+
10073+int drm_destroy_ttm(struct drm_ttm *ttm)
10074+{
10075+ struct drm_ttm_backend *be;
10076+
10077+ if (!ttm)
10078+ return 0;
10079+
10080+ be = ttm->be;
10081+ if (be) {
10082+ be->func->destroy(be);
10083+ ttm->be = NULL;
10084+ }
10085+
10086+ if (ttm->pages) {
10087+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
10088+ drm_set_caching(ttm, 0);
10089+
10090+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
10091+ drm_ttm_free_user_pages(ttm);
10092+ else
10093+ drm_ttm_free_alloced_pages(ttm);
10094+
10095+ ttm_free_pages(ttm);
10096+ }
10097+
10098+ return 0;
10099+}
10100+
10101+struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
10102+{
10103+ struct page *p;
10104+ struct drm_buffer_manager *bm = &ttm->dev->bm;
10105+
10106+ p = ttm->pages[index];
10107+ if (!p) {
10108+ p = drm_ttm_alloc_page();
10109+ if (!p)
10110+ return NULL;
10111+ ttm->pages[index] = p;
10112+ ++bm->cur_pages;
10113+ }
10114+ return p;
10115+}
10116+EXPORT_SYMBOL(drm_ttm_get_page);
10117+
10118+int drm_ttm_set_user(struct drm_ttm *ttm,
10119+ struct task_struct *tsk,
10120+ int write,
10121+ unsigned long start,
10122+ unsigned long num_pages,
10123+ struct page *dummy_read_page)
10124+{
10125+ struct mm_struct *mm = tsk->mm;
10126+ int ret;
10127+ int i;
10128+
10129+ BUG_ON(num_pages != ttm->num_pages);
10130+
10131+ ttm->dummy_read_page = dummy_read_page;
10132+ ttm->page_flags |= DRM_TTM_PAGE_USER |
10133+ ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
10134+
10135+
10136+ down_read(&mm->mmap_sem);
10137+ ret = get_user_pages(tsk, mm, start, num_pages,
10138+ write, 0, ttm->pages, NULL);
10139+ up_read(&mm->mmap_sem);
10140+
10141+ if (ret != num_pages && write) {
10142+ drm_ttm_free_user_pages(ttm);
10143+ return -ENOMEM;
10144+ }
10145+
10146+ for (i = 0; i < num_pages; ++i) {
10147+ if (ttm->pages[i] == NULL)
10148+ ttm->pages[i] = ttm->dummy_read_page;
10149+ }
10150+
10151+ return 0;
10152+}
10153+
10154+int drm_ttm_populate(struct drm_ttm *ttm)
10155+{
10156+ struct page *page;
10157+ unsigned long i;
10158+ struct drm_ttm_backend *be;
10159+
10160+ if (ttm->state != ttm_unpopulated)
10161+ return 0;
10162+
10163+ be = ttm->be;
10164+ for (i = 0; i < ttm->num_pages; ++i) {
10165+ page = drm_ttm_get_page(ttm, i);
10166+ if (!page)
10167+ return -ENOMEM;
10168+ }
10169+ be->func->populate(be, ttm->num_pages, ttm->pages);
10170+ ttm->state = ttm_unbound;
10171+ return 0;
10172+}
10173+
10174+static inline size_t drm_size_align(size_t size)
10175+{
10176+ size_t tmpSize = 4;
10177+ if (size > PAGE_SIZE)
10178+ return PAGE_ALIGN(size);
10179+ while (tmpSize < size)
10180+ tmpSize <<= 1;
10181+
10182+ return (size_t) tmpSize;
10183+}
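
For example, drm_size_align(24) returns 32 (the next power of two), while drm_size_align(PAGE_SIZE + 1) returns PAGE_ALIGN(PAGE_SIZE + 1), i.e. two pages: sub-page sizes round up to a power of two, larger sizes to a whole number of pages.
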
10184+
10185+/*
10186+ * Calculate the estimated pinned memory usage of a ttm.
10187+ */
10188+
10189+unsigned long drm_ttm_size(struct drm_device *dev,
10190+ unsigned long num_pages,
10191+ int user_bo)
10192+{
10193+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
10194+ unsigned long tmp;
10195+
10196+ tmp = drm_size_align(sizeof(struct drm_ttm)) +
10197+ drm_size_align(num_pages * sizeof(struct page *)) +
10198+ ((user_bo) ? 0 : drm_size_align(num_pages * PAGE_SIZE));
10199+
10200+ if (bo_driver->backend_size)
10201+ tmp += bo_driver->backend_size(dev, num_pages);
10202+ else
10203+ tmp += drm_size_align(num_pages * sizeof(struct page *)) +
10204+ 3*drm_size_align(sizeof(struct drm_ttm_backend));
10205+ return tmp;
10206+}
10207+
10208+
10209+/*
10210+ * Initialize a ttm.
10211+ */
10212+
10213+struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
10214+{
10215+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
10216+ struct drm_ttm *ttm;
10217+
10218+ if (!bo_driver)
10219+ return NULL;
10220+
10221+ ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
10222+ if (!ttm)
10223+ return NULL;
10224+
10225+ ttm->dev = dev;
10226+ atomic_set(&ttm->vma_count, 0);
10227+
10228+ ttm->destroy = 0;
10229+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
10230+
10231+ ttm->page_flags = 0;
10232+
10233+ /*
10234+ * Account also for AGP module memory usage.
10235+ */
10236+
10237+ ttm_alloc_pages(ttm);
10238+ if (!ttm->pages) {
10239+ drm_destroy_ttm(ttm);
10240+ DRM_ERROR("Failed allocating page table\n");
10241+ return NULL;
10242+ }
10243+ ttm->be = bo_driver->create_ttm_backend_entry(dev);
10244+ if (!ttm->be) {
10245+ drm_destroy_ttm(ttm);
10246+ DRM_ERROR("Failed creating ttm backend entry\n");
10247+ return NULL;
10248+ }
10249+ ttm->state = ttm_unpopulated;
10250+ return ttm;
10251+}
10252+
10253+/*
10254+ * Unbind a ttm region from the aperture.
10255+ */
10256+
10257+void drm_ttm_evict(struct drm_ttm *ttm)
10258+{
10259+ struct drm_ttm_backend *be = ttm->be;
10260+ int ret;
10261+
10262+ if (ttm->state == ttm_bound) {
10263+ ret = be->func->unbind(be);
10264+ BUG_ON(ret);
10265+ }
10266+
10267+ ttm->state = ttm_evicted;
10268+}
10269+
10270+void drm_ttm_fixup_caching(struct drm_ttm *ttm)
10271+{
10272+
10273+ if (ttm->state == ttm_evicted) {
10274+ struct drm_ttm_backend *be = ttm->be;
10275+ if (be->func->needs_ub_cache_adjust(be))
10276+ drm_set_caching(ttm, 0);
10277+ ttm->state = ttm_unbound;
10278+ }
10279+}
10280+
10281+void drm_ttm_unbind(struct drm_ttm *ttm)
10282+{
10283+ if (ttm->state == ttm_bound)
10284+ drm_ttm_evict(ttm);
10285+
10286+ drm_ttm_fixup_caching(ttm);
10287+}
10288+
10289+int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
10290+{
10291+ struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
10292+ int ret = 0;
10293+ struct drm_ttm_backend *be;
10294+
10295+ if (!ttm)
10296+ return -EINVAL;
10297+ if (ttm->state == ttm_bound)
10298+ return 0;
10299+
10300+ be = ttm->be;
10301+
10302+ ret = drm_ttm_populate(ttm);
10303+ if (ret)
10304+ return ret;
10305+
10306+ if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
10307+ drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
10308+ else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
10309+ bo_driver->ttm_cache_flush)
10310+ bo_driver->ttm_cache_flush(ttm);
10311+
10312+ ret = be->func->bind(be, bo_mem);
10313+ if (ret) {
10314+ ttm->state = ttm_evicted;
10315+ DRM_ERROR("Couldn't bind backend.\n");
10316+ return ret;
10317+ }
10318+
10319+ ttm->state = ttm_bound;
10320+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
10321+ ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
10322+ return 0;
10323+}
10324+EXPORT_SYMBOL(drm_bind_ttm);
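
A condensed sketch of the ttm lifecycle implemented in this file (bo_mem would be supplied by the buffer manager; error handling trimmed):

	struct drm_ttm *ttm;
	int ret;

	ttm = drm_ttm_init(dev, size);		/* state: ttm_unpopulated */
	ret = drm_bind_ttm(ttm, bo_mem);	/* allocates pages and binds the
						 * backend; state: ttm_bound */
	drm_ttm_unbind(ttm);			/* back to ttm_unbound */
	drm_destroy_ttm(ttm);			/* frees pages and the backend */
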
10325Index: linux-2.6.27/drivers/gpu/drm/drm_vm.c
10326===================================================================
10327--- linux-2.6.27.orig/drivers/gpu/drm/drm_vm.c 2008-10-09 23:13:53.000000000 +0100
10328+++ linux-2.6.27/drivers/gpu/drm/drm_vm.c 2009-02-05 13:29:33.000000000 +0000
10329@@ -40,6 +40,10 @@
10330
10331 static void drm_vm_open(struct vm_area_struct *vma);
10332 static void drm_vm_close(struct vm_area_struct *vma);
10333+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
10334+ struct file *filp,
10335+ drm_local_map_t *map);
10336+
10337
10338 static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
10339 {
10340@@ -267,6 +271,9 @@
10341 dmah.size = map->size;
10342 __drm_pci_free(dev, &dmah);
10343 break;
10344+ case _DRM_TTM:
10345+ BUG_ON(1);
10346+ break;
10347 }
10348 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
10349 }
10350@@ -647,6 +654,8 @@
10351 vma->vm_flags |= VM_RESERVED;
10352 vma->vm_page_prot = drm_dma_prot(map->type, vma);
10353 break;
10354+ case _DRM_TTM:
10355+ return drm_bo_mmap_locked(vma, filp, map);
10356 default:
10357 return -EINVAL; /* This should never happen. */
10358 }
10359@@ -671,3 +680,213 @@
10360 return ret;
10361 }
10362 EXPORT_SYMBOL(drm_mmap);
10363+
10364+/**
10365+ * buffer object vm functions.
10366+ */
10367+
10368+/**
10369+ * \c Pagefault method for buffer objects.
10370+ *
10371+ * \param vma Virtual memory area.
 10372+ * \param vmf Fault data, including the faulting address.
 10373+ * \return A VM_FAULT_* code. The pfn is inserted manually.
10374+ *
 10375+ * It's important that pfns are inserted while holding the bo->mutex lock;
 10376+ * otherwise we might race with unmap_mapping_range(), which is always
 10377+ * called with the bo->mutex lock held.
10378+ *
 10379+ * We're modifying the page attribute bits of the vma->vm_page_prot field
 10380+ * while holding the mmap_sem only in read mode, not in write mode.
10381+ * These bits are not used by the mm subsystem code, and we consider them
10382+ * protected by the bo->mutex lock.
10383+ */
10384+
 10385+#define DRM_NOPFN_EXTRA 15 /* Fault in 16 pages at a time */
10386+
10387+int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10388+{
10389+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
10390+ unsigned long page_offset;
10391+ struct page *page = NULL;
10392+ struct drm_ttm *ttm = NULL;
10393+ struct drm_device *dev;
10394+ unsigned long pfn;
10395+ int err;
10396+ unsigned long bus_base;
10397+ unsigned long bus_offset;
10398+ unsigned long bus_size;
10399+ int i;
10400+ unsigned long ret = VM_FAULT_NOPAGE;
10401+ unsigned long address = (unsigned long)vmf->virtual_address;
10402+
10403+ if (address > vma->vm_end)
10404+ return VM_FAULT_SIGBUS;
10405+
10406+ dev = bo->dev;
10407+ err = drm_bo_read_lock(&dev->bm.bm_lock);
10408+ if (err)
10409+ return VM_FAULT_NOPAGE;
10410+
10411+ err = mutex_lock_interruptible(&bo->mutex);
10412+ if (err) {
10413+ drm_bo_read_unlock(&dev->bm.bm_lock);
10414+ return VM_FAULT_NOPAGE;
10415+ }
10416+
10417+ err = drm_bo_wait(bo, 0, 0, 0);
10418+ if (err) {
10419+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
10420+ goto out_unlock;
10421+ }
10422+
10423+ /*
10424+ * If buffer happens to be in a non-mappable location,
10425+ * move it to a mappable.
10426+ */
10427+
10428+ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
10429+ uint32_t new_mask = bo->mem.mask |
10430+ DRM_BO_FLAG_MAPPABLE |
10431+ DRM_BO_FLAG_FORCE_MAPPABLE;
10432+ err = drm_bo_move_buffer(bo, new_mask, 0, 0);
10433+ if (err) {
10434+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
10435+ goto out_unlock;
10436+ }
10437+ }
10438+
10439+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
10440+ &bus_size);
10441+
10442+ if (err) {
10443+ ret = VM_FAULT_SIGBUS;
10444+ goto out_unlock;
10445+ }
10446+
10447+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
10448+
10449+ if (bus_size) {
10450+ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
10451+
10452+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
10453+ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
10454+ } else {
10455+ ttm = bo->ttm;
10456+
10457+ drm_ttm_fixup_caching(ttm);
10458+ page = drm_ttm_get_page(ttm, page_offset);
10459+ if (!page) {
10460+ ret = VM_FAULT_OOM;
10461+ goto out_unlock;
10462+ }
10463+ pfn = page_to_pfn(page);
10464+ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
10465+ vm_get_page_prot(vma->vm_flags) :
10466+ drm_io_prot(_DRM_TTM, vma);
10467+ }
10468+
10469+ err = vm_insert_pfn(vma, address, pfn);
10470+ if (err) {
10471+ ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
10472+ goto out_unlock;
10473+ }
10474+
 10475+	for (i = 0; i < DRM_NOPFN_EXTRA; ++i) {
10476+
10477+ if (++page_offset == bo->mem.num_pages)
10478+ break;
10479+ address = vma->vm_start + (page_offset << PAGE_SHIFT);
10480+ if (address >= vma->vm_end)
10481+ break;
10482+ if (bus_size) {
10483+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT)
10484+ + page_offset;
10485+ } else {
10486+ page = drm_ttm_get_page(ttm, page_offset);
10487+ if (!page)
10488+ break;
10489+ pfn = page_to_pfn(page);
10490+ }
10491+ if (vm_insert_pfn(vma, address, pfn))
10492+ break;
10493+ }
10494+out_unlock:
10495+ mutex_unlock(&bo->mutex);
10496+ drm_bo_read_unlock(&dev->bm.bm_lock);
10497+ return ret;
10498+}
10499+EXPORT_SYMBOL(drm_bo_vm_fault);
10500+
10501+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
10502+{
10503+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
10504+
10505+ drm_vm_open_locked(vma);
10506+ atomic_inc(&bo->usage);
10507+}
10508+
10509+/**
10510+ * \c vma open method for buffer objects.
10511+ *
10512+ * \param vma virtual memory area.
10513+ */
10514+
10515+static void drm_bo_vm_open(struct vm_area_struct *vma)
10516+{
10517+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
10518+ struct drm_device *dev = bo->dev;
10519+
10520+ mutex_lock(&dev->struct_mutex);
10521+ drm_bo_vm_open_locked(vma);
10522+ mutex_unlock(&dev->struct_mutex);
10523+}
10524+
10525+/**
10526+ * \c vma close method for buffer objects.
10527+ *
10528+ * \param vma virtual memory area.
10529+ */
10530+
10531+static void drm_bo_vm_close(struct vm_area_struct *vma)
10532+{
10533+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
10534+ struct drm_device *dev = bo->dev;
10535+
10536+ drm_vm_close(vma);
10537+ if (bo) {
10538+ mutex_lock(&dev->struct_mutex);
10539+ drm_bo_usage_deref_locked((struct drm_buffer_object **)
10540+ &vma->vm_private_data);
10541+ mutex_unlock(&dev->struct_mutex);
10542+ }
10543+ return;
10544+}
10545+
10546+static struct vm_operations_struct drm_bo_vm_ops = {
10547+ .fault = drm_bo_vm_fault,
10548+ .open = drm_bo_vm_open,
10549+ .close = drm_bo_vm_close,
10550+};
10551+
10552+/**
10553+ * mmap buffer object memory.
10554+ *
10555+ * \param vma virtual memory area.
10556+ * \param file_priv DRM file private.
10557+ * \param map The buffer object drm map.
10558+ * \return zero on success or a negative number on failure.
10559+ */
10560+
10561+int drm_bo_mmap_locked(struct vm_area_struct *vma,
10562+ struct file *filp,
10563+ drm_local_map_t *map)
10564+{
10565+ vma->vm_ops = &drm_bo_vm_ops;
10566+ vma->vm_private_data = map->handle;
10567+ vma->vm_file = filp;
10568+ vma->vm_flags |= VM_RESERVED | VM_IO;
10569+ vma->vm_flags |= VM_PFNMAP;
10570+ drm_bo_vm_open_locked(vma);
10571+ return 0;
10572+}
10573Index: linux-2.6.27/drivers/gpu/drm/psb/Makefile
10574===================================================================
10575--- /dev/null 1970-01-01 00:00:00.000000000 +0000
10576+++ linux-2.6.27/drivers/gpu/drm/psb/Makefile 2009-02-05 13:29:33.000000000 +0000
10577@@ -0,0 +1,13 @@
10578+#
10579+# Makefile for the drm device driver. This driver provides support for the
10580+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
10581+
10582+ccflags-y := -Iinclude/drm
10583+
10584+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o psb_buffer.o \
10585+ psb_gtt.o psb_setup.o psb_i2c.o psb_fb.o psb_msvdx.o \
10586+ psb_msvdxinit.o psb_regman.o psb_reset.o psb_scene.o \
10587+ psb_schedule.o psb_xhw.o
10588+
10589+
10590+obj-$(CONFIG_DRM_PSB) += psb.o
10591Index: linux-2.6.27/drivers/gpu/drm/psb/i915_drv.h
10592===================================================================
10593--- /dev/null 1970-01-01 00:00:00.000000000 +0000
10594+++ linux-2.6.27/drivers/gpu/drm/psb/i915_drv.h 2009-02-05 13:29:33.000000000 +0000
10595@@ -0,0 +1,795 @@
10596+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
10597+ */
10598+/*
10599+ *
10600+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
10601+ * All Rights Reserved.
10602+ *
10603+ * Permission is hereby granted, free of charge, to any person obtaining a
10604+ * copy of this software and associated documentation files (the
10605+ * "Software"), to deal in the Software without restriction, including
10606+ * without limitation the rights to use, copy, modify, merge, publish,
10607+ * distribute, sub license, and/or sell copies of the Software, and to
10608+ * permit persons to whom the Software is furnished to do so, subject to
10609+ * the following conditions:
10610+ *
10611+ * The above copyright notice and this permission notice (including the
10612+ * next paragraph) shall be included in all copies or substantial portions
10613+ * of the Software.
10614+ *
10615+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10616+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10617+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
10618+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
10619+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
10620+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
10621+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10622+ *
10623+ */
10624+
10625+#ifndef _I915_DRV_H_
10626+#define _I915_DRV_H_
10627+
10628+#include "i915_reg.h"
10629+
10630+/* General customization:
10631+ */
10632+
10633+#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
10634+
10635+#define DRIVER_NAME "i915"
10636+#define DRIVER_DESC "Intel Graphics"
10637+#define DRIVER_DATE "20070209"
10638+
10639+#if defined(__linux__)
10640+#define I915_HAVE_FENCE
10641+#define I915_HAVE_BUFFER
10642+#endif
10643+
10644+/* Interface history:
10645+ *
10646+ * 1.1: Original.
10647+ * 1.2: Add Power Management
10648+ * 1.3: Add vblank support
10649+ * 1.4: Fix cmdbuffer path, add heap destroy
10650+ * 1.5: Add vblank pipe configuration
10651+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
10652+ * - Support vertical blank on secondary display pipe
10653+ * 1.8: New ioctl for ARB_Occlusion_Query
10654+ * 1.9: Usable page flipping and triple buffering
10655+ * 1.10: Plane/pipe disentangling
10656+ * 1.11: TTM superioctl
10657+ */
10658+#define DRIVER_MAJOR 1
10659+#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
10660+#define DRIVER_MINOR 11
10661+#else
10662+#define DRIVER_MINOR 6
10663+#endif
10664+#define DRIVER_PATCHLEVEL 0
10665+
10666+#define DRM_DRIVER_PRIVATE_T struct drm_i915_private
10667+
10668+#ifdef I915_HAVE_BUFFER
10669+#define I915_MAX_VALIDATE_BUFFERS 4096
10670+#endif
10671+
10672+struct drm_i915_ring_buffer {
10673+ int tail_mask;
10674+ unsigned long Start;
10675+ unsigned long End;
10676+ unsigned long Size;
10677+ u8 *virtual_start;
10678+ int head;
10679+ int tail;
10680+ int space;
10681+ drm_local_map_t map;
10682+};
10683+
10684+struct mem_block {
10685+ struct mem_block *next;
10686+ struct mem_block *prev;
10687+ int start;
10688+ int size;
10689+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
10690+};
10691+
10692+struct drm_i915_vbl_swap {
10693+ struct list_head head;
10694+ drm_drawable_t drw_id;
10695+ unsigned int plane;
10696+ unsigned int sequence;
10697+ int flip;
10698+};
10699+
10700+struct drm_i915_private {
10701+ struct drm_buffer_object *ring_buffer;
10702+ drm_local_map_t *sarea;
10703+ drm_local_map_t *mmio_map;
10704+
10705+ unsigned long mmiobase;
10706+ unsigned long mmiolen;
10707+
10708+ struct drm_i915_sarea *sarea_priv;
10709+ struct drm_i915_ring_buffer ring;
10710+
10711+ struct drm_dma_handle *status_page_dmah;
10712+ void *hw_status_page;
10713+ dma_addr_t dma_status_page;
10714+ uint32_t counter;
10715+ unsigned int status_gfx_addr;
10716+ drm_local_map_t hws_map;
10717+
10718+ unsigned int cpp;
10719+ int use_mi_batchbuffer_start;
10720+
10721+ wait_queue_head_t irq_queue;
10722+ atomic_t irq_received;
10723+ atomic_t irq_emitted;
10724+
10725+ int tex_lru_log_granularity;
10726+ int allow_batchbuffer;
10727+ struct mem_block *agp_heap;
10728+ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
10729+ int vblank_pipe;
10730+ DRM_SPINTYPE user_irq_lock;
10731+ int user_irq_refcount;
10732+ int fence_irq_on;
10733+ uint32_t irq_enable_reg;
10734+ int irq_enabled;
10735+
10736+#ifdef I915_HAVE_FENCE
10737+ uint32_t flush_sequence;
10738+ uint32_t flush_flags;
10739+ uint32_t flush_pending;
10740+ uint32_t saved_flush_status;
10741+ uint32_t reported_sequence;
10742+ int reported_sequence_valid;
10743+#endif
10744+#ifdef I915_HAVE_BUFFER
10745+ void *agp_iomap;
10746+ unsigned int max_validate_buffers;
10747+ struct mutex cmdbuf_mutex;
10748+#endif
10749+
10750+ DRM_SPINTYPE swaps_lock;
10751+ struct drm_i915_vbl_swap vbl_swaps;
10752+ unsigned int swaps_pending;
10753+
10754+ /* LVDS info */
10755+ int backlight_duty_cycle; /* restore backlight to this value */
10756+ bool panel_wants_dither;
10757+ struct drm_display_mode *panel_fixed_mode;
10758+
10759+ /* Register state */
10760+ u8 saveLBB;
10761+ u32 saveDSPACNTR;
10762+ u32 saveDSPBCNTR;
10763+ u32 savePIPEACONF;
10764+ u32 savePIPEBCONF;
10765+ u32 savePIPEASRC;
10766+ u32 savePIPEBSRC;
10767+ u32 saveFPA0;
10768+ u32 saveFPA1;
10769+ u32 saveDPLL_A;
10770+ u32 saveDPLL_A_MD;
10771+ u32 saveHTOTAL_A;
10772+ u32 saveHBLANK_A;
10773+ u32 saveHSYNC_A;
10774+ u32 saveVTOTAL_A;
10775+ u32 saveVBLANK_A;
10776+ u32 saveVSYNC_A;
10777+ u32 saveBCLRPAT_A;
10778+ u32 saveDSPASTRIDE;
10779+ u32 saveDSPASIZE;
10780+ u32 saveDSPAPOS;
10781+ u32 saveDSPABASE;
10782+ u32 saveDSPASURF;
10783+ u32 saveDSPATILEOFF;
10784+ u32 savePFIT_PGM_RATIOS;
10785+ u32 saveBLC_PWM_CTL;
10786+ u32 saveBLC_PWM_CTL2;
10787+ u32 saveFPB0;
10788+ u32 saveFPB1;
10789+ u32 saveDPLL_B;
10790+ u32 saveDPLL_B_MD;
10791+ u32 saveHTOTAL_B;
10792+ u32 saveHBLANK_B;
10793+ u32 saveHSYNC_B;
10794+ u32 saveVTOTAL_B;
10795+ u32 saveVBLANK_B;
10796+ u32 saveVSYNC_B;
10797+ u32 saveBCLRPAT_B;
10798+ u32 saveDSPBSTRIDE;
10799+ u32 saveDSPBSIZE;
10800+ u32 saveDSPBPOS;
10801+ u32 saveDSPBBASE;
10802+ u32 saveDSPBSURF;
10803+ u32 saveDSPBTILEOFF;
10804+ u32 saveVCLK_DIVISOR_VGA0;
10805+ u32 saveVCLK_DIVISOR_VGA1;
10806+ u32 saveVCLK_POST_DIV;
10807+ u32 saveVGACNTRL;
10808+ u32 saveADPA;
10809+ u32 saveLVDS;
10810+ u32 saveLVDSPP_ON;
10811+ u32 saveLVDSPP_OFF;
10812+ u32 saveDVOA;
10813+ u32 saveDVOB;
10814+ u32 saveDVOC;
10815+ u32 savePP_ON;
10816+ u32 savePP_OFF;
10817+ u32 savePP_CONTROL;
10818+ u32 savePP_CYCLE;
10819+ u32 savePFIT_CONTROL;
10820+ u32 save_palette_a[256];
10821+ u32 save_palette_b[256];
10822+ u32 saveFBC_CFB_BASE;
10823+ u32 saveFBC_LL_BASE;
10824+ u32 saveFBC_CONTROL;
10825+ u32 saveFBC_CONTROL2;
10826+ u32 saveSWF0[16];
10827+ u32 saveSWF1[16];
10828+ u32 saveSWF2[3];
10829+ u8 saveMSR;
10830+ u8 saveSR[8];
10831+ u8 saveGR[24];
10832+ u8 saveAR_INDEX;
10833+ u8 saveAR[20];
10834+ u8 saveDACMASK;
10835+ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
10836+ u8 saveCR[36];
10837+};
10838+
10839+enum intel_chip_family {
10840+ CHIP_I8XX = 0x01,
10841+ CHIP_I9XX = 0x02,
10842+ CHIP_I915 = 0x04,
10843+ CHIP_I965 = 0x08,
10844+ CHIP_POULSBO = 0x10,
10845+};
10846+
10847+extern struct drm_ioctl_desc i915_ioctls[];
10848+extern int i915_max_ioctl;
10849+
10850+ /* i915_dma.c */
10851+extern void i915_kernel_lost_context(struct drm_device * dev);
10852+extern int i915_driver_load(struct drm_device *, unsigned long flags);
10853+extern int i915_driver_unload(struct drm_device *dev);
10854+extern void i915_driver_lastclose(struct drm_device * dev);
10855+extern void i915_driver_preclose(struct drm_device *dev,
10856+ struct drm_file *file_priv);
10857+extern int i915_driver_device_is_agp(struct drm_device * dev);
10858+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
10859+ unsigned long arg);
10860+extern void i915_emit_breadcrumb(struct drm_device *dev);
10861+extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
10862+extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
10863+extern int i915_driver_firstopen(struct drm_device *dev);
10864+extern int i915_do_cleanup_pageflip(struct drm_device *dev);
10865+extern int i915_dma_cleanup(struct drm_device *dev);
10866+
10867+/* i915_irq.c */
10868+extern int i915_irq_emit(struct drm_device *dev, void *data,
10869+ struct drm_file *file_priv);
10870+extern int i915_irq_wait(struct drm_device *dev, void *data,
10871+ struct drm_file *file_priv);
10872+
10873+extern void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe);
10874+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
10875+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
10876+extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
10877+extern void i915_driver_irq_preinstall(struct drm_device * dev);
10878+extern void i915_driver_irq_postinstall(struct drm_device * dev);
10879+extern void i915_driver_irq_uninstall(struct drm_device * dev);
10880+extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
10881+ struct drm_file *file_priv);
10882+extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
10883+ struct drm_file *file_priv);
10884+extern int i915_emit_irq(struct drm_device * dev);
10885+extern void i915_user_irq_on(struct drm_i915_private *dev_priv);
10886+extern void i915_user_irq_off(struct drm_i915_private *dev_priv);
10887+extern void i915_enable_interrupt (struct drm_device *dev);
10888+extern int i915_vblank_swap(struct drm_device *dev, void *data,
10889+ struct drm_file *file_priv);
10890+
10891+/* i915_mem.c */
10892+extern int i915_mem_alloc(struct drm_device *dev, void *data,
10893+ struct drm_file *file_priv);
10894+extern int i915_mem_free(struct drm_device *dev, void *data,
10895+ struct drm_file *file_priv);
10896+extern int i915_mem_init_heap(struct drm_device *dev, void *data,
10897+ struct drm_file *file_priv);
10898+extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
10899+ struct drm_file *file_priv);
10900+extern void i915_mem_takedown(struct mem_block **heap);
10901+extern void i915_mem_release(struct drm_device * dev,
10902+ struct drm_file *file_priv,
10903+ struct mem_block *heap);
10904+#ifdef I915_HAVE_FENCE
10905+/* i915_fence.c */
10906+extern void i915_fence_handler(struct drm_device *dev);
10907+extern void i915_invalidate_reported_sequence(struct drm_device *dev);
10908+
10909+#endif
10910+
10911+#ifdef I915_HAVE_BUFFER
10912+/* i915_buffer.c */
10913+extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
10914+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
10915+ uint32_t *type);
10916+extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
10917+extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
10918+ struct drm_mem_type_manager *man);
10919+extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
10920+extern int i915_move(struct drm_buffer_object *bo, int evict,
10921+ int no_wait, struct drm_bo_mem_reg *new_mem);
10922+void i915_flush_ttm(struct drm_ttm *ttm);
10923+#endif
10924+
10925+#ifdef __linux__
10926+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
10927+extern void intel_init_chipset_flush_compat(struct drm_device *dev);
10928+extern void intel_fini_chipset_flush_compat(struct drm_device *dev);
10929+#endif
10930+#endif
10931+
10932+
10933+/* modesetting */
10934+extern void intel_modeset_init(struct drm_device *dev);
10935+extern void intel_modeset_cleanup(struct drm_device *dev);
10936+
10937+
10938+#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
10939+#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
10940+#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
10941+#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
10942+
10943+#define I915_VERBOSE 0
10944+
10945+#define RING_LOCALS unsigned int outring, ringmask, outcount; \
10946+ volatile char *virt;
10947+
10948+#define BEGIN_LP_RING(n) do { \
10949+ if (I915_VERBOSE) \
10950+ DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
10951+ (n), __FUNCTION__); \
10952+ if (dev_priv->ring.space < (n)*4) \
10953+ i915_wait_ring(dev, (n)*4, __FUNCTION__); \
10954+ outcount = 0; \
10955+ outring = dev_priv->ring.tail; \
10956+ ringmask = dev_priv->ring.tail_mask; \
10957+ virt = dev_priv->ring.virtual_start; \
10958+} while (0)
10959+
10960+#define OUT_RING(n) do { \
10961+ if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
10962+ *(volatile unsigned int *)(virt + outring) = (n); \
10963+ outcount++; \
10964+ outring += 4; \
10965+ outring &= ringmask; \
10966+} while (0)
10967+
10968+#define ADVANCE_LP_RING() do { \
10969+ if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
10970+ dev_priv->ring.tail = outring; \
10971+ dev_priv->ring.space -= outcount * 4; \
10972+ I915_WRITE(LP_RING + RING_TAIL, outring); \
10973+} while (0)
10974+
10975+#define MI_NOOP (0x00 << 23)
10976+
10977+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
10978+
10979+/*
10980+ * The Bridge device's PCI config space has information about the
10981+ * fb aperture size and the amount of pre-reserved memory.
10982+ */
10983+#define INTEL_GMCH_CTRL 0x52
10984+#define INTEL_GMCH_ENABLED 0x4
10985+#define INTEL_GMCH_MEM_MASK 0x1
10986+#define INTEL_GMCH_MEM_64M 0x1
10987+#define INTEL_GMCH_MEM_128M 0
10988+
10989+#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
10990+#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
10991+#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
10992+#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
10993+#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
10994+#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
10995+#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
10996+
10997+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
10998+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
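+/* Illustrative sketch (not from the original patch; obtaining the host
+ * bridge's pci_dev is assumed): read the GMCH control word and test the
+ * fields defined above.
+ */
+#if 0
+static int example_gmch_stolen_field(struct pci_dev *bridge)
+{
+	u16 gmch_ctrl;
+
+	pci_read_config_word(bridge, INTEL_GMCH_CTRL, &gmch_ctrl);
+	if (!(gmch_ctrl & INTEL_GMCH_ENABLED))
+		return -1;	/* internal graphics disabled */
+	return gmch_ctrl & INTEL_855_GMCH_GMS_MASK;	/* stolen-memory field */
+}
+#endif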
10999+
11000+/* Extended config space */
11001+#define LBB 0xf4
11002+
11003+/* VGA stuff */
11004+
11005+#define VGA_ST01_MDA 0x3ba
11006+#define VGA_ST01_CGA 0x3da
11007+
11008+#define VGA_MSR_WRITE 0x3c2
11009+#define VGA_MSR_READ 0x3cc
11010+#define VGA_MSR_MEM_EN (1<<1)
11011+#define VGA_MSR_CGA_MODE (1<<0)
11012+
11013+#define VGA_SR_INDEX 0x3c4
11014+#define VGA_SR_DATA 0x3c5
11015+
11016+#define VGA_AR_INDEX 0x3c0
11017+#define VGA_AR_VID_EN (1<<5)
11018+#define VGA_AR_DATA_WRITE 0x3c0
11019+#define VGA_AR_DATA_READ 0x3c1
11020+
11021+#define VGA_GR_INDEX 0x3ce
11022+#define VGA_GR_DATA 0x3cf
11023+/* GR05 */
11024+#define VGA_GR_MEM_READ_MODE_SHIFT 3
11025+#define VGA_GR_MEM_READ_MODE_PLANE 1
11026+/* GR06 */
11027+#define VGA_GR_MEM_MODE_MASK 0xc
11028+#define VGA_GR_MEM_MODE_SHIFT 2
11029+#define VGA_GR_MEM_A0000_AFFFF 0
11030+#define VGA_GR_MEM_A0000_BFFFF 1
11031+#define VGA_GR_MEM_B0000_B7FFF 2
11032+#define VGA_GR_MEM_B0000_BFFFF 3
11033+
11034+#define VGA_DACMASK 0x3c6
11035+#define VGA_DACRX 0x3c7
11036+#define VGA_DACWX 0x3c8
11037+#define VGA_DACDATA 0x3c9
11038+
11039+#define VGA_CR_INDEX_MDA 0x3b4
11040+#define VGA_CR_DATA_MDA 0x3b5
11041+#define VGA_CR_INDEX_CGA 0x3d4
11042+#define VGA_CR_DATA_CGA 0x3d5
11043+
11044+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
11045+#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
11046+#define CMD_REPORT_HEAD (7<<23)
11047+#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
11048+#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
11049+
11050+#define CMD_MI_FLUSH (0x04 << 23)
11051+#define MI_NO_WRITE_FLUSH (1 << 2)
11052+#define MI_READ_FLUSH (1 << 0)
11053+#define MI_EXE_FLUSH (1 << 1)
11054+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
11055+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
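+/* A minimal usage sketch of the ring macros above (illustrative; the
+ * function name is hypothetical and locking is omitted): emit a
+ * two-dword flush packet through the low-priority ring.
+ */
+#if 0
+static void example_emit_flush(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(2);	/* reserve two dwords, waiting if the ring is full */
+	OUT_RING(CMD_MI_FLUSH);	/* the flush command itself */
+	OUT_RING(MI_NOOP);	/* pad to an even dword count */
+	ADVANCE_LP_RING();	/* publish the new tail to the hardware */
+}
+#endif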
11056+
11057+/* Packet to load a register value from the ring/batch command stream:
11058+ */
11059+#define CMD_MI_LOAD_REGISTER_IMM ((0x22 << 23)|0x1)
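+/* Sketch (illustrative), following the same ring idiom as the flush
+ * example above: the packet is <command, register, value>, three dwords
+ * in total as encoded by the 0x1 length field (dword count minus two).
+ */
+#if 0
+BEGIN_LP_RING(4);
+OUT_RING(CMD_MI_LOAD_REGISTER_IMM);
+OUT_RING(0x0 /* target register offset; the value here is hypothetical */);
+OUT_RING(0);		/* immediate value to load */
+OUT_RING(MI_NOOP);	/* pad to an even dword count */
+ADVANCE_LP_RING();
+#endif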
11060+
11061+#define BB1_START_ADDR_MASK (~0x7)
11062+#define BB1_PROTECTED (1<<0)
11063+#define BB1_UNPROTECTED (0<<0)
11064+#define BB2_END_ADDR_MASK (~0x7)
11065+
11066+#define I915REG_HWS_PGA 0x02080
11067+
11068+/* Framebuffer compression */
11069+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
11070+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
11071+#define FBC_CONTROL 0x03208
11072+#define FBC_CTL_EN (1<<31)
11073+#define FBC_CTL_PERIODIC (1<<30)
11074+#define FBC_CTL_INTERVAL_SHIFT (16)
11075+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
11076+#define FBC_CTL_STRIDE_SHIFT (5)
11077+#define FBC_CTL_FENCENO (1<<0)
11078+#define FBC_COMMAND 0x0320c
11079+#define FBC_CMD_COMPRESS (1<<0)
11080+#define FBC_STATUS 0x03210
11081+#define FBC_STAT_COMPRESSING (1<<31)
11082+#define FBC_STAT_COMPRESSED (1<<30)
11083+#define FBC_STAT_MODIFIED (1<<29)
11084+#define FBC_STAT_CURRENT_LINE (1<<0)
11085+#define FBC_CONTROL2 0x03214
11086+#define FBC_CTL_FENCE_DBL (0<<4)
11087+#define FBC_CTL_IDLE_IMM (0<<2)
11088+#define FBC_CTL_IDLE_FULL (1<<2)
11089+#define FBC_CTL_IDLE_LINE (2<<2)
11090+#define FBC_CTL_IDLE_DEBUG (3<<2)
11091+#define FBC_CTL_CPU_FENCE (1<<1)
11092+#define FBC_CTL_PLANEA (0<<0)
11093+#define FBC_CTL_PLANEB (1<<0)
11094+#define FBC_FENCE_OFF 0x0321b
11095+
11096+#define FBC_LL_SIZE (1536)
11097+#define FBC_LL_PAD (32)
11098+
11099+/* Interrupt bits:
11100+ */
11101+#define USER_INT_FLAG (1<<1)
11102+#define VSYNC_PIPEB_FLAG (1<<5)
11103+#define VSYNC_PIPEA_FLAG (1<<7)
11104+#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
11105+
11106+#define I915REG_HWSTAM 0x02098
11107+#define I915REG_INT_IDENTITY_R 0x020a4
11108+#define I915REG_INT_MASK_R 0x020a8
11109+#define I915REG_INT_ENABLE_R 0x020a0
11110+#define I915REG_INSTPM 0x020c0
11111+
11112+#define I915REG_PIPEASTAT 0x70024
11113+#define I915REG_PIPEBSTAT 0x71024
11114+
11115+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
11116+#define I915_VBLANK_CLEAR (1UL<<1)
11117+
11118+#define GPIOA 0x5010
11119+#define GPIOB 0x5014
11120+#define GPIOC 0x5018
11121+#define GPIOD 0x501c
11122+#define GPIOE 0x5020
11123+#define GPIOF 0x5024
11124+#define GPIOG 0x5028
11125+#define GPIOH 0x502c
11126+# define GPIO_CLOCK_DIR_MASK (1 << 0)
11127+# define GPIO_CLOCK_DIR_IN (0 << 1)
11128+# define GPIO_CLOCK_DIR_OUT (1 << 1)
11129+# define GPIO_CLOCK_VAL_MASK (1 << 2)
11130+# define GPIO_CLOCK_VAL_OUT (1 << 3)
11131+# define GPIO_CLOCK_VAL_IN (1 << 4)
11132+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
11133+# define GPIO_DATA_DIR_MASK (1 << 8)
11134+# define GPIO_DATA_DIR_IN (0 << 9)
11135+# define GPIO_DATA_DIR_OUT (1 << 9)
11136+# define GPIO_DATA_VAL_MASK (1 << 10)
11137+# define GPIO_DATA_VAL_OUT (1 << 11)
11138+# define GPIO_DATA_VAL_IN (1 << 12)
11139+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
11140+
11141+/* p317, 319
11142+ */
11143+#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
11144+#define VCLK2_VCO_N 0x600a
11145+#define VCLK2_VCO_DIV_SEL 0x6012
11146+
11147+#define VCLK_DIVISOR_VGA0 0x6000
11148+#define VCLK_DIVISOR_VGA1 0x6004
11149+#define VCLK_POST_DIV 0x6010
11150+/** Selects a post divisor of 4 instead of 2. */
11151+# define VGA1_PD_P2_DIV_4 (1 << 15)
11152+/** Overrides the p2 post divisor field */
11153+# define VGA1_PD_P1_DIV_2 (1 << 13)
11154+# define VGA1_PD_P1_SHIFT 8
11155+/** P1 value is 2 greater than this field */
11156+# define VGA1_PD_P1_MASK (0x1f << 8)
11157+/** Selects a post divisor of 4 instead of 2. */
11158+# define VGA0_PD_P2_DIV_4 (1 << 7)
11159+/** Overrides the p2 post divisor field */
11160+# define VGA0_PD_P1_DIV_2 (1 << 5)
11161+# define VGA0_PD_P1_SHIFT 0
11162+/** P1 value is 2 greater than this field */
11163+# define VGA0_PD_P1_MASK (0x1f << 0)
11164+
11165+#define POST_DIV_SELECT 0x70
11166+#define POST_DIV_1 0x00
11167+#define POST_DIV_2 0x10
11168+#define POST_DIV_4 0x20
11169+#define POST_DIV_8 0x30
11170+#define POST_DIV_16 0x40
11171+#define POST_DIV_32 0x50
11172+#define VCO_LOOP_DIV_BY_4M 0x00
11173+#define VCO_LOOP_DIV_BY_16M 0x04
11174+
11175+#define SRX_INDEX 0x3c4
11176+#define SRX_DATA 0x3c5
11177+#define SR01 1
11178+#define SR01_SCREEN_OFF (1<<5)
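+/* Illustrative sketch (not from the original patch) of the index/data
+ * access pattern these VGA ports use: write the register index to the
+ * index port, then access the data port. Blanking the screen via SR01
+ * looks like this; save/restore code follows the same pattern for the
+ * SR/GR/AR/CR groups above.
+ */
+#if 0
+u8 sr01;
+
+outb(SR01, SRX_INDEX);			/* select sequencer register 1 */
+sr01 = inb(SRX_DATA);			/* read its current value */
+outb(sr01 | SR01_SCREEN_OFF, SRX_DATA);	/* set the screen-off bit */
+#endif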
11179+
11180+#define PPCR 0x61204
11181+#define PPCR_ON (1<<0)
11182+
11183+#define DVOA 0x61120
11184+#define DVOA_ON (1<<31)
11185+#define DVOB 0x61140
11186+#define DVOB_ON (1<<31)
11187+#define DVOC 0x61160
11188+#define DVOC_ON (1<<31)
11189+#define LVDS 0x61180
11190+#define LVDS_ON (1<<31)
11191+
11192+#define ADPA 0x61100
11193+#define ADPA_DPMS_MASK (~(3<<10))
11194+#define ADPA_DPMS_ON (0<<10)
11195+#define ADPA_DPMS_SUSPEND (1<<10)
11196+#define ADPA_DPMS_STANDBY (2<<10)
11197+#define ADPA_DPMS_OFF (3<<10)
11198+
11199+#define NOPID 0x2094
11200+#define LP_RING 0x2030
11201+#define HP_RING 0x2040
11202+/* The binner has its own ring buffer:
11203+ */
11204+#define HWB_RING 0x2400
11205+
11206+#define RING_TAIL 0x00
11207+#define TAIL_ADDR 0x001FFFF8
11208+#define RING_HEAD 0x04
11209+#define HEAD_WRAP_COUNT 0xFFE00000
11210+#define HEAD_WRAP_ONE 0x00200000
11211+#define HEAD_ADDR 0x001FFFFC
11212+#define RING_START 0x08
11213+#define START_ADDR 0xFFFFF000
11214+#define RING_LEN 0x0C
11215+#define RING_NR_PAGES 0x001FF000
11216+#define RING_REPORT_MASK 0x00000006
11217+#define RING_REPORT_64K 0x00000002
11218+#define RING_REPORT_128K 0x00000004
11219+#define RING_NO_REPORT 0x00000000
11220+#define RING_VALID_MASK 0x00000001
11221+#define RING_VALID 0x00000001
11222+#define RING_INVALID 0x00000000
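+/* Illustrative sketch (not from the original patch): free space in the
+ * ring derives from the hardware head pointer and the software tail,
+ * modulo the ring size; the masks above extract the address fields.
+ */
+#if 0
+int head, space;
+
+head  = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+space = head - (dev_priv->ring.tail + 8);	/* 8-byte guard band */
+if (space < 0)
+	space += dev_priv->ring.Size;
+#endif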
11223+
11224+/* Instruction parser error reg:
11225+ */
11226+#define IPEIR 0x2088
11227+
11228+/* Scratch pad debug 0 reg:
11229+ */
11230+#define SCPD0 0x209c
11231+
11232+/* Error status reg:
11233+ */
11234+#define ESR 0x20b8
11235+
11236+/* Secondary DMA fetch address debug reg:
11237+ */
11238+#define DMA_FADD_S 0x20d4
11239+
11240+/* Cache mode 0 reg.
11241+ * - Manipulating render cache behaviour is central
11242+ * to the concept of zone rendering; tuning this reg can help avoid
11243+ * unnecessary render cache reads and even writes (for z/stencil)
11244+ * at the beginning and end of a scene.
11245+ *
11246+ * - To change a bit, write to this reg with the mask bit set and the
11247+ * bit of interest either set or cleared. E.g. (BIT<<16) | BIT to set.
11248+ */
11249+#define Cache_Mode_0 0x2120
11250+#define CM0_MASK_SHIFT 16
11251+#define CM0_IZ_OPT_DISABLE (1<<6)
11252+#define CM0_ZR_OPT_DISABLE (1<<5)
11253+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
11254+#define CM0_COLOR_EVICT_DISABLE (1<<3)
11255+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
11256+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
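+/* Sketch of the masked-write idiom described above (illustrative; a
+ * dev_priv in scope is assumed): the high half selects which bits
+ * change, the low half gives their new values.
+ */
+#if 0
+/* Disable the IZ optimization: */
+I915_WRITE(Cache_Mode_0,
+	   (CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) | CM0_IZ_OPT_DISABLE);
+/* Re-enable it (mask bit set, value bit clear): */
+I915_WRITE(Cache_Mode_0, CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT);
+#endif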
11257+
11258+
11259+/* Graphics flush control. A CPU write flushes the GWB of all writes.
11260+ * The data is discarded.
11261+ */
11262+#define GFX_FLSH_CNTL 0x2170
11263+
11264+/* Binner control. Defines the location of the bin pointer list:
11265+ */
11266+#define BINCTL 0x2420
11267+#define BC_MASK (1 << 9)
11268+
11269+/* Binned scene info.
11270+ */
11271+#define BINSCENE 0x2428
11272+#define BS_OP_LOAD (1 << 8)
11273+#define BS_MASK (1 << 22)
11274+
11275+/* Bin command parser debug reg:
11276+ */
11277+#define BCPD 0x2480
11278+
11279+/* Bin memory control debug reg:
11280+ */
11281+#define BMCD 0x2484
11282+
11283+/* Bin data cache debug reg:
11284+ */
11285+#define BDCD 0x2488
11286+
11287+/* Binner pointer cache debug reg:
11288+ */
11289+#define BPCD 0x248c
11290+
11291+/* Binner scratch pad debug reg:
11292+ */
11293+#define BINSKPD 0x24f0
11294+
11295+/* HWB scratch pad debug reg:
11296+ */
11297+#define HWBSKPD 0x24f4
11298+
11299+/* Binner memory pool reg:
11300+ */
11301+#define BMP_BUFFER 0x2430
11302+#define BMP_PAGE_SIZE_4K (0 << 10)
11303+#define BMP_BUFFER_SIZE_SHIFT 1
11304+#define BMP_ENABLE (1 << 0)
11305+
11306+/* Get/put memory from the binner memory pool:
11307+ */
11308+#define BMP_GET 0x2438
11309+#define BMP_PUT 0x2440
11310+#define BMP_OFFSET_SHIFT 5
11311+
11312+/* 3D state packets:
11313+ */
11314+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
11315+
11316+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
11317+#define SC_UPDATE_SCISSOR (0x1<<1)
11318+#define SC_ENABLE_MASK (0x1<<0)
11319+#define SC_ENABLE (0x1<<0)
11320+
11321+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
11322+
11323+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
11324+#define SCI_YMIN_MASK (0xffff<<16)
11325+#define SCI_XMIN_MASK (0xffff<<0)
11326+#define SCI_YMAX_MASK (0xffff<<16)
11327+#define SCI_XMAX_MASK (0xffff<<0)
11328+
11329+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
11330+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
11331+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
11332+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
11333+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
11334+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
11335+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
11336+
11337+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
11338+
11339+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
11340+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
11341+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
11342+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
11343+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
11344+#define BLT_DEPTH_8 (0<<24)
11345+#define BLT_DEPTH_16_565 (1<<24)
11346+#define BLT_DEPTH_16_1555 (2<<24)
11347+#define BLT_DEPTH_32 (3<<24)
11348+#define BLT_ROP_GXCOPY (0xcc<<16)
11349+
11350+#define MI_BATCH_BUFFER ((0x30<<23)|1)
11351+#define MI_BATCH_BUFFER_START (0x31<<23)
11352+#define MI_BATCH_BUFFER_END (0xA<<23)
11353+#define MI_BATCH_NON_SECURE (1)
11354+
11355+#define MI_BATCH_NON_SECURE_I965 (1<<8)
11356+
11357+#define MI_WAIT_FOR_EVENT ((0x3<<23))
11358+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
11359+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
11360+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
11361+
11362+#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
11363+
11364+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
11365+#define ASYNC_FLIP (1<<22)
11366+#define DISPLAY_PLANE_A (0<<20)
11367+#define DISPLAY_PLANE_B (1<<20)
11368+
11369+/* Display regs */
11370+#define DSPACNTR 0x70180
11371+#define DSPBCNTR 0x71180
11372+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
11373+
11374+/* Define the region of interest for the binner:
11375+ */
11376+#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
11377+
11378+#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
11379+
11380+#define BREADCRUMB_BITS 31
11381+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
11382+
11383+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
11384+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
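+/* Illustrative sketch (not from the original patch; "seqno" is a
+ * hypothetical target value): the breadcrumb is dword 5 of the hardware
+ * status page, so waiting for an emitted sequence is a masked compare
+ * against it.
+ */
+#if 0
+while ((READ_BREADCRUMB(dev_priv) & BREADCRUMB_MASK) < seqno)
+	cpu_relax();	/* real code sleeps on dev_priv->irq_queue */
+#endif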
11385+
11386+#define PRIMARY_RINGBUFFER_SIZE (128*1024)
11387+
11388+#define BLC_PWM_CTL2 0x61250
11389+
11390+#endif
11391Index: linux-2.6.27/drivers/gpu/drm/psb/i915_reg.h
11392===================================================================
11393--- /dev/null 1970-01-01 00:00:00.000000000 +0000
11394+++ linux-2.6.27/drivers/gpu/drm/psb/i915_reg.h 2009-02-05 18:29:58.000000000 +0000
11395@@ -0,0 +1,98 @@
11396+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
11397+ * All Rights Reserved.
11398+ *
11399+ * Permission is hereby granted, free of charge, to any person obtaining a
11400+ * copy of this software and associated documentation files (the
11401+ * "Software"), to deal in the Software without restriction, including
11402+ * without limitation the rights to use, copy, modify, merge, publish,
11403+ * distribute, sub license, and/or sell copies of the Software, and to
11404+ * permit persons to whom the Software is furnished to do so, subject to
11405+ * the following conditions:
11406+ *
11407+ * The above copyright notice and this permission notice (including the
11408+ * next paragraph) shall be included in all copies or substantial portions
11409+ * of the Software.
11410+ *
11411+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11412+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11413+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
11414+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
11415+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
11416+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
11417+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
11418+ */
11419+
11420+#include "../i915/i915_reg.h"
11421+
11422+#define I915_GCFGC 0xf0
11423+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
11424+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
11425+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
11426+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
11427+
11428+#define I855_HPLLCC 0xc0
11429+#define I855_CLOCK_CONTROL_MASK (3 << 0)
11430+#define I855_CLOCK_133_200 (0 << 0)
11431+#define I855_CLOCK_100_200 (1 << 0)
11432+#define I855_CLOCK_100_133 (2 << 0)
11433+#define I855_CLOCK_166_250 (3 << 0)
11434+
11435+#define LVDSPP_ON 0x61208
11436+#define LVDSPP_OFF 0x6120c
11437+#define PP_CYCLE 0x61210
11438+
11439+
11440+
11441+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
11442+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
11443+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
11444+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
11445+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
11446+
11447+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/
11448+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
11449+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
11450+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
11451+
11452+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
11453+ (dev)->pci_device == 0x2982 || \
11454+ (dev)->pci_device == 0x2992 || \
11455+ (dev)->pci_device == 0x29A2 || \
11456+ (dev)->pci_device == 0x2A02 || \
11457+ (dev)->pci_device == 0x2A12)
11458+
11459+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
11460+
11461+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
11462+ (dev)->pci_device == 0x29B2 || \
11463+ (dev)->pci_device == 0x29D2)
11464+
11465+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
11466+ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev))
11467+
11468+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
11469+ IS_I945GM(dev) || IS_I965GM(dev) || IS_POULSBO(dev))
11470+
11471+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
11472+ ((dev)->pci_device == 0x8109))
11473+
11474+#define FPA0 0x06040
11475+#define FPA1 0x06044
11476+#define FPB0 0x06048
11477+#define FPB1 0x0604c
11478+#define FP_N_DIV_MASK 0x003f0000
11479+#define FP_N_DIV_SHIFT 16
11480+#define FP_M1_DIV_MASK 0x00003f00
11481+#define FP_M1_DIV_SHIFT 8
11482+#define FP_M2_DIV_MASK 0x0000003f
11483+#define FP_M2_DIV_SHIFT 0
11484+
11485+#define DSPABASE 0x70184
11486+#define DSPBBASE 0x71184
11487+#define DSPAKEYVAL 0x70194
11488+#define DSPAKEYMASK 0x70198
11489+
11490+#define VSYNCSHIFT_A 0x60028
11491+#define VSYNCSHIFT_B 0x61028
11492+#define DPLL_B_MD 0x06020
11493+
11494Index: linux-2.6.27/drivers/gpu/drm/psb/intel_crt.c
11495===================================================================
11496--- /dev/null 1970-01-01 00:00:00.000000000 +0000
11497+++ linux-2.6.27/drivers/gpu/drm/psb/intel_crt.c 2009-02-05 13:29:33.000000000 +0000
11498@@ -0,0 +1,242 @@
11499+/*
11500+ * Copyright © 2006-2007 Intel Corporation
11501+ *
11502+ * Permission is hereby granted, free of charge, to any person obtaining a
11503+ * copy of this software and associated documentation files (the "Software"),
11504+ * to deal in the Software without restriction, including without limitation
11505+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11506+ * and/or sell copies of the Software, and to permit persons to whom the
11507+ * Software is furnished to do so, subject to the following conditions:
11508+ *
11509+ * The above copyright notice and this permission notice (including the next
11510+ * paragraph) shall be included in all copies or substantial portions of the
11511+ * Software.
11512+ *
11513+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11514+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11515+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11516+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11517+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
11518+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
11519+ * DEALINGS IN THE SOFTWARE.
11520+ *
11521+ * Authors:
11522+ * Eric Anholt <eric@anholt.net>
11523+ */
11524+
11525+#include <linux/i2c.h>
11526+
11527+static void intel_crt_dpms(struct drm_output *output, int mode)
11528+{
11529+ struct drm_device *dev = output->dev;
11530+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11531+ u32 temp;
11532+
11533+ temp = I915_READ(ADPA);
11534+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
11535+ temp &= ~ADPA_DAC_ENABLE;
11536+
11537+ switch(mode) {
11538+ case DPMSModeOn:
11539+ temp |= ADPA_DAC_ENABLE;
11540+ break;
11541+ case DPMSModeStandby:
11542+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
11543+ break;
11544+ case DPMSModeSuspend:
11545+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
11546+ break;
11547+ case DPMSModeOff:
11548+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
11549+ break;
11550+ }
11551+
11552+ I915_WRITE(ADPA, temp);
11553+}
11554+
11555+static void intel_crt_save(struct drm_output *output)
11556+{
11557+
11558+}
11559+
11560+static void intel_crt_restore(struct drm_output *output)
11561+{
11562+
11563+}
11564+
11565+static int intel_crt_mode_valid(struct drm_output *output,
11566+ struct drm_display_mode *mode)
11567+{
11568+ if (mode->flags & V_DBLSCAN)
11569+ return MODE_NO_DBLESCAN;
11570+
11571+ if (mode->clock > 400000 || mode->clock < 25000)
11572+ return MODE_CLOCK_RANGE;
11573+
11574+ return MODE_OK;
11575+}
11576+
11577+static bool intel_crt_mode_fixup(struct drm_output *output,
11578+ struct drm_display_mode *mode,
11579+ struct drm_display_mode *adjusted_mode)
11580+{
11581+ return true;
11582+}
11583+
11584+static void intel_crt_mode_set(struct drm_output *output,
11585+ struct drm_display_mode *mode,
11586+ struct drm_display_mode *adjusted_mode)
11587+{
11588+ struct drm_device *dev = output->dev;
11589+ struct drm_crtc *crtc = output->crtc;
11590+ struct intel_crtc *intel_crtc = crtc->driver_private;
11591+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11592+ int dpll_md_reg;
11593+ u32 adpa, dpll_md;
11594+
11595+ if (intel_crtc->pipe == 0)
11596+ dpll_md_reg = DPLL_A_MD;
11597+ else
11598+ dpll_md_reg = DPLL_B_MD;
11599+
11600+ /*
11601+ * Disable separate mode multiplier used when cloning SDVO to CRT
11602+ * XXX this needs to be adjusted when we really are cloning
11603+ */
11604+ if (IS_I965G(dev)) {
11605+ dpll_md = I915_READ(dpll_md_reg);
11606+ I915_WRITE(dpll_md_reg,
11607+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
11608+ }
11609+
11610+ adpa = 0;
11611+ if (adjusted_mode->flags & V_PHSYNC)
11612+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
11613+ if (adjusted_mode->flags & V_PVSYNC)
11614+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
11615+
11616+ if (intel_crtc->pipe == 0)
11617+ adpa |= ADPA_PIPE_A_SELECT;
11618+ else
11619+ adpa |= ADPA_PIPE_B_SELECT;
11620+
11621+ I915_WRITE(ADPA, adpa);
11622+}
11623+
11624+/**
11625+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
11626+ *
11627+ * Only for I945G/GM.
11628+ *
11629+ * \return TRUE if CRT is connected.
11630+ * \return FALSE if CRT is disconnected.
11631+ */
11632+static bool intel_crt_detect_hotplug(struct drm_output *output)
11633+{
11634+ struct drm_device *dev = output->dev;
11635+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11636+ u32 temp;
11637+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
11638+
11639+ temp = I915_READ(PORT_HOTPLUG_EN);
11640+
11641+ I915_WRITE(PORT_HOTPLUG_EN,
11642+ temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
11643+
11644+ do {
11645+ if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
11646+ break;
11647+ msleep(1);
11648+ } while (time_after(timeout, jiffies));
11649+
11650+ if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
11651+ CRT_HOTPLUG_MONITOR_COLOR)
11652+ return true;
11653+
11654+ return false;
11655+}
11656+
11657+static bool intel_crt_detect_ddc(struct drm_output *output)
11658+{
11659+ struct intel_output *intel_output = output->driver_private;
11660+
11661+ /* CRT should always be at 0, but check anyway */
11662+ if (intel_output->type != INTEL_OUTPUT_ANALOG)
11663+ return false;
11664+
11665+ return intel_ddc_probe(output);
11666+}
11667+
11668+static enum drm_output_status intel_crt_detect(struct drm_output *output)
11669+{
11670+ struct drm_device *dev = output->dev;
11671+
11672+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) {
11673+ if (intel_crt_detect_hotplug(output))
11674+ return output_status_connected;
11675+ else
11676+ return output_status_disconnected;
11677+ }
11678+
11679+ if (intel_crt_detect_ddc(output))
11680+ return output_status_connected;
11681+
11682+ /* TODO use load detect */
11683+ return output_status_unknown;
11684+}
11685+
11686+static void intel_crt_destroy(struct drm_output *output)
11687+{
11688+ struct intel_output *intel_output = output->driver_private;
11689+
11690+ intel_i2c_destroy(intel_output->ddc_bus);
11691+ kfree(output->driver_private);
11692+}
11693+
11694+static int intel_crt_get_modes(struct drm_output *output)
11695+{
11696+ return intel_ddc_get_modes(output);
11697+}
11698+
11699+/*
11700+ * Routines for controlling stuff on the analog port
11701+ */
11702+static const struct drm_output_funcs intel_crt_output_funcs = {
11703+ .dpms = intel_crt_dpms,
11704+ .save = intel_crt_save,
11705+ .restore = intel_crt_restore,
11706+ .mode_valid = intel_crt_mode_valid,
11707+ .mode_fixup = intel_crt_mode_fixup,
11708+ .prepare = intel_output_prepare,
11709+ .mode_set = intel_crt_mode_set,
11710+ .commit = intel_output_commit,
11711+ .detect = intel_crt_detect,
11712+ .get_modes = intel_crt_get_modes,
11713+ .cleanup = intel_crt_destroy,
11714+};
11715+
11716+void intel_crt_init(struct drm_device *dev)
11717+{
11718+ struct drm_output *output;
11719+ struct intel_output *intel_output;
11720+
11721+ output = drm_output_create(dev, &intel_crt_output_funcs, "VGA");
11722+
11723+ intel_output = kmalloc(sizeof(struct intel_output), GFP_KERNEL);
11724+ if (!intel_output) {
11725+ drm_output_destroy(output);
11726+ return;
11727+ }
11728+ /* Set up the DDC bus. */
11729+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
11730+	if (!intel_output->ddc_bus) {
11731+		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
11732+			   "failed.\n");
+		kfree(intel_output);
+		drm_output_destroy(output);
11733+		return;
11734+ }
11735+
11736+ intel_output->type = INTEL_OUTPUT_ANALOG;
11737+ output->driver_private = intel_output;
11738+ output->interlace_allowed = 0;
11739+ output->doublescan_allowed = 0;
11740+}
11741Index: linux-2.6.27/drivers/gpu/drm/psb/intel_display.c
11742===================================================================
11743--- /dev/null 1970-01-01 00:00:00.000000000 +0000
11744+++ linux-2.6.27/drivers/gpu/drm/psb/intel_display.c 2009-02-05 13:29:33.000000000 +0000
11745@@ -0,0 +1,1472 @@
11746+/*
11747+ * Copyright © 2006-2007 Intel Corporation
11748+ *
11749+ * Permission is hereby granted, free of charge, to any person obtaining a
11750+ * copy of this software and associated documentation files (the "Software"),
11751+ * to deal in the Software without restriction, including without limitation
11752+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11753+ * and/or sell copies of the Software, and to permit persons to whom the
11754+ * Software is furnished to do so, subject to the following conditions:
11755+ *
11756+ * The above copyright notice and this permission notice (including the next
11757+ * paragraph) shall be included in all copies or substantial portions of the
11758+ * Software.
11759+ *
11760+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11761+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11762+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11763+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11764+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
11765+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
11766+ * DEALINGS IN THE SOFTWARE.
11767+ *
11768+ * Authors:
11769+ * Eric Anholt <eric@anholt.net>
11770+ */
11771+
11772+#include <linux/i2c.h>
11773+
11774+bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
11775+
11776+typedef struct {
11777+ /* given values */
11778+ int n;
11779+ int m1, m2;
11780+ int p1, p2;
11781+ /* derived values */
11782+ int dot;
11783+ int vco;
11784+ int m;
11785+ int p;
11786+} intel_clock_t;
11787+
11788+typedef struct {
11789+ int min, max;
11790+} intel_range_t;
11791+
11792+typedef struct {
11793+ int dot_limit;
11794+ int p2_slow, p2_fast;
11795+} intel_p2_t;
11796+
11797+#define INTEL_P2_NUM 2
11798+
11799+typedef struct {
11800+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
11801+ intel_p2_t p2;
11802+} intel_limit_t;
11803+
11804+#define I8XX_DOT_MIN 25000
11805+#define I8XX_DOT_MAX 350000
11806+#define I8XX_VCO_MIN 930000
11807+#define I8XX_VCO_MAX 1400000
11808+#define I8XX_N_MIN 3
11809+#define I8XX_N_MAX 16
11810+#define I8XX_M_MIN 96
11811+#define I8XX_M_MAX 140
11812+#define I8XX_M1_MIN 18
11813+#define I8XX_M1_MAX 26
11814+#define I8XX_M2_MIN 6
11815+#define I8XX_M2_MAX 16
11816+#define I8XX_P_MIN 4
11817+#define I8XX_P_MAX 128
11818+#define I8XX_P1_MIN 2
11819+#define I8XX_P1_MAX 33
11820+#define I8XX_P1_LVDS_MIN 1
11821+#define I8XX_P1_LVDS_MAX 6
11822+#define I8XX_P2_SLOW 4
11823+#define I8XX_P2_FAST 2
11824+#define I8XX_P2_LVDS_SLOW 14
11825+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
11826+#define I8XX_P2_SLOW_LIMIT 165000
11827+
11828+#define I9XX_DOT_MIN 20000
11829+#define I9XX_DOT_MAX 400000
11830+#define I9XX_VCO_MIN 1400000
11831+#define I9XX_VCO_MAX 2800000
11832+#define I9XX_N_MIN 3
11833+#define I9XX_N_MAX 8
11834+#define I9XX_M_MIN 70
11835+#define I9XX_M_MAX 120
11836+#define I9XX_M1_MIN 10
11837+#define I9XX_M1_MAX 20
11838+#define I9XX_M2_MIN 5
11839+#define I9XX_M2_MAX 9
11840+#define I9XX_P_SDVO_DAC_MIN 5
11841+#define I9XX_P_SDVO_DAC_MAX 80
11842+#define I9XX_P_LVDS_MIN 7
11843+#define I9XX_P_LVDS_MAX 98
11844+#define I9XX_P1_MIN 1
11845+#define I9XX_P1_MAX 8
11846+#define I9XX_P2_SDVO_DAC_SLOW 10
11847+#define I9XX_P2_SDVO_DAC_FAST 5
11848+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
11849+#define I9XX_P2_LVDS_SLOW 14
11850+#define I9XX_P2_LVDS_FAST 7
11851+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
11852+
11853+#define INTEL_LIMIT_I8XX_DVO_DAC 0
11854+#define INTEL_LIMIT_I8XX_LVDS 1
11855+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
11856+#define INTEL_LIMIT_I9XX_LVDS 3
11857+
11858+static const intel_limit_t intel_limits[] = {
11859+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
11860+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
11861+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
11862+ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
11863+ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
11864+ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
11865+ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
11866+ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
11867+ .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
11868+ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
11869+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
11870+ },
11871+ { /* INTEL_LIMIT_I8XX_LVDS */
11872+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
11873+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
11874+ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
11875+ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
11876+ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
11877+ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
11878+ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
11879+ .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
11880+ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
11881+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
11882+ },
11883+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
11884+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
11885+ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
11886+ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
11887+ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
11888+ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
11889+ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
11890+ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
11891+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
11892+ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
11893+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
11894+ },
11895+ { /* INTEL_LIMIT_I9XX_LVDS */
11896+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
11897+ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
11898+ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
11899+ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
11900+ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
11901+ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
11902+ .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX },
11903+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
11904+	/* The single-channel range is 25-112 MHz, and dual-channel
11905+	 * is 80-224 MHz. Prefer single channel as much as possible.
11906+ */
11907+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
11908+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
11909+ },
11910+};
11911+
11912+static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
11913+{
11914+ struct drm_device *dev = crtc->dev;
11915+ const intel_limit_t *limit;
11916+
11917+ if (IS_I9XX(dev)) {
11918+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
11919+ limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
11920+ else
11921+ limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
11922+ } else {
11923+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
11924+ limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
11925+ else
11926+ limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
11927+ }
11928+ return limit;
11929+}
11930+
11931+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
11932+
11933+static void i8xx_clock(int refclk, intel_clock_t *clock)
11934+{
11935+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
11936+ clock->p = clock->p1 * clock->p2;
11937+ clock->vco = refclk * clock->m / (clock->n + 2);
11938+ clock->dot = clock->vco / clock->p;
11939+}
11940+
11941+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
11942+
11943+static void i9xx_clock(int refclk, intel_clock_t *clock)
11944+{
11945+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
11946+ clock->p = clock->p1 * clock->p2;
11947+ clock->vco = refclk * clock->m / (clock->n + 2);
11948+ clock->dot = clock->vco / clock->p;
11949+}
11950+
11951+static void intel_clock(struct drm_device *dev, int refclk,
11952+ intel_clock_t *clock)
11953+{
11954+ if (IS_I9XX(dev))
11955+ return i9xx_clock (refclk, clock);
11956+ else
11957+ return i8xx_clock (refclk, clock);
11958+}
11959+
11960+/**
11961+ * Returns whether any output on the specified pipe is of the specified type
11962+ */
11963+bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
11964+{
11965+ struct drm_device *dev = crtc->dev;
11966+ struct drm_mode_config *mode_config = &dev->mode_config;
11967+ struct drm_output *l_entry;
11968+
11969+ list_for_each_entry(l_entry, &mode_config->output_list, head) {
11970+ if (l_entry->crtc == crtc) {
11971+ struct intel_output *intel_output = l_entry->driver_private;
11972+ if (intel_output->type == type)
11973+ return true;
11974+ }
11975+ }
11976+ return false;
11977+}
11978+
11979+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
11980+/**
11981+ * Returns whether the given set of divisors are valid for a given refclk with
11982+ * the given outputs.
11983+ */
11984+
11985+static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
11986+{
11987+ const intel_limit_t *limit = intel_limit (crtc);
11988+
11989+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
11990+ INTELPllInvalid ("p1 out of range\n");
11991+ if (clock->p < limit->p.min || limit->p.max < clock->p)
11992+ INTELPllInvalid ("p out of range\n");
11993+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
11994+ INTELPllInvalid ("m2 out of range\n");
11995+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
11996+ INTELPllInvalid ("m1 out of range\n");
11997+ if (clock->m1 <= clock->m2)
11998+ INTELPllInvalid ("m1 <= m2\n");
11999+ if (clock->m < limit->m.min || limit->m.max < clock->m)
12000+ INTELPllInvalid ("m out of range\n");
12001+ if (clock->n < limit->n.min || limit->n.max < clock->n)
12002+ INTELPllInvalid ("n out of range\n");
12003+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
12004+ INTELPllInvalid ("vco out of range\n");
12005+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
12006+ * output, etc., rather than just a single range.
12007+ */
12008+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
12009+ INTELPllInvalid ("dot out of range\n");
12010+
12011+ return true;
12012+}
12013+
12014+/**
12015+ * Returns a set of divisors for the desired target clock with the given
12016+ * refclk, or FALSE. The returned values represent the clock equation:
12017+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
12018+ */
12019+static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
12020+ int refclk, intel_clock_t *best_clock)
12021+{
12022+ struct drm_device *dev = crtc->dev;
12023+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12024+ intel_clock_t clock;
12025+ const intel_limit_t *limit = intel_limit(crtc);
12026+ int err = target;
12027+
12028+ if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
12029+ (I915_READ(LVDS) & LVDS_PORT_EN) != 0) {
12030+ /*
12031+ * For LVDS, if the panel is on, just rely on its current
12032+ * settings for dual-channel. We haven't figured out how to
12033+ * reliably set up different single/dual channel state, if we
12034+ * even can.
12035+ */
12036+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
12037+ LVDS_CLKB_POWER_UP)
12038+ clock.p2 = limit->p2.p2_fast;
12039+ else
12040+ clock.p2 = limit->p2.p2_slow;
12041+ } else {
12042+ if (target < limit->p2.dot_limit)
12043+ clock.p2 = limit->p2.p2_slow;
12044+ else
12045+ clock.p2 = limit->p2.p2_fast;
12046+ }
12047+
12048+ memset (best_clock, 0, sizeof (*best_clock));
12049+
12050+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
12051+ for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
12052+ clock.m2 <= limit->m2.max; clock.m2++) {
12053+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
12054+ clock.n++) {
12055+ for (clock.p1 = limit->p1.min;
12056+ clock.p1 <= limit->p1.max; clock.p1++) {
12057+ int this_err;
12058+
12059+ intel_clock(dev, refclk, &clock);
12060+
12061+ if (!intel_PLL_is_valid(crtc, &clock))
12062+ continue;
12063+
12064+ this_err = abs(clock.dot - target);
12065+ if (this_err < err) {
12066+ *best_clock = clock;
12067+ err = this_err;
12068+ }
12069+ }
12070+ }
12071+ }
12072+ }
12073+
12074+ return (err != target);
12075+}
12076+
12077+#if 0
12078+void
12079+intel_set_vblank(struct drm_device *dev)
12080+{
12081+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12082+ struct drm_crtc *crtc;
12083+ struct intel_crtc *intel_crtc;
12084+ int vbl_pipe = 0;
12085+
12086+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
12087+ intel_crtc = crtc->driver_private;
12088+
12089+ if (crtc->enabled)
12090+ vbl_pipe |= (1<<intel_crtc->pipe);
12091+ }
12092+
12093+ dev_priv->vblank_pipe = vbl_pipe;
12094+ i915_enable_interrupt(dev);
12095+}
12096+#endif
12097+
12098+void
12099+intel_wait_for_vblank(struct drm_device *dev)
12100+{
12101+	/* Wait for 20ms, i.e. one cycle at 50 Hz. */
12102+ udelay(20000);
12103+}
12104+
12105+void
12106+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
12107+{
12108+ struct drm_device *dev = crtc->dev;
12109+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12110+ struct intel_crtc *intel_crtc = crtc->driver_private;
12111+ int pipe = intel_crtc->pipe;
12112+ unsigned long Start, Offset;
12113+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
12114+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
12115+
12116+ Start = crtc->fb->offset;
12117+ Offset = y * crtc->fb->pitch + x;
12118+
12119+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
12120+ if (IS_I965G(dev)) {
12121+ I915_WRITE(dspbase, Offset);
12122+ I915_READ(dspbase);
12123+ I915_WRITE(dspsurf, Start);
12124+ I915_READ(dspsurf);
12125+ } else {
12126+ I915_WRITE(dspbase, Start + Offset);
12127+ I915_READ(dspbase);
12128+ }
12129+
12130+
12131+ if (!dev_priv->sarea_priv)
12132+ return;
12133+
12134+ switch (pipe) {
12135+ case 0:
12136+ dev_priv->sarea_priv->planeA_x = x;
12137+ dev_priv->sarea_priv->planeA_y = y;
12138+ break;
12139+ case 1:
12140+ dev_priv->sarea_priv->planeB_x = x;
12141+ dev_priv->sarea_priv->planeB_y = y;
12142+ break;
12143+ default:
12144+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
12145+ break;
12146+ }
12147+}
12148+
12149+/**
12150+ * Sets the power management mode of the pipe and plane.
12151+ *
12152+ * This code should probably grow support for turning the cursor off and back
12153+ * on appropriately at the same time as we're turning the pipe off/on.
12154+ */
12155+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
12156+{
12157+ struct drm_device *dev = crtc->dev;
12158+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12159+ struct intel_crtc *intel_crtc = crtc->driver_private;
12160+ int pipe = intel_crtc->pipe;
12161+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
12162+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
12163+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
12164+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
12165+ u32 temp, temp2;
12166+ bool enabled;
12167+
12168+ /* XXX: When our outputs are all unaware of DPMS modes other than off
12169+ * and on, we should map those modes to DPMSModeOff in the CRTC.
12170+ */
12171+ switch (mode) {
12172+ case DPMSModeOn:
12173+ case DPMSModeStandby:
12174+ case DPMSModeSuspend:
12175+ /* Enable the DPLL */
12176+ temp = I915_READ(dpll_reg);
12177+ if ((temp & DPLL_VCO_ENABLE) == 0) {
12178+ I915_WRITE(dpll_reg, temp);
12179+ I915_READ(dpll_reg);
12180+ /* Wait for the clocks to stabilize. */
12181+ udelay(150);
12182+ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
12183+ I915_READ(dpll_reg);
12184+ /* Wait for the clocks to stabilize. */
12185+ udelay(150);
12186+ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
12187+ I915_READ(dpll_reg);
12188+ /* Wait for the clocks to stabilize. */
12189+ udelay(150);
12190+ }
12191+
12192+ /* Enable the pipe */
12193+ temp = I915_READ(pipeconf_reg);
12194+ if ((temp & PIPEACONF_ENABLE) == 0)
12195+ I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
12196+
12197+ /* Enable the plane */
12198+ temp = I915_READ(dspcntr_reg);
12199+ if (mode != DPMSModeOn)
12200+ temp2 = temp & ~DISPLAY_PLANE_ENABLE;
12201+ else
12202+ temp2 = temp | DISPLAY_PLANE_ENABLE;
12203+
12204+ if (temp != temp2) {
12205+ I915_WRITE(dspcntr_reg, temp2);
12206+ /* Flush the plane changes */
12207+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
12208+ }
12209+
12210+ intel_crtc_load_lut(crtc);
12211+
12212+ /* Give the overlay scaler a chance to enable if it's on this pipe */
12213+ //intel_crtc_dpms_video(crtc, TRUE); TODO
12214+ break;
12215+ case DPMSModeOff:
12216+ /* Give the overlay scaler a chance to disable if it's on this pipe */
12217+ //intel_crtc_dpms_video(crtc, FALSE); TODO
12218+
12219+ /* Disable display plane */
12220+ temp = I915_READ(dspcntr_reg);
12221+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
12222+ I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
12223+ /* Flush the plane changes */
12224+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
12225+ I915_READ(dspbase_reg);
12226+ }
12227+
12228+ if (!IS_I9XX(dev)) {
12229+ /* Wait for vblank for the disable to take effect */
12230+ intel_wait_for_vblank(dev);
12231+ }
12232+
12233+ /* Next, disable display pipes */
12234+ temp = I915_READ(pipeconf_reg);
12235+ if ((temp & PIPEACONF_ENABLE) != 0) {
12236+ I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
12237+ I915_READ(pipeconf_reg);
12238+ }
12239+
12240+ /* Wait for vblank for the disable to take effect. */
12241+ intel_wait_for_vblank(dev);
12242+
12243+ temp = I915_READ(dpll_reg);
12244+ if ((temp & DPLL_VCO_ENABLE) != 0) {
12245+ I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
12246+ I915_READ(dpll_reg);
12247+ }
12248+
12249+ /* Wait for the clocks to turn off. */
12250+ udelay(150);
12251+ break;
12252+ }
12253+
12254+
12255+ if (!dev_priv->sarea_priv)
12256+ return;
12257+
12258+ enabled = crtc->enabled && mode != DPMSModeOff;
12259+
12260+ switch (pipe) {
12261+ case 0:
12262+ dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
12263+ dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
12264+ break;
12265+ case 1:
12266+ dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
12267+ dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
12268+ break;
12269+ default:
12270+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
12271+ break;
12272+ }
12273+}
12274+
12275+static bool intel_crtc_lock(struct drm_crtc *crtc)
12276+{
12277+ /* Sync the engine before mode switch */
12278+// i830WaitSync(crtc->scrn);
12279+
12280+#if 0 // TODO def XF86DRI
12281+ return I830DRILock(crtc->scrn);
12282+#else
12283+ return FALSE;
12284+#endif
12285+}
12286+
12287+static void intel_crtc_unlock (struct drm_crtc *crtc)
12288+{
12289+#if 0 // TODO def XF86DRI
12290+ I830DRIUnlock (crtc->scrn);
12291+#endif
12292+}
12293+
12294+static void intel_crtc_prepare (struct drm_crtc *crtc)
12295+{
12296+ crtc->funcs->dpms(crtc, DPMSModeOff);
12297+}
12298+
12299+static void intel_crtc_commit (struct drm_crtc *crtc)
12300+{
12301+ crtc->funcs->dpms(crtc, DPMSModeOn);
12302+}
12303+
12304+void intel_output_prepare (struct drm_output *output)
12305+{
12306+ /* lvds has its own version of prepare see intel_lvds_prepare */
12307+ output->funcs->dpms(output, DPMSModeOff);
12308+}
12309+
12310+void intel_output_commit (struct drm_output *output)
12311+{
12312+ /* lvds has its own version of commit see intel_lvds_commit */
12313+ output->funcs->dpms(output, DPMSModeOn);
12314+}
12315+
12316+static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
12317+ struct drm_display_mode *mode,
12318+ struct drm_display_mode *adjusted_mode)
12319+{
12320+ return true;
12321+}
12322+
12323+
12324+/** Returns the core display clock speed for i830 - i945 */
12325+int intel_get_core_clock_speed(struct drm_device *dev)
12326+{
12327+
12328+ /* Core clock values taken from the published datasheets.
12329+	 * The 830 may go up to 166 MHz, which we should check.
12330+ */
12331+ if (IS_I945G(dev))
12332+ return 400000;
12333+ else if (IS_I915G(dev))
12334+ return 333000;
12335+ else if (IS_I945GM(dev) || IS_POULSBO(dev) || IS_845G(dev))
12336+ return 200000;
12337+ else if (IS_I915GM(dev)) {
12338+ u16 gcfgc = 0;
12339+
12340+ pci_read_config_word(dev->pdev, I915_GCFGC, &gcfgc);
12341+
12342+ if (gcfgc & I915_LOW_FREQUENCY_ENABLE)
12343+ return 133000;
12344+ else {
12345+ switch (gcfgc & I915_DISPLAY_CLOCK_MASK) {
12346+ case I915_DISPLAY_CLOCK_333_MHZ:
12347+ return 333000;
12348+ default:
12349+ case I915_DISPLAY_CLOCK_190_200_MHZ:
12350+ return 190000;
12351+ }
12352+ }
12353+ } else if (IS_I865G(dev))
12354+ return 266000;
12355+ else if (IS_I855(dev)) {
12356+#if 0
12357+ PCITAG bridge = pciTag(0, 0, 0); /* This is always the host bridge */
12358+ u16 hpllcc = pciReadWord(bridge, I855_HPLLCC);
12359+
12360+#endif
12361+ u16 hpllcc = 0;
12362+ /* Assume that the hardware is in the high speed state. This
12363+ * should be the default.
12364+ */
12365+ switch (hpllcc & I855_CLOCK_CONTROL_MASK) {
12366+ case I855_CLOCK_133_200:
12367+ case I855_CLOCK_100_200:
12368+ return 200000;
12369+ case I855_CLOCK_166_250:
12370+ return 250000;
12371+ case I855_CLOCK_100_133:
12372+ return 133000;
12373+ }
12374+ } else /* 852, 830 */
12375+ return 133000;
12376+
12377+ return 0; /* Silence gcc warning */
12378+}
12379+
12380+
12381+/**
12382+ * Return the pipe currently connected to the panel fitter,
12383+ * or -1 if the panel fitter is not present or not in use
12384+ */
12385+int intel_panel_fitter_pipe (struct drm_device *dev)
12386+{
12387+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12388+ u32 pfit_control;
12389+
12390+ /* i830 doesn't have a panel fitter */
12391+ if (IS_I830(dev))
12392+ return -1;
12393+
12394+ pfit_control = I915_READ(PFIT_CONTROL);
12395+
12396+ /* See if the panel fitter is in use */
12397+ if ((pfit_control & PFIT_ENABLE) == 0)
12398+ return -1;
12399+
12400+ /* 965 can place panel fitter on either pipe */
12401+ if (IS_I965G(dev))
12402+ return (pfit_control >> 29) & 0x3;
12403+
12404+ /* older chips can only use pipe 1 */
12405+ return 1;
12406+}
12407+
12408+#define WA_NO_FB_GARBAGE_DISPLAY
12409+#ifdef WA_NO_FB_GARBAGE_DISPLAY
12410+static u32 fp_reg_value[2];
12411+static u32 dpll_reg_value[2];
12412+static u32 dpll_md_reg_value[2];
12413+static u32 dspcntr_reg_value[2];
12414+static u32 pipeconf_reg_value[2];
12415+static u32 htot_reg_value[2];
12416+static u32 hblank_reg_value[2];
12417+static u32 hsync_reg_value[2];
12418+static u32 vtot_reg_value[2];
12419+static u32 vblank_reg_value[2];
12420+static u32 vsync_reg_value[2];
12421+static u32 dspsize_reg_value[2];
12422+static u32 dspstride_reg_value[2];
12423+static u32 dsppos_reg_value[2];
12424+static u32 pipesrc_reg_value[2];
12425+
12426+static u32 dspbase_value[2];
12427+
12428+static u32 lvds_reg_value[2];
12429+static u32 vgacntrl_reg_value[2];
12430+static u32 pfit_control_reg_value[2];
12431+
12432+void intel_crtc_mode_restore(struct drm_crtc *crtc)
12433+{
12434+ struct drm_device *dev = crtc->dev;
12435+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12436+ struct intel_crtc *intel_crtc = crtc->driver_private;
12437+ int pipe = intel_crtc->pipe;
12438+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
12439+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
12440+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
12441+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
12442+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
12443+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
12444+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
12445+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
12446+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
12447+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
12448+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
12449+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
12450+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
12451+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
12452+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
12453+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
12454+
12455+ bool ok, is_sdvo = false, is_dvo = false;
12456+ bool is_crt = false, is_lvds = false, is_tv = false;
12457+ struct drm_mode_config *mode_config = &dev->mode_config;
12458+ struct drm_output *output;
12459+
12460+ list_for_each_entry(output, &mode_config->output_list, head) {
12461+ struct intel_output *intel_output = output->driver_private;
12462+
12463+ if (output->crtc != crtc)
12464+ continue;
12465+
12466+ switch (intel_output->type) {
12467+ case INTEL_OUTPUT_LVDS:
12468+ is_lvds = TRUE;
12469+ break;
12470+ case INTEL_OUTPUT_SDVO:
12471+ is_sdvo = TRUE;
12472+ break;
12473+ case INTEL_OUTPUT_DVO:
12474+ is_dvo = TRUE;
12475+ break;
12476+ case INTEL_OUTPUT_TVOUT:
12477+ is_tv = TRUE;
12478+ break;
12479+ case INTEL_OUTPUT_ANALOG:
12480+ is_crt = TRUE;
12481+ break;
12482+ }
12483+ if (is_lvds && ((lvds_reg_value[pipe] & LVDS_PORT_EN) == 0))
12484+ {
12485+ printk("%s: LVDS is not the boot display, skipping restore\n",
12486+ __FUNCTION__);
12487+ return;
12488+ }
12489+ output->funcs->prepare(output);
12490+ }
12491+
12492+ intel_crtc_prepare(crtc);
12493+ /* Disable the panel fitter if it was on our pipe */
12494+ if (intel_panel_fitter_pipe(dev) == pipe)
12495+ I915_WRITE(PFIT_CONTROL, 0);
12496+
12497+ if (dpll_reg_value[pipe] & DPLL_VCO_ENABLE) {
12498+ I915_WRITE(fp_reg, fp_reg_value[pipe]);
12499+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]& ~DPLL_VCO_ENABLE);
12500+ I915_READ(dpll_reg);
12501+ udelay(150);
12502+ }
12503+
12504+ /*
12505+ if(is_lvds)
12506+ I915_WRITE(LVDS, lvds_reg_value[pipe]);
12507+ */
12508+ if (is_lvds) {
12509+ I915_WRITE(LVDS, lvds_reg_value[pipe]);
12510+ I915_READ(LVDS);
12511+ }
12512+
12513+ I915_WRITE(fp_reg, fp_reg_value[pipe]);
12514+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]);
12515+ I915_READ(dpll_reg);
12516+ udelay(150);
12517+ //I915_WRITE(dpll_md_reg, dpll_md_reg_value[pipe]);
12518+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]);
12519+ I915_READ(dpll_reg);
12520+ udelay(150);
12521+ I915_WRITE(htot_reg, htot_reg_value[pipe]);
12522+ I915_WRITE(hblank_reg, hblank_reg_value[pipe]);
12523+ I915_WRITE(hsync_reg, hsync_reg_value[pipe]);
12524+ I915_WRITE(vtot_reg, vtot_reg_value[pipe]);
12525+ I915_WRITE(vblank_reg, vblank_reg_value[pipe]);
12526+ I915_WRITE(vsync_reg, vsync_reg_value[pipe]);
12527+ I915_WRITE(dspstride_reg, dspstride_reg_value[pipe]);
12528+ I915_WRITE(dspsize_reg, dspsize_reg_value[pipe]);
12529+ I915_WRITE(dsppos_reg, dsppos_reg_value[pipe]);
12530+ I915_WRITE(pipesrc_reg, pipesrc_reg_value[pipe]);
12531+ I915_WRITE(pipeconf_reg, pipeconf_reg_value[pipe]);
12532+ I915_READ(pipeconf_reg);
12533+ intel_wait_for_vblank(dev);
12534+ I915_WRITE(dspcntr_reg, dspcntr_reg_value[pipe]);
12535+ I915_WRITE(dspbase, dspbase_value[pipe]);
12536+ I915_READ(dspbase);
12537+ I915_WRITE(VGACNTRL, vgacntrl_reg_value[pipe]);
12538+ intel_wait_for_vblank(dev);
12539+ I915_WRITE(PFIT_CONTROL, pfit_control_reg_value[pipe]);
12540+
12541+ intel_crtc_commit(crtc);
12542+ list_for_each_entry(output, &mode_config->output_list, head) {
12543+ if (output->crtc != crtc)
12544+ continue;
12545+
12546+ output->funcs->commit(output);
12547+ //output->funcs->dpms(output, DPMSModeOff);
12548+ //printk("turn off the display first\n");
12549+ }
12550+ return;
12551+}
12552+
12553+void intel_crtc_mode_save(struct drm_crtc *crtc)
12554+{
12555+ struct drm_device *dev = crtc->dev;
12556+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12557+ struct intel_crtc *intel_crtc = crtc->driver_private;
12558+ int pipe = intel_crtc->pipe;
12559+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
12560+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
12561+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
12562+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
12563+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
12564+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
12565+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
12566+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
12567+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
12568+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
12569+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
12570+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
12571+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
12572+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
12573+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
12574+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
12575+ bool ok, is_sdvo = false, is_dvo = false;
12576+ bool is_crt = false, is_lvds = false, is_tv = false;
12577+ struct drm_mode_config *mode_config = &dev->mode_config;
12578+ struct drm_output *output;
12579+
12580+ list_for_each_entry(output, &mode_config->output_list, head) {
12581+ struct intel_output *intel_output = output->driver_private;
12582+
12583+ if (output->crtc != crtc)
12584+ continue;
12585+
12586+ switch (intel_output->type) {
12587+ case INTEL_OUTPUT_LVDS:
12588+ is_lvds = TRUE;
12589+ break;
12590+ case INTEL_OUTPUT_SDVO:
12591+ is_sdvo = TRUE;
12592+ break;
12593+ case INTEL_OUTPUT_DVO:
12594+ is_dvo = TRUE;
12595+ break;
12596+ case INTEL_OUTPUT_TVOUT:
12597+ is_tv = TRUE;
12598+ break;
12599+ case INTEL_OUTPUT_ANALOG:
12600+ is_crt = TRUE;
12601+ break;
12602+ }
12603+ }
12604+
12605+ fp_reg_value[pipe] = I915_READ(fp_reg);
12606+ dpll_reg_value[pipe] = I915_READ(dpll_reg);
12607+ dpll_md_reg_value[pipe] = I915_READ(dpll_md_reg);
12608+ dspcntr_reg_value[pipe] = I915_READ(dspcntr_reg);
12609+ pipeconf_reg_value[pipe] = I915_READ(pipeconf_reg);
12610+ htot_reg_value[pipe] = I915_READ(htot_reg);
12611+ hblank_reg_value[pipe] = I915_READ(hblank_reg);
12612+ hsync_reg_value[pipe] = I915_READ(hsync_reg);
12613+ vtot_reg_value[pipe] = I915_READ(vtot_reg);
12614+ vblank_reg_value[pipe] = I915_READ(vblank_reg);
12615+ vsync_reg_value[pipe] = I915_READ(vsync_reg);
12616+ dspsize_reg_value[pipe] = I915_READ(dspsize_reg);
12617+ dspstride_reg_value[pipe] = I915_READ(dspstride_reg);
12618+ dsppos_reg_value[pipe] = I915_READ(dsppos_reg);
12619+ pipesrc_reg_value[pipe] = I915_READ(pipesrc_reg);
12620+ dspbase_value[pipe] = I915_READ(dspbase);
12621+ if(is_lvds)
12622+ lvds_reg_value[pipe] = I915_READ(LVDS);
12623+ vgacntrl_reg_value[pipe] = I915_READ(VGACNTRL);
12624+ pfit_control_reg_value[pipe] = I915_READ(PFIT_CONTROL);
12625+}
12626+#endif
12627+
12628+static void intel_crtc_mode_set(struct drm_crtc *crtc,
12629+ struct drm_display_mode *mode,
12630+ struct drm_display_mode *adjusted_mode,
12631+ int x, int y)
12632+{
12633+ struct drm_device *dev = crtc->dev;
12634+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12635+ struct intel_crtc *intel_crtc = crtc->driver_private;
12636+ int pipe = intel_crtc->pipe;
12637+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
12638+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
12639+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
12640+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
12641+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
12642+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
12643+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
12644+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
12645+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
12646+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
12647+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
12648+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
12649+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
12650+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
12651+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
12652+ int refclk;
12653+ intel_clock_t clock;
12654+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
12655+ bool ok, is_sdvo = false, is_dvo = false;
12656+ bool is_crt = false, is_lvds = false, is_tv = false;
12657+ struct drm_mode_config *mode_config = &dev->mode_config;
12658+ struct drm_output *output;
12659+
12660+ if (!crtc->fb) {
12661+ DRM_ERROR("Can't set mode without attached fb\n");
12662+ return;
12663+ }
12664+
12665+ list_for_each_entry(output, &mode_config->output_list, head) {
12666+ struct intel_output *intel_output = output->driver_private;
12667+
12668+ if (output->crtc != crtc)
12669+ continue;
12670+
12671+ switch (intel_output->type) {
12672+ case INTEL_OUTPUT_LVDS:
12673+ is_lvds = TRUE;
12674+ break;
12675+ case INTEL_OUTPUT_SDVO:
12676+ is_sdvo = TRUE;
12677+ break;
12678+ case INTEL_OUTPUT_DVO:
12679+ is_dvo = TRUE;
12680+ break;
12681+ case INTEL_OUTPUT_TVOUT:
12682+ is_tv = TRUE;
12683+ break;
12684+ case INTEL_OUTPUT_ANALOG:
12685+ is_crt = TRUE;
12686+ break;
12687+ }
12688+ }
12689+
12690+ if (IS_I9XX(dev)) {
12691+ refclk = 96000;
12692+ } else {
12693+ refclk = 48000;
12694+ }
12695+
12696+ ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
12697+ if (!ok) {
12698+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
12699+ return;
12700+ }
12701+
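+ /* FP register layout: N divisor in bits 23:16, M1 in bits 15:8, M2 in
+ * bits 7:0; e.g. n=2, m1=10, m2=5 packs to 0x00020a05.
+ */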
12702+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
12703+
12704+ dpll = DPLL_VGA_MODE_DIS;
12705+ if (IS_I9XX(dev)) {
12706+ if (is_lvds) {
12707+ dpll |= DPLLB_MODE_LVDS;
12708+ if (IS_POULSBO(dev))
12709+ dpll |= DPLL_DVO_HIGH_SPEED;
12710+ } else
12711+ dpll |= DPLLB_MODE_DAC_SERIAL;
12712+ if (is_sdvo) {
12713+ dpll |= DPLL_DVO_HIGH_SPEED;
12714+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) {
12715+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
12716+ dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
12717+ }
12718+ }
12719+
12720+ /* compute bitmask from p1 value */
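+ /* e.g. p1 == 3 sets bit 18: bits 23:16 of the DPLL register hold the
+ * one-hot P1 post-divider field on i9xx, read back with ffs() in
+ * intel_crtc_clock_get().
+ */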
12721+ dpll |= (1 << (clock.p1 - 1)) << 16;
12722+ switch (clock.p2) {
12723+ case 5:
12724+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
12725+ break;
12726+ case 7:
12727+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
12728+ break;
12729+ case 10:
12730+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
12731+ break;
12732+ case 14:
12733+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
12734+ break;
12735+ }
12736+ if (IS_I965G(dev))
12737+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
12738+ } else {
12739+ if (is_lvds) {
12740+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
12741+ } else {
12742+ if (clock.p1 == 2)
12743+ dpll |= PLL_P1_DIVIDE_BY_TWO;
12744+ else
12745+ dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
12746+ if (clock.p2 == 4)
12747+ dpll |= PLL_P2_DIVIDE_BY_4;
12748+ }
12749+ }
12750+
12751+ if (is_tv) {
12752+ /* XXX: just matching BIOS for now */
12753+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
12754+ dpll |= 3;
12755+ }
12756+#if 0
12757+ else if (is_lvds)
12758+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
12759+#endif
12760+ else
12761+ dpll |= PLL_REF_INPUT_DREFCLK;
12762+
12763+ /* setup pipeconf */
12764+ pipeconf = I915_READ(pipeconf_reg);
12765+
12766+ /* Set up the display plane register */
12767+ dspcntr = DISPPLANE_GAMMA_ENABLE;
12768+
12769+ switch (crtc->fb->bits_per_pixel) {
12770+ case 8:
12771+ dspcntr |= DISPPLANE_8BPP;
12772+ break;
12773+ case 16:
12774+ if (crtc->fb->depth == 15)
12775+ dspcntr |= DISPPLANE_15_16BPP;
12776+ else
12777+ dspcntr |= DISPPLANE_16BPP;
12778+ break;
12779+ case 32:
12780+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
12781+ break;
12782+ default:
12783+ DRM_ERROR("Unknown color depth\n");
12784+ return;
12785+ }
12786+
12787+
12788+ if (pipe == 0)
12789+ dspcntr |= DISPPLANE_SEL_PIPE_A;
12790+ else
12791+ dspcntr |= DISPPLANE_SEL_PIPE_B;
12792+
12793+ if (pipe == 0 && !IS_I965G(dev)) {
12794+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
12795+ * core speed.
12796+ *
12797+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
12798+ * pipe == 0 check?
12799+ */
12800+ if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
12801+ pipeconf |= PIPEACONF_DOUBLE_WIDE;
12802+ else
12803+ pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
12804+ }
12805+
12806+ dspcntr |= DISPLAY_PLANE_ENABLE;
12807+ pipeconf |= PIPEACONF_ENABLE;
12808+ dpll |= DPLL_VCO_ENABLE;
12809+
12810+
12811+ /* Disable the panel fitter if it was on our pipe */
12812+ if (intel_panel_fitter_pipe(dev) == pipe)
12813+ I915_WRITE(PFIT_CONTROL, 0);
12814+
12815+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
12816+ drm_mode_debug_printmodeline(dev, mode);
12817+
12818+ /*psbPrintPll("chosen", &clock);*/
12819+ DRM_DEBUG("clock regs: 0x%08x, 0x%08x,dspntr is 0x%8x, pipeconf is 0x%8x\n", (int)dpll,
12820+ (int)fp,(int)dspcntr,(int)pipeconf);
12821+#if 0
12822+ if (!xf86ModesEqual(mode, adjusted_mode)) {
12823+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
12824+ "Adjusted mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
12825+ xf86PrintModeline(pScrn->scrnIndex, mode);
12826+ }
12827+ i830PrintPll("chosen", &clock);
12828+#endif
12829+
12830+ if (dpll & DPLL_VCO_ENABLE) {
12831+ I915_WRITE(fp_reg, fp);
12832+ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
12833+ I915_READ(dpll_reg);
12834+ udelay(150);
12835+ }
12836+
12837+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
12838+ * This is an exception to the general rule that mode_set doesn't turn
12839+ * things on.
12840+ */
12841+ if (is_lvds) {
12842+ u32 lvds = I915_READ(LVDS);
12843+
12844+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
12845+ /* Set the B0-B3 data pairs corresponding to whether we're going to
12846+ * set the DPLLs for dual-channel mode or not.
12847+ */
12848+ if (clock.p2 == 7)
12849+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
12850+ else
12851+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
12852+
12853+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
12854+ * appropriately here, but we need to look more thoroughly into how
12855+ * panels behave in the two modes.
12856+ */
12857+
12858+ I915_WRITE(LVDS, lvds);
12859+ I915_READ(LVDS);
12860+ }
12861+
12862+ I915_WRITE(fp_reg, fp);
12863+ I915_WRITE(dpll_reg, dpll);
12864+ I915_READ(dpll_reg);
12865+ /* Wait for the clocks to stabilize. */
12866+ udelay(150);
12867+
12868+ if (IS_I965G(dev)) {
12869+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
12870+ I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
12871+ ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
12872+ } else {
12873+ /* write it again -- the BIOS does, after all */
12874+ I915_WRITE(dpll_reg, dpll);
12875+ }
12876+ I915_READ(dpll_reg);
12877+ /* Wait for the clocks to stabilize. */
12878+ udelay(150);
12879+
12880+ I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
12881+ ((adjusted_mode->crtc_htotal - 1) << 16));
12882+ I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
12883+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
12884+ I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
12885+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
12886+ I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
12887+ ((adjusted_mode->crtc_vtotal - 1) << 16));
12888+ I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
12889+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
12890+ I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
12891+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
12892+ I915_WRITE(dspstride_reg, crtc->fb->pitch);
12893+ /* pipesrc and dspsize control the size that is scaled from, which should
12894+ * always be the user's requested size.
12895+ */
12896+ I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
12897+ I915_WRITE(dsppos_reg, 0);
12898+ I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
12899+ I915_WRITE(pipeconf_reg, pipeconf);
12900+ I915_READ(pipeconf_reg);
12901+
12902+ intel_wait_for_vblank(dev);
12903+
12904+ I915_WRITE(dspcntr_reg, dspcntr);
12905+
12906+ /* Flush the plane changes */
12907+ intel_pipe_set_base(crtc, x, y);
12908+
12909+#if 0
12910+ intel_set_vblank(dev);
12911+#endif
12912+
12913+ /* Disable the VGA plane that we never use */
12914+ I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
12915+
12916+ intel_wait_for_vblank(dev);
12917+}
12918+
12919+/** Loads the palette/gamma unit for the CRTC with the prepared values */
12920+void intel_crtc_load_lut(struct drm_crtc *crtc)
12921+{
12922+ struct drm_device *dev = crtc->dev;
12923+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12924+ struct intel_crtc *intel_crtc = crtc->driver_private;
12925+ int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
12926+ int i;
12927+
12928+ /* The clocks have to be on to load the palette. */
12929+ if (!crtc->enabled)
12930+ return;
12931+
12932+ for (i = 0; i < 256; i++) {
12933+ I915_WRITE(palreg + 4 * i,
12934+ (intel_crtc->lut_r[i] << 16) |
12935+ (intel_crtc->lut_g[i] << 8) |
12936+ intel_crtc->lut_b[i]);
12937+ }
12938+}
12939+
12940+/** Sets the color ramps on behalf of RandR */
12941+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
12942+ u16 blue, int regno)
12943+{
12944+ struct intel_crtc *intel_crtc = crtc->driver_private;
12945+
12946+ intel_crtc->lut_r[regno] = red >> 8;
12947+ intel_crtc->lut_g[regno] = green >> 8;
12948+ intel_crtc->lut_b[regno] = blue >> 8;
12949+}
12950+
12951+/* Returns the clock of the currently programmed mode of the given pipe. */
12952+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
12953+{
12954+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12955+ struct intel_crtc *intel_crtc = crtc->driver_private;
12956+ int pipe = intel_crtc->pipe;
12957+ u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
12958+ u32 fp;
12959+ intel_clock_t clock;
12960+
12961+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
12962+ fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
12963+ else
12964+ fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
12965+
12966+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
12967+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
12968+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
12969+ if (IS_I9XX(dev)) {
12970+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
12971+ DPLL_FPA01_P1_POST_DIV_SHIFT);
12972+
12973+ switch (dpll & DPLL_MODE_MASK) {
12974+ case DPLLB_MODE_DAC_SERIAL:
12975+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
12976+ 5 : 10;
12977+ break;
12978+ case DPLLB_MODE_LVDS:
12979+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
12980+ 7 : 14;
12981+ break;
12982+ default:
12983+ DRM_DEBUG("Unknown DPLL mode %08x in programmed "
12984+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
12985+ return 0;
12986+ }
12987+
12988+ /* XXX: Handle the 100 MHz refclk */
12989+ i9xx_clock(96000, &clock);
12990+ } else {
12991+ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
12992+
12993+ if (is_lvds) {
12994+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
12995+ DPLL_FPA01_P1_POST_DIV_SHIFT);
12996+ clock.p2 = 14;
12997+
12998+ if ((dpll & PLL_REF_INPUT_MASK) ==
12999+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
13000+ /* XXX: might not be 66MHz */
13001+ i8xx_clock(66000, &clock);
13002+ } else
13003+ i8xx_clock(48000, &clock);
13004+ } else {
13005+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
13006+ clock.p1 = 2;
13007+ else {
13008+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
13009+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
13010+ }
13011+ if (dpll & PLL_P2_DIVIDE_BY_4)
13012+ clock.p2 = 4;
13013+ else
13014+ clock.p2 = 2;
13015+
13016+ i8xx_clock(48000, &clock);
13017+ }
13018+ }
13019+
13020+ /* XXX: It would be nice to validate the clocks, but we can't reuse
13021+ * i830PllIsValid() because it relies on the xf86_config output
13022+ * configuration being accurate, which it isn't necessarily.
13023+ */
13024+
13025+ return clock.dot;
13026+}
13027+
13028+/** Returns the currently programmed mode of the given pipe. */
13029+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
13030+ struct drm_crtc *crtc)
13031+{
13032+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13033+ struct intel_crtc *intel_crtc = crtc->driver_private;
13034+ int pipe = intel_crtc->pipe;
13035+ struct drm_display_mode *mode;
13036+ int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
13037+ int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
13038+ int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
13039+ int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
13040+
13041+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
13042+ if (!mode)
13043+ return NULL;
13044+
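+ /* The hardware timing registers store each value minus one: the low
+ * word holds active/start, the high word holds total/end, hence the
+ * shifts and +1 adjustments below.
+ */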
13045+ mode->clock = intel_crtc_clock_get(dev, crtc);
13046+ mode->hdisplay = (htot & 0xffff) + 1;
13047+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
13048+ mode->hsync_start = (hsync & 0xffff) + 1;
13049+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
13050+ mode->vdisplay = (vtot & 0xffff) + 1;
13051+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
13052+ mode->vsync_start = (vsync & 0xffff) + 1;
13053+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
13054+
13055+ drm_mode_set_name(mode);
13056+ drm_mode_set_crtcinfo(mode, 0);
13057+
13058+ return mode;
13059+}
13060+
13061+static const struct drm_crtc_funcs intel_crtc_funcs = {
13062+ .dpms = intel_crtc_dpms,
13063+ .lock = intel_crtc_lock,
13064+ .unlock = intel_crtc_unlock,
13065+ .mode_fixup = intel_crtc_mode_fixup,
13066+ .mode_set = intel_crtc_mode_set,
13067+ .gamma_set = intel_crtc_gamma_set,
13068+ .prepare = intel_crtc_prepare,
13069+ .commit = intel_crtc_commit,
13070+};
13071+
13072+
13073+void intel_crtc_init(struct drm_device *dev, int pipe)
13074+{
13075+ struct drm_crtc *crtc;
13076+ struct intel_crtc *intel_crtc;
13077+ int i;
13078+
13079+ crtc = drm_crtc_create(dev, &intel_crtc_funcs);
13080+ if (crtc == NULL)
13081+ return;
13082+
13083+ intel_crtc = kzalloc(sizeof(struct intel_crtc), GFP_KERNEL);
13084+ if (intel_crtc == NULL) {
13085+ kfree(crtc);
13086+ return;
13087+ }
13088+
13089+ intel_crtc->pipe = pipe;
13090+ for (i = 0; i < 256; i++) {
13091+ intel_crtc->lut_r[i] = i;
13092+ intel_crtc->lut_g[i] = i;
13093+ intel_crtc->lut_b[i] = i;
13094+ }
13095+
13096+ crtc->driver_private = intel_crtc;
13097+}
13098+
13099+struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
13100+{
13101+ struct drm_crtc *crtc = NULL;
13102+
13103+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
13104+ struct intel_crtc *intel_crtc = crtc->driver_private;
13105+ if (intel_crtc->pipe == pipe)
13106+ break;
13107+ }
13108+ return crtc;
13109+}
13110+
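+/* Build a bitmask of output-list indices whose type is in type_mask; the
+ * result feeds possible_clones, which is index-based rather than
+ * type-based.
+ */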
13111+int intel_output_clones(struct drm_device *dev, int type_mask)
13112+{
13113+ int index_mask = 0;
13114+ struct drm_output *output;
13115+ int entry = 0;
13116+
13117+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
13118+ struct intel_output *intel_output = output->driver_private;
13119+ if (type_mask & (1 << intel_output->type))
13120+ index_mask |= (1 << entry);
13121+ entry++;
13122+ }
13123+ return index_mask;
13124+}
13125+
13126+
13127+static void intel_setup_outputs(struct drm_device *dev)
13128+{
13129+ struct drm_output *output;
13130+
13131+ if (!IS_POULSBO(dev))
13132+ intel_crt_init(dev);
13133+
13134+ /* Set up integrated LVDS */
13135+ if (IS_MOBILE(dev) && !IS_I830(dev))
13136+ intel_lvds_init(dev);
13137+
13138+ if (IS_I9XX(dev)) {
13139+ intel_sdvo_init(dev, SDVOB);
13140+ intel_sdvo_init(dev, SDVOC);
13141+ }
13142+
13143+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
13144+ struct intel_output *intel_output = output->driver_private;
13145+ int crtc_mask = 0, clone_mask = 0;
13146+
13147+ /* valid crtcs */
13148+ switch(intel_output->type) {
13149+ case INTEL_OUTPUT_DVO:
13150+ case INTEL_OUTPUT_SDVO:
13151+ crtc_mask = ((1 << 0)|
13152+ (1 << 1));
13153+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
13154+ (1 << INTEL_OUTPUT_DVO) |
13155+ (1 << INTEL_OUTPUT_SDVO));
13156+ break;
13157+ case INTEL_OUTPUT_ANALOG:
13158+ crtc_mask = ((1 << 0)|
13159+ (1 << 1));
13160+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
13161+ (1 << INTEL_OUTPUT_DVO) |
13162+ (1 << INTEL_OUTPUT_SDVO));
13163+ break;
13164+ case INTEL_OUTPUT_LVDS:
13165+ crtc_mask = (1 << 1);
13166+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
13167+ break;
13168+ case INTEL_OUTPUT_TVOUT:
13169+ crtc_mask = ((1 << 0) |
13170+ (1 << 1));
13171+ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
13172+ break;
13173+ }
13174+ output->possible_crtcs = crtc_mask;
13175+ output->possible_clones = intel_output_clones(dev, clone_mask);
13176+ }
13177+}
13178+
13179+void intel_modeset_init(struct drm_device *dev)
13180+{
13181+ int num_pipe;
13182+ int i;
13183+
13184+ drm_mode_config_init(dev);
13185+
13186+ dev->mode_config.min_width = 0;
13187+ dev->mode_config.min_height = 0;
13188+
13189+ dev->mode_config.max_width = 4096;
13190+ dev->mode_config.max_height = 4096;
13191+
13192+ /* set memory base */
13193+ if (IS_I9XX(dev))
13194+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
13195+ else
13196+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
13197+
13198+ if (IS_MOBILE(dev) || IS_I9XX(dev))
13199+ num_pipe = 2;
13200+ else
13201+ num_pipe = 1;
13202+ DRM_DEBUG("%d display pipe%s available.\n",
13203+ num_pipe, num_pipe > 1 ? "s" : "");
13204+
13205+ for (i = 0; i < num_pipe; i++) {
13206+ intel_crtc_init(dev, i);
13207+ }
13208+
13209+ intel_setup_outputs(dev);
13210+
13211+ //drm_initial_config(dev, false);
13212+}
13213+
13214+void intel_modeset_cleanup(struct drm_device *dev)
13215+{
13216+ drm_mode_config_cleanup(dev);
13217+}
13218Index: linux-2.6.27/drivers/gpu/drm/psb/intel_drv.h
13219===================================================================
13220--- /dev/null 1970-01-01 00:00:00.000000000 +0000
13221+++ linux-2.6.27/drivers/gpu/drm/psb/intel_drv.h 2009-02-05 13:29:33.000000000 +0000
13222@@ -0,0 +1,91 @@
13223+/*
13224+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
13225+ * Copyright (c) 2007 Intel Corporation
13226+ * Jesse Barnes <jesse.barnes@intel.com>
13227+ */
13228+#ifndef __INTEL_DRV_H__
13229+#define __INTEL_DRV_H__
13230+
13231+#include <linux/i2c.h>
13232+#include <linux/i2c-id.h>
13233+#include <linux/i2c-algo-bit.h>
13234+#include "drm_crtc.h"
13235+
13236+/*
13237+ * Display related stuff
13238+ */
13239+
13240+/* store information about an Ixxx DVO */
13241+/* The i830->i865 use multiple DVOs with multiple i2cs */
13242+/* the i915, i945 have a single sDVO i2c bus - which is different */
13243+#define MAX_OUTPUTS 6
13244+
13245+#define INTEL_I2C_BUS_DVO 1
13246+#define INTEL_I2C_BUS_SDVO 2
13247+
13248+/* these are outputs from the chip - integrated only
13249+ external chips are via DVO or SDVO output */
13250+#define INTEL_OUTPUT_UNUSED 0
13251+#define INTEL_OUTPUT_ANALOG 1
13252+#define INTEL_OUTPUT_DVO 2
13253+#define INTEL_OUTPUT_SDVO 3
13254+#define INTEL_OUTPUT_LVDS 4
13255+#define INTEL_OUTPUT_TVOUT 5
13256+
13257+#define INTEL_DVO_CHIP_NONE 0
13258+#define INTEL_DVO_CHIP_LVDS 1
13259+#define INTEL_DVO_CHIP_TMDS 2
13260+#define INTEL_DVO_CHIP_TVOUT 4
13261+
13262+struct intel_i2c_chan {
13263+ struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
13264+ u32 reg; /* GPIO reg */
13265+ struct i2c_adapter adapter;
13266+ struct i2c_algo_bit_data algo;
13267+ u8 slave_addr;
13268+};
13269+
13270+struct intel_output {
13271+ int type;
13272+ struct intel_i2c_chan *i2c_bus; /* for control functions */
13273+ struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
13274+ bool load_detect_tmp;
13275+ void *dev_priv;
13276+};
13277+
13278+struct intel_crtc {
13279+ int pipe;
13280+ u8 lut_r[256], lut_g[256], lut_b[256];
13281+};
13282+
13283+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
13284+ const char *name);
13285+void intel_i2c_destroy(struct intel_i2c_chan *chan);
13286+int intel_ddc_get_modes(struct drm_output *output);
13287+extern bool intel_ddc_probe(struct drm_output *output);
13288+
13289+extern void intel_crt_init(struct drm_device *dev);
13290+extern void intel_sdvo_init(struct drm_device *dev, int output_device);
13291+extern void intel_lvds_init(struct drm_device *dev);
13292+
13293+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
13294+extern void intel_output_prepare (struct drm_output *output);
13295+extern void intel_output_commit (struct drm_output *output);
13296+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
13297+ struct drm_crtc *crtc);
13298+extern void intel_wait_for_vblank(struct drm_device *dev);
13299+extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
13300+
13301+extern int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
13302+extern int intelfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
13303+
13304+extern void intel_modeset_init(struct drm_device *dev);
13305+extern void intel_modeset_cleanup(struct drm_device *dev);
13306+
13307+#define WA_NO_FB_GARBAGE_DISPLAY
13308+#ifdef WA_NO_FB_GARBAGE_DISPLAY
13309+extern void intel_crtc_mode_restore(struct drm_crtc *crtc);
13310+extern void intel_crtc_mode_save(struct drm_crtc *crtc);
13311+#endif
13312+
13313+#endif /* __INTEL_DRV_H__ */
13314Index: linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.c
13315===================================================================
13316--- /dev/null 1970-01-01 00:00:00.000000000 +0000
13317+++ linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.c 2009-02-05 13:29:33.000000000 +0000
13318@@ -0,0 +1,913 @@
13319+/*
13320+ * Copyright © 2006-2007 Intel Corporation
13321+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
13322+ *
13323+ * Permission is hereby granted, free of charge, to any person obtaining a
13324+ * copy of this software and associated documentation files (the "Software"),
13325+ * to deal in the Software without restriction, including without limitation
13326+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13327+ * and/or sell copies of the Software, and to permit persons to whom the
13328+ * Software is furnished to do so, subject to the following conditions:
13329+ *
13330+ * The above copyright notice and this permission notice (including the next
13331+ * paragraph) shall be included in all copies or substantial portions of the
13332+ * Software.
13333+ *
13334+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13335+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13336+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13337+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13338+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
13339+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
13340+ * DEALINGS IN THE SOFTWARE.
13341+ *
13342+ * Authors:
13343+ * Eric Anholt <eric@anholt.net>
13344+ * Dave Airlie <airlied@linux.ie>
13345+ * Jesse Barnes <jesse.barnes@intel.com>
13346+ */
13347+
13348+#include <linux/i2c.h>
13349+#include <linux/backlight.h>
13350+#include "drm_crtc.h"
13351+#include "drm_edid.h"
13352+#include "intel_lvds.h"
13353+
13354+#include <acpi/acpi_drivers.h>
13355+
13356+int drm_intel_ignore_acpi = 0;
13357+MODULE_PARM_DESC(ignore_acpi, "Ignore ACPI");
13358+module_param_named(ignore_acpi, drm_intel_ignore_acpi, int, 0600);
13359+
13360+uint8_t blc_type;
13361+uint8_t blc_pol;
13362+uint8_t blc_freq;
13363+uint8_t blc_minbrightness;
13364+uint8_t blc_i2caddr;
13365+uint8_t blc_brightnesscmd;
13366+int lvds_backlight; /* restore backlight to this value */
13367+
13368+struct intel_i2c_chan *lvds_i2c_bus;
13369+u32 CoreClock;
13370+u32 PWMControlRegFreq;
13371+
13372+unsigned char * dev_OpRegion = NULL;
13373+unsigned int dev_OpRegionSize;
13374+
13375+#define PCI_PORT5_REG80_FFUSE 0xD0058000
13376+#define PCI_PORT5_REG80_MAXRES_INT_EN 0x0040
13377+#define MAX_HDISPLAY 800
13378+#define MAX_VDISPLAY 480
13379+bool sku_bMaxResEnableInt = false;
13380+
13381+/** Set BLC through I2C*/
13382+static int
13383+LVDSI2CSetBacklight(struct drm_device *dev, unsigned char ch)
13384+{
13385+ u8 out_buf[2];
13386+ struct i2c_msg msgs[] = {
13387+ {
13388+ .addr = lvds_i2c_bus->slave_addr,
13389+ .flags = 0,
13390+ .len = 2,
13391+ .buf = out_buf,
13392+ }
13393+ };
13394+
13395+ DRM_INFO("LVDSI2CSetBacklight: the slave_addr is 0x%x, the backlight value is %d\n", lvds_i2c_bus->slave_addr, ch);
13396+
13397+ out_buf[0] = blc_brightnesscmd;
13398+ out_buf[1] = ch;
13399+
13400+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
13401+ {
13402+ DRM_INFO("LVDSI2CSetBacklight: i2c_transfer done\n");
13403+ return true;
13404+ }
13405+
13406+ DRM_ERROR("msg: i2c_transfer error\n");
13407+ return false;
13408+}
13409+
13410+/**
13411+ * Calculate PWM control register value.
13412+ */
13413+static int
13414+LVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
13415+{
13416+ unsigned long value = 0;
13417+
13418+ DRM_INFO("Enter LVDSCalculatePWMCtrlRegFreq.\n");
13419+ if (blc_freq == 0) {
13420+ DRM_ERROR("LVDSCalculatePWMCtrlRegFreq: Frequency Requested is 0.\n");
13421+ return FALSE;
13422+ }
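+ /* Net result: value = (CoreClock * MHz) /
+ * (BLC_PWM_FREQ_CALC_CONSTANT * blc_freq); the precision factor is
+ * multiplied in and divided back out to limit integer truncation in
+ * the intermediate steps.
+ */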
13423+ value = (CoreClock * MHz);
13424+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
13425+ value = (value * BLC_PWM_PRECISION_FACTOR);
13426+ value = (value / blc_freq);
13427+ value = (value / BLC_PWM_PRECISION_FACTOR);
13428+
13429+ if (value > (unsigned long)BLC_MAX_PWM_REG_FREQ ||
13430+ value < (unsigned long)BLC_MIN_PWM_REG_FREQ) {
13431+ return FALSE;
13432+ } else {
13433+ PWMControlRegFreq = ((u32)value & ~BLC_PWM_LEGACY_MODE_ENABLE);
13434+ return TRUE;
13435+ }
13436+}
13437+
13438+/**
13439+ * Returns the maximum level of the backlight duty cycle field.
13440+ */
13441+static u32
13442+LVDSGetPWMMaxBacklight(struct drm_device *dev)
13443+{
13444+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13445+ u32 max_pwm_blc = 0;
13446+
13447+ max_pwm_blc = ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
13448+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
13449+
13450+ if (!(max_pwm_blc & BLC_MAX_PWM_REG_FREQ)) {
13451+ if (LVDSCalculatePWMCtrlRegFreq(dev)) {
13452+ max_pwm_blc = PWMControlRegFreq;
13453+ }
13454+ }
13455+
13456+ DRM_INFO("LVDSGetPWMMaxBacklight: the max_pwm_blc is %d.\n", max_pwm_blc);
13457+ return max_pwm_blc;
13458+}
13459+
13460+
13461+/**
13462+ * Sets the backlight level.
13463+ *
13464+ * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
13465+ */
13466+static void intel_lvds_set_backlight(struct drm_device *dev, int level)
13467+{
13468+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13469+ //u32 blc_pwm_ctl;
13470+
13471+ /*
13472+ blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
13473+ I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
13474+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
13475+ */
13476+ u32 newbacklight = 0;
13477+
13478+ DRM_INFO("intel_lvds_set_backlight: the level is %d\n", level);
13479+
13480+ if(blc_type == BLC_I2C_TYPE){
13481+ newbacklight = BRIGHTNESS_MASK & ((unsigned long)level *
13482+ BRIGHTNESS_MASK / BRIGHTNESS_MAX_LEVEL);
13483+
13484+ if (blc_pol == BLC_POLARITY_INVERSE) {
13485+ newbacklight = BRIGHTNESS_MASK - newbacklight;
13486+ }
13487+
13488+ LVDSI2CSetBacklight(dev, newbacklight);
13489+
13490+ } else if (blc_type == BLC_PWM_TYPE) {
13491+ u32 max_pwm_blc = LVDSGetPWMMaxBacklight(dev);
13492+
13493+ u32 blc_pwm_duty_cycle;
13494+
13495+ /* Prevent the LVDS backlight from going completely black */
13496+ if (level < 20) {
13497+ level = 20;
13498+ }
13499+ blc_pwm_duty_cycle = level * max_pwm_blc/BRIGHTNESS_MAX_LEVEL;
13500+
13501+ if (blc_pol == BLC_POLARITY_INVERSE) {
13502+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
13503+ }
13504+
13505+ blc_pwm_duty_cycle &= BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
13506+
13507+ I915_WRITE(BLC_PWM_CTL,
13508+ (max_pwm_blc << BACKLIGHT_PWM_CTL_SHIFT)| (blc_pwm_duty_cycle));
13509+ }
13510+}
13511+
13512+/**
13513+ * Returns the maximum level of the backlight duty cycle field.
13514+ */
13515+static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
13516+{
13517+ return BRIGHTNESS_MAX_LEVEL;
13518+ /*
13519+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13520+
13521+ return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
13522+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
13523+ */
13524+}
13525+
13526+/**
13527+ * Sets the power state for the panel.
13528+ */
13529+static void intel_lvds_set_power(struct drm_device *dev, bool on)
13530+{
13531+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13532+ u32 pp_status;
13533+
13534+ DRM_INFO("intel_lvds_set_power: %d\n", on);
13535+ if (on) {
13536+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
13537+ POWER_TARGET_ON);
13538+ do {
13539+ pp_status = I915_READ(PP_STATUS);
13540+ } while ((pp_status & PP_ON) == 0);
13541+
13542+ intel_lvds_set_backlight(dev, lvds_backlight);
13543+ } else {
13544+ intel_lvds_set_backlight(dev, 0);
13545+
13546+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
13547+ ~POWER_TARGET_ON);
13548+ do {
13549+ pp_status = I915_READ(PP_STATUS);
13550+ } while (pp_status & PP_ON);
13551+ }
13552+}
13553+
13554+static void intel_lvds_dpms(struct drm_output *output, int mode)
13555+{
13556+ struct drm_device *dev = output->dev;
13557+
13558+ DRM_INFO("intel_lvds_dpms: the mode is %d\n", mode);
13559+ if (mode == DPMSModeOn)
13560+ intel_lvds_set_power(dev, true);
13561+ else
13562+ intel_lvds_set_power(dev, false);
13563+
13564+ /* XXX: We never power down the LVDS pairs. */
13565+}
13566+
13567+static void intel_lvds_save(struct drm_output *output)
13568+{
13569+ struct drm_device *dev = output->dev;
13570+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13571+
13572+ dev_priv->savePP_ON = I915_READ(LVDSPP_ON);
13573+ dev_priv->savePP_OFF = I915_READ(LVDSPP_OFF);
13574+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
13575+ dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
13576+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
13577+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
13578+ BACKLIGHT_DUTY_CYCLE_MASK);
13579+
13580+ /*
13581+ * If the light is off at server startup, just make it full brightness
13582+ */
13583+ if (dev_priv->backlight_duty_cycle == 0)
13584+ lvds_backlight=
13585+ intel_lvds_get_max_backlight(dev);
13586+}
13587+
13588+static void intel_lvds_restore(struct drm_output *output)
13589+{
13590+ struct drm_device *dev = output->dev;
13591+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13592+
13593+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
13594+ I915_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
13595+ I915_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
13596+ I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
13597+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
13598+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
13599+ intel_lvds_set_power(dev, true);
13600+ else
13601+ intel_lvds_set_power(dev, false);
13602+}
13603+
13604+static int intel_lvds_mode_valid(struct drm_output *output,
13605+ struct drm_display_mode *mode)
13606+{
13607+ struct drm_device *dev = output->dev;
13608+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13609+ struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
13610+
13611+ if (fixed_mode) {
13612+ if (mode->hdisplay > fixed_mode->hdisplay)
13613+ return MODE_PANEL;
13614+ if (mode->vdisplay > fixed_mode->vdisplay)
13615+ return MODE_PANEL;
13616+ }
13617+
13618+ if (IS_POULSBO(dev) && sku_bMaxResEnableInt) {
13619+ if (mode->hdisplay > MAX_HDISPLAY)
13620+ return MODE_PANEL;
13621+ if (mode->vdisplay > MAX_VDISPLAY)
13622+ return MODE_PANEL;
13623+ }
13624+
13625+ return MODE_OK;
13626+}
13627+
13628+static bool intel_lvds_mode_fixup(struct drm_output *output,
13629+ struct drm_display_mode *mode,
13630+ struct drm_display_mode *adjusted_mode)
13631+{
13632+ struct drm_device *dev = output->dev;
13633+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13634+ struct intel_crtc *intel_crtc = output->crtc->driver_private;
13635+ struct drm_output *tmp_output;
13636+
13637+ /* Should never happen!! */
13638+ if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
13639+ DRM_ERROR("Can't support LVDS on pipe A\n");
13640+ return false;
13641+ }
13642+
13643+ /* Should never happen!! */
13644+ list_for_each_entry(tmp_output, &dev->mode_config.output_list, head) {
13645+ if (tmp_output != output && tmp_output->crtc == output->crtc) {
13646+ DRM_ERROR("Can't enable LVDS and another "
13647+ "output on the same pipe\n");
13648+ return false;
13649+ }
13650+ }
13651+
13652+ /*
13653+ * If we have timings from the BIOS for the panel, put them in
13654+ * to the adjusted mode. The CRTC will be set up for this mode,
13655+ * with the panel scaling set up to source from the H/VDisplay
13656+ * of the original mode.
13657+ */
13658+ if (dev_priv->panel_fixed_mode != NULL) {
13659+ adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
13660+ adjusted_mode->hsync_start =
13661+ dev_priv->panel_fixed_mode->hsync_start;
13662+ adjusted_mode->hsync_end =
13663+ dev_priv->panel_fixed_mode->hsync_end;
13664+ adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
13665+ adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
13666+ adjusted_mode->vsync_start =
13667+ dev_priv->panel_fixed_mode->vsync_start;
13668+ adjusted_mode->vsync_end =
13669+ dev_priv->panel_fixed_mode->vsync_end;
13670+ adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
13671+ adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
13672+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
13673+ }
13674+
13675+ /*
13676+ * XXX: It would be nice to support lower refresh rates on the
13677+ * panels to reduce power consumption, and perhaps match the
13678+ * user's requested refresh rate.
13679+ */
13680+
13681+ return true;
13682+}
13683+
13684+static void intel_lvds_prepare(struct drm_output *output)
13685+{
13686+ struct drm_device *dev = output->dev;
13687+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13688+
13689+ DRM_INFO("intel_lvds_prepare\n");
13690+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
13691+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
13692+ BACKLIGHT_DUTY_CYCLE_MASK);
13693+
13694+ intel_lvds_set_power(dev, false);
13695+}
13696+
13697+static void intel_lvds_commit( struct drm_output *output)
13698+{
13699+ struct drm_device *dev = output->dev;
13700+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13701+
13702+ DRM_INFO("intel_lvds_commit\n");
13703+ if (dev_priv->backlight_duty_cycle == 0)
13704+ //dev_priv->backlight_duty_cycle =
13705+ lvds_backlight =
13706+ intel_lvds_get_max_backlight(dev);
13707+
13708+ intel_lvds_set_power(dev, true);
13709+}
13710+
13711+static void intel_lvds_mode_set(struct drm_output *output,
13712+ struct drm_display_mode *mode,
13713+ struct drm_display_mode *adjusted_mode)
13714+{
13715+ struct drm_device *dev = output->dev;
13716+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13717+ struct intel_crtc *intel_crtc = output->crtc->driver_private;
13718+ u32 pfit_control;
13719+
13720+ /*
13721+ * The LVDS pin pair will already have been turned on in the
13722+ * intel_crtc_mode_set since it has a large impact on the DPLL
13723+ * settings.
13724+ */
13725+
13726+ /*
13727+ * Enable automatic panel scaling so that non-native modes fill the
13728+ * screen. Should be enabled before the pipe is enabled, according to
13729+ * register description and PRM.
13730+ */
13731+ if (mode->hdisplay != adjusted_mode->hdisplay ||
13732+ mode->vdisplay != adjusted_mode->vdisplay)
13733+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
13734+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
13735+ HORIZ_INTERP_BILINEAR);
13736+ else
13737+ pfit_control = 0;
13738+
13739+ if (!IS_I965G(dev)) {
13740+ if (dev_priv->panel_wants_dither)
13741+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
13742+ }
13743+ else
13744+ pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
13745+
13746+ I915_WRITE(PFIT_CONTROL, pfit_control);
13747+}
13748+
13749+/**
13750+ * Detect the LVDS connection.
13751+ *
13752+ * This always returns output_status_connected. This output should only have
13753+ * been set up if the LVDS was actually connected anyway.
13754+ */
13755+static enum drm_output_status intel_lvds_detect(struct drm_output *output)
13756+{
13757+ return output_status_connected;
13758+}
13759+
13760+/**
13761+ * Return the list of DDC modes if available.
13762+ */
13763+static int intel_lvds_get_modes(struct drm_output *output)
13764+{
13765+ struct drm_device *dev = output->dev;
13766+ struct intel_output *intel_output = output->driver_private;
13767+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13768+ struct edid *edid;
13769+
13770+ /* Try reading DDC from the adapter */
13771+ edid = (struct edid *)drm_ddc_read(&intel_output->ddc_bus->adapter);
13772+
13773+ if (!edid) {
13774+ DRM_INFO("%s: no EDID data from device, reading ACPI _DDC data.\n",
13775+ output->name);
13776+ edid = kzalloc(sizeof(struct edid), GFP_KERNEL);
13777+ if (edid)
+ drm_get_acpi_edid(ACPI_EDID_LCD, (char *)edid, 128);
13778+ }
13779+
13780+ if (edid)
13781+ drm_add_edid_modes(output, edid);
13782+
13783+ /* Didn't get an EDID */
13784+ if (!output->monitor_info) {
13785+ struct drm_display_info *dspinfo;
13786+ dspinfo = kzalloc(sizeof(*output->monitor_info), GFP_KERNEL);
13787+ if (!dspinfo)
13788+ goto out;
13789+
13790+ /* Set wide sync ranges so we get all modes
13791+ * handed to valid_mode for checking
13792+ */
13793+ dspinfo->min_vfreq = 0;
13794+ dspinfo->max_vfreq = 200;
13795+ dspinfo->min_hfreq = 0;
13796+ dspinfo->max_hfreq = 200;
13797+ output->monitor_info = dspinfo;
13798+ }
13799+
13800+out:
13801+ if (dev_priv->panel_fixed_mode != NULL) {
13802+ struct drm_display_mode *mode =
13803+ drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
13804+ drm_mode_probed_add(output, mode);
13805+ return 1;
13806+ }
13807+
13808+ return 0;
13809+}
13810+
13811+/* added by alek du to add /sys/class/backlight interface */
13812+static int update_bl_status(struct backlight_device *bd)
13813+{
13814+ int value = bd->props.brightness;
13815+
13816+ struct drm_device *dev = bl_get_data(bd);
13817+
13818+ lvds_backlight = value;
13819+ intel_lvds_set_backlight(dev, value);
13820+ /*value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
13821+ intel_lvds_set_power(dev,value);*/
13822+ return 0;
13823+}
13824+
13825+static int read_brightness(struct backlight_device *bd)
13826+{
13827+ return bd->props.brightness;
13828+}
13829+
13830+static struct backlight_device *psbbl_device = NULL;
13831+static struct backlight_ops psbbl_ops = {
13832+ .get_brightness = read_brightness,
13833+ .update_status = update_bl_status,
13834+};
13835+
13836+/**
13837+ * intel_lvds_destroy - unregister and free LVDS structures
13838+ * @output: output to free
13839+ *
13840+ * Unregister the DDC bus for this output then free the driver private
13841+ * structure.
13842+ */
13843+static void intel_lvds_destroy(struct drm_output *output)
13844+{
13845+ struct intel_output *intel_output = output->driver_private;
13846+
13847+ if (psbbl_device){
13848+ backlight_device_unregister(psbbl_device);
13849+ }
13850+ if(dev_OpRegion != NULL)
13851+ iounmap(dev_OpRegion);
13852+ intel_i2c_destroy(intel_output->ddc_bus);
13853+ intel_i2c_destroy(lvds_i2c_bus);
13854+ kfree(output->driver_private);
13855+}
13856+
13857+static const struct drm_output_funcs intel_lvds_output_funcs = {
13858+ .dpms = intel_lvds_dpms,
13859+ .save = intel_lvds_save,
13860+ .restore = intel_lvds_restore,
13861+ .mode_valid = intel_lvds_mode_valid,
13862+ .mode_fixup = intel_lvds_mode_fixup,
13863+ .prepare = intel_lvds_prepare,
13864+ .mode_set = intel_lvds_mode_set,
13865+ .commit = intel_lvds_commit,
13866+ .detect = intel_lvds_detect,
13867+ .get_modes = intel_lvds_get_modes,
13868+ .cleanup = intel_lvds_destroy
13869+};
13870+
13871+int intel_get_acpi_dod(char *method)
13872+{
13873+ int status;
13874+ int found = 0;
13875+ int i;
13876+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
13877+ union acpi_object *dod = NULL;
13878+ union acpi_object *obj;
13879+
13880+ status = acpi_evaluate_object(NULL, method, NULL, &buffer);
13881+ if (ACPI_FAILURE(status))
13882+ return -ENODEV;
13883+
13884+ dod = buffer.pointer;
13885+ if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
13886+ status = -EFAULT;
13887+ goto out;
13888+ }
13889+
13890+ DRM_DEBUG("Found %d video heads in _DOD\n", dod->package.count);
13891+
13892+ for (i = 0; i < dod->package.count; i++) {
13893+ obj = &dod->package.elements[i];
13894+
13895+ if (obj->type != ACPI_TYPE_INTEGER) {
13896+ DRM_DEBUG("Invalid _DOD data\n");
13897+ } else {
13898+ DRM_DEBUG("dod element[%d] = 0x%x\n", i,
13899+ (int)obj->integer.value);
13900+
13901+ /* look for an LVDS type */
13902+ if (obj->integer.value & 0x00000400)
13903+ found = 1;
13904+ }
13905+ }
13906+ out:
13907+ kfree(buffer.pointer);
13908+ return found;
13909+}
13910+/**
13911+ * intel_lvds_init - setup LVDS outputs on this device
13912+ * @dev: drm device
13913+ *
13914+ * Create the output, register the LVDS DDC bus, and try to figure out what
13915+ * modes we can display on the LVDS panel (if present).
13916+ */
13917+void intel_lvds_init(struct drm_device *dev)
13918+{
13919+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13920+ struct drm_output *output;
13921+ struct intel_output *intel_output;
13922+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
13923+ struct drm_crtc *crtc;
13924+ u32 lvds;
13925+ int pipe;
13926+
13927+ if (!drm_intel_ignore_acpi && !intel_get_acpi_dod(ACPI_DOD))
13928+ return;
13929+
13930+ output = drm_output_create(dev, &intel_lvds_output_funcs, "LVDS");
13931+ if (!output)
13932+ return;
13933+
13934+ intel_output = kmalloc(sizeof(struct intel_output), GFP_KERNEL);
13935+ if (!intel_output) {
13936+ drm_output_destroy(output);
13937+ return;
13938+ }
13939+
13940+ intel_output->type = INTEL_OUTPUT_LVDS;
13941+ output->driver_private = intel_output;
13942+ output->subpixel_order = SubPixelHorizontalRGB;
13943+ output->interlace_allowed = FALSE;
13944+ output->doublescan_allowed = FALSE;
13945+
13946+ /* initialize the I2C bus and BLC data */
13947+ lvds_i2c_bus = intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
13948+ if (!lvds_i2c_bus) {
13949+ dev_printk(KERN_ERR, &dev->pdev->dev, "i2c bus registration "
13950+ "failed.\n");
13951+ return;
13952+ }
13953+ lvds_i2c_bus->slave_addr = 0x2c; /* 0x58 */
13954+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
13955+ blc_type = 0;
13956+ blc_pol = 0;
13957+
13958+ if (1) { /* get the BLC init data from the VBT */
13959+ u32 OpRegion_Phys;
13960+ unsigned int OpRegion_Size = 0x100;
13961+ OpRegionPtr OpRegion;
13962+ char *OpRegion_String = "IntelGraphicsMem";
13963+
13964+ struct vbt_header *vbt;
13965+ struct bdb_header *bdb;
13966+ int vbt_off, bdb_off, bdb_block_off, block_size;
13967+ int panel_type = -1;
13968+ unsigned char *bios;
13969+ unsigned char *vbt_buf;
13970+
13971+ pci_read_config_dword(dev->pdev, 0xFC, &OpRegion_Phys);
13972+
13973+ //dev_OpRegion = phys_to_virt(OpRegion_Phys);
13974+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_Size);
13975+ dev_OpRegionSize = OpRegion_Size;
13976+
13977+ OpRegion = (OpRegionPtr) dev_OpRegion;
13978+
13979+ if (!memcmp(OpRegion->sign, OpRegion_String, 16)) {
13980+ unsigned int OpRegion_NewSize;
13981+
13982+ OpRegion_NewSize = OpRegion->size * 1024;
13983+
13984+ dev_OpRegionSize = OpRegion_NewSize;
13985+
13986+ iounmap(dev_OpRegion);
13987+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_NewSize);
13988+ } else {
13989+ iounmap(dev_OpRegion);
13990+ dev_OpRegion = NULL;
13991+ }
13992+
13993+ if((dev_OpRegion != NULL)&&(dev_OpRegionSize >= OFFSET_OPREGION_VBT)) {
13994+ DRM_INFO("intel_lvds_init: OpRegion has the VBT address\n");
13995+ vbt_buf = dev_OpRegion + OFFSET_OPREGION_VBT;
13996+ vbt = (struct vbt_header *)(dev_OpRegion + OFFSET_OPREGION_VBT);
13997+ } else {
13998+ DRM_INFO("intel_lvds_init: No OpRegion, use the bios at fixed address 0xc0000\n");
13999+ bios = phys_to_virt(0xC0000);
14000+ if(*((u16 *)bios) != 0xAA55){
14001+ bios = NULL;
14002+ DRM_ERROR("the bios is incorrect\n");
14003+ goto blc_out;
14004+ }
14005+ vbt_off = bios[0x1a] | (bios[0x1a + 1] << 8);
14006+ DRM_INFO("intel_lvds_init: the vbt off is %x\n", vbt_off);
14007+ vbt_buf = bios + vbt_off;
14008+ vbt = (struct vbt_header *)(bios + vbt_off);
14009+ }
14010+
14011+ bdb_off = vbt->bdb_offset;
14012+ bdb = (struct bdb_header *)(vbt_buf + bdb_off);
14013+
14014+ DRM_INFO("intel_lvds_init: The bdb->signature is %s, the bdb_off is %d\n",bdb->signature, bdb_off);
14015+
14016+ if (memcmp(bdb->signature, "BIOS_DATA_BLOCK ", 16) != 0) {
14017+ DRM_ERROR("the vbt is error\n");
14018+ goto blc_out;
14019+ }
14020+
14021+ for (bdb_block_off = bdb->header_size; bdb_block_off < bdb->bdb_size;
14022+ bdb_block_off += block_size) {
14023+ int start = bdb_off + bdb_block_off;
14024+ int id, num_entries;
14025+ struct lvds_bdb_1 *lvds1;
14026+ struct lvds_blc *lvdsblc;
14027+ struct lvds_bdb_blc *bdbblc;
14028+
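+ /* Each BDB block starts with a 1-byte id and a 2-byte little-endian
+ * payload size; block_size below includes this 3-byte header.
+ */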
14029+ id = vbt_buf[start];
14030+ block_size = (vbt_buf[start + 1] | (vbt_buf[start + 2] << 8)) + 3;
14031+ switch (id) {
14032+ case 40:
14033+ lvds1 = (struct lvds_bdb_1 *)(vbt_buf+ start);
14034+ panel_type = lvds1->panel_type;
14035+ //if (lvds1->caps & LVDS_CAP_DITHER)
14036+ // *panelWantsDither = TRUE;
14037+ break;
14038+
14039+ case 43:
14040+ bdbblc = (struct lvds_bdb_blc *)(vbt_buf + start);
14041+ num_entries = bdbblc->table_size ? (bdbblc->size -
14042+ sizeof(bdbblc->table_size)) / bdbblc->table_size : 0;
14043+ if (num_entries > 0 && bdbblc->table_size == sizeof(struct lvds_blc)) {
14044+ lvdsblc = (struct lvds_blc *)(vbt_buf + start + sizeof(struct lvds_bdb_blc));
14045+ lvdsblc += panel_type;
14046+ blc_type = lvdsblc->type;
14047+ blc_pol = lvdsblc->pol;
14048+ blc_freq = lvdsblc->freq;
14049+ blc_minbrightness = lvdsblc->minbrightness;
14050+ blc_i2caddr = lvdsblc->i2caddr;
14051+ blc_brightnesscmd = lvdsblc->brightnesscmd;
14052+ DRM_INFO("intel_lvds_init: BLC Data in BIOS VBT tables: datasize=%d paneltype=%d \
14053+ type=0x%02x pol=0x%02x freq=0x%04x minlevel=0x%02x \
14054+ i2caddr=0x%02x cmd=0x%02x \n",
14055+ 0,
14056+ panel_type,
14057+ lvdsblc->type,
14058+ lvdsblc->pol,
14059+ lvdsblc->freq,
14060+ lvdsblc->minbrightness,
14061+ lvdsblc->i2caddr,
14062+ lvdsblc->brightnesscmd);
14063+ }
14064+ break;
14065+ }
14066+ }
14067+
14068+ }
14069+
14070+ if (1) {
14071+ /* get the core clock for calculating the max PWM value,
14072+ * and check whether MaxResEnableInt is set */
14073+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
14074+ u32 clock;
14075+ u32 sku_value = 0;
14076+ unsigned int CoreClocks[] = {
14077+ 100,
14078+ 133,
14079+ 150,
14080+ 178,
14081+ 200,
14082+ 266,
14083+ 266,
14084+ 266
14085+ };
14086+ if(pci_root)
14087+ {
14088+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
14089+ pci_read_config_dword(pci_root, 0xD4, &clock);
14090+ CoreClock = CoreClocks[clock & 0x07];
14091+ DRM_INFO("intel_lvds_init: the CoreClock is %d\n", CoreClock);
14092+
14093+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
14094+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
14095+ sku_bMaxResEnableInt = (sku_value & PCI_PORT5_REG80_MAXRES_INT_EN)? true : false;
14096+ DRM_INFO("intel_lvds_init: sku_value is 0x%08x\n", sku_value);
14097+ DRM_INFO("intel_lvds_init: sku_bMaxResEnableInt is %d\n", sku_bMaxResEnableInt);
14098+ }
14099+ }
14100+
14101+ if ((blc_type == BLC_I2C_TYPE) || (blc_type == BLC_PWM_TYPE)){
14102+ /* add /sys/class/backlight interface as standard */
14103+ psbbl_device = backlight_device_register("psblvds", &dev->pdev->dev, dev, &psbbl_ops);
14104+ if (psbbl_device){
14105+ psbbl_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
14106+ psbbl_device->props.brightness = lvds_backlight;
14107+ psbbl_device->props.power = FB_BLANK_UNBLANK;
14108+ backlight_update_status(psbbl_device);
14109+ }
14110+ }
14111+
14112+blc_out:
14113+
14114+ /* Set up the DDC bus. */
14115+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
14116+ if (!intel_output->ddc_bus) {
14117+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
14118+ "failed.\n");
14119+ intel_i2c_destroy(lvds_i2c_bus);
14120+ return;
14121+ }
14122+
14123+ /*
14124+ * Attempt to get the fixed panel mode from DDC. Assume that the
14125+ * preferred mode is the right one.
14126+ */
14127+ intel_lvds_get_modes(output);
14128+
14129+ list_for_each_entry(scan, &output->probed_modes, head) {
14130+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
14131+ dev_priv->panel_fixed_mode =
14132+ drm_mode_duplicate(dev, scan);
14133+ goto out; /* FIXME: check for quirks */
14134+ }
14135+ }
14136+
14137+ /*
14138+ * If we didn't get EDID, try checking if the panel is already turned
14139+ * on. If so, assume that whatever is currently programmed is the
14140+ * correct mode.
14141+ */
14142+ lvds = I915_READ(LVDS);
14143+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
14144+ crtc = intel_get_crtc_from_pipe(dev, pipe);
14145+
14146+ if (crtc && (lvds & LVDS_PORT_EN)) {
14147+ dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
14148+ if (dev_priv->panel_fixed_mode) {
14149+ dev_priv->panel_fixed_mode->type |=
14150+ DRM_MODE_TYPE_PREFERRED;
14151+ goto out; /* FIXME: check for quirks */
14152+ }
14153+ }
14154+
14155+ /* If we still don't have a mode after all that, give up. */
14156+ if (!dev_priv->panel_fixed_mode)
14157+ goto failed;
14158+
14159+ /* FIXME: probe the BIOS for modes and check for LVDS quirks */
14160+#if 0
14161+ /* Get the LVDS fixed mode out of the BIOS. We should support LVDS
14162+ * with the BIOS being unavailable or broken, but lack the
14163+ * configuration options for now.
14164+ */
14165+ bios_mode = intel_bios_get_panel_mode(pScrn);
14166+ if (bios_mode != NULL) {
14167+ if (dev_priv->panel_fixed_mode != NULL) {
14168+ if (dev_priv->debug_modes &&
14169+ !xf86ModesEqual(dev_priv->panel_fixed_mode,
14170+ bios_mode))
14171+ {
14172+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
14173+ "BIOS panel mode data doesn't match probed data, "
14174+ "continuing with probed.\n");
14175+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, "BIOS mode:\n");
14176+ xf86PrintModeline(pScrn->scrnIndex, bios_mode);
14177+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, "probed mode:\n");
14178+ xf86PrintModeline(pScrn->scrnIndex, dev_priv->panel_fixed_mode);
14179+ xfree(bios_mode->name);
14180+ xfree(bios_mode);
14181+ }
14182+ } else {
14183+ dev_priv->panel_fixed_mode = bios_mode;
14184+ }
14185+ } else {
14186+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
14187+ "Couldn't detect panel mode. Disabling panel\n");
14188+ goto disable_exit;
14189+ }
14190+
14191+ /*
14192+ * Blacklist machines with BIOSes that list an LVDS panel without
14193+ * actually having one.
14194+ */
14195+ if (dev_priv->PciInfo->chipType == PCI_CHIP_I945_GM) {
14196+ /* aopen mini pc */
14197+ if (dev_priv->PciInfo->subsysVendor == 0xa0a0)
14198+ goto disable_exit;
14199+
14200+ if ((dev_priv->PciInfo->subsysVendor == 0x8086) &&
14201+ (dev_priv->PciInfo->subsysCard == 0x7270)) {
14202+ /* It's a Mac Mini or Macbook Pro.
14203+ *
14204+ * Apple hardware is out to get us. The macbook pro
14205+ * has a real LVDS panel, but the mac mini does not,
14206+ * and they have the same device IDs. We'll
14207+ * distinguish by panel size, on the assumption
14208+ * that Apple isn't about to make any machines with an
14209+ * 800x600 display.
14210+ */
14211+
14212+ if (dev_priv->panel_fixed_mode != NULL &&
14213+ dev_priv->panel_fixed_mode->HDisplay == 800 &&
14214+ dev_priv->panel_fixed_mode->VDisplay == 600)
14215+ {
14216+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
14217+ "Suspected Mac Mini, ignoring the LVDS\n");
14218+ goto disable_exit;
14219+ }
14220+ }
14221+ }
14222+
14223+#endif
14224+
14225+out:
14226+ return;
14227+
14228+failed:
14229+ DRM_DEBUG("No LVDS modes found, disabling.\n");
14230+ drm_output_destroy(output); /* calls intel_lvds_destroy above */
14231+}
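For context, backlight_device_register() above expects a struct backlight_ops (from <linux/backlight.h>); the real psbbl_ops is defined elsewhere in this patch, but its shape is roughly the following sketch (handler bodies are placeholders):

	static int psb_bl_update_status(struct backlight_device *bd)
	{
		/* clamp bd->props.brightness and program the PWM or I2C
		 * path selected by blc_type (placeholder body) */
		return 0;
	}

	static int psb_bl_get_brightness(struct backlight_device *bd)
	{
		return bd->props.brightness;
	}

	static struct backlight_ops psbbl_ops_sketch = {
		.update_status	= psb_bl_update_status,
		.get_brightness	= psb_bl_get_brightness,
	};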
14232Index: linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.h
14233===================================================================
14234--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14235+++ linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.h 2009-02-05 13:29:33.000000000 +0000
14236@@ -0,0 +1,174 @@
14237+/*
14238+ * Copyright © 2006-2007 Intel Corporation
14239+ *
14240+ * Permission is hereby granted, free of charge, to any person obtaining a
14241+ * copy of this software and associated documentation files (the "Software"),
14242+ * to deal in the Software without restriction, including without limitation
14243+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14244+ * and/or sell copies of the Software, and to permit persons to whom the
14245+ * Software is furnished to do so, subject to the following conditions:
14246+ *
14247+ * The above copyright notice and this permission notice (including the next
14248+ * paragraph) shall be included in all copies or substantial portions of the
14249+ * Software.
14250+ *
14251+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14252+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14253+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
14254+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
14255+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
14256+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
14257+ * DEALINGS IN THE SOFTWARE.
14258+ *
14259+ */
14260+
14261+/**
14262+ * @file LVDS definitions and structures.
14263+ */
14264+
14265+#define BLC_I2C_TYPE 0x01
14266+#define BLC_PWM_TYPE 0x02
14267+#define BRIGHTNESS_MASK 0xff
14268+#define BRIGHTNESS_MAX_LEVEL 100
14269+#define BLC_POLARITY_NORMAL 0
14270+#define BLC_POLARITY_INVERSE 1
14271+#define BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xfffe)
14272+#define BACKLIGHT_PWM_CTL_SHIFT (16)
14273+#define BLC_MAX_PWM_REG_FREQ 0xfffe
14274+#define BLC_MIN_PWM_REG_FREQ 0x2
14275+#define BLC_PWM_LEGACY_MODE_ENABLE 0x0001
14276+#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
14277+#define BLC_PWM_FREQ_CALC_CONSTANT 32
14278+#define MHz 1000000
14279+#define OFFSET_OPREGION_VBT 0x400
14280+
14281+typedef struct OpRegion_Header
14282+{
14283+ char sign[16];
14284+ u32 size;
14285+ u32 over;
14286+ char sver[32];
14287+ char vver[16];
14288+ char gver[16];
14289+ u32 mbox;
14290+ char rhd1[164];
14291+} OpRegionRec, *OpRegionPtr;
14292+
14293+struct vbt_header
14294+{
14295+ char signature[20]; /**< Always starts with "$VBT" */
14296+ u16 version; /**< decimal */
14297+ u16 header_size; /**< in bytes */
14298+ u16 vbt_size; /**< in bytes */
14299+ u8 vbt_checksum;
14300+ u8 reserved0;
14301+ u32 bdb_offset; /**< from beginning of VBT */
14302+ u32 aim1_offset; /**< from beginning of VBT */
14303+ u32 aim2_offset; /**< from beginning of VBT */
14304+ u32 aim3_offset; /**< from beginning of VBT */
14305+ u32 aim4_offset; /**< from beginning of VBT */
14306+} __attribute__ ((packed));
14307+
14308+struct bdb_header
14309+{
14310+ char signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
14311+ u16 version; /**< decimal */
14312+ u16 header_size; /**< in bytes */
14313+ u16 bdb_size; /**< in bytes */
14314+} __attribute__ ((packed));
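Chaining the two headers: a short sketch of locating the BDB from a mapped VBT, matching the parse in intel_lvds_init above (variable names are illustrative):

	const u8 *vbt_base = bios + vbt_off;	/* vbt_off as derived above */
	const struct vbt_header *vbt = (const struct vbt_header *)vbt_base;
	const struct bdb_header *bdb =
		(const struct bdb_header *)(vbt_base + vbt->bdb_offset);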
14315+
14316+#define LVDS_CAP_EDID (1 << 6)
14317+#define LVDS_CAP_DITHER (1 << 5)
14318+#define LVDS_CAP_PFIT_AUTO_RATIO (1 << 4)
14319+#define LVDS_CAP_PFIT_GRAPHICS_MODE (1 << 3)
14320+#define LVDS_CAP_PFIT_TEXT_MODE (1 << 2)
14321+#define LVDS_CAP_PFIT_GRAPHICS (1 << 1)
14322+#define LVDS_CAP_PFIT_TEXT (1 << 0)
14323+struct lvds_bdb_1
14324+{
14325+ u8 id; /**< 40 */
14326+ u16 size;
14327+ u8 panel_type;
14328+ u8 reserved0;
14329+ u16 caps;
14330+} __attribute__ ((packed));
14331+
14332+struct lvds_bdb_2_fp_params
14333+{
14334+ u16 x_res;
14335+ u16 y_res;
14336+ u32 lvds_reg;
14337+ u32 lvds_reg_val;
14338+ u32 pp_on_reg;
14339+ u32 pp_on_reg_val;
14340+ u32 pp_off_reg;
14341+ u32 pp_off_reg_val;
14342+ u32 pp_cycle_reg;
14343+ u32 pp_cycle_reg_val;
14344+ u32 pfit_reg;
14345+ u32 pfit_reg_val;
14346+ u16 terminator;
14347+} __attribute__ ((packed));
14348+
14349+struct lvds_bdb_2_fp_edid_dtd
14350+{
14351+ u16 dclk; /**< In 10khz */
14352+ u8 hactive;
14353+ u8 hblank;
14354+ u8 high_h; /**< 7:4 = hactive 11:8, 3:0 = hblank 11:8 */
14355+ u8 vactive;
14356+ u8 vblank;
14357+ u8 high_v; /**< 7:4 = vactive 11:8, 3:0 = vblank 11:8 */
14358+ u8 hsync_off;
14359+ u8 hsync_pulse_width;
14360+ u8 vsync_off;
14361+ u8 high_hsync_off; /**< 7:6 = hsync off 9:8 */
14362+ u8 h_image;
14363+ u8 v_image;
14364+ u8 max_hv;
14365+ u8 h_border;
14366+ u8 v_border;
14367+ u8 flags;
14368+#define FP_EDID_FLAG_VSYNC_POSITIVE (1 << 2)
14369+#define FP_EDID_FLAG_HSYNC_POSITIVE (1 << 1)
14370+} __attribute__ ((packed));
14371+
14372+struct lvds_bdb_2_entry
14373+{
14374+ u16 fp_params_offset; /**< From beginning of BDB */
14375+ u8 fp_params_size;
14376+ u16 fp_edid_dtd_offset;
14377+ u8 fp_edid_dtd_size;
14378+ u16 fp_edid_pid_offset;
14379+ u8 fp_edid_pid_size;
14380+} __attribute__ ((packed));
14381+
14382+struct lvds_bdb_2
14383+{
14384+ u8 id; /**< 41 */
14385+ u16 size;
14386+ u8 table_size; /* not sure on this one */
14387+ struct lvds_bdb_2_entry panels[16];
14388+} __attribute__ ((packed));
14389+
14390+
14391+struct lvds_bdb_blc
14392+{
14393+ u8 id; /**< 43 */
14394+ u16 size;
14395+ u8 table_size;
14396+} __attribute__ ((packed));
14397+
14398+struct lvds_blc
14399+{
14400+ u8 type:2;
14401+ u8 pol:1;
14402+ u8 gpio:3;
14403+ u8 gmbus:2;
14404+ u16 freq;
14405+ u8 minbrightness;
14406+ u8 i2caddr;
14407+ u8 brightnesscmd;
14408+ /* more... */
14409+} __attribute__ ((packed));
14410+
14411Index: linux-2.6.27/drivers/gpu/drm/psb/intel_modes.c
14412===================================================================
14413--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14414+++ linux-2.6.27/drivers/gpu/drm/psb/intel_modes.c 2009-02-05 13:29:33.000000000 +0000
14415@@ -0,0 +1,60 @@
14416+/*
14417+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
14418+ * Copyright (c) 2007 Intel Corporation
14419+ * Jesse Barnes <jesse.barnes@intel.com>
14420+ */
14421+
14422+#include <linux/i2c.h>
14423+#include <linux/fb.h>
14424+
14425+/**
14426+ * intel_ddc_probe - check for a DDC responder at the EDID address (0x50)
14427+ *
14428+ */
14429+bool intel_ddc_probe(struct drm_output *output)
14430+{
14431+ struct intel_output *intel_output = output->driver_private;
14432+ u8 out_buf[] = { 0x0, 0x0};
14433+ u8 buf[2];
14434+ int ret;
14435+ struct i2c_msg msgs[] = {
14436+ {
14437+ .addr = 0x50,
14438+ .flags = 0,
14439+ .len = 1,
14440+ .buf = out_buf,
14441+ },
14442+ {
14443+ .addr = 0x50,
14444+ .flags = I2C_M_RD,
14445+ .len = 1,
14446+ .buf = buf,
14447+ }
14448+ };
14449+
14450+ ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
14451+ if (ret == 2)
14452+ return true;
14453+
14454+ return false;
14455+}
14456+
14457+/**
14458+ * intel_ddc_get_modes - get modelist from monitor
14459+ * @output: DRM output device to use
14460+ *
14461+ * Fetch the EDID information from @output using the DDC bus.
14462+ */
14463+int intel_ddc_get_modes(struct drm_output *output)
14464+{
14465+ struct intel_output *intel_output = output->driver_private;
14466+ struct edid *edid;
14467+ int ret = 0;
14468+
14469+ edid = drm_get_edid(output, &intel_output->ddc_bus->adapter);
14470+ if (edid) {
14471+ ret = drm_add_edid_modes(output, edid);
14472+ kfree(edid);
14473+ }
14474+ return ret;
14475+}
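Typical use of the two helpers from an output's get_modes path, as a sketch (example_get_modes is a hypothetical caller):

	static int example_get_modes(struct drm_output *output)
	{
		/* anything ACKing at the EDID address 0x50? */
		if (!intel_ddc_probe(output))
			return 0;
		/* fetch and parse the EDID into the output's mode list */
		return intel_ddc_get_modes(output);
	}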
14476Index: linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo.c
14477===================================================================
14478--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14479+++ linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo.c 2009-02-05 13:29:33.000000000 +0000
14480@@ -0,0 +1,3973 @@
14481+/*
14482+ * Copyright © 2006-2007 Intel Corporation
14483+ *
14484+ * Permission is hereby granted, free of charge, to any person obtaining a
14485+ * copy of this software and associated documentation files (the "Software"),
14486+ * to deal in the Software without restriction, including without limitation
14487+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14488+ * and/or sell copies of the Software, and to permit persons to whom the
14489+ * Software is furnished to do so, subject to the following conditions:
14490+ *
14491+ * The above copyright notice and this permission notice (including the next
14492+ * paragraph) shall be included in all copies or substantial portions of the
14493+ * Software.
14494+ *
14495+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14496+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14497+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
14498+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
14499+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
14500+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
14501+ * DEALINGS IN THE SOFTWARE.
14502+ *
14503+ * Authors:
14504+ * Eric Anholt <eric@anholt.net>
14505+ */
14506+/*
14507+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
14508+ * Jesse Barnes <jesse.barnes@intel.com>
14509+ */
14510+
14511+#include <linux/i2c.h>
14512+#include <linux/delay.h>
14513+#include "drm_crtc.h"
14514+#include "intel_sdvo_regs.h"
14515+
14516+#define MAX_VAL 1000
14517+#define DPLL_CLOCK_PHASE_9 (1<<9 | 1<<12)
14518+
14519+#define PCI_PORT5_REG80_FFUSE 0xD0058000
14520+#define PCI_PORT5_REG80_SDVO_DISABLE 0x0020
14521+
14522+#define SII_1392_WA
14523+#ifdef SII_1392_WA
14524+int SII_1392=0;
14525+extern int drm_psb_no_fb;
14526+#endif
14527+
14528+typedef struct _EXTVDATA
14529+{
14530+ u32 Value;
14531+ u32 Default;
14532+ u32 Min;
14533+ u32 Max;
14534+ u32 Step; // arbitrary unit (e.g. pixel, percent) returned during VP_COMMAND_GET
14535+} EXTVDATA, *PEXTVDATA;
14536+
14537+typedef struct _sdvo_display_params
14538+{
14539+ EXTVDATA FlickerFilter; /* Flicker Filter : for TV only */
14540+ EXTVDATA AdaptiveFF; /* Adaptive Flicker Filter : for TV only */
14541+ EXTVDATA TwoD_FlickerFilter; /* 2D Flicker Filter : for TV only */
14542+ EXTVDATA Brightness; /* Brightness : for TV & CRT only */
14543+ EXTVDATA Contrast; /* Contrast : for TV & CRT only */
14544+ EXTVDATA PositionX; /* Horizontal Position : for all devices */
14545+ EXTVDATA PositionY; /* Vertical Position : for all devices */
14546+ /*EXTVDATA OverScanX; Horizontal Overscan : for TV only */
14547+ EXTVDATA DotCrawl; /* Dot crawl value : for TV only */
14548+ EXTVDATA ChromaFilter; /* Chroma Filter : for TV only */
14549+ /* EXTVDATA OverScanY; Vertical Overscan : for TV only */
14550+ EXTVDATA LumaFilter; /* Luma Filter : for TV only */
14551+ EXTVDATA Sharpness; /* Sharpness : for TV & CRT only */
14552+ EXTVDATA Saturation; /* Saturation : for TV & CRT only */
14553+ EXTVDATA Hue; /* Hue : for TV & CRT only */
14554+ EXTVDATA Dither; /* Dither : for LVDS only */
14555+} sdvo_display_params;
14556+
14557+typedef enum _SDVO_PICTURE_ASPECT_RATIO_T
14558+{
14559+ UAIM_PAR_NO_DATA = 0x00000000,
14560+ UAIM_PAR_4_3 = 0x00000100,
14561+ UAIM_PAR_16_9 = 0x00000200,
14562+ UAIM_PAR_FUTURE = 0x00000300,
14563+ UAIM_PAR_MASK = 0x00000300,
14564+} SDVO_PICTURE_ASPECT_RATIO_T;
14565+
14566+typedef enum _SDVO_FORMAT_ASPECT_RATIO_T
14567+{
14568+ UAIM_FAR_NO_DATA = 0x00000000,
14569+ UAIM_FAR_SAME_AS_PAR = 0x00002000,
14570+ UAIM_FAR_4_BY_3_CENTER = 0x00002400,
14571+ UAIM_FAR_16_BY_9_CENTER = 0x00002800,
14572+ UAIM_FAR_14_BY_9_CENTER = 0x00002C00,
14573+ UAIM_FAR_16_BY_9_LETTERBOX_TOP = 0x00000800,
14574+ UAIM_FAR_14_BY_9_LETTERBOX_TOP = 0x00000C00,
14575+ UAIM_FAR_GT_16_BY_9_LETTERBOX_CENTER = 0x00002000,
14576+ UAIM_FAR_4_BY_3_SNP_14_BY_9_CENTER = 0x00003400, /* With shoot and protect 14:9 center */
14577+ UAIM_FAR_16_BY_9_SNP_14_BY_9_CENTER = 0x00003800, /* With shoot and protect 14:9 center */
14578+ UAIM_FAR_16_BY_9_SNP_4_BY_3_CENTER = 0x00003C00, /* With shoot and protect 4:3 center */
14579+ UAIM_FAR_MASK = 0x00003C00,
14580+} SDVO_FORMAT_ASPECT_RATIO_T;
14581+
14582+// TV image aspect ratio
14583+typedef enum _CP_IMAGE_ASPECT_RATIO
14584+{
14585+ CP_ASPECT_RATIO_FF_4_BY_3 = 0,
14586+ CP_ASPECT_RATIO_14_BY_9_CENTER = 1,
14587+ CP_ASPECT_RATIO_14_BY_9_TOP = 2,
14588+ CP_ASPECT_RATIO_16_BY_9_CENTER = 3,
14589+ CP_ASPECT_RATIO_16_BY_9_TOP = 4,
14590+ CP_ASPECT_RATIO_GT_16_BY_9_CENTER = 5,
14591+ CP_ASPECT_RATIO_FF_4_BY_3_PROT_CENTER = 6,
14592+ CP_ASPECT_RATIO_FF_16_BY_9_ANAMORPHIC = 7,
14593+} CP_IMAGE_ASPECT_RATIO;
14594+
14595+typedef struct _SDVO_ANCILLARY_INFO_T
14596+{
14597+ CP_IMAGE_ASPECT_RATIO AspectRatio;
14598+ u32 RedistCtrlFlag; /* Redistribution control flag (get and set */
14599+} SDVO_ANCILLARY_INFO_T, *PSDVO_ANCILLARY_INFO_T;
14600+
14601+struct intel_sdvo_priv {
14602+ struct intel_i2c_chan *i2c_bus;
14603+ int slaveaddr;
14604+ int output_device;
14605+
14606+ u16 active_outputs;
14607+
14608+ struct intel_sdvo_caps caps;
14609+ int pixel_clock_min, pixel_clock_max;
14610+
14611+ int save_sdvo_mult;
14612+ u16 save_active_outputs;
14613+ struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
14614+ struct intel_sdvo_dtd save_output_dtd[16];
14615+ u32 save_SDVOX;
14616+ /**
14617+ * SDVO TV encoder support
14618+ */
14619+ u32 ActiveDevice; /* CRT, TV, LVDS, TMDS */
14620+ u32 TVStandard; /* PAL, NTSC */
14621+ int TVOutput; /* S-Video, CVBS, YPbPr, RGB */
14622+ int TVMode; /* SDTV/HDTV/SECAM mode */
14623+ u32 TVStdBitmask;
14624+ u32 dwSDVOHDTVBitMask;
14625+ u32 dwSDVOSDTVBitMask;
14626+ u8 byInputWiring;
14627+ bool bGetClk;
14628+ u32 dwMaxDotClk;
14629+ u32 dwMinDotClk;
14630+
14631+ u32 dwMaxInDotClk;
14632+ u32 dwMinInDotClk;
14633+
14634+ u32 dwMaxOutDotClk;
14635+ u32 dwMinOutDotClk;
14636+ u32 dwSupportedEnhancements;
14637+ EXTVDATA OverScanY; /* Vertical Overscan : for TV only */
14638+ EXTVDATA OverScanX; /* Horizontal Overscan : for TV only */
14639+ sdvo_display_params dispParams;
14640+ SDVO_ANCILLARY_INFO_T AncillaryInfo;
14641+};
14642+
14643+/* Define TV mode type */
14644+/* The full set is defined in xf86str.h */
14645+#define M_T_TV 0x80
14646+
14647+typedef struct _tv_mode_t
14648+{
14649+ /* the following data is detailed mode information as it would be passed to the hardware: */
14650+ struct drm_display_mode mode_entry;
14651+ u32 dwSupportedSDTVvss;
14652+ u32 dwSupportedHDTVvss;
14653+ bool m_preferred;
14654+ bool isTVMode;
14655+} tv_mode_t;
14656+
14657+static tv_mode_t tv_modes[] = {
14658+ {
14659+ .mode_entry =
14660+ {DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x2625a00 / 1000, 800, 840, 968, 1056, 0,
14661+ 600, 601,
14662+ 604, 628, 0, V_PHSYNC | V_PVSYNC)},
14663+ .dwSupportedSDTVvss = TVSTANDARD_SDTV_ALL,
14664+ .dwSupportedHDTVvss = TVSTANDARD_HDTV_ALL,
14665+ .m_preferred = TRUE,
14666+ .isTVMode = TRUE,
14667+ },
14668+ {
14669+ .mode_entry =
14670+ {DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x3dfd240 / 1000, 1024, 0x418, 0x49f, 0x540,
14671+ 0, 768,
14672+ 0x303, 0x308, 0x325, 0, V_PHSYNC | V_PVSYNC)},
14673+ .dwSupportedSDTVvss = TVSTANDARD_SDTV_ALL,
14674+ .dwSupportedHDTVvss = TVSTANDARD_HDTV_ALL,
14675+ .m_preferred = FALSE,
14676+ .isTVMode = TRUE,
14677+ },
14678+ {
14679+ .mode_entry =
14680+ {DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x1978ff0 / 1000, 720, 0x2e1, 0x326, 0x380, 0,
14681+ 480,
14682+ 0x1f0, 0x1e1, 0x1f1, 0, V_PHSYNC | V_PVSYNC)},
14683+ .dwSupportedSDTVvss =
14684+ TVSTANDARD_NTSC_M | TVSTANDARD_NTSC_M_J | TVSTANDARD_NTSC_433,
14685+ .dwSupportedHDTVvss = 0x0,
14686+ .m_preferred = FALSE,
14687+ .isTVMode = TRUE,
14688+ },
14689+ {
14690+ /*Modeline "720x576_SDVO" 0.96 720 756 788 864 576 616 618 700 +vsync */
14691+ .mode_entry =
14692+ {DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x1f25a20 / 1000, 720, 756, 788, 864, 0, 576,
14693+ 616,
14694+ 618, 700, 0, V_PHSYNC | V_PVSYNC)},
14695+ .dwSupportedSDTVvss =
14696+ (TVSTANDARD_PAL_B | TVSTANDARD_PAL_D | TVSTANDARD_PAL_H |
14697+ TVSTANDARD_PAL_I | TVSTANDARD_PAL_N | TVSTANDARD_SECAM_B |
14698+ TVSTANDARD_SECAM_D | TVSTANDARD_SECAM_G | TVSTANDARD_SECAM_H |
14699+ TVSTANDARD_SECAM_K | TVSTANDARD_SECAM_K1 | TVSTANDARD_SECAM_L |
14700+ TVSTANDARD_PAL_G | TVSTANDARD_SECAM_L1),
14701+ .dwSupportedHDTVvss = 0x0,
14702+ .m_preferred = FALSE,
14703+ .isTVMode = TRUE,
14704+ },
14705+ {
14706+ .mode_entry =
14707+ {DRM_MODE("1280x720@60",DRM_MODE_TYPE_DRIVER | M_T_TV, 74250000 / 1000, 1280, 1390, 1430, 1650, 0,
14708+ 720,
14709+ 725, 730, 750, 0, V_PHSYNC | V_PVSYNC)},
14710+ .dwSupportedSDTVvss = 0x0,
14711+ .dwSupportedHDTVvss = HDTV_SMPTE_296M_720p60,
14712+ .m_preferred = FALSE,
14713+ .isTVMode = TRUE,
14714+ },
14715+ {
14716+ .mode_entry =
14717+ {DRM_MODE("1280x720@50", DRM_MODE_TYPE_DRIVER | M_T_TV, 74250000 / 1000, 1280, 1720, 1759, 1980, 0,
14718+ 720,
14719+ 725, 730, 750, 0, V_PHSYNC | V_PVSYNC)},
14720+ .dwSupportedSDTVvss = 0x0,
14721+ .dwSupportedHDTVvss = HDTV_SMPTE_296M_720p50,
14722+ .m_preferred = FALSE,
14723+ .isTVMode = TRUE,
14724+ },
14725+ {
14726+ .mode_entry =
14727+ {DRM_MODE("1920x1080@60", DRM_MODE_TYPE_DRIVER | M_T_TV, 148500000 / 1000, 1920, 2008, 2051, 2200, 0,
14728+ 1080,
14729+ 1084, 1088, 1124, 0, V_PHSYNC | V_PVSYNC)},
14730+ .dwSupportedSDTVvss = 0x0,
14731+ .dwSupportedHDTVvss = HDTV_SMPTE_274M_1080i60,
14732+ .m_preferred = FALSE,
14733+ .isTVMode = TRUE,
14734+ },
14735+};
14736+
14737+#define NUM_TV_MODES (sizeof(tv_modes) / sizeof(tv_modes[0]))
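In-kernel code would normally spell this with the ARRAY_SIZE() helper from <linux/kernel.h>, which is equivalent but refuses to compile when handed a pointer instead of an array:

	#define NUM_TV_MODES ARRAY_SIZE(tv_modes)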
14738+
14739+typedef struct {
14740+ /* given values */
14741+ int n;
14742+ int m1, m2;
14743+ int p1, p2;
14744+ /* derived values */
14745+ int dot;
14746+ int vco;
14747+ int m;
14748+ int p;
14749+} ex_intel_clock_t;
14750+
14751+
14752+/**
14753+ * Writes the SDVOB or SDVOC with the given value, but always writes both
14754+ * SDVOB and SDVOC to work around apparent hardware issues (according to
14755+ * comments in the BIOS).
14756+ */
14757+static void intel_sdvo_write_sdvox(struct drm_output *output, u32 val)
14758+{
14759+ struct drm_device *dev = output->dev;
14760+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14761+ struct intel_output *intel_output = output->driver_private;
14762+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
14763+ u32 bval = val, cval = val;
14764+ int i;
14765+
14766+ if (sdvo_priv->output_device == SDVOB)
14767+ cval = I915_READ(SDVOC);
14768+ else
14769+ bval = I915_READ(SDVOB);
14770+ /*
14771+ * Write the registers twice for luck. Sometimes,
14772+ * writing them only once doesn't appear to 'stick'.
14773+ * The BIOS does this too. Yay, magic
14774+ */
14775+ for (i = 0; i < 2; i++)
14776+ {
14777+ I915_WRITE(SDVOB, bval);
14778+ I915_READ(SDVOB);
14779+ I915_WRITE(SDVOC, cval);
14780+ I915_READ(SDVOC);
14781+ }
14782+}
14783+
14784+static bool intel_sdvo_read_byte(struct drm_output *output, u8 addr,
14785+ u8 *ch)
14786+{
14787+ struct intel_output *intel_output = output->driver_private;
14788+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
14789+ u8 out_buf[2];
14790+ u8 buf[2];
14791+ int ret;
14792+
14793+ struct i2c_msg msgs[] = {
14794+ {
14795+ .addr = sdvo_priv->i2c_bus->slave_addr,
14796+ .flags = 0,
14797+ .len = 1,
14798+ .buf = out_buf,
14799+ },
14800+ {
14801+ .addr = sdvo_priv->i2c_bus->slave_addr,
14802+ .flags = I2C_M_RD,
14803+ .len = 1,
14804+ .buf = buf,
14805+ }
14806+ };
14807+
14808+ out_buf[0] = addr;
14809+ out_buf[1] = 0;
14810+
14811+ if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
14812+ {
14813+// DRM_DEBUG("got back from addr %02X = %02x\n", out_buf[0], buf[0]);
14814+ *ch = buf[0];
14815+ return true;
14816+ }
14817+
14818+ DRM_DEBUG("i2c transfer returned %d\n", ret);
14819+ return false;
14820+}
14821+
14822+
14823+#if 0
14824+static bool intel_sdvo_read_byte_quiet(struct drm_output *output, int addr,
14825+ u8 *ch)
14826+{
14827+ return true;
14828+
14829+}
14830+#endif
14831+
14832+static bool intel_sdvo_write_byte(struct drm_output *output, int addr,
14833+ u8 ch)
14834+{
14835+ struct intel_output *intel_output = output->driver_private;
14836+ u8 out_buf[2];
14837+ struct i2c_msg msgs[] = {
14838+ {
14839+ .addr = intel_output->i2c_bus->slave_addr,
14840+ .flags = 0,
14841+ .len = 2,
14842+ .buf = out_buf,
14843+ }
14844+ };
14845+
14846+ out_buf[0] = addr;
14847+ out_buf[1] = ch;
14848+
14849+ if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
14850+ {
14851+ return true;
14852+ }
14853+ return false;
14854+}
14855+
14856+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
14857+/** Mapping of command numbers to names, for debug output */
14858+static const struct _sdvo_cmd_name {
14859+ u8 cmd;
14860+ char *name;
14861+} sdvo_cmd_names[] = {
14862+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
14863+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
14864+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
14865+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
14866+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
14867+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
14868+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
14869+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
14870+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
14871+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
14872+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
14873+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
14874+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
14875+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
14876+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
14877+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
14878+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
14879+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
14880+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
14882+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
14883+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
14884+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
14885+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
14886+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
14887+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
14888+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
14889+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
14890+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
14891+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
14892+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
14893+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
14894+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
14895+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
14896+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
14897+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
14898+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
14899+};
14900+
14901+#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
14902+#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
14903+
14904+static void intel_sdvo_write_cmd(struct drm_output *output, u8 cmd,
14905+ void *args, int args_len)
14906+{
14907+ struct intel_output *intel_output = output->driver_private;
14908+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
14909+ int i;
14910+
14911+ if (drm_debug) {
14912+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
14913+ for (i = 0; i < args_len; i++)
14914+ printk("%02X ", ((u8 *)args)[i]);
14915+ for (; i < 8; i++)
14916+ printk(" ");
14917+ for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
14918+ if (cmd == sdvo_cmd_names[i].cmd) {
14919+ printk("(%s)", sdvo_cmd_names[i].name);
14920+ break;
14921+ }
14922+ }
14923+ if (i == sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
14924+ printk("(%02X)", cmd);
14925+ printk("\n");
14926+ }
14927+
14928+ for (i = 0; i < args_len; i++) {
14929+ intel_sdvo_write_byte(output, SDVO_I2C_ARG_0 - i, ((u8*)args)[i]);
14930+ }
14931+
14932+ intel_sdvo_write_byte(output, SDVO_I2C_OPCODE, cmd);
14933+}
14934+
14935+static const char *cmd_status_names[] = {
14936+ "Power on",
14937+ "Success",
14938+ "Not supported",
14939+ "Invalid arg",
14940+ "Pending",
14941+ "Target not specified",
14942+ "Scaling not supported"
14943+};
14944+
14945+static u8 intel_sdvo_read_response(struct drm_output *output, void *response,
14946+ int response_len)
14947+{
14948+ struct intel_output *intel_output = output->driver_private;
14949+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
14950+ int i;
14951+ u8 status;
14952+ u8 retry = 50;
14953+
14954+ while (retry--) {
14955+ /* Read the command response */
14956+ for (i = 0; i < response_len; i++) {
14957+ intel_sdvo_read_byte(output, SDVO_I2C_RETURN_0 + i,
14958+ &((u8 *)response)[i]);
14959+ }
14960+
14961+ /* read the return status */
14962+ intel_sdvo_read_byte(output, SDVO_I2C_CMD_STATUS, &status);
14963+
14964+ if (drm_debug) {
14965+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
14966+ for (i = 0; i < response_len; i++)
14967+ printk("%02X ", ((u8 *)response)[i]);
14968+ for (; i < 8; i++)
14969+ printk(" ");
14970+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
14971+ printk("(%s)", cmd_status_names[status]);
14972+ else
14973+ printk("(??? %d)", status);
14974+ printk("\n");
14975+ }
14976+
14977+ if (status != SDVO_CMD_STATUS_PENDING)
14978+ return status;
14979+
14980+ mdelay(50);
14981+ }
14982+
14983+ return status;
14984+}
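Note that with 50 retries and an mdelay(50) between them, this poll can busy-wait for up to 50 * 50 ms = 2.5 s per command while the status stays PENDING.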
14985+
14986+int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
14987+{
14988+ if (mode->clock >= 100000)
14989+ return 1;
14990+ else if (mode->clock >= 50000)
14991+ return 2;
14992+ else
14993+ return 4;
14994+}
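Worked examples of the rule above (clocks in kHz, as struct drm_display_mode stores them); the multiplier keeps the SDVO link clock, mode->clock * multiplier, at or above roughly 100 MHz:

	/* 162000 kHz (1600x1200@60) -> x1, link clock 162 MHz
	 *  65000 kHz (1024x768@60)  -> x2, link clock 130 MHz
	 *  25175 kHz (640x480@60)   -> x4, link clock ~101 MHz */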
14995+
14996+/**
14997+ * Don't check status code from this as it switches the bus back to the
14998+ * SDVO chips which defeats the purpose of doing a bus switch in the first
14999+ * place.
15000+ */
15001+void intel_sdvo_set_control_bus_switch(struct drm_output *output, u8 target)
15002+{
15003+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
15004+}
15005+
15006+static bool intel_sdvo_set_target_input(struct drm_output *output, bool target_0, bool target_1)
15007+{
15008+ struct intel_sdvo_set_target_input_args targets = {0};
15009+ u8 status;
15010+
15011+ if (target_0 && target_1)
15012+ return false;
15013+
15014+ if (target_1)
15015+ targets.target_1 = 1;
15016+
15017+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_INPUT, &targets,
15018+ sizeof(targets));
15019+
15020+ status = intel_sdvo_read_response(output, NULL, 0);
15021+
15022+ return (status == SDVO_CMD_STATUS_SUCCESS);
15023+}
15024+
15025+/**
15026+ * Return whether each input is trained.
15027+ *
15028+ * This function is making an assumption about the layout of the response,
15029+ * which should be checked against the docs.
15030+ */
15031+static bool intel_sdvo_get_trained_inputs(struct drm_output *output, bool *input_1, bool *input_2)
15032+{
15033+ struct intel_sdvo_get_trained_inputs_response response;
15034+ u8 status;
15035+
15036+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
15037+ status = intel_sdvo_read_response(output, &response, sizeof(response));
15038+ if (status != SDVO_CMD_STATUS_SUCCESS)
15039+ return false;
15040+
15041+ *input_1 = response.input0_trained;
15042+ *input_2 = response.input1_trained;
15043+ return true;
15044+}
15045+
15046+static bool intel_sdvo_get_active_outputs(struct drm_output *output,
15047+ u16 *outputs)
15048+{
15049+ u8 status;
15050+
15051+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
15052+ status = intel_sdvo_read_response(output, outputs, sizeof(*outputs));
15053+
15054+ return (status == SDVO_CMD_STATUS_SUCCESS);
15055+}
15056+
15057+static bool intel_sdvo_set_active_outputs(struct drm_output *output,
15058+ u16 outputs)
15059+{
15060+ u8 status;
15061+
15062+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
15063+ sizeof(outputs));
15064+ status = intel_sdvo_read_response(output, NULL, 0);
15065+ return (status == SDVO_CMD_STATUS_SUCCESS);
15066+}
15067+
15068+static bool intel_sdvo_set_encoder_power_state(struct drm_output *output,
15069+ int mode)
15070+{
15071+ u8 status, state = SDVO_ENCODER_STATE_ON;
15072+
15073+ switch (mode) {
15074+ case DPMSModeOn:
15075+ state = SDVO_ENCODER_STATE_ON;
15076+ break;
15077+ case DPMSModeStandby:
15078+ state = SDVO_ENCODER_STATE_STANDBY;
15079+ break;
15080+ case DPMSModeSuspend:
15081+ state = SDVO_ENCODER_STATE_SUSPEND;
15082+ break;
15083+ case DPMSModeOff:
15084+ state = SDVO_ENCODER_STATE_OFF;
15085+ break;
15086+ }
15087+
15088+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
15089+ sizeof(state));
15090+ status = intel_sdvo_read_response(output, NULL, 0);
15091+
15092+ return (status == SDVO_CMD_STATUS_SUCCESS);
15093+}
15094+
15095+static bool intel_sdvo_get_input_pixel_clock_range(struct drm_output *output,
15096+ int *clock_min,
15097+ int *clock_max)
15098+{
15099+ struct intel_sdvo_pixel_clock_range clocks;
15100+ u8 status;
15101+
15102+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
15103+ NULL, 0);
15104+
15105+ status = intel_sdvo_read_response(output, &clocks, sizeof(clocks));
15106+
15107+ if (status != SDVO_CMD_STATUS_SUCCESS)
15108+ return false;
15109+
15110+ /* Convert the values from units of 10 kHz to kHz. */
15111+ *clock_min = clocks.min * 10;
15112+ *clock_max = clocks.max * 10;
15113+
15114+ return true;
15115+}
15116+
15117+static bool intel_sdvo_set_target_output(struct drm_output *output,
15118+ u16 outputs)
15119+{
15120+ u8 status;
15121+
15122+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
15123+ sizeof(outputs));
15124+
15125+ status = intel_sdvo_read_response(output, NULL, 0);
15126+ return (status == SDVO_CMD_STATUS_SUCCESS);
15127+}
15128+
15129+static bool intel_sdvo_get_timing(struct drm_output *output, u8 cmd,
15130+ struct intel_sdvo_dtd *dtd)
15131+{
15132+ u8 status;
15133+
15134+ intel_sdvo_write_cmd(output, cmd, NULL, 0);
15135+ status = intel_sdvo_read_response(output, &dtd->part1,
15136+ sizeof(dtd->part1));
15137+ if (status != SDVO_CMD_STATUS_SUCCESS)
15138+ return false;
15139+
15140+ intel_sdvo_write_cmd(output, cmd + 1, NULL, 0);
15141+ status = intel_sdvo_read_response(output, &dtd->part2,
15142+ sizeof(dtd->part2));
15143+ if (status != SDVO_CMD_STATUS_SUCCESS)
15144+ return false;
15145+
15146+ return true;
15147+}
15148+
15149+static bool intel_sdvo_get_input_timing(struct drm_output *output,
15150+ struct intel_sdvo_dtd *dtd)
15151+{
15152+ return intel_sdvo_get_timing(output,
15153+ SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
15154+}
15155+
15156+static bool intel_sdvo_get_output_timing(struct drm_output *output,
15157+ struct intel_sdvo_dtd *dtd)
15158+{
15159+ return intel_sdvo_get_timing(output,
15160+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
15161+}
15162+
15163+static bool intel_sdvo_set_timing(struct drm_output *output, u8 cmd,
15164+ struct intel_sdvo_dtd *dtd)
15165+{
15166+ u8 status;
15167+
15168+ intel_sdvo_write_cmd(output, cmd, &dtd->part1, sizeof(dtd->part1));
15169+ status = intel_sdvo_read_response(output, NULL, 0);
15170+ if (status != SDVO_CMD_STATUS_SUCCESS)
15171+ return false;
15172+
15173+ intel_sdvo_write_cmd(output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
15174+ status = intel_sdvo_read_response(output, NULL, 0);
15175+ if (status != SDVO_CMD_STATUS_SUCCESS)
15176+ return false;
15177+
15178+ return true;
15179+}
15180+
15181+static bool intel_sdvo_set_input_timing(struct drm_output *output,
15182+ struct intel_sdvo_dtd *dtd)
15183+{
15184+ return intel_sdvo_set_timing(output,
15185+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
15186+}
15187+
15188+static bool intel_sdvo_set_output_timing(struct drm_output *output,
15189+ struct intel_sdvo_dtd *dtd)
15190+{
15191+ return intel_sdvo_set_timing(output,
15192+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
15193+}
15194+
15195+#if 0
15196+static bool intel_sdvo_get_preferred_input_timing(struct drm_output *output,
15197+ struct intel_sdvo_dtd *dtd)
15198+{
15199+ struct intel_output *intel_output = output->driver_private;
15200+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15201+ u8 status;
15202+
15203+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
15204+ NULL, 0);
15205+
15206+ status = intel_sdvo_read_response(output, &dtd->part1,
15207+ sizeof(dtd->part1));
15208+ if (status != SDVO_CMD_STATUS_SUCCESS)
15209+ return false;
15210+
15211+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
15212+ NULL, 0);
15213+ status = intel_sdvo_read_response(output, &dtd->part2,
15214+ sizeof(dtd->part2));
15215+ if (status != SDVO_CMD_STATUS_SUCCESS)
15216+ return false;
15217+
15218+ return true;
15219+}
15220+#endif
15221+
15222+static int intel_sdvo_get_clock_rate_mult(struct drm_output *output)
15223+{
15224+ u8 response, status;
15225+
15226+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
15227+ status = intel_sdvo_read_response(output, &response, 1);
15228+
15229+ if (status != SDVO_CMD_STATUS_SUCCESS) {
15230+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
15231+ return SDVO_CLOCK_RATE_MULT_1X;
15232+ } else {
15233+ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
15234+ }
15235+
15236+ return response;
15237+}
15238+
15239+static bool intel_sdvo_set_clock_rate_mult(struct drm_output *output, u8 val)
15240+{
15241+ u8 status;
15242+
15243+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
15244+ status = intel_sdvo_read_response(output, NULL, 0);
15245+ if (status != SDVO_CMD_STATUS_SUCCESS)
15246+ return false;
15247+
15248+ return true;
15249+}
15250+
15251+static bool intel_sdvo_mode_fixup(struct drm_output *output,
15252+ struct drm_display_mode *mode,
15253+ struct drm_display_mode *adjusted_mode)
15254+{
15255+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
15256+ * device will be told of the multiplier during mode_set.
15257+ */
15258+ DRM_DEBUG("xxintel_sdvo_fixup\n");
15259+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
15260+ return true;
15261+}
15262+
15263+#if 0
15264+static void i830_sdvo_map_hdtvstd_bitmask(struct drm_output * output)
15265+{
15266+ struct intel_output *intel_output = output->driver_private;
15267+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15268+
15269+ switch (sdvo_priv->TVStandard) {
15270+ case HDTV_SMPTE_274M_1080i50:
15271+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i50;
15272+ break;
15273+
15274+ case HDTV_SMPTE_274M_1080i59:
15275+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i59;
15276+ break;
15277+
15278+ case HDTV_SMPTE_274M_1080i60:
15279+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i60;
15280+ break;
15281+ case HDTV_SMPTE_274M_1080p60:
15282+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080p60;
15283+ break;
15284+ case HDTV_SMPTE_296M_720p59:
15285+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p59;
15286+ break;
15287+
15288+ case HDTV_SMPTE_296M_720p60:
15289+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p60;
15290+ break;
15291+
15292+ case HDTV_SMPTE_296M_720p50:
15293+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p50;
15294+ break;
15295+
15296+ case HDTV_SMPTE_293M_480p59:
15297+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_293M_480p59;
15298+ break;
15299+
15300+ case HDTV_SMPTE_293M_480p60:
15301+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_EIA_7702A_480p60;
15302+ break;
15303+
15304+ case HDTV_SMPTE_170M_480i59:
15305+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_170M_480i59;
15306+ break;
15307+
15308+ case HDTV_ITURBT601_576i50:
15309+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_ITURBT601_576i50;
15310+ break;
15311+
15312+ case HDTV_ITURBT601_576p50:
15313+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_ITURBT601_576p50;
15314+ break;
15315+ default:
15316+ DRM_DEBUG("ERROR: Unknown TV Standard!!!\n");
15317+ /* Invalid: return a zero bitmask */
15318+ sdvo_priv->TVStdBitmask = 0;
15319+ }
15320+
15321+}
15322+
15323+static void i830_sdvo_map_sdtvstd_bitmask(struct drm_output * output)
15324+{
15325+ struct intel_output *intel_output = output->driver_private;
15326+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15327+
15328+ switch (sdvo_priv->TVStandard) {
15329+ case TVSTANDARD_NTSC_M:
15330+ sdvo_priv->TVStdBitmask = SDVO_NTSC_M;
15331+ break;
15332+
15333+ case TVSTANDARD_NTSC_M_J:
15334+ sdvo_priv->TVStdBitmask = SDVO_NTSC_M_J;
15335+ break;
15336+
15337+ case TVSTANDARD_NTSC_433:
15338+ sdvo_priv->TVStdBitmask = SDVO_NTSC_433;
15339+ break;
15340+
15341+ case TVSTANDARD_PAL_B:
15342+ sdvo_priv->TVStdBitmask = SDVO_PAL_B;
15343+ break;
15344+
15345+ case TVSTANDARD_PAL_D:
15346+ sdvo_priv->TVStdBitmask = SDVO_PAL_D;
15347+ break;
15348+
15349+ case TVSTANDARD_PAL_G:
15350+ sdvo_priv->TVStdBitmask = SDVO_PAL_G;
15351+ break;
15352+
15353+ case TVSTANDARD_PAL_H:
15354+ sdvo_priv->TVStdBitmask = SDVO_PAL_H;
15355+ break;
15356+
15357+ case TVSTANDARD_PAL_I:
15358+ sdvo_priv->TVStdBitmask = SDVO_PAL_I;
15359+ break;
15360+
15361+ case TVSTANDARD_PAL_M:
15362+ sdvo_priv->TVStdBitmask = SDVO_PAL_M;
15363+ break;
15364+
15365+ case TVSTANDARD_PAL_N:
15366+ sdvo_priv->TVStdBitmask = SDVO_PAL_N;
15367+ break;
15368+
15369+ case TVSTANDARD_PAL_60:
15370+ sdvo_priv->TVStdBitmask = SDVO_PAL_60;
15371+ break;
15372+
15373+ case TVSTANDARD_SECAM_B:
15374+ sdvo_priv->TVStdBitmask = SDVO_SECAM_B;
15375+ break;
15376+
15377+ case TVSTANDARD_SECAM_D:
15378+ sdvo_priv->TVStdBitmask = SDVO_SECAM_D;
15379+ break;
15380+
15381+ case TVSTANDARD_SECAM_G:
15382+ sdvo_priv->TVStdBitmask = SDVO_SECAM_G;
15383+ break;
15384+
15385+ case TVSTANDARD_SECAM_K:
15386+ sdvo_priv->TVStdBitmask = SDVO_SECAM_K;
15387+ break;
15388+
15389+ case TVSTANDARD_SECAM_K1:
15390+ sdvo_priv->TVStdBitmask = SDVO_SECAM_K1;
15391+ break;
15392+
15393+ case TVSTANDARD_SECAM_L:
15394+ sdvo_priv->TVStdBitmask = SDVO_SECAM_L;
15395+ break;
15396+
15397+ case TVSTANDARD_SECAM_L1:
15398+ DRM_DEBUG("TVSTANDARD_SECAM_L1 not supported by encoder\n");
15399+ break;
15400+
15401+ case TVSTANDARD_SECAM_H:
15402+ DRM_DEBUG("TVSTANDARD_SECAM_H not supported by encoder\n");
15403+ break;
15404+
15405+ default:
15406+ DRM_DEBUG("ERROR: Unknown TV Standard\n");
15407+ /* Invalid: return a zero bitmask */
15408+ sdvo_priv->TVStdBitmask = 0;
15409+ break;
15410+ }
15411+}
15412+#endif
15413+
15414+static bool i830_sdvo_set_tvoutputs_formats(struct drm_output * output)
15415+{
15416+ u8 byArgs[6];
15417+ u8 status;
15418+ struct intel_output *intel_output = output->driver_private;
15419+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15420+
15421+
15422+ /* Clear all fields of the args/ret buffer */
15423+ memset(byArgs, 0, sizeof(byArgs));
15424+
15425+ if (sdvo_priv->TVMode & (TVMODE_SDTV)) {
15426+ /* Fill in the argument values */
15427+ byArgs[0] = (u8) (sdvo_priv->TVStdBitmask & 0xFF);
15428+ byArgs[1] = (u8) ((sdvo_priv->TVStdBitmask >> 8) & 0xFF);
15429+ byArgs[2] = (u8) ((sdvo_priv->TVStdBitmask >> 16) & 0xFF);
15430+ } else {
15431+ /* Fill in the argument values */
15432+ byArgs[0] = 0;
15433+ byArgs[1] = 0;
15434+ byArgs[2] = (u8) ((sdvo_priv->TVStdBitmask & 0xFF));
15435+ byArgs[3] = (u8) ((sdvo_priv->TVStdBitmask >> 8) & 0xFF);
15436+ byArgs[4] = (u8) ((sdvo_priv->TVStdBitmask >> 16) & 0xFF);
15437+ byArgs[5] = (u8) ((sdvo_priv->TVStdBitmask >> 24) & 0xFF);
15438+ }
15439+
15440+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMATS, byArgs, 6);
15441+ status = intel_sdvo_read_response(output, NULL, 0);
15442+
15443+ if (status != SDVO_CMD_STATUS_SUCCESS)
15444+ return FALSE;
15445+
15446+ return TRUE;
15447+
15448+}
15449+
15450+static bool i830_sdvo_create_preferred_input_timing(struct drm_output * output,
15451+ struct drm_display_mode * mode)
15452+{
15453+ u8 byArgs[7];
15454+ u8 status;
15455+ u32 dwClk;
15456+ u32 dwHActive, dwVActive;
15457+ bool bIsInterlaced, bIsScaled;
15458+
15459+ /* Clear all fields of the args/ret buffer */
15460+ memset(byArgs, 0, sizeof(byArgs));
15461+
15462+ /* Fill in the argument values */
15463+ dwHActive = mode->crtc_hdisplay;
15464+ dwVActive = mode->crtc_vdisplay;
15465+
15466+ dwClk = mode->clock * 1000 / 10000;
15467+ byArgs[0] = (u8) (dwClk & 0xFF);
15468+ byArgs[1] = (u8) ((dwClk >> 8) & 0xFF);
15469+
15470+ /* HActive & VActive should not exceed 12 bits each. So check it */
15471+ if ((dwHActive > 0xFFF) || (dwVActive > 0xFFF))
15472+ return FALSE;
15473+
15474+ byArgs[2] = (u8) (dwHActive & 0xFF);
15475+ byArgs[3] = (u8) ((dwHActive >> 8) & 0xF);
15476+ byArgs[4] = (u8) (dwVActive & 0xFF);
15477+ byArgs[5] = (u8) ((dwVActive >> 8) & 0xF);
15478+
15479+ bIsInterlaced = 1;
15480+ bIsScaled = 0;
15481+
15482+ byArgs[6] = bIsInterlaced ? 1 : 0;
15483+ byArgs[6] |= bIsScaled ? 2 : 0;
15484+
15485+ intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMINGS,
15486+ byArgs, 7);
15487+ status = intel_sdvo_read_response(output, NULL, 0);
15488+
15489+ if (status != SDVO_CMD_STATUS_SUCCESS)
15490+ return FALSE;
15491+
15492+ return TRUE;
15493+
15494+}
15495+
15496+static bool i830_sdvo_get_preferred_input_timing(struct drm_output * output,
15497+ struct intel_sdvo_dtd *output_dtd)
15498+{
15499+ return intel_sdvo_get_timing(output,
15500+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
15501+ output_dtd);
15502+}
15503+
15504+static bool i830_sdvo_set_current_inoutmap(struct drm_output * output, u32 in0outputmask,
15505+ u32 in1outputmask)
15506+{
15507+ u8 byArgs[4];
15508+ u8 status;
15509+
15510+ /* Clear all fields of the args/ret buffer */
15511+ memset(byArgs, 0, sizeof(byArgs));
15512+
15513+ /* Fill in the argument values */
15514+ byArgs[0] = (u8) (in0outputmask & 0xFF);
15515+ byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
15516+ byArgs[2] = (u8) (in1outputmask & 0xFF);
15517+ byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
15518+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
15519+ status = intel_sdvo_read_response(output, NULL, 0);
15520+
15521+ if (status != SDVO_CMD_STATUS_SUCCESS)
15522+ return FALSE;
15523+
15524+ return TRUE;
15525+
15526+}
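The byte packing above recurs in nearly every wrapper in this file: SDVO arguments are little-endian, so multi-byte values are split a byte at a time. A small helper (hypothetical, not in this patch) would collapse the copy/paste:

	static inline void sdvo_pack_le16(u8 *buf, u16 val)
	{
		buf[0] = val & 0xFF;		/* low byte first */
		buf[1] = (val >> 8) & 0xFF;	/* then high byte */
	}

With it, the function above reduces to two calls: sdvo_pack_le16(&byArgs[0], in0outputmask); sdvo_pack_le16(&byArgs[2], in1outputmask);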
15527+
15528+void i830_sdvo_set_iomap(struct drm_output * output)
15529+{
15530+ u32 dwCurrentSDVOIn0 = 0;
15531+ u32 dwCurrentSDVOIn1 = 0;
15532+ u32 dwDevMask = 0;
15533+
15534+ struct intel_output *intel_output = output->driver_private;
15535+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15536+
15537+
15538+ /* Please DO NOT change the following code. */
15539+ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
15540+ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
15541+ if (sdvo_priv->byInputWiring & (SDVOB_IN0 | SDVOC_IN0)) {
15542+ switch (sdvo_priv->ActiveDevice) {
15543+ case SDVO_DEVICE_LVDS:
15544+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
15545+ break;
15546+
15547+ case SDVO_DEVICE_TMDS:
15548+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
15549+ break;
15550+
15551+ case SDVO_DEVICE_TV:
15552+ dwDevMask =
15553+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
15554+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
15555+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
15556+ break;
15557+
15558+ case SDVO_DEVICE_CRT:
15559+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
15560+ break;
15561+ }
15562+ dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
15563+ } else if (sdvo_priv->byInputWiring & (SDVOB_IN1 | SDVOC_IN1)) {
15564+ switch (sdvo_priv->ActiveDevice) {
15565+ case SDVO_DEVICE_LVDS:
15566+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
15567+ break;
15568+
15569+ case SDVO_DEVICE_TMDS:
15570+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
15571+ break;
15572+
15573+ case SDVO_DEVICE_TV:
15574+ dwDevMask =
15575+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
15576+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
15577+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
15578+ break;
15579+
15580+ case SDVO_DEVICE_CRT:
15581+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
15582+ break;
15583+ }
15584+ dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
15585+ }
15586+
15587+ i830_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
15588+ dwCurrentSDVOIn1);
15589+}
15590+
15591+static bool i830_sdvo_get_input_output_pixelclock_range(struct drm_output * output,
15592+ bool direction)
15593+{
15594+ u8 byRets[4];
15595+ u8 status;
15596+
15597+ struct intel_output *intel_output = output->driver_private;
15598+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15599+
15600+ /* Clear all fields of the args/ret buffer */
15601+ memset(byRets, 0, sizeof(byRets));
15602+ if (direction) /* output pixel clock */
15603+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE,
15604+ NULL, 0);
15605+ else
15606+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
15607+ NULL, 0);
15608+ status = intel_sdvo_read_response(output, byRets, 4);
15609+
15610+ if (status != SDVO_CMD_STATUS_SUCCESS)
15611+ return FALSE;
15612+
15613+ if (direction) {
15614+ /* Fill up the return values. */
15615+ sdvo_priv->dwMinOutDotClk =
15616+ (u32) byRets[0] | ((u32) byRets[1] << 8);
15617+ sdvo_priv->dwMaxOutDotClk =
15618+ (u32) byRets[2] | ((u32) byRets[3] << 8);
15619+
15620+ /* Multiply the clocks obtained by 10000 */
15621+ sdvo_priv->dwMinOutDotClk = (sdvo_priv->dwMinOutDotClk) * 10000;
15622+ sdvo_priv->dwMaxOutDotClk = (sdvo_priv->dwMaxOutDotClk) * 10000;
15623+
15624+ } else {
15625+ /* Fill up the return values. */
15626+ sdvo_priv->dwMinInDotClk = (u32) byRets[0] | ((u32) byRets[1] << 8);
15627+ sdvo_priv->dwMaxInDotClk = (u32) byRets[2] | ((u32) byRets[3] << 8);
15628+
15629+ /* Multiply the clocks obtained by 10000 */
15630+ sdvo_priv->dwMinInDotClk = (sdvo_priv->dwMinInDotClk) * 10000;
15631+ sdvo_priv->dwMaxInDotClk = (sdvo_priv->dwMaxInDotClk) * 10000;
15632+ }
15633+ DRM_DEBUG("MinDotClk = 0x%x\n", sdvo_priv->dwMinInDotClk);
15634+ DRM_DEBUG("MaxDotClk = 0x%x\n", sdvo_priv->dwMaxInDotClk);
15635+
15636+ return TRUE;
15637+
15638+}
15639+
15640+static bool i830_sdvo_get_supported_tvoutput_formats(struct drm_output * output,
15641+ u32 * pTVStdMask,
15642+ u32 * pHDTVStdMask, u32 *pTVStdFormat)
15643+{
15644+ struct intel_output *intel_output = output->driver_private;
15645+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15646+
15647+ u8 byRets[6];
15648+ u8 status;
15649+
15650+ /* Clear all fields of the args/ret buffer */
15651+ memset(byRets, 0, sizeof(byRets));
15652+
15653+ /* Send the arguments & SDVO opcode to the h/w */
15654+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
15655+
15656+ status = intel_sdvo_read_response(output, byRets, 6);
15657+ if (status != SDVO_CMD_STATUS_SUCCESS)
15658+ return FALSE;
15659+
15660+ /* Fill up the return values; */
15661+ *pTVStdMask = (((u32) byRets[0]) |
15662+ ((u32) byRets[1] << 8) |
15663+ ((u32) (byRets[2] & 0x7) << 16));
15664+
15665+ *pHDTVStdMask = (((u32) byRets[2] & 0xF8) |
15666+ ((u32) byRets[3] << 8) |
15667+ ((u32) byRets[4] << 16) | ((u32) byRets[5] << 24));
15668+
15669+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMATS, NULL, 0);
15670+
15671+ status = intel_sdvo_read_response(output, byRets, 6);
15672+ if (status != SDVO_CMD_STATUS_SUCCESS)
15673+ return FALSE;
15674+
15675+ /* Fill up the return values; */
15676+ if(sdvo_priv->TVMode == TVMODE_SDTV)
15677+ *pTVStdFormat = (((u32) byRets[0]) |
15678+ ((u32) byRets[1] << 8) |
15679+ ((u32) (byRets[2] & 0x7) << 16));
15680+ else
15681+ *pTVStdFormat = (((u32) byRets[2] & 0xF8) |
15682+ ((u32) byRets[3] << 8) |
15683+ ((u32) byRets[4] << 16) | ((u32) byRets[5] << 24));
15684+ DRM_DEBUG("BIOS TV format is %d\n",*pTVStdFormat);
15685+ return TRUE;
15686+
15687+}
15688+
15689+static bool i830_sdvo_get_supported_enhancements(struct drm_output * output,
15690+ u32 * psupported_enhancements)
15691+{
15692+
15693+ u8 status;
15694+ u8 byRets[2];
15695+ struct intel_output *intel_output = output->driver_private;
15696+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15697+
15698+
15699+ /* Make all fields of the args/ret to zero */
15700+ memset(byRets, 0, sizeof(byRets));
15701+
15702+ /* Send the arguments & SDVO opcode to the h/w */
15703+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, NULL, 0);
15704+
15705+ status = intel_sdvo_read_response(output, byRets, 2);
15706+ if (status != SDVO_CMD_STATUS_SUCCESS)
15707+ return FALSE;
15708+
15709+ sdvo_priv->dwSupportedEnhancements = *psupported_enhancements =
15710+ ((u32) byRets[0] | ((u32) byRets[1] << 8));
15711+ return TRUE;
15712+
15713+}
15714+
15715+static bool i830_sdvo_get_max_horizontal_overscan(struct drm_output * output, u32 * pMaxVal,
15716+ u32 * pDefaultVal)
15717+{
15718+ u8 byRets[4];
15719+ u8 status;
15720+
15721+ /* Clear all fields of the args/ret buffer */
15722+ memset(byRets, 0, sizeof(byRets));
15723+
15724+ /* Send the arguments & SDVO opcode to the h/w */
15725+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HORIZONTAL_OVERSCAN, NULL,
15726+ 0);
15727+
15728+ status = intel_sdvo_read_response(output, byRets, 4);
15729+ if (status != SDVO_CMD_STATUS_SUCCESS)
15730+ return FALSE;
15731+ /* Fill up the return values. */
15732+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15733+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15734+ return TRUE;
15735+}
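This function and the i830_sdvo_get_max_* wrappers that follow all share the same shape; a single helper parameterized on the opcode (hypothetical, not in this patch) would implement them all:

	static bool i830_sdvo_get_max_value(struct drm_output *output, u8 cmd,
					    u32 *pMaxVal, u32 *pDefaultVal)
	{
		u8 byRets[4];

		memset(byRets, 0, sizeof(byRets));
		intel_sdvo_write_cmd(output, cmd, NULL, 0);
		if (intel_sdvo_read_response(output, byRets, 4) !=
		    SDVO_CMD_STATUS_SUCCESS)
			return FALSE;

		/* bytes 0-1: max value; bytes 2-3: default, little-endian */
		*pMaxVal = (u32)byRets[0] | ((u32)byRets[1] << 8);
		*pDefaultVal = (u32)byRets[2] | ((u32)byRets[3] << 8);
		return TRUE;
	}

Each wrapper would then reduce to, e.g., return i830_sdvo_get_max_value(output, SDVO_CMD_GET_MAX_BRIGHTNESS, pMaxVal, pDefaultVal);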
15736+
15737+static bool i830_sdvo_get_max_vertical_overscan(struct drm_output * output, u32 * pMaxVal,
15738+ u32 * pDefaultVal)
15739+{
15740+ u8 byRets[4];
15741+ u8 status;
15742+
15743+ /* Clear all fields of the args/ret buffer */
15744+ memset(byRets, 0, sizeof(byRets));
15745+
15746+ /* Send the arguments & SDVO opcode to the h/w */
15747+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_VERTICAL_OVERSCAN, NULL, 0);
15748+
15749+ status = intel_sdvo_read_response(output, byRets, 4);
15750+ if (status != SDVO_CMD_STATUS_SUCCESS)
15751+ return FALSE;
15752+ /* Fill up the return values. */
15753+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15754+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15755+ return TRUE;
15756+}
15757+
15758+static bool i830_sdvo_get_max_horizontal_position(struct drm_output * output, u32 * pMaxVal,
15759+ u32 * pDefaultVal)
15760+{
15761+
15762+ u8 byRets[4];
15763+ u8 status;
15764+
15765+ /* Clear all fields of the args/ret buffer */
15766+ memset(byRets, 0, sizeof(byRets));
15767+
15768+ /* Send the arguments & SDVO opcode to the h/w */
15769+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HORIZONTAL_POSITION, NULL,
15770+ 0);
15771+
15772+ status = intel_sdvo_read_response(output, byRets, 4);
15773+ if (status != SDVO_CMD_STATUS_SUCCESS)
15774+ return FALSE;
15775+
15776+ /* Fill up the return values. */
15777+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15778+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15779+
15780+ return TRUE;
15781+}
15782+
15783+static bool i830_sdvo_get_max_vertical_position(struct drm_output * output,
15784+ u32 * pMaxVal, u32 * pDefaultVal)
15785+{
15786+
15787+ u8 byRets[4];
15788+ u8 status;
15789+
15790+ /* Clear all fields of the args/ret buffer */
15791+ memset(byRets, 0, sizeof(byRets));
15792+
15793+ /* Send the arguments & SDVO opcode to the h/w */
15794+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_VERTICAL_POSITION, NULL, 0);
15795+
15796+ status = intel_sdvo_read_response(output, byRets, 4);
15797+ if (status != SDVO_CMD_STATUS_SUCCESS)
15798+ return FALSE;
15799+
15800+ /* Fill up the return values. */
15801+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15802+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15803+
15804+ return TRUE;
15805+}
15806+
15807+static bool i830_sdvo_get_max_flickerfilter(struct drm_output * output,
15808+ u32 * pMaxVal, u32 * pDefaultVal)
15809+{
15810+
15811+ u8 byRets[4];
15812+ u8 status;
15813+
15814+ /* Clear all fields of the args/ret buffer */
15815+ memset(byRets, 0, sizeof(byRets));
15816+
15817+ /* Send the arguments & SDVO opcode to the h/w */
15818+
15819+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_FLICKER_FILTER, NULL, 0);
15820+
15821+ status = intel_sdvo_read_response(output, byRets, 4);
15822+ if (status != SDVO_CMD_STATUS_SUCCESS)
15823+ return FALSE;
15824+ /* Fill up the return values. */
15825+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15826+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15827+
15828+ return TRUE;
15829+}
15830+
15831+static bool i830_sdvo_get_max_brightness(struct drm_output * output,
15832+ u32 * pMaxVal, u32 * pDefaultVal)
15833+{
15834+
15835+ u8 byRets[4];
15836+ u8 status;
15837+
15838+ /* Set all fields of the args/ret to zero */
15839+ memset(byRets, 0, sizeof(byRets));
15840+
15841+ /* Send the arguments & SDVO opcode to the h/w */
15842+
15843+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
15844+
15845+ status = intel_sdvo_read_response(output, byRets, 4);
15846+ if (status != SDVO_CMD_STATUS_SUCCESS)
15847+ return FALSE;
15848+ /* Fill up the return values. */
15849+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15850+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15851+
15852+ return TRUE;
15853+}
15854+
15855+static bool i830_sdvo_get_max_contrast(struct drm_output * output,
15856+ u32 * pMaxVal, u32 * pDefaultVal)
15857+{
15858+
15859+ u8 byRets[4];
15860+ u8 status;
15861+
15862+ /* Set all fields of the args/ret to zero */
15863+ memset(byRets, 0, sizeof(byRets));
15864+
15865+ /* Send the arguments & SDVO opcode to the h/w */
15866+
15867+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
15868+
15869+ status = intel_sdvo_read_response(output, byRets, 4);
15870+ if (status != SDVO_CMD_STATUS_SUCCESS)
15871+ return FALSE;
15872+ /* Fill up the return values. */
15873+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15874+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15875+
15876+ return TRUE;
15877+}
15878+
15879+static bool i830_sdvo_get_max_sharpness(struct drm_output * output,
15880+ u32 * pMaxVal, u32 * pDefaultVal)
15881+{
15882+
15883+ u8 byRets[4];
15884+ u8 status;
15885+
15886+ /* Set all fields of the args/ret to zero */
15887+ memset(byRets, 0, sizeof(byRets));
15888+
15889+ /* Send the arguments & SDVO opcode to the h/w */
15890+
15891+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_SHARPNESS, NULL, 0);
15892+
15893+ status = intel_sdvo_read_response(output, byRets, 4);
15894+ if (status != SDVO_CMD_STATUS_SUCCESS)
15895+ return FALSE;
15896+
15897+ /* Fill up the return values. */
15898+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15899+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15900+
15901+ return TRUE;
15902+}
15903+
15904+static bool i830_sdvo_get_max_hue(struct drm_output * output,
15905+ u32 * pMaxVal, u32 * pDefaultVal)
15906+{
15907+ u8 byRets[4];
15908+ u8 status;
15909+
15910+ /* Set all fields of the args/ret to zero */
15911+ memset(byRets, 0, sizeof(byRets));
15912+
15913+ /* Send the arguments & SDVO opcode to the h/w */
15914+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HUE, NULL, 0);
15915+
15916+ status = intel_sdvo_read_response(output, byRets, 4);
15917+ if (status != SDVO_CMD_STATUS_SUCCESS)
15918+ return FALSE;
15919+
15920+ /* Fill up the return values. */
15921+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15922+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15923+
15924+ return TRUE;
15925+}
15926+
15927+static bool i830_sdvo_get_max_saturation(struct drm_output * output,
15928+ u32 * pMaxVal, u32 * pDefaultVal)
15929+{
15930+
15931+ u8 byRets[4];
15932+ u8 status;
15933+
15934+ /* Set all fields of the args/ret to zero */
15935+ memset(byRets, 0, sizeof(byRets));
15936+
15937+ /* Send the arguments & SDVO opcode to the h/w */
15938+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
15939+
15940+ status = intel_sdvo_read_response(output, byRets, 4);
15941+ if (status != SDVO_CMD_STATUS_SUCCESS)
15942+ return FALSE;
15943+
15944+ /* Fill up the return values. */
15945+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15946+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15947+
15948+ return TRUE;
15949+}
15950+
15951+static bool i830_sdvo_get_max_adaptive_flickerfilter(struct drm_output * output,
15952+ u32 * pMaxVal,
15953+ u32 * pDefaultVal)
15954+{
15955+ u8 byRets[4];
15956+ u8 status;
15957+
15958+ /* Set all fields of the args/ret to zero */
15959+ memset(byRets, 0, sizeof(byRets));
15960+
15961+ /* Send the arguments & SDVO opcode to the h/w */
15962+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FILTER,
15963+ NULL, 0);
15964+ status = intel_sdvo_read_response(output, byRets, 4);
15965+ if (status != SDVO_CMD_STATUS_SUCCESS)
15966+ return FALSE;
15967+
15968+ /* Fill up the return values. */
15969+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15970+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15971+
15972+ return TRUE;
15973+}
15974+
15975+static bool i830_sdvo_get_max_lumafilter(struct drm_output * output,
15976+ u32 * pMaxVal, u32 * pDefaultVal)
15977+{
15978+
15979+ u8 byRets[4];
15980+ u8 status;
15981+
15982+ /* Set all fields of the args/ret to zero */
15983+ memset(byRets, 0, sizeof(byRets));
15984+
15985+ /* Send the arguments & SDVO opcode to the h/w */
15986+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_TV_LUMA_FILTER, NULL, 0);
15987+ status = intel_sdvo_read_response(output, byRets, 4);
15988+ if (status != SDVO_CMD_STATUS_SUCCESS)
15989+ return FALSE;
15990+
15991+ /* Fill up the return values. */
15992+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
15993+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
15994+
15995+ return TRUE;
15996+}
15997+
15998+static bool i830_sdvo_get_max_chromafilter(struct drm_output * output,
15999+ u32 * pMaxVal, u32 * pDefaultVal)
16000+{
16001+
16002+ u8 byRets[4];
16003+ u8 status;
16004+
16005+ /* Set all fields of the args/ret to zero */
16006+ memset(byRets, 0, sizeof(byRets));
16007+
16008+ /* Send the arguments & SDVO opcode to the h/w */
16009+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_TV_CHROMA_FILTER, NULL, 0);
16010+ status = intel_sdvo_read_response(output, byRets, 4);
16011+ if (status != SDVO_CMD_STATUS_SUCCESS)
16012+ return FALSE;
16013+
16014+ /* Fill up the return values. */
16015+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16016+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16017+
16018+ return TRUE;
16019+}
16020+
16021+static bool i830_sdvo_get_dotcrawl(struct drm_output * output,
16022+ u32 * pCurrentVal, u32 * pDefaultVal)
16023+{
16024+
16025+ u8 byRets[2];
16026+ u8 status;
16027+
16028+ /* Set all fields of the args/ret to zero */
16029+ memset(byRets, 0, sizeof(byRets));
16030+
16031+ /* Send the arguments & SDVO opcode to the h/w */
16032+
16033+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_DOT_CRAWL, NULL, 0);
16034+ status = intel_sdvo_read_response(output, byRets, 2);
16035+ if (status != SDVO_CMD_STATUS_SUCCESS)
16036+ return FALSE;
16037+
16038+ /* Tibet issue 1603772: Dot crawl does not persist after reboot/Hibernate */
16039+ /* Details: Bit0 is considered the DotCrawl max value, but according to the EDS Bit0 */
16040+ /* represents the current DotCrawl value. */
16041+ /* Fix: the current value is updated from Bit0. */
16042+
16043+ /* Fill up the return values. */
16044+ *pCurrentVal = (u32) (byRets[0] & 0x1);
16045+ *pDefaultVal = (u32) ((byRets[0] >> 1) & 0x1);
16046+ return TRUE;
16047+}
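/*
 * Sketch of the dot-crawl decode above, with assumed reply bytes:
 * GET_DOT_CRAWL returns one byte in which Bit0 is the current value and
 * Bit1 the default.  For byRets[0] = 0x03 the code yields
 * *pCurrentVal = 1 and *pDefaultVal = 1; for byRets[0] = 0x02 it yields
 * *pCurrentVal = 0 and *pDefaultVal = 1.
 */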
16048+
16049+static bool i830_sdvo_get_max_2D_flickerfilter(struct drm_output * output,
16050+ u32 * pMaxVal, u32 * pDefaultVal)
16051+{
16052+
16053+ u8 byRets[4];
16054+ u8 status;
16055+
16056+ /* Set all fields of the args/ret to zero */
16057+ memset(byRets, 0, sizeof(byRets));
16058+
16059+ /* Send the arguments & SDVO opcode to the h/w */
16060+
16061+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_2D_FLICKER_FILTER, NULL, 0);
16062+ status = intel_sdvo_read_response(output, byRets, 4);
16063+ if (status != SDVO_CMD_STATUS_SUCCESS)
16064+ return FALSE;
16065+
16066+ /* Fill up the return values. */
16067+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16068+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16069+
16070+ return TRUE;
16071+}
16072+
16073+static bool i830_sdvo_set_horizontal_overscan(struct drm_output * output, u32 dwVal)
16074+{
16075+
16076+ u8 byArgs[2];
16077+ u8 status;
16078+
16079+ /* Set all fields of the args/ret to zero */
16080+ memset(byArgs, 0, sizeof(byArgs));
16081+
16082+ /* Fill up the argument value */
16083+ byArgs[0] = (u8) (dwVal & 0xFF);
16084+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16085+
16086+ /* Send the arguments & SDVO opcode to the h/w */
16087+
16088+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HORIZONTAL_OVERSCAN, byArgs, 2);
16089+ status = intel_sdvo_read_response(output, NULL, 0);
16090+
16091+ if (status != SDVO_CMD_STATUS_SUCCESS)
16092+ return FALSE;
16093+ return TRUE;
16094+}
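/*
 * The SET_* helpers split dwVal into the same little-endian layout the
 * getters read back.  With an illustrative dwVal = 400 = 0x0190, the two
 * assignments above produce byArgs[0] = 0x90 and byArgs[1] = 0x01.
 */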
16095+
16096+static bool i830_sdvo_set_vertical_overscan(struct drm_output * output, u32 dwVal)
16097+{
16098+
16099+ u8 byArgs[2];
16100+ u8 status;
16101+
16102+ /* Set all fields of the args/ret to zero */
16103+ memset(byArgs, 0, sizeof(byArgs));
16104+
16105+ /* Fill up the argument value */
16106+ byArgs[0] = (u8) (dwVal & 0xFF);
16107+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16108+
16109+ /* Send the arguments & SDVO opcode to the h/w */
16110+
16111+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_VERTICAL_OVERSCAN, byArgs, 2);
16112+ status = intel_sdvo_read_response(output, NULL, 0);
16113+
16114+ if (status != SDVO_CMD_STATUS_SUCCESS)
16115+ return FALSE;
16116+ return TRUE;
16117+}
16118+
16119+static bool i830_sdvo_set_horizontal_position(struct drm_output * output, u32 dwVal)
16120+{
16121+
16122+ u8 byArgs[2];
16123+ u8 status;
16124+
16125+ /* Set all fields of the args/ret to zero */
16126+ memset(byArgs, 0, sizeof(byArgs));
16127+
16128+ /* Fill up the argument value */
16129+ byArgs[0] = (u8) (dwVal & 0xFF);
16130+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16131+
16132+ /* Send the arguments & SDVO opcode to the h/w */
16133+
16134+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HORIZONTAL_POSITION, byArgs, 2);
16135+ status = intel_sdvo_read_response(output, NULL, 0);
16136+
16137+ if (status != SDVO_CMD_STATUS_SUCCESS)
16138+ return FALSE;
16139+
16140+ return TRUE;
16141+}
16142+
16143+static bool i830_sdvo_set_vertical_position(struct drm_output * output, u32 dwVal)
16144+{
16145+
16146+ u8 byArgs[2];
16147+ u8 status;
16148+
16149+ /* Set all fields of the args/ret to zero */
16150+ memset(byArgs, 0, sizeof(byArgs));
16151+
16152+ /* Fill up the argument value */
16153+ byArgs[0] = (u8) (dwVal & 0xFF);
16154+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16155+
16156+ /* Send the arguments & SDVO opcode to the h/w */
16157+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_VERTICAL_POSITION, byArgs, 2);
16158+ status = intel_sdvo_read_response(output, NULL, 0);
16159+
16160+ if (status != SDVO_CMD_STATUS_SUCCESS)
16161+ return FALSE;
16162+
16163+ return TRUE;
16164+
16165+}
16166+
16167+static bool i830_sdvo_set_flickerfilter(struct drm_output * output, u32 dwVal)
16168+{
16169+
16170+ u8 byArgs[2];
16171+ u8 status;
16172+
16173+ /* Set all fields of the args/ret to zero */
16174+ memset(byArgs, 0, sizeof(byArgs));
16175+
16176+ /* Fill up the argument value */
16177+ byArgs[0] = (u8) (dwVal & 0xFF);
16178+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16179+
16180+ /* Send the arguments & SDVO opcode to the h/w */
16181+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_FLICKER_FILTER, byArgs, 2);
16182+ status = intel_sdvo_read_response(output, NULL, 0);
16183+
16184+ if (status != SDVO_CMD_STATUS_SUCCESS)
16185+ return FALSE;
16186+
16187+ return TRUE;
16188+}
16189+
16190+static bool i830_sdvo_set_brightness(struct drm_output * output, u32 dwVal)
16191+{
16192+
16193+ u8 byArgs[2];
16194+ u8 status;
16195+
16196+ /* Set all fields of the args/ret to zero */
16197+ memset(byArgs, 0, sizeof(byArgs));
16198+
16199+ /* Fill up the argument value */
16200+ byArgs[0] = (u8) (dwVal & 0xFF);
16201+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16202+
16203+ /* Send the arguments & SDVO opcode to the h/w */
16204+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_BRIGHTNESS, byArgs, 2);
16205+ status = intel_sdvo_read_response(output, NULL, 0);
16206+
16207+ if (status != SDVO_CMD_STATUS_SUCCESS)
16208+ return FALSE;
16209+
16210+ return TRUE;
16211+}
16212+
16213+static bool i830_sdvo_set_contrast(struct drm_output * output, u32 dwVal)
16214+{
16215+
16216+ u8 byArgs[2];
16217+ u8 status;
16218+
16219+ /* Set all fields of the args/ret to zero */
16220+ memset(byArgs, 0, sizeof(byArgs));
16221+ /* Fill up the argument value */
16222+ byArgs[0] = (u8) (dwVal & 0xFF);
16223+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16224+
16225+ /* Send the arguments & SDVO opcode to the h/w */
16226+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CONTRAST, byArgs, 2);
16227+ status = intel_sdvo_read_response(output, NULL, 0);
16228+
16229+ if (status != SDVO_CMD_STATUS_SUCCESS)
16230+ return FALSE;
16231+
16232+ return TRUE;
16233+}
16234+
16235+static bool i830_sdvo_set_sharpness(struct drm_output * output, u32 dwVal)
16236+{
16237+
16238+ u8 byArgs[2];
16239+ u8 status;
16240+
16241+ /* Set all fields of the args/ret to zero */
16242+ memset(byArgs, 0, sizeof(byArgs));
16243+
16244+ /* Fill up the argument value */
16245+ byArgs[0] = (u8) (dwVal & 0xFF);
16246+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16247+
16248+ /* Send the arguments & SDVO opcode to the h/w */
16249+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_SHARPNESS, byArgs, 2);
16250+ status = intel_sdvo_read_response(output, NULL, 0);
16251+
16252+ if (status != SDVO_CMD_STATUS_SUCCESS)
16253+ return FALSE;
16254+
16255+ return TRUE;
16256+}
16257+
16258+static bool i830_sdvo_set_hue(struct drm_output * output, u32 dwVal)
16259+{
16260+
16261+ u8 byArgs[2];
16262+ u8 status;
16263+
16264+ /* Set all fields of the args/ret to zero */
16265+ memset(byArgs, 0, sizeof(byArgs));
16266+
16267+ /* Fill up the argument value */
16268+ byArgs[0] = (u8) (dwVal & 0xFF);
16269+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16270+
16271+ /* Send the arguments & SDVO opcode to the h/w */
16272+
16273+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HUE, byArgs, 2);
16274+ status = intel_sdvo_read_response(output, NULL, 0);
16275+
16276+ if (status != SDVO_CMD_STATUS_SUCCESS)
16277+ return FALSE;
16278+
16279+ return TRUE;
16280+}
16281+
16282+static bool i830_sdvo_set_saturation(struct drm_output * output, u32 dwVal)
16283+{
16284+
16285+ u8 byArgs[2];
16286+ u8 status;
16287+
16288+ /* Set all fields of the args/ret to zero */
16289+ memset(byArgs, 0, sizeof(byArgs));
16290+
16291+ /* Fill up the argument value */
16292+ byArgs[0] = (u8) (dwVal & 0xFF);
16293+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16294+
16295+ /* Send the arguments & SDVO opcode to the h/w */
16296+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_SATURATION, byArgs, 2);
16297+ status = intel_sdvo_read_response(output, NULL, 0);
16298+
16299+ if (status != SDVO_CMD_STATUS_SUCCESS)
16300+ return FALSE;
16301+
16302+ return TRUE;
16303+}
16304+
16305+static bool i830_sdvo_set_adaptive_flickerfilter(struct drm_output * output, u32 dwVal)
16306+{
16307+ u8 byArgs[2];
16308+ u8 status;
16309+
16310+ /* Set all fields of the args/ret to zero */
16311+ memset(byArgs, 0, sizeof(byArgs));
16312+
16313+ /* Fill up the argument value */
16314+ byArgs[0] = (u8) (dwVal & 0xFF);
16315+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16316+
16317+ /* Send the arguments & SDVO opcode to the h/w */
16318+
16319+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ADAPTIVE_FLICKER_FILTER, byArgs,
16320+ 2);
16321+ status = intel_sdvo_read_response(output, NULL, 0);
16322+
16323+ if (status != SDVO_CMD_STATUS_SUCCESS)
16324+ return FALSE;
16325+
16326+ return TRUE;
16327+
16328+}
16329+
16330+static bool i830_sdvo_set_lumafilter(struct drm_output * output, u32 dwVal)
16331+{
16332+ u8 byArgs[2];
16333+ u8 status;
16334+
16335+ /* Set all fields of the args/ret to zero */
16336+ memset(byArgs, 0, sizeof(byArgs));
16337+
16338+ /* Fill up the argument value */
16339+ byArgs[0] = (u8) (dwVal & 0xFF);
16340+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16341+
16342+ /* Send the arguments & SDVO opcode to the h/w */
16343+
16344+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_LUMA_FILTER, byArgs, 2);
16345+ status = intel_sdvo_read_response(output, NULL, 0);
16346+
16347+ if (status != SDVO_CMD_STATUS_SUCCESS)
16348+ return FALSE;
16349+
16350+ return TRUE;
16351+}
16352+
16353+static bool i830_sdvo_set_chromafilter(struct drm_output * output, u32 dwVal)
16354+{
16355+
16356+ u8 byArgs[2];
16357+ u8 status;
16358+
16359+ /* Set all fields of the args/ret to zero */
16360+ memset(byArgs, 0, sizeof(byArgs));
16361+
16362+ /* Fill up the argument value */
16363+ byArgs[0] = (u8) (dwVal & 0xFF);
16364+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16365+
16366+ /* Send the arguments & SDVO opcode to the h/w */
16367+
16368+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_CHROMA_FILTER, byArgs, 2);
16369+ status = intel_sdvo_read_response(output, NULL, 0);
16370+
16371+ if (status != SDVO_CMD_STATUS_SUCCESS)
16372+ return FALSE;
16373+
16374+ return TRUE;
16375+}
16376+
16377+static bool i830_sdvo_set_dotcrawl(struct drm_output * output, u32 dwVal)
16378+{
16379+
16380+ u8 byArgs[2];
16381+ u8 status;
16382+
16383+ /* Set all fields of the args/ret to zero */
16384+ memset(byArgs, 0, sizeof(byArgs));
16385+
16386+ /* Fill up the argument value */
16387+ byArgs[0] = (u8) (dwVal & 0xFF);
16388+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16389+
16390+ /* Send the arguments & SDVO opcode to the h/w */
16391+
16392+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_DOT_CRAWL, byArgs, 2);
16393+ status = intel_sdvo_read_response(output, NULL, 0);
16394+ if (status != SDVO_CMD_STATUS_SUCCESS)
16395+ return FALSE;
16396+
16397+ return TRUE;
16398+}
16399+
16400+static bool i830_sdvo_set_2D_flickerfilter(struct drm_output * output, u32 dwVal)
16401+{
16402+
16403+ u8 byArgs[2];
16404+ u8 status;
16405+
16406+ /* Set all fields of the args/ret to zero */
16407+ memset(byArgs, 0, sizeof(byArgs));
16408+
16409+ /* Fill up the argument value */
16410+ byArgs[0] = (u8) (dwVal & 0xFF);
16411+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16412+
16413+ /* Send the arguments & SDVO opcode to the h/w */
16414+
16415+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_2D_FLICKER_FILTER, byArgs, 2);
16416+ status = intel_sdvo_read_response(output, NULL, 0);
16417+
16418+ if (status != SDVO_CMD_STATUS_SUCCESS)
16419+ return FALSE;
16420+
16421+ return TRUE;
16422+}
16423+
16424+#if 0
16425+static bool i830_sdvo_set_ancillary_video_information(struct drm_output * output)
16426+{
16427+
16428+ u8 status;
16429+ u8 byArgs[4];
16430+ u32 dwAncillaryBits = 0;
16431+ struct intel_output *intel_output = output->driver_private;
16432+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
16433+
16434+
16435+ PSDVO_ANCILLARY_INFO_T pAncillaryInfo = &sdvo_priv->AncillaryInfo;
16436+
16437+ /* Set all fields of the args/ret to zero */
16438+ memset(byArgs, 0, sizeof(byArgs));
16439+
16440+ /* Handle picture aspect ratio (bits 8, 9) and */
16441+ /* active format aspect ratio (bits 10, 13) */
16442+ switch (pAncillaryInfo->AspectRatio) {
16443+ case CP_ASPECT_RATIO_FF_4_BY_3:
16444+ dwAncillaryBits |= UAIM_PAR_4_3;
16445+ dwAncillaryBits |= UAIM_FAR_4_BY_3_CENTER;
16446+ break;
16447+ case CP_ASPECT_RATIO_14_BY_9_CENTER:
16448+ dwAncillaryBits |= UAIM_FAR_14_BY_9_CENTER;
16449+ break;
16450+ case CP_ASPECT_RATIO_14_BY_9_TOP:
16451+ dwAncillaryBits |= UAIM_FAR_14_BY_9_LETTERBOX_TOP;
16452+ break;
16453+ case CP_ASPECT_RATIO_16_BY_9_CENTER:
16454+ dwAncillaryBits |= UAIM_PAR_16_9;
16455+ dwAncillaryBits |= UAIM_FAR_16_BY_9_CENTER;
16456+ break;
16457+ case CP_ASPECT_RATIO_16_BY_9_TOP:
16458+ dwAncillaryBits |= UAIM_PAR_16_9;
16459+ dwAncillaryBits |= UAIM_FAR_16_BY_9_LETTERBOX_TOP;
16460+ break;
16461+ case CP_ASPECT_RATIO_GT_16_BY_9_CENTER:
16462+ dwAncillaryBits |= UAIM_PAR_16_9;
16463+ dwAncillaryBits |= UAIM_FAR_GT_16_BY_9_LETTERBOX_CENTER;
16464+ break;
16465+ case CP_ASPECT_RATIO_FF_4_BY_3_PROT_CENTER:
16466+ dwAncillaryBits |= UAIM_FAR_4_BY_3_SNP_14_BY_9_CENTER;
16467+ break;
16468+ case CP_ASPECT_RATIO_FF_16_BY_9_ANAMORPHIC:
16469+ dwAncillaryBits |= UAIM_PAR_16_9;
16470+ break;
16471+ default:
16472+ DRM_DEBUG("fail to set ancillary video info\n");
16473+ return FALSE;
16474+
16475+ }
16476+
16477+ /* Fill up the argument value */
16478+ byArgs[0] = (u8) ((dwAncillaryBits >> 0) & 0xFF);
16479+ byArgs[1] = (u8) ((dwAncillaryBits >> 8) & 0xFF);
16480+ byArgs[2] = (u8) ((dwAncillaryBits >> 16) & 0xFF);
16481+ byArgs[3] = (u8) ((dwAncillaryBits >> 24) & 0xFF);
16482+
16483+ /* Send the arguments & SDVO opcode to the h/w */
16484+
16485+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ANCILLARY_VIDEO_INFORMATION,
16486+ byArgs, 4);
16487+ status = intel_sdvo_read_response(output, NULL, 0);
16488+
16489+ if (status != SDVO_CMD_STATUS_SUCCESS)
16490+ return FALSE;
16491+
16492+ return TRUE;
16493+
16494+}
16495+#endif
16496+static bool i830_tv_program_display_params(struct drm_output * output)
16497+
16498+{
16499+ u8 status;
16500+ u32 dwMaxVal = 0;
16501+ u32 dwDefaultVal = 0;
16502+ u32 dwCurrentVal = 0;
16503+
16504+ struct intel_output *intel_output = output->driver_private;
16505+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
16506+
16507+
16508+ /* X & Y Positions */
16509+
16510+ /* Horizontal position */
16511+ if (sdvo_priv->dwSupportedEnhancements & SDVO_HORIZONTAL_POSITION) {
16512+ status =
16513+ i830_sdvo_get_max_horizontal_position(output, &dwMaxVal,
16514+ &dwDefaultVal);
16515+
16516+ if (status) {
16517+ /*Tibet issue 1596943: After changing mode from 8x6 to 10x7, open CUI and press Restore Defaults: */
16518+ /*position changes. */
16519+
16520+ /* Tibet 1629992: can't keep the previous TV settings (screen position & size set from the CUI) across a reboot. */
16521+ /* Fix: compare whether the current position is greater than the max value and then assign the default value. */
16522+ /* Earlier the check compared pAim->PositionX.Max against dwMaxVal; at boot PositionX.Max is 0, so after */
16523+ /* every reboot the position was reset to the default. */
16524+
16525+ if (sdvo_priv->dispParams.PositionX.Value > dwMaxVal)
16526+ sdvo_priv->dispParams.PositionX.Value = dwDefaultVal;
16527+
16528+ status =
16529+ i830_sdvo_set_horizontal_position(output,
16530+ sdvo_priv->dispParams.PositionX.
16531+ Value);
16532+
16533+ if (!status)
16534+ return status;
16535+
16536+ sdvo_priv->dispParams.PositionX.Max = dwMaxVal;
16537+ sdvo_priv->dispParams.PositionX.Min = 0;
16538+ sdvo_priv->dispParams.PositionX.Default = dwDefaultVal;
16539+ sdvo_priv->dispParams.PositionX.Step = 1;
16540+ } else {
16541+ return status;
16542+ }
16543+ }
16544+
16545+ /* Vertical position */
16546+ if (sdvo_priv->dwSupportedEnhancements & SDVO_VERTICAL_POSITION) {
16547+ status =
16548+ i830_sdvo_get_max_vertical_position(output, &dwMaxVal,
16549+ &dwDefaultVal);
16550+
16551+ if (status) {
16552+
16553+ /*Tibet issue 1596943: After changing mode from 8x6 to 10x7, open CUI and press Restore Defaults: */
16554+ /*position changes. */
16555+ /*currently, if we are out of range, fall back to the default */
16556+
16557+ /* Tibet 1629992: can't keep the previous TV settings (screen position & size set from the CUI) across a reboot. */
16558+ /* Fix: compare whether the current position is greater than the max value and then assign the default value. */
16559+ /* Earlier the check compared pAim->PositionY.Max against dwMaxVal; at boot PositionY.Max is 0, so after */
16560+ /* every reboot the position was reset to the default. */
16561+
16562+ if (sdvo_priv->dispParams.PositionY.Value > dwMaxVal)
16563+ sdvo_priv->dispParams.PositionY.Value = dwDefaultVal;
16564+
16565+ status =
16566+ i830_sdvo_set_vertical_position(output,
16567+ sdvo_priv->dispParams.PositionY.
16568+ Value);
16569+ if (!status)
16570+ return status;
16571+
16572+ sdvo_priv->dispParams.PositionY.Max = dwMaxVal;
16573+ sdvo_priv->dispParams.PositionY.Min = 0;
16574+ sdvo_priv->dispParams.PositionY.Default = dwDefaultVal;
16575+ sdvo_priv->dispParams.PositionY.Step = 1;
16576+ } else {
16577+ return status;
16578+ }
16579+ }
16580+
16581+ /* Flicker Filter */
16582+ if (sdvo_priv->dwSupportedEnhancements & SDVO_FLICKER_FILTER) {
16583+ status =
16584+ i830_sdvo_get_max_flickerfilter(output, &dwMaxVal, &dwDefaultVal);
16585+
16586+ if (status) {
16587+ /*currently, if we are out of range, fall back to the default */
16588+ if (sdvo_priv->dispParams.FlickerFilter.Value > dwMaxVal)
16589+ sdvo_priv->dispParams.FlickerFilter.Value = dwDefaultVal;
16590+
16591+ status =
16592+ i830_sdvo_set_flickerfilter(output,
16593+ sdvo_priv->dispParams.FlickerFilter.
16594+ Value);
16595+ if (!status)
16596+ return status;
16597+
16598+ sdvo_priv->dispParams.FlickerFilter.Max = dwMaxVal;
16599+ sdvo_priv->dispParams.FlickerFilter.Min = 0;
16600+ sdvo_priv->dispParams.FlickerFilter.Default = dwDefaultVal;
16601+ sdvo_priv->dispParams.FlickerFilter.Step = 1;
16602+ } else {
16603+ return status;
16604+ }
16605+ }
16606+
16607+ /* Brightness */
16608+ if (sdvo_priv->dwSupportedEnhancements & SDVO_BRIGHTNESS) {
16609+
16610+ status =
16611+ i830_sdvo_get_max_brightness(output, &dwMaxVal, &dwDefaultVal);
16612+
16613+ if (status) {
16614+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16615+ /*there is no need to check it. */
16616+ if (sdvo_priv->dispParams.Brightness.Value > dwMaxVal)
16617+ sdvo_priv->dispParams.Brightness.Value = dwDefaultVal;
16618+
16619+ /* Program the device */
16620+ status =
16621+ i830_sdvo_set_brightness(output,
16622+ sdvo_priv->dispParams.Brightness.Value);
16623+ if (!status)
16624+ return status;
16625+
16626+ sdvo_priv->dispParams.Brightness.Max = dwMaxVal;
16627+ sdvo_priv->dispParams.Brightness.Min = 0;
16628+ sdvo_priv->dispParams.Brightness.Default = dwDefaultVal;
16629+ sdvo_priv->dispParams.Brightness.Step = 1;
16630+ } else {
16631+ return status;
16632+ }
16633+
16634+ }
16635+
16636+ /* Contrast */
16637+ if (sdvo_priv->dwSupportedEnhancements & SDVO_CONTRAST) {
16638+
16639+ status = i830_sdvo_get_max_contrast(output, &dwMaxVal, &dwDefaultVal);
16640+
16641+ if (status) {
16642+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16643+ /*there is no need to check it. */
16644+ if (sdvo_priv->dispParams.Contrast.Value > dwMaxVal)
16645+ sdvo_priv->dispParams.Contrast.Value = dwDefaultVal;
16646+
16647+ /* Program the device */
16648+ status =
16649+ i830_sdvo_set_contrast(output,
16650+ sdvo_priv->dispParams.Contrast.Value);
16651+ if (!status)
16652+ return status;
16653+
16654+ sdvo_priv->dispParams.Contrast.Max = dwMaxVal;
16655+ sdvo_priv->dispParams.Contrast.Min = 0;
16656+ sdvo_priv->dispParams.Contrast.Default = dwDefaultVal;
16657+
16658+ sdvo_priv->dispParams.Contrast.Step = 1;
16659+
16660+ } else {
16661+ return status;
16662+ }
16663+ }
16664+
16665+ /* Sharpness */
16666+ if (sdvo_priv->dwSupportedEnhancements & SDVO_SHARPNESS) {
16667+
16668+ status =
16669+ i830_sdvo_get_max_sharpness(output, &dwMaxVal, &dwDefaultVal);
16670+
16671+ if (status) {
16672+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16673+ /*there is no need to check it. */
16674+ if (sdvo_priv->dispParams.Sharpness.Value > dwMaxVal)
16675+ sdvo_priv->dispParams.Sharpness.Value = dwDefaultVal;
16676+
16677+ /* Program the device */
16678+ status =
16679+ i830_sdvo_set_sharpness(output,
16680+ sdvo_priv->dispParams.Sharpness.Value);
16681+ if (!status)
16682+ return status;
16683+ sdvo_priv->dispParams.Sharpness.Max = dwMaxVal;
16684+ sdvo_priv->dispParams.Sharpness.Min = 0;
16685+ sdvo_priv->dispParams.Sharpness.Default = dwDefaultVal;
16686+
16687+ sdvo_priv->dispParams.Sharpness.Step = 1;
16688+ } else {
16689+ return status;
16690+ }
16691+ }
16692+
16693+ /* Hue */
16694+ if (sdvo_priv->dwSupportedEnhancements & SDVO_HUE) {
16695+
16696+ status = i830_sdvo_get_max_hue(output, &dwMaxVal, &dwDefaultVal);
16697+
16698+ if (status) {
16699+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16700+ /*there is no need to check it. */
16701+ if (sdvo_priv->dispParams.Hue.Value > dwMaxVal)
16702+ sdvo_priv->dispParams.Hue.Value = dwDefaultVal;
16703+
16704+ /* Program the device */
16705+ status = i830_sdvo_set_hue(output, sdvo_priv->dispParams.Hue.Value);
16706+ if (!status)
16707+ return status;
16708+
16709+ sdvo_priv->dispParams.Hue.Max = dwMaxVal;
16710+ sdvo_priv->dispParams.Hue.Min = 0;
16711+ sdvo_priv->dispParams.Hue.Default = dwDefaultVal;
16712+
16713+ sdvo_priv->dispParams.Hue.Step = 1;
16714+
16715+ } else {
16716+ return status;
16717+ }
16718+ }
16719+
16720+ /* Saturation */
16721+ if (sdvo_priv->dwSupportedEnhancements & SDVO_SATURATION) {
16722+ status =
16723+ i830_sdvo_get_max_saturation(output, &dwMaxVal, &dwDefaultVal);
16724+
16725+ if (status) {
16726+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16727+ /*there is no need to check it. */
16728+ if (sdvo_priv->dispParams.Saturation.Value > dwMaxVal)
16729+ sdvo_priv->dispParams.Saturation.Value = dwDefaultVal;
16730+
16731+ /* Program the device */
16732+ status =
16733+ i830_sdvo_set_saturation(output,
16734+ sdvo_priv->dispParams.Saturation.Value);
16735+ if (!status)
16736+ return status;
16737+
16738+ sdvo_priv->dispParams.Saturation.Max = dwMaxVal;
16739+ sdvo_priv->dispParams.Saturation.Min = 0;
16740+ sdvo_priv->dispParams.Saturation.Default = dwDefaultVal;
16741+ sdvo_priv->dispParams.Saturation.Step = 1;
16742+ } else {
16743+ return status;
16744+ }
16745+
16746+ }
16747+
16748+ /* Adaptive Flicker filter */
16749+ if (sdvo_priv->dwSupportedEnhancements & SDVO_ADAPTIVE_FLICKER_FILTER) {
16750+ status =
16751+ i830_sdvo_get_max_adaptive_flickerfilter(output, &dwMaxVal,
16752+ &dwDefaultVal);
16753+
16754+ if (status) {
16755+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16756+ /*there is no need to check it. */
16757+ if (sdvo_priv->dispParams.AdaptiveFF.Value > dwMaxVal)
16758+ sdvo_priv->dispParams.AdaptiveFF.Value = dwDefaultVal;
16759+
16760+ status =
16761+ i830_sdvo_set_adaptive_flickerfilter(output,
16762+ sdvo_priv->dispParams.
16763+ AdaptiveFF.Value);
16764+ if (!status)
16765+ return status;
16766+
16767+ sdvo_priv->dispParams.AdaptiveFF.Max = dwMaxVal;
16768+ sdvo_priv->dispParams.AdaptiveFF.Min = 0;
16769+ sdvo_priv->dispParams.AdaptiveFF.Default = dwDefaultVal;
16770+ sdvo_priv->dispParams.AdaptiveFF.Step = 1;
16771+ } else {
16772+ return status;
16773+ }
16774+ }
16775+
16776+ /* 2D Flicker filter */
16777+ if (sdvo_priv->dwSupportedEnhancements & SDVO_2D_FLICKER_FILTER) {
16778+
16779+ status =
16780+ i830_sdvo_get_max_2D_flickerfilter(output, &dwMaxVal,
16781+ &dwDefaultVal);
16782+
16783+ if (status) {
16784+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16785+ /*there is no need to check it. */
16786+ if (sdvo_priv->dispParams.TwoD_FlickerFilter.Value > dwMaxVal)
16787+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = dwDefaultVal;
16788+
16789+ status =
16790+ i830_sdvo_set_2D_flickerfilter(output,
16791+ sdvo_priv->dispParams.
16792+ TwoD_FlickerFilter.Value);
16793+ if (!status)
16794+ return status;
16795+
16796+ sdvo_priv->dispParams.TwoD_FlickerFilter.Max = dwMaxVal;
16797+ sdvo_priv->dispParams.TwoD_FlickerFilter.Min = 0;
16798+ sdvo_priv->dispParams.TwoD_FlickerFilter.Default = dwDefaultVal;
16799+ sdvo_priv->dispParams.TwoD_FlickerFilter.Step = 1;
16800+ } else {
16801+ return status;
16802+ }
16803+ }
16804+
16805+ /* Luma Filter */
16806+ if (sdvo_priv->dwSupportedEnhancements & SDVO_TV_MAX_LUMA_FILTER) {
16807+ status =
16808+ i830_sdvo_get_max_lumafilter(output, &dwMaxVal, &dwDefaultVal);
16809+
16810+ if (status) {
16811+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16812+ /*there is no need to check it. */
16813+ if (sdvo_priv->dispParams.LumaFilter.Value > dwMaxVal)
16814+ sdvo_priv->dispParams.LumaFilter.Value = dwDefaultVal;
16815+
16816+ /* Program the device */
16817+ status =
16818+ i830_sdvo_set_lumafilter(output,
16819+ sdvo_priv->dispParams.LumaFilter.Value);
16820+ if (!status)
16821+ return status;
16822+
16823+ sdvo_priv->dispParams.LumaFilter.Max = dwMaxVal;
16824+ sdvo_priv->dispParams.LumaFilter.Min = 0;
16825+ sdvo_priv->dispParams.LumaFilter.Default = dwDefaultVal;
16826+ sdvo_priv->dispParams.LumaFilter.Step = 1;
16827+
16828+ } else {
16829+ return status;
16830+ }
16831+
16832+ }
16833+
16834+ /* Chroma Filter */
16835+ if (sdvo_priv->dwSupportedEnhancements & SDVO_MAX_TV_CHROMA_FILTER) {
16836+
16837+ status =
16838+ i830_sdvo_get_max_chromafilter(output, &dwMaxVal, &dwDefaultVal);
16839+
16840+ if (status) {
16841+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16842+ /*there is no need to check it. */
16843+ if (sdvo_priv->dispParams.ChromaFilter.Value > dwMaxVal)
16844+ sdvo_priv->dispParams.ChromaFilter.Value = dwDefaultVal;
16845+
16846+ /* Program the device */
16847+ status =
16848+ i830_sdvo_set_chromafilter(output,
16849+ sdvo_priv->dispParams.ChromaFilter.
16850+ Value);
16851+ if (!status)
16852+ return status;
16853+
16854+ sdvo_priv->dispParams.ChromaFilter.Max = dwMaxVal;
16855+ sdvo_priv->dispParams.ChromaFilter.Min = 0;
16856+ sdvo_priv->dispParams.ChromaFilter.Default = dwDefaultVal;
16857+ sdvo_priv->dispParams.ChromaFilter.Step = 1;
16858+ } else {
16859+ return status;
16860+ }
16861+
16862+ }
16863+
16864+ /* Dot Crawl */
16865+ if (sdvo_priv->dwSupportedEnhancements & SDVO_DOT_CRAWL) {
16866+ status = i830_sdvo_get_dotcrawl(output, &dwCurrentVal, &dwDefaultVal);
16867+
16868+ if (status) {
16869+
16870+ dwMaxVal = 1;
16871+ /*check whether the value is beyond the max value; the min value as per the EDS is always 0, so */
16872+ /*there is no need to check it. */
16873+
16874+ /* Tibet issue 1603772: Dot crawl does not persist after reboot/Hibernate */
16875+ /* Details: "DotCrawl.Value" is compared with "dwDefaultVal". Since */
16876+ /* dwDefaultVal is always 0, the dotCrawl value is always set to 0. */
16877+ /* Fix: compare the current dotCrawl value with dwMaxVal. */
16878+
16879+ if (sdvo_priv->dispParams.DotCrawl.Value > dwMaxVal)
16880+
16881+ sdvo_priv->dispParams.DotCrawl.Value = dwMaxVal;
16882+
16883+ status =
16884+ i830_sdvo_set_dotcrawl(output,
16885+ sdvo_priv->dispParams.DotCrawl.Value);
16886+ if (!status)
16887+ return status;
16888+
16889+ sdvo_priv->dispParams.DotCrawl.Max = dwMaxVal;
16890+ sdvo_priv->dispParams.DotCrawl.Min = 0;
16891+ sdvo_priv->dispParams.DotCrawl.Default = dwMaxVal;
16892+ sdvo_priv->dispParams.DotCrawl.Step = 1;
16893+ } else {
16894+ return status;
16895+ }
16896+ }
16897+
16898+ return TRUE;
16899+}
16900+
16901+static bool i830_tv_set_overscan_parameters(struct drm_output * output)
16902+{
16903+ u8 status;
16904+
16905+ u32 dwDefaultVal = 0;
16906+ u32 dwMaxVal = 0;
16907+ u32 dwPercentageValue = 0;
16908+ u32 dwDefOverscanXValue = 0;
16909+ u32 dwDefOverscanYValue = 0;
16910+ u32 dwOverscanValue = 0;
16911+ u32 dwSupportedEnhancements;
16912+ struct intel_output *intel_output = output->driver_private;
16913+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
16914+
16915+
16916+ /* Get supported picture enhancements */
16917+ status =
16918+ i830_sdvo_get_supported_enhancements(output,
16919+ &dwSupportedEnhancements);
16920+ if (!status)
16921+ return status;
16922+
16923+ /* Horizontal Overscan */
16924+ if (dwSupportedEnhancements & SDVO_HORIZONTAL_OVERSCAN) {
16925+ status =
16926+ i830_sdvo_get_max_horizontal_overscan(output, &dwMaxVal,
16927+ &dwDefaultVal);
16928+ if (!status)
16929+ return status;
16930+
16931+ /*Calculate the default value in terms of percentage */
16932+ dwDefOverscanXValue = ((dwDefaultVal * 100) / dwMaxVal);
16933+
16934+ /*Calculate the default value in 0-1000 range */
16935+ dwDefOverscanXValue = (dwDefOverscanXValue * 10);
16936+
16937+ /*Overscan is in the range of 0 to 10000 as per MS spec */
16938+ if (sdvo_priv->OverScanX.Value > MAX_VAL)
16939+ sdvo_priv->OverScanX.Value = dwDefOverscanXValue;
16940+
16941+ /*Calculate the percentage(0-100%) of the overscan value */
16942+ dwPercentageValue = (sdvo_priv->OverScanX.Value * 100) / 1000;
16943+
16944+ /* Now map the % value to the absolute value to be programmed into the encoder */
16945+ dwOverscanValue = (dwMaxVal * dwPercentageValue) / 100;
16946+
16947+ status = i830_sdvo_set_horizontal_overscan(output, dwOverscanValue);
16948+ if (!status)
16949+ return status;
16950+
16951+ sdvo_priv->OverScanX.Max = 1000;
16952+ sdvo_priv->OverScanX.Min = 0;
16953+ sdvo_priv->OverScanX.Default = dwDefOverscanXValue;
16954+ sdvo_priv->OverScanX.Step = 20;
16955+ }
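/*
 * A worked pass through the mapping above, with assumed encoder limits:
 * if the encoder reports dwMaxVal = 400 and dwDefaultVal = 40, then
 * dwDefOverscanXValue = (40 * 100) / 400 = 10, scaled by 10 to 100 on
 * the 0-1000 scale.  A stored OverScanX.Value of 500 then maps back as
 * dwPercentageValue = (500 * 100) / 1000 = 50, so the encoder is
 * programmed with dwOverscanValue = (400 * 50) / 100 = 200.
 */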
16956+
16957+ /* end Horizontal Overscan */
16958+ /* Vertical Overscan */
16959+ if (dwSupportedEnhancements & SDVO_VERTICAL_OVERSCAN) {
16960+ status =
16961+ i830_sdvo_get_max_vertical_overscan(output, &dwMaxVal,
16962+ &dwDefaultVal);
16963+ if (!status)
16964+ return status;
16965+
16966+ /*Calculate the default value in terms of percentage */
16967+ dwDefOverscanYValue = ((dwDefaultVal * 100) / dwMaxVal);
16968+
16969+ /*Calculate the default value in 0-1000 range */
16970+ dwDefOverscanYValue = (dwDefOverscanYValue * 10);
16971+
16972+ /*Overscan is in the range of 0 to 10000 as per MS spec */
16973+ if (sdvo_priv->OverScanY.Value > MAX_VAL)
16974+ sdvo_priv->OverScanY.Value = dwDefOverscanYValue;
16975+
16976+ /*Calculate the percentage(0-100%) of the overscan value */
16977+ dwPercentageValue = (sdvo_priv->OverScanY.Value * 100) / 1000;
16978+
16979+ /* Now map the % value to the absolute value to be programmed into the encoder */
16980+ dwOverscanValue = (dwMaxVal * dwPercentageValue) / 100;
16981+
16982+ status = i830_sdvo_set_vertical_overscan(output, dwOverscanValue);
16983+ if (!status)
16984+ return status;
16985+
16986+ sdvo_priv->OverScanY.Max = 1000;
16987+ sdvo_priv->OverScanY.Min = 0;
16988+ sdvo_priv->OverScanY.Default = dwDefOverscanYValue;
16989+ sdvo_priv->OverScanY.Step = 20;
16990+
16991+ }
16992+ /* end Vertical Overscan */
16993+ return TRUE;
16994+}
16995+
16996+static bool i830_translate_dtd2timing(struct drm_display_mode * pTimingInfo,
16997+ struct intel_sdvo_dtd *pDTD)
16998+{
16999+
17000+ u32 dwHBLHigh = 0;
17001+ u32 dwVBLHigh = 0;
17002+ u32 dwHSHigh1 = 0;
17003+ u32 dwHSHigh2 = 0;
17004+ u32 dwVSHigh1 = 0;
17005+ u32 dwVSHigh2 = 0;
17006+ u32 dwVPWLow = 0;
17007+ bool status = FALSE;
17008+
17009+ if ((pDTD == NULL) || (pTimingInfo == NULL)) {
17010+ return status;
17011+ }
17012+
17013+ pTimingInfo->clock = pDTD->part1.clock * 10000 / 1000; /* DTD clock is in 10 kHz units; mode clock is in kHz */
17014+
17015+ pTimingInfo->hdisplay = pTimingInfo->crtc_hdisplay =
17016+ (u32) pDTD->part1.
17017+ h_active | ((u32) (pDTD->part1.h_high & 0xF0) << 4);
17018+
17019+ pTimingInfo->vdisplay = pTimingInfo->crtc_vdisplay =
17020+ (u32) pDTD->part1.
17021+ v_active | ((u32) (pDTD->part1.v_high & 0xF0) << 4);
17022+
17023+ pTimingInfo->crtc_hblank_start = pTimingInfo->crtc_hdisplay;
17024+
17025+ /* Horizontal Total = Horizontal Active + Horizontal Blanking */
17026+ dwHBLHigh = (u32) (pDTD->part1.h_high & 0x0F);
17027+ pTimingInfo->htotal = pTimingInfo->crtc_htotal =
17028+ pTimingInfo->crtc_hdisplay + (u32) pDTD->part1.h_blank +
17029+ (dwHBLHigh << 8);
17030+
17031+ pTimingInfo->crtc_hblank_end = pTimingInfo->crtc_htotal - 1;
17032+
17033+ /* Vertical Total = Vertical Active + Vertical Blanking */
17034+ dwVBLHigh = (u32) (pDTD->part1.v_high & 0x0F);
17035+ pTimingInfo->vtotal = pTimingInfo->crtc_vtotal =
17036+ pTimingInfo->crtc_vdisplay + (u32) pDTD->part1.v_blank +
17037+ (dwVBLHigh << 8);
17038+ pTimingInfo->crtc_vblank_start = pTimingInfo->crtc_vdisplay;
17039+ pTimingInfo->crtc_vblank_end = pTimingInfo->crtc_vtotal - 1;
17040+
17041+ /* Horz Sync Start = Horz Blank Start + Horz Sync Offset */
17042+ dwHSHigh1 = (u32) (pDTD->part2.sync_off_width_high & 0xC0);
17043+ pTimingInfo->hsync_start = pTimingInfo->crtc_hsync_start =
17044+ pTimingInfo->crtc_hblank_start + (u32) pDTD->part2.h_sync_off +
17045+ (dwHSHigh1 << 2);
17046+
17047+ /* Horz Sync End = Horz Sync Start + Horz Sync Pulse Width */
17048+ dwHSHigh2 = (u32) (pDTD->part2.sync_off_width_high & 0x30);
17049+ pTimingInfo->hsync_end = pTimingInfo->crtc_hsync_end =
17050+ pTimingInfo->crtc_hsync_start + (u32) pDTD->part2.h_sync_width +
17051+ (dwHSHigh2 << 4) - 1;
17052+
17053+ /* Vert Sync Start = Vert Blank Start + Vert Sync Offset */
17054+ dwVSHigh1 = (u32) (pDTD->part2.sync_off_width_high & 0x0C);
17055+ dwVPWLow = (u32) (pDTD->part2.v_sync_off_width & 0xF0);
17056+
17057+ pTimingInfo->vsync_start = pTimingInfo->crtc_vsync_start =
17058+ pTimingInfo->crtc_vblank_start + (dwVPWLow >> 4) + (dwVSHigh1 << 2);
17059+
17060+ /* Vert Sync End = Vert Sync Start + Vert Sync Pulse Width */
17061+ dwVSHigh2 = (u32) (pDTD->part2.sync_off_width_high & 0x03);
17062+ pTimingInfo->vsync_end = pTimingInfo->crtc_vsync_end =
17063+ pTimingInfo->crtc_vsync_start +
17064+ (u32) (pDTD->part2.v_sync_off_width & 0x0F) + (dwVSHigh2 << 4) - 1;
17065+
17066+ /* Fill up flags */
17067+ status = TRUE;
17068+
17069+ return status;
17070+}
17071+
17072+static void i830_translate_timing2dtd(struct drm_display_mode * mode, struct intel_sdvo_dtd *dtd)
17073+{
17074+ u16 width, height;
17075+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
17076+ u16 h_sync_offset, v_sync_offset;
17077+
17078+ width = mode->crtc_hdisplay;
17079+ height = mode->crtc_vdisplay;
17080+
17081+ /* do some mode translations */
17082+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
17083+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
17084+
17085+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
17086+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
17087+
17088+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
17089+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
17090+
17091+ dtd->part1.clock = mode->clock * 1000 / 10000; /* mode clock is in kHz; DTD clock is in 10 kHz units */
17092+ dtd->part1.h_active = width & 0xff;
17093+ dtd->part1.h_blank = h_blank_len & 0xff;
17094+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
17095+ ((h_blank_len >> 8) & 0xf);
17096+ dtd->part1.v_active = height & 0xff;
17097+ dtd->part1.v_blank = v_blank_len & 0xff;
17098+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
17099+ ((v_blank_len >> 8) & 0xf);
17100+
17101+ dtd->part2.h_sync_off = h_sync_offset;
17102+ dtd->part2.h_sync_width = h_sync_len & 0xff;
17103+ dtd->part2.v_sync_off_width = ((v_sync_offset & 0xf) << 4 |
17104+ (v_sync_len & 0xf)) + 1;
17105+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
17106+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
17107+ ((v_sync_len & 0x30) >> 4);
17108+
17109+ dtd->part2.dtd_flags = 0x18;
17110+ if (mode->flags & V_PHSYNC)
17111+ dtd->part2.dtd_flags |= 0x2;
17112+ if (mode->flags & V_PVSYNC)
17113+ dtd->part2.dtd_flags |= 0x4;
17114+
17115+ dtd->part2.sdvo_flags = 0;
17116+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
17117+ dtd->part2.reserved = 0;
17118+
17119+}
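/*
 * A sketch of the packing above for the standard 800x600@60 timing
 * (40 MHz dot clock, htotal 1056, hence h_blank_len = 256):
 *
 *	dtd->part1.clock    = 40000 * 1000 / 10000 = 4000  (10 kHz units)
 *	dtd->part1.h_active = 800 & 0xff            = 0x20
 *	dtd->part1.h_blank  = 256 & 0xff            = 0x00
 *	dtd->part1.h_high   = ((800 >> 8) & 0xf) << 4 | ((256 >> 8) & 0xf) = 0x31
 *
 * i830_translate_dtd2timing() reverses this: 0x20 | ((0x31 & 0xF0) << 4)
 * recovers hdisplay = 0x320 = 800.
 */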
17120+
17121+static bool i830_tv_set_target_io(struct drm_output* output)
17122+{
17123+ bool status;
17124+ struct intel_output *intel_output = output->driver_private;
17125+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17126+
17127+ status = intel_sdvo_set_target_input(output, TRUE, FALSE);
17128+ if (status)
17129+ status = intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
17130+
17131+ return status;
17132+}
17133+
17134+static bool i830_tv_get_max_min_dotclock(struct drm_output* output)
17135+{
17136+ u32 dwMaxClkRateMul = 1;
17137+ u32 dwMinClkRateMul = 1;
17138+ u8 status;
17139+
17140+ struct intel_output *intel_output = output->driver_private;
17141+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17142+
17143+
17144+ /* Set Target Input/Outputs */
17145+ status = i830_tv_set_target_io(output);
17146+ if (!status) {
17147+ DRM_DEBUG("SetTargetIO function FAILED!!! \n");
17148+ return status;
17149+ }
17150+
17151+ /* Get the clock rate multiplies supported by the encoder */
17152+ dwMinClkRateMul = 1;
17153+#if 0
17154+ /* Why we need to do this: sometimes the TV fails to come up because of a wrong setting left over from last time */
17155+ dwClkRateMulMask = i830_sdvo_get_clock_rate_mult(output);
17156+
17157+ /* Find the minimum clock rate multiplier supported */
17158+
17159+ if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_1X)
17160+ dwMinClkRateMul = 1;
17161+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_2X)
17162+ dwMinClkRateMul = 2;
17163+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_3X)
17164+ dwMinClkRateMul = 3;
17165+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_4X)
17166+ dwMinClkRateMul = 4;
17167+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_5X)
17168+ dwMinClkRateMul = 5;
17169+ else
17170+ return FALSE;
17171+#endif
17172+ /* Get the min and max input Dot Clock supported by the encoder */
17173+ status = i830_sdvo_get_input_output_pixelclock_range(output, FALSE); /* input */
17174+
17175+ if (!status) {
17176+ DRM_DEBUG("SDVOGetInputPixelClockRange() FAILED!!! \n");
17177+ return status;
17178+ }
17179+
17180+ /* Get the min and max output Dot Clock supported by the encoder */
17181+ status = i830_sdvo_get_input_output_pixelclock_range(output, TRUE); /* output */
17182+
17183+ if (!status) {
17184+ DRM_DEBUG("SDVOGetOutputPixelClockRange() FAILED!!! \n");
17185+ return status;
17186+ }
17187+
17188+ /* Maximum Dot Clock supported should be the minimum of the maximum */
17189+ /* dot clock supported by the encoder & the SDVO bus clock rate */
17190+ sdvo_priv->dwMaxDotClk =
17191+ ((sdvo_priv->dwMaxInDotClk * dwMaxClkRateMul) <
17192+ (sdvo_priv->dwMaxOutDotClk)) ? (sdvo_priv->dwMaxInDotClk *
17193+ dwMaxClkRateMul) : (sdvo_priv->dwMaxOutDotClk);
17194+
17195+ /* Minimum Dot Clock supported should be the maximum of the minimum */
17196+ /* dot clocks supported by the input & output */
17197+ sdvo_priv->dwMinDotClk =
17198+ ((sdvo_priv->dwMinInDotClk * dwMinClkRateMul) >
17199+ (sdvo_priv->dwMinOutDotClk)) ? (sdvo_priv->dwMinInDotClk *
17200+ dwMinClkRateMul) : (sdvo_priv->dwMinOutDotClk);
17201+
17202+ DRM_DEBUG("leave, i830_tv_get_max_min_dotclock() !!! \n");
17203+
17204+ return TRUE;
17205+
17206+}
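/*
 * The two ternaries above clamp to the tighter of the input and output
 * limits.  With assumed encoder values dwMaxInDotClk = 200000,
 * dwMaxOutDotClk = 150000 and dwMaxClkRateMul = 1, dwMaxDotClk becomes
 * 150000; with dwMinInDotClk = 25000 and dwMinOutDotClk = 20000,
 * dwMinDotClk becomes 25000.
 */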
17207+
17208+bool i830_tv_mode_check_support(struct drm_output* output, struct drm_display_mode* pMode)
17209+{
17210+ u32 dwDotClk = 0;
17211+ bool status;
17212+ struct intel_output *intel_output = output->driver_private;
17213+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17214+
17215+
17216+ dwDotClk = pMode->clock * 1000;
17217+
17218+ /*TODO: Need to fix this from the SoftBios side... */
17219+ if (sdvo_priv->TVMode == TVMODE_HDTV) {
17220+ if (((pMode->hdisplay == 1920) && (pMode->vdisplay == 1080)) ||
17221+ ((pMode->hdisplay == 1864) && (pMode->vdisplay == 1050)) ||
17222+ ((pMode->hdisplay == 1704) && (pMode->vdisplay == 960)) ||
17223+ ((pMode->hdisplay == 640) && (pMode->vdisplay == 448)))
17224+ return true;
17225+ }
17226+
17227+ if (sdvo_priv->bGetClk) {
17228+ status = i830_tv_get_max_min_dotclock(output);
17229+ if (!status) {
17230+ DRM_DEBUG("get max min dotclock failed\n");
17231+ return status;
17232+ }
17233+ sdvo_priv->bGetClk = false;
17234+ }
17235+
17236+ /* Check the dot clock first: the requested dot clock must fall */
17237+ /* within the supported range for the mode to be supported */
17238+ if ((dwDotClk <= sdvo_priv->dwMinDotClk) || (dwDotClk >= sdvo_priv->dwMaxDotClk)) {
17239+ DRM_DEBUG("dwDotClk value is out of range\n");
17240+ /*TODO: now consider VBT add and Remove mode. */
17241+ /* This mode can't be supported */
17242+ return false;
17243+ }
17244+ DRM_DEBUG("i830_tv_mode_check_support leave\n");
17245+ return true;
17246+
17247+}
17248+
17249+void print_Pll(char *prefix, ex_intel_clock_t * clock)
17250+{
17251+ DRM_DEBUG("%s: dotclock %d vco %d ((m %d, m1 %d, m2 %d), n %d, (p %d, p1 %d, p2 %d))\n",
17252+ prefix, clock->dot, clock->vco, clock->m, clock->m1, clock->m2,
17253+ clock->n, clock->p, clock->p1, clock->p2);
17254+}
17255+
17256+extern int intel_panel_fitter_pipe (struct drm_device *dev);
17257+extern int intel_get_core_clock_speed(struct drm_device *dev);
17258+
17259+void i830_sdvo_tv_settiming(struct drm_crtc *crtc, struct drm_display_mode * mode,
17260+ struct drm_display_mode * adjusted_mode)
17261+{
17262+
17263+ struct drm_device *dev = crtc->dev;
17264+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17265+
17266+ int pipe = 0;
17267+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
17268+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
17269+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
17270+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
17271+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
17272+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
17273+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
17274+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
17275+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
17276+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
17277+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
17278+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
17279+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
17280+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
17281+ ex_intel_clock_t clock;
17282+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
17283+ bool ok, is_sdvo = FALSE;
17284+ int centerX = 0, centerY = 0;
17285+ u32 ulPortMultiplier, ulTemp, ulDotClock;
17286+ int sdvo_pixel_multiply;
17287+ u32 dotclock;
17288+
17289+ /* Set up some convenient bools for what outputs are connected to
17290+ * our pipe, used in DPLL setup.
17291+ */
17292+ if (!crtc->fb) {
17293+ DRM_ERROR("Can't set mode without attached fb\n");
17294+ return;
17295+ }
17296+ is_sdvo = TRUE;
17297+ ok = TRUE;
17298+ ulDotClock = mode->clock * 1000 / 1000; /* mode->clock is already in kHz */
17299+ for (ulPortMultiplier = 1; ulPortMultiplier <= 5; ulPortMultiplier++) {
17300+ ulTemp = ulDotClock * ulPortMultiplier;
17301+ if ((ulTemp >= 100000) && (ulTemp <= 200000)) {
17302+ if ((ulPortMultiplier == 3) || (ulPortMultiplier == 5))
17303+ continue;
17304+ else
17305+ break;
17306+ }
17307+ }
17308+ /* e.g. ulPortMultiplier = 2: a 56,670 kHz mode clock gives a dotclock of 113,340 (0x1babc), which lands in range and falls into the first case below */
17309+ /* add two to each m and n value -- optimizes (slightly) the search algo. */
17310+ dotclock = ulPortMultiplier * (mode->clock * 1000) / 1000;
17311+ DRM_DEBUG("mode->clock is %x, dotclock is %x,!\n", mode->clock,dotclock);
17312+
17313+ if ((dotclock >= 100000) && (dotclock < 140500)) {
17314+ DRM_DEBUG("dotclock is between 100000 and 140500!\n");
17315+ clock.p1 = 0x2;
17316+ clock.p2 = 0x00;
17317+ clock.n = 0x3;
17318+ clock.m1 = 0x10;
17319+ clock.m2 = 0x8;
17320+ } else if ((dotclock >= 140500) && (dotclock <= 200000)) {
17321+
17322+ DRM_DEBUG("dotclock is between 140500 and 200000!\n");
17323+ clock.p1 = 0x1;
17324+ /* CG was using 0x10 from the spreadsheet; it should be 0 */
17325+ /*pClock_Data->Clk_P2 = 0x10; */
17326+ clock.p2 = 0x00;
17327+ clock.n = 0x6;
17328+ clock.m1 = 0xC;
17329+ clock.m2 = 0x8;
17330+ } else
17331+ ok = FALSE;
17332+
17333+ if (!ok)
17334+ DRM_DEBUG("Couldn't find PLL settings for mode!\n");
17335+
17336+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
17337+
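/*
 * For the first divider set above (n = 0x3, m1 = 0x10, m2 = 0x8) this
 * packs to fp = 0x031008; for the second set (n = 0x6, m1 = 0xC,
 * m2 = 0x8) it packs to fp = 0x060C08.
 */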
17338+ dpll = DPLL_VGA_MODE_DIS | DPLL_CLOCK_PHASE_9;
17339+
17340+ dpll |= DPLLB_MODE_DAC_SERIAL;
17341+
17342+ sdvo_pixel_multiply = ulPortMultiplier;
17343+ dpll |= DPLL_DVO_HIGH_SPEED;
17344+ dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
17345+
17346+ /* compute bitmask from p1 value */
17347+ dpll |= (clock.p1 << 16);
17348+ dpll |= (clock.p2 << 24);
17349+
17350+ dpll |= PLL_REF_INPUT_TVCLKINBC;
17351+
17352+ /* Set up the display plane register */
17353+ dspcntr = DISPPLANE_GAMMA_ENABLE;
17354+ switch (crtc->fb->bits_per_pixel) {
17355+ case 8:
17356+ dspcntr |= DISPPLANE_8BPP;
17357+ break;
17358+ case 16:
17359+ if (crtc->fb->depth == 15)
17360+ dspcntr |= DISPPLANE_15_16BPP;
17361+ else
17362+ dspcntr |= DISPPLANE_16BPP;
17363+ break;
17364+ case 32:
17365+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
17366+ break;
17367+ default:
17368+ DRM_DEBUG("unknown display bpp\n");
17369+ }
17370+
17371+ if (pipe == 0)
17372+ dspcntr |= DISPPLANE_SEL_PIPE_A;
17373+ else
17374+ dspcntr |= DISPPLANE_SEL_PIPE_B;
17375+
17376+ pipeconf = I915_READ(pipeconf_reg);
17377+ if (pipe == 0) {
17378+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
17379+ * core speed.
17380+ *
17381+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
17382+ * pipe == 0 check?
17383+ */
17384+ if (mode->clock * 1000 > intel_get_core_clock_speed(dev) * 9 / 10) { /* mode->clock is in kHz */
17385+ pipeconf |= PIPEACONF_DOUBLE_WIDE; DRM_DEBUG("PIPEACONF_DOUBLE_WIDE\n");
17386+ } else {
17387+ pipeconf &= ~PIPEACONF_DOUBLE_WIDE; DRM_DEBUG("non PIPEACONF_DOUBLE_WIDE\n"); }
17388+ }
17389+
17390+ dspcntr |= DISPLAY_PLANE_ENABLE;
17391+ pipeconf |= PIPEACONF_ENABLE;
17392+ dpll |= DPLL_VCO_ENABLE;
17393+
17394+ /* Disable the panel fitter if it was on our pipe */
17395+ if (intel_panel_fitter_pipe(dev) == pipe)
17396+ I915_WRITE(PFIT_CONTROL, 0);
17397+
17398+ print_Pll("chosen", &clock);
17399+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
17400+ drm_mode_debug_printmodeline(dev, mode);
17401+ DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d\n",
17402+ mode->mode_id, mode->name, mode->crtc_htotal, mode->crtc_hdisplay,
17403+ mode->crtc_hblank_end, mode->crtc_hblank_start,
17404+ mode->crtc_vtotal, mode->crtc_vdisplay,
17405+ mode->crtc_vblank_end, mode->crtc_vblank_start);
17406+ DRM_DEBUG("clock regs: 0x%08x, 0x%08x,dspntr is 0x%8x, pipeconf is 0x%8x\n", (int)dpll,
17407+ (int)fp,(int)dspcntr,(int)pipeconf);
17408+
17409+ if (dpll & DPLL_VCO_ENABLE) {
17410+ I915_WRITE(fp_reg, fp);
17411+ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
17412+ (void)I915_READ(dpll_reg);
17413+ udelay(150);
17414+ }
17415+ I915_WRITE(fp_reg, fp);
17416+ I915_WRITE(dpll_reg, dpll);
17417+ (void)I915_READ(dpll_reg);
17418+ /* Wait for the clocks to stabilize. */
17419+ udelay(150);
17420+
17421+ /* write it again -- the BIOS does, after all */
17422+ I915_WRITE(dpll_reg, dpll);
17423+ I915_READ(dpll_reg);
17424+ /* Wait for the clocks to stabilize. */
17425+ udelay(150);
17426+
17427+ I915_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
17428+ ((mode->crtc_htotal - 1) << 16));
17429+ I915_WRITE(hblank_reg, (mode->crtc_hblank_start - 1) |
17430+ ((mode->crtc_hblank_end - 1) << 16));
17431+ I915_WRITE(hsync_reg, (mode->crtc_hsync_start - 1) |
17432+ ((mode->crtc_hsync_end - 1) << 16));
17433+ I915_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
17434+ ((mode->crtc_vtotal - 1) << 16));
17435+ I915_WRITE(vblank_reg, (mode->crtc_vblank_start - 1) |
17436+ ((mode->crtc_vblank_end - 1) << 16));
17437+ I915_WRITE(vsync_reg, (mode->crtc_vsync_start - 1) |
17438+ ((mode->crtc_vsync_end - 1) << 16));
17439+ I915_WRITE(dspstride_reg, crtc->fb->pitch);
17440+
17441+ if (0) {
17442+
17443+ centerX = (adjusted_mode->crtc_hdisplay - mode->hdisplay) / 2;
17444+ centerY = (adjusted_mode->crtc_vdisplay - mode->vdisplay) / 2;
17445+ I915_WRITE(dspsize_reg,
17446+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
17447+
17448+ I915_WRITE(dsppos_reg, centerY << 16 | centerX);
17449+ I915_WRITE(pipesrc_reg,
17450+ ((adjusted_mode->crtc_hdisplay -
17451+ 1) << 16) | (adjusted_mode->crtc_vdisplay - 1));
17452+ } else {
17453+ /* pipesrc and dspsize control the size that is scaled from, which should
17454+ * always be the user's requested size.
17455+ */
17456+ I915_WRITE(dspsize_reg,
17457+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
17458+ I915_WRITE(dsppos_reg, 0);
17459+ I915_WRITE(pipesrc_reg,
17460+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
17461+
17462+ }
17463+ I915_WRITE(pipeconf_reg, pipeconf);
17464+ I915_READ(pipeconf_reg);
17465+
17466+ intel_wait_for_vblank(dev);
17467+
17468+ I915_WRITE(dspcntr_reg, dspcntr);
17469+ /* Flush the plane changes */
17470+ //intel_pipe_set_base(crtc, 0, 0);
17471+ /* Disable the VGA plane that we never use */
17472+ //I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
17473+ //intel_wait_for_vblank(dev);
17474+
17475+}
17476+
17477+static void intel_sdvo_mode_set(struct drm_output *output,
17478+ struct drm_display_mode *mode,
17479+ struct drm_display_mode *adjusted_mode)
17480+{
17481+ struct drm_device *dev = output->dev;
17482+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17483+ struct drm_crtc *crtc = output->crtc;
17484+ struct intel_crtc *intel_crtc = crtc->driver_private;
17485+ struct intel_output *intel_output = output->driver_private;
17486+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17487+
17488+ u32 sdvox;
17489+ struct intel_sdvo_dtd output_dtd;
17490+ int sdvo_pixel_multiply;
17491+ bool success;
17492+ struct drm_display_mode * save_mode;
17493+ DRM_DEBUG("xxintel_sdvo_mode_set\n");
17494+
17495+ if (!mode)
17496+ return;
17497+
17498+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
17499+ if (!i830_tv_mode_check_support(output, mode)) {
17500+			DRM_DEBUG("mode not supported, falling back to the forced mode\n");
17501+ mode = &tv_modes[0].mode_entry;
17502+ drm_mode_set_crtcinfo(mode, 0);
17503+ }
17504+ }
17505+ save_mode = mode;
17506+#if 0
17507+ width = mode->crtc_hdisplay;
17508+ height = mode->crtc_vdisplay;
17509+
17510+ /* do some mode translations */
17511+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
17512+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
17513+
17514+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
17515+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
17516+
17517+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
17518+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
17519+
17520+ output_dtd.part1.clock = mode->clock / 10;
17521+ output_dtd.part1.h_active = width & 0xff;
17522+ output_dtd.part1.h_blank = h_blank_len & 0xff;
17523+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
17524+ ((h_blank_len >> 8) & 0xf);
17525+ output_dtd.part1.v_active = height & 0xff;
17526+ output_dtd.part1.v_blank = v_blank_len & 0xff;
17527+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
17528+ ((v_blank_len >> 8) & 0xf);
17529+
17530+ output_dtd.part2.h_sync_off = h_sync_offset;
17531+ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
17532+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
17533+ (v_sync_len & 0xf);
17534+ output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
17535+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
17536+ ((v_sync_len & 0x30) >> 4);
17537+
17538+ output_dtd.part2.dtd_flags = 0x18;
17539+ if (mode->flags & V_PHSYNC)
17540+ output_dtd.part2.dtd_flags |= 0x2;
17541+ if (mode->flags & V_PVSYNC)
17542+ output_dtd.part2.dtd_flags |= 0x4;
17543+
17544+ output_dtd.part2.sdvo_flags = 0;
17545+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
17546+ output_dtd.part2.reserved = 0;
17547+#else
17548+ /* disable and enable the display output */
17549+ intel_sdvo_set_target_output(output, 0);
17550+
17551+ //intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);
17552+ memset(&output_dtd, 0, sizeof(struct intel_sdvo_dtd));
17553+ /* check if this mode can be supported or not */
17554+
17555+ i830_translate_timing2dtd(mode, &output_dtd);
17556+#endif
17557+ intel_sdvo_set_target_output(output, 0);
17558+ /* set the target input & output first */
17559+ /* Set the input timing to the screen. Assume always input 0. */
17560+ intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
17561+ intel_sdvo_set_output_timing(output, &output_dtd);
17562+ intel_sdvo_set_target_input(output, true, false);
17563+
17564+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
17565+ i830_tv_set_overscan_parameters(output);
17566+ /* Set TV standard */
17567+ #if 0
17568+ if (sdvo_priv->TVMode == TVMODE_HDTV)
17569+ i830_sdvo_map_hdtvstd_bitmask(output);
17570+ else
17571+ i830_sdvo_map_sdtvstd_bitmask(output);
17572+ #endif
17573+ /* Set TV format */
17574+ i830_sdvo_set_tvoutputs_formats(output);
17575+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
17576+ * provide the device with a timing it can support, if it supports that
17577+ * feature. However, presumably we would need to adjust the CRTC to output
17578+ * the preferred timing, and we don't support that currently.
17579+ */
17580+ success = i830_sdvo_create_preferred_input_timing(output, mode);
17581+ if (success) {
17582+ i830_sdvo_get_preferred_input_timing(output, &output_dtd);
17583+ }
17584+ /* Set the overscan values now as input timing is dependent on overscan values */
17585+
17586+ }
17587+
17588+
17589+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
17590+ * provide the device with a timing it can support, if it supports that
17591+ * feature. However, presumably we would need to adjust the CRTC to
17592+ * output the preferred timing, and we don't support that currently.
17593+ */
17594+#if 0
17595+ success = intel_sdvo_create_preferred_input_timing(output, clock,
17596+ width, height);
17597+ if (success) {
17598+ struct intel_sdvo_dtd *input_dtd;
17599+
17600+ intel_sdvo_get_preferred_input_timing(output, &input_dtd);
17601+ intel_sdvo_set_input_timing(output, &input_dtd);
17602+ }
17603+#else
17604+ /* Set input timing (in DTD) */
17605+ intel_sdvo_set_input_timing(output, &output_dtd);
17606+#endif
17607+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
17608+
17609+ DRM_DEBUG("xxintel_sdvo_mode_set tv path\n");
17610+ i830_tv_program_display_params(output);
17611+		/* translate the DTD back into timing values */
17612+		i830_translate_dtd2timing(mode, &output_dtd);
17613+		/* Program the clock rate multiplier based on the pixel clock */
17614+ if ((mode->clock * 1000 >= 24000000)
17615+ && (mode->clock * 1000 < 50000000)) {
17616+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_4X);
17617+ } else if ((mode->clock * 1000 >= 50000000)
17618+ && (mode->clock * 1000 < 100000000)) {
17619+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_2X);
17620+ } else if ((mode->clock * 1000 >= 100000000)
17621+ && (mode->clock * 1000 < 200000000)) {
17622+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_1X);
17623+ } else
17624+			DRM_DEBUG("failed to set the SDVO clock rate multiplier\n");
17625+
17626+ i830_sdvo_tv_settiming(output->crtc, mode, adjusted_mode);
17627+ //intel_crtc_mode_set(output->crtc, mode,adjusted_mode,0,0);
17628+ mode = save_mode;
17629+ } else {
17630+ DRM_DEBUG("xxintel_sdvo_mode_set - non tv path\n");
17631+ switch (intel_sdvo_get_pixel_multiplier(mode)) {
17632+ case 1:
17633+ intel_sdvo_set_clock_rate_mult(output,
17634+ SDVO_CLOCK_RATE_MULT_1X);
17635+ break;
17636+ case 2:
17637+ intel_sdvo_set_clock_rate_mult(output,
17638+ SDVO_CLOCK_RATE_MULT_2X);
17639+ break;
17640+ case 4:
17641+ intel_sdvo_set_clock_rate_mult(output,
17642+ SDVO_CLOCK_RATE_MULT_4X);
17643+ break;
17644+ }
17645+ }
17646+ /* Set the SDVO control regs. */
17647+ if (0/*IS_I965GM(dev)*/) {
17648+ sdvox = SDVO_BORDER_ENABLE;
17649+ } else {
17650+ sdvox = I915_READ(sdvo_priv->output_device);
17651+ switch (sdvo_priv->output_device) {
17652+ case SDVOB:
17653+ sdvox &= SDVOB_PRESERVE_MASK;
17654+ break;
17655+ case SDVOC:
17656+ sdvox &= SDVOC_PRESERVE_MASK;
17657+ break;
17658+ }
17659+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
17660+ }
17661+ if (intel_crtc->pipe == 1)
17662+ sdvox |= SDVO_PIPE_B_SELECT;
17663+
17664+ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
17665+ if (IS_I965G(dev)) {
17666+ /* done in crtc_mode_set as the dpll_md reg must be written
17667+ early */
17668+ } else if (IS_POULSBO(dev) || IS_I945G(dev) || IS_I945GM(dev)) {
17669+ /* done in crtc_mode_set as it lives inside the
17670+ dpll register */
17671+ } else {
17672+ sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
17673+ }
17674+
17675+ intel_sdvo_write_sdvox(output, sdvox);
17676+ i830_sdvo_set_iomap(output);
17677+}
17678+
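In the TV path above, the SDVO clock-rate multiplier is chosen purely from the pixel clock; mode->clock is in kHz, which is why the code multiplies by 1000 before comparing against Hz. The same policy as a condensed, hypothetical helper:

/* Map a pixel clock in kHz onto the SDVO clock rate multiplier used above. */
static int sdvo_pick_clock_rate_mult(int clock_khz)
{
	if (clock_khz >= 24000 && clock_khz < 50000)
		return SDVO_CLOCK_RATE_MULT_4X;	/* 24-50 MHz */
	if (clock_khz >= 50000 && clock_khz < 100000)
		return SDVO_CLOCK_RATE_MULT_2X;	/* 50-100 MHz */
	if (clock_khz >= 100000 && clock_khz < 200000)
		return SDVO_CLOCK_RATE_MULT_1X;	/* 100-200 MHz */
	return -1;				/* out of range */
}
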
17679+static void intel_sdvo_dpms(struct drm_output *output, int mode)
17680+{
17681+ struct drm_device *dev = output->dev;
17682+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17683+ struct intel_output *intel_output = output->driver_private;
17684+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17685+ u32 temp;
17686+
17687+ DRM_DEBUG("xxintel_sdvo_dpms, dpms mode is %d, active output is %d\n",mode,sdvo_priv->active_outputs);
17688+
17689+#ifdef SII_1392_WA
17690+ if((SII_1392==1) && (drm_psb_no_fb ==1)) {
17691+ DRM_DEBUG("don't touch 1392 card when no_fb=1\n");
17692+ return;
17693+ }
17694+#endif
17695+
17696+ if (mode != DPMSModeOn) {
17697+ intel_sdvo_set_active_outputs(output, sdvo_priv->output_device);
17698+ if (0)
17699+ intel_sdvo_set_encoder_power_state(output, mode);
17700+
17701+ if (mode == DPMSModeOff) {
17702+ temp = I915_READ(sdvo_priv->output_device);
17703+ if ((temp & SDVO_ENABLE) != 0) {
17704+ intel_sdvo_write_sdvox(output, temp & ~SDVO_ENABLE);
17705+ }
17706+ }
17707+ } else {
17708+ bool input1, input2;
17709+ int i;
17710+ u8 status;
17711+
17712+ temp = I915_READ(sdvo_priv->output_device);
17713+ if ((temp & SDVO_ENABLE) == 0)
17714+ intel_sdvo_write_sdvox(output, temp | SDVO_ENABLE);
17715+ for (i = 0; i < 2; i++)
17716+ intel_wait_for_vblank(dev);
17717+
17718+ status = intel_sdvo_get_trained_inputs(output, &input1,
17719+ &input2);
17720+
17721+
17722+		/* Warn if the device reported failure to sync.
17723+		 * Many SDVO devices fail to report sync status, but given
17724+		 * that the command status is a success, assume we succeeded.
17725+		 */
17726+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
17727+ DRM_DEBUG("First %s output reported failure to sync\n",
17728+ SDVO_NAME(sdvo_priv));
17729+ }
17730+
17731+ if (0)
17732+ intel_sdvo_set_encoder_power_state(output, mode);
17733+
17734+ DRM_DEBUG("xiaolin active output is %d\n",sdvo_priv->active_outputs);
17735+ intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);
17736+ }
17737+ return;
17738+}
17739+
17740+static void intel_sdvo_save(struct drm_output *output)
17741+{
17742+ struct drm_device *dev = output->dev;
17743+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17744+ struct intel_output *intel_output = output->driver_private;
17745+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17746+
17747+ DRM_DEBUG("xxintel_sdvo_save\n");
17748+
17749+ sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(output);
17750+ intel_sdvo_get_active_outputs(output, &sdvo_priv->save_active_outputs);
17751+
17752+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
17753+ intel_sdvo_set_target_input(output, true, false);
17754+ intel_sdvo_get_input_timing(output,
17755+ &sdvo_priv->save_input_dtd_1);
17756+ }
17757+
17758+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
17759+ intel_sdvo_set_target_input(output, false, true);
17760+ intel_sdvo_get_input_timing(output,
17761+ &sdvo_priv->save_input_dtd_2);
17762+ }
17763+
17764+ intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
17765+ intel_sdvo_get_output_timing(output,
17766+ &sdvo_priv->save_output_dtd[sdvo_priv->active_outputs]);
17767+ sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
17768+}
17769+
17770+static void intel_sdvo_restore(struct drm_output *output)
17771+{
17772+ struct drm_device *dev = output->dev;
17773+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17774+ struct intel_output *intel_output = output->driver_private;
17775+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17776+ int i;
17777+ bool input1, input2;
17778+ u8 status;
17779+ DRM_DEBUG("xxintel_sdvo_restore\n");
17780+
17781+ intel_sdvo_set_active_outputs(output, 0);
17782+
17783+ intel_sdvo_set_target_output(output, sdvo_priv->save_active_outputs);
17784+ intel_sdvo_set_output_timing(output,
17785+ &sdvo_priv->save_output_dtd[sdvo_priv->save_active_outputs]);
17786+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
17787+ intel_sdvo_set_target_input(output, true, false);
17788+ intel_sdvo_set_input_timing(output, &sdvo_priv->save_input_dtd_1);
17789+ }
17790+
17791+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
17792+ intel_sdvo_set_target_input(output, false, true);
17793+ intel_sdvo_set_input_timing(output, &sdvo_priv->save_input_dtd_2);
17794+ }
17795+
17796+ intel_sdvo_set_clock_rate_mult(output, sdvo_priv->save_sdvo_mult);
17797+
17798+ I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
17799+
17800+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
17801+ {
17802+ for (i = 0; i < 2; i++)
17803+ intel_wait_for_vblank(dev);
17804+ status = intel_sdvo_get_trained_inputs(output, &input1, &input2);
17805+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
17806+ DRM_DEBUG("First %s output reported failure to sync\n",
17807+ SDVO_NAME(sdvo_priv));
17808+ }
17809+
17810+ i830_sdvo_set_iomap(output);
17811+ intel_sdvo_set_active_outputs(output, sdvo_priv->save_active_outputs);
17812+}
17813+
17814+static bool i830_tv_mode_find(struct drm_output * output,struct drm_display_mode * pMode)
17815+{
17816+ struct intel_output *intel_output = output->driver_private;
17817+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17818+
17819+ bool find = FALSE;
17820+ int i;
17821+
17822+ DRM_DEBUG("i830_tv_mode_find,0x%x\n", sdvo_priv->TVStandard);
17823+
17824+ for (i = 0; i < NUM_TV_MODES; i++)
17825+ {
17826+ const tv_mode_t *tv_mode = &tv_modes[i];
17827+ if (strcmp (tv_mode->mode_entry.name, pMode->name) == 0
17828+ && (pMode->type & M_T_TV)) {
17829+ find = TRUE;
17830+ break;
17831+ }
17832+ }
17833+ return find;
17834+}
17835+
17836+
17837+static int intel_sdvo_mode_valid(struct drm_output *output,
17838+ struct drm_display_mode *mode)
17839+{
17840+ struct intel_output *intel_output = output->driver_private;
17841+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17842+
17843+ bool status = TRUE;
17844+ DRM_DEBUG("xxintel_sdvo_mode_valid\n");
17845+
17846+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
17847+ status = i830_tv_mode_check_support(output, mode);
17848+ if (status) {
17849+ if(i830_tv_mode_find(output,mode)) {
17850+ DRM_DEBUG("%s is ok\n", mode->name);
17851+ return MODE_OK;
17852+ }
17853+ else
17854+ return MODE_CLOCK_RANGE;
17855+ } else {
17856+			DRM_DEBUG("%s is not supported\n",
17857+				  mode->name);
17858+ return MODE_CLOCK_RANGE;
17859+ }
17860+ }
17861+
17862+ if (mode->flags & V_DBLSCAN)
17863+ return MODE_NO_DBLESCAN;
17864+
17865+ if (sdvo_priv->pixel_clock_min > mode->clock)
17866+ return MODE_CLOCK_LOW;
17867+
17868+ if (sdvo_priv->pixel_clock_max < mode->clock)
17869+ return MODE_CLOCK_HIGH;
17870+
17871+ return MODE_OK;
17872+}
17873+
17874+static bool intel_sdvo_get_capabilities(struct drm_output *output, struct intel_sdvo_caps *caps)
17875+{
17876+ u8 status;
17877+
17878+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
17879+ status = intel_sdvo_read_response(output, caps, sizeof(*caps));
17880+ if (status != SDVO_CMD_STATUS_SUCCESS)
17881+ return false;
17882+
17883+ return true;
17884+}
17885+
17886+void i830_tv_get_default_params(struct drm_output * output)
17887+{
17888+ u32 dwSupportedSDTVBitMask = 0;
17889+ u32 dwSupportedHDTVBitMask = 0;
17890+ u32 dwTVStdBitmask = 0;
17891+
17892+ struct intel_output *intel_output = output->driver_private;
17893+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17894+
17895+
17896+ /* Get supported TV Standard */
17897+ i830_sdvo_get_supported_tvoutput_formats(output, &dwSupportedSDTVBitMask,
17898+ &dwSupportedHDTVBitMask,&dwTVStdBitmask);
17899+
17900+ sdvo_priv->dwSDVOSDTVBitMask = dwSupportedSDTVBitMask;
17901+ sdvo_priv->dwSDVOHDTVBitMask = dwSupportedHDTVBitMask;
17902+ sdvo_priv->TVStdBitmask = dwTVStdBitmask;
17903+
17904+}
17905+
17906+static enum drm_output_status intel_sdvo_detect(struct drm_output *output)
17907+{
17908+	u8 response[2] = { 0, 0 };	/* avoid reading stale data if all retries fail */
17909+ u8 status;
17910+ u8 count = 5;
17911+
17912+ char deviceName[256];
17913+ char *name_suffix;
17914+ char *name_prefix;
17915+ unsigned char bytes[2];
17916+
17917+ struct drm_device *dev = output->dev;
17918+
17919+ struct intel_output *intel_output = output->driver_private;
17920+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17921+
17922+ DRM_DEBUG("xxintel_sdvo_detect\n");
17923+ intel_sdvo_dpms(output, DPMSModeOn);
17924+
17925+ if (!intel_sdvo_get_capabilities(output, &sdvo_priv->caps)) {
17926+ /*No SDVO support, power down the pipe */
17927+ intel_sdvo_dpms(output, DPMSModeOff);
17928+ return output_status_disconnected;
17929+ }
17930+
17931+#ifdef SII_1392_WA
17932+ if ((sdvo_priv->caps.vendor_id == 0x04) && (sdvo_priv->caps.device_id==0xAE)){
17933+ /*Leave the control of 1392 to X server*/
17934+ SII_1392=1;
17935+		printk("%s: detected 1392 card, leaving the setting to the upper layer\n", __FUNCTION__);
17936+ if (drm_psb_no_fb == 0)
17937+ intel_sdvo_dpms(output, DPMSModeOff);
17938+ return output_status_disconnected;
17939+ }
17940+#endif
17941+ while (count--) {
17942+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
17943+ status = intel_sdvo_read_response(output, &response, 2);
17944+
17945+		if (count > 3 && status == SDVO_CMD_STATUS_PENDING) {
17946+ intel_sdvo_write_cmd(output,SDVO_CMD_RESET,NULL,0);
17947+ intel_sdvo_read_response(output, &response, 2);
17948+ continue;
17949+ }
17950+
17951+ if ((status != SDVO_CMD_STATUS_SUCCESS) || (response[0] == 0 && response[1] == 0)) {
17952+ udelay(500);
17953+ continue;
17954+ } else
17955+ break;
17956+ }
17957+ if (response[0] != 0 || response[1] != 0) {
17958+		/* Check which device types are attached: CRT/HDTV/S-Video/Composite. */
17959+		/* If a CRT and multiple TVs are attached, prefer them in this order: */
17960+		/*  1. RGB */
17961+		/*  2. HDTV */
17962+		/*  3. S-Video */
17963+		/*  4. Composite */
17964+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
17965+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
17966+ output->subpixel_order = SubPixelHorizontalRGB;
17967+ name_prefix = "TMDS";
17968+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
17969+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
17970+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
17971+ output->subpixel_order = SubPixelHorizontalRGB;
17972+ name_prefix = "TMDS";
17973+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
17974+ } else if (response[0] & SDVO_OUTPUT_RGB0) {
17975+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
17976+ output->subpixel_order = SubPixelHorizontalRGB;
17977+ name_prefix = "RGB0";
17978+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
17979+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_RGB1) {
17980+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
17981+ output->subpixel_order = SubPixelHorizontalRGB;
17982+ name_prefix = "RGB1";
17983+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
17984+ } else if (response[0] & SDVO_OUTPUT_YPRPB0) {
17985+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB0;
17986+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_YPRPB1) {
17987+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB1;
17988+ }
17989+		/* SCART is given second preference */
17990+ else if (response[0] & SDVO_OUTPUT_SCART0) {
17991+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART0;
17992+
17993+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SCART1) {
17994+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART1;
17995+ }
17996+ /* if S-Video type TV is connected along with Composite type TV give preference to S-Video */
17997+ else if (response[0] & SDVO_OUTPUT_SVID0) {
17998+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID0;
17999+
18000+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SVID1) {
18001+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID1;
18002+ }
18003+ /* Composite is given least preference */
18004+ else if (response[0] & SDVO_OUTPUT_CVBS0) {
18005+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS0;
18006+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_CVBS1) {
18007+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS1;
18008+ } else {
18009+ DRM_DEBUG("no display attached\n");
18010+
18011+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
18012+ DRM_DEBUG("%s: No active TMDS or RGB outputs (0x%02x%02x) 0x%08x\n",
18013+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1],
18014+ sdvo_priv->caps.output_flags);
18015+ name_prefix = "Unknown";
18016+ }
18017+
18018+		/* Initialize parameters for the TV connector */
18019+		if (sdvo_priv->active_outputs & SDVO_OUTPUT_TV0) {
18020+			DRM_INFO("TV is attached\n");
18021+			output->subpixel_order = SubPixelHorizontalRGB;
18022+			name_prefix = "TV0";
18023+			/* Initialize TV mode setting parameters */
18024+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TV;
18025+ sdvo_priv->bGetClk = TRUE;
18026+ if (sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB0 ||
18027+ sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB1) {
18028+ /*sdvo_priv->TVStandard = HDTV_SMPTE_274M_1080i60;*/
18029+ sdvo_priv->TVMode = TVMODE_HDTV;
18030+ } else {
18031+ /*sdvo_priv->TVStandard = TVSTANDARD_NTSC_M;*/
18032+ sdvo_priv->TVMode = TVMODE_SDTV;
18033+ }
18034+
18035+ /*intel_output->pDevice->TVEnabled = TRUE;*/
18036+
18037+ i830_tv_get_default_params(output);
18038+ /*Init Display parameter for TV */
18039+ sdvo_priv->OverScanX.Value = 0xffffffff;
18040+ sdvo_priv->OverScanY.Value = 0xffffffff;
18041+ sdvo_priv->dispParams.Brightness.Value = 0x80;
18042+ sdvo_priv->dispParams.FlickerFilter.Value = 0xffffffff;
18043+ sdvo_priv->dispParams.AdaptiveFF.Value = 7;
18044+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = 0xffffffff;
18045+ sdvo_priv->dispParams.Contrast.Value = 0x40;
18046+ sdvo_priv->dispParams.PositionX.Value = 0x200;
18047+ sdvo_priv->dispParams.PositionY.Value = 0x200;
18048+ sdvo_priv->dispParams.DotCrawl.Value = 1;
18049+ sdvo_priv->dispParams.ChromaFilter.Value = 1;
18050+ sdvo_priv->dispParams.LumaFilter.Value = 2;
18051+ sdvo_priv->dispParams.Sharpness.Value = 4;
18052+ sdvo_priv->dispParams.Saturation.Value = 0x45;
18053+ sdvo_priv->dispParams.Hue.Value = 0x40;
18054+ sdvo_priv->dispParams.Dither.Value = 0;
18055+
18056+ }
18057+ else {
18058+ name_prefix = "RGB0";
18059+			DRM_INFO("a non-TV device is attached\n");
18060+ }
18061+ if (sdvo_priv->output_device == SDVOB) {
18062+ name_suffix = "-1";
18063+ } else {
18064+ name_suffix = "-2";
18065+ }
18066+
18067+ strcpy(deviceName, name_prefix);
18068+ strcat(deviceName, name_suffix);
18069+
18070+ if(output->name && (strcmp(output->name,deviceName) != 0)){
18071+ DRM_DEBUG("change the output name to %s\n", deviceName);
18072+ if (!drm_output_rename(output, deviceName)) {
18073+ drm_output_destroy(output);
18074+ return output_status_disconnected;
18075+ }
18076+
18077+ }
18078+ i830_sdvo_set_iomap(output);
18079+
18080+		DRM_INFO("attached displays=0x%x,0x%x, connected outputs=0x%x\n",
18081+ response[0], response[1], sdvo_priv->active_outputs);
18082+ return output_status_connected;
18083+ } else {
18084+ /*No SDVO display device attached */
18085+ intel_sdvo_dpms(output, DPMSModeOff);
18086+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
18087+ return output_status_disconnected;
18088+ }
18089+}
18090+
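The detection logic walks the attached-display word in a fixed preference order: TMDS first, then RGB, YPbPr, SCART, S-Video, and composite last. Purely as an illustration of that policy (the patch itself keeps the explicit if/else chain; the masks are the SDVO_OUTPUT_* bits from intel_sdvo_regs.h below):

#include <stdint.h>

#define N_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

/* Highest-preference output first, matching the chain above. */
static const uint16_t sdvo_output_preference[] = {
	SDVO_OUTPUT_TMDS0,  SDVO_OUTPUT_TMDS1,
	SDVO_OUTPUT_RGB0,   SDVO_OUTPUT_RGB1,
	SDVO_OUTPUT_YPRPB0, SDVO_OUTPUT_YPRPB1,
	SDVO_OUTPUT_SCART0, SDVO_OUTPUT_SCART1,
	SDVO_OUTPUT_SVID0,  SDVO_OUTPUT_SVID1,
	SDVO_OUTPUT_CVBS0,  SDVO_OUTPUT_CVBS1,
};

/* 'attached' is response[1] << 8 | response[0] from GET_ATTACHED_DISPLAYS. */
static uint16_t sdvo_pick_active_output(uint16_t attached)
{
	unsigned int i;

	for (i = 0; i < N_ELEMENTS(sdvo_output_preference); i++)
		if (attached & sdvo_output_preference[i])
			return sdvo_output_preference[i];
	return 0;	/* nothing attached */
}
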
18091+static int i830_sdvo_get_tvmode_from_table(struct drm_output *output)
18092+{
18093+ struct intel_output *intel_output = output->driver_private;
18094+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18095+ struct drm_device *dev = output->dev;
18096+
18097+ int i, modes = 0;
18098+
18099+ for (i = 0; i < NUM_TV_MODES; i++)
18100+ if (((sdvo_priv->TVMode == TVMODE_HDTV) && /*hdtv mode list */
18101+ (tv_modes[i].dwSupportedHDTVvss & TVSTANDARD_HDTV_ALL)) ||
18102+ ((sdvo_priv->TVMode == TVMODE_SDTV) && /*sdtv mode list */
18103+ (tv_modes[i].dwSupportedSDTVvss & TVSTANDARD_SDTV_ALL))) {
18104+ struct drm_display_mode *newmode;
18105+ newmode = drm_mode_duplicate(dev, &tv_modes[i].mode_entry);
18106+ drm_mode_set_crtcinfo(newmode,0);
18107+ drm_mode_probed_add(output, newmode);
18108+ modes++;
18109+ }
18110+
18111+ return modes;
18112+
18113+}
18114+
18115+static int intel_sdvo_get_modes(struct drm_output *output)
18116+{
18117+ struct intel_output *intel_output = output->driver_private;
18118+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18119+
18120+ DRM_DEBUG("xxintel_sdvo_get_modes\n");
18121+
18122+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
18123+ DRM_DEBUG("SDVO_DEVICE_TV\n");
18124+ i830_sdvo_get_tvmode_from_table(output);
18125+ if (list_empty(&output->probed_modes))
18126+ return 0;
18127+ return 1;
18128+
18129+ } else {
18130+ /* set the bus switch and get the modes */
18131+ intel_sdvo_set_control_bus_switch(output, SDVO_CONTROL_BUS_DDC2);
18132+ intel_ddc_get_modes(output);
18133+
18134+ if (list_empty(&output->probed_modes))
18135+ return 0;
18136+ return 1;
18137+ }
18138+#if 0
18139+ /* Mac mini hack. On this device, I get DDC through the analog, which
18140+ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
18141+ * but it does load-detect as connected. So, just steal the DDC bits
18142+ * from analog when we fail at finding it the right way.
18143+ */
18144+ /* TODO */
18145+ return NULL;
18146+
18147+ return NULL;
18148+#endif
18149+}
18150+
18151+static void intel_sdvo_destroy(struct drm_output *output)
18152+{
18153+ struct intel_output *intel_output = output->driver_private;
18154+ DRM_DEBUG("xxintel_sdvo_destroy\n");
18155+
18156+ if (intel_output->i2c_bus)
18157+ intel_i2c_destroy(intel_output->i2c_bus);
18158+
18159+ if (intel_output) {
18160+ kfree(intel_output);
18161+ output->driver_private = NULL;
18162+ }
18163+}
18164+
18165+static const struct drm_output_funcs intel_sdvo_output_funcs = {
18166+ .dpms = intel_sdvo_dpms,
18167+ .save = intel_sdvo_save,
18168+ .restore = intel_sdvo_restore,
18169+ .mode_valid = intel_sdvo_mode_valid,
18170+ .mode_fixup = intel_sdvo_mode_fixup,
18171+ .prepare = intel_output_prepare,
18172+ .mode_set = intel_sdvo_mode_set,
18173+ .commit = intel_output_commit,
18174+ .detect = intel_sdvo_detect,
18175+ .get_modes = intel_sdvo_get_modes,
18176+ .cleanup = intel_sdvo_destroy
18177+};
18178+
18179+void intel_sdvo_init(struct drm_device *dev, int output_device)
18180+{
18181+ struct drm_output *output;
18182+ struct intel_output *intel_output;
18183+ struct intel_sdvo_priv *sdvo_priv;
18184+ struct intel_i2c_chan *i2cbus = NULL;
18185+ u8 ch[0x40];
18186+ int i;
18187+ char name[DRM_OUTPUT_LEN];
18188+ char *name_prefix;
18189+ char *name_suffix;
18190+
18191+ int count = 3;
18192+	u8 response[2] = { 0, 0 };	/* avoid reading stale data if all retries fail */
18193+ u8 status;
18194+ unsigned char bytes[2];
18195+
18196+ DRM_DEBUG("xxintel_sdvo_init\n");
18197+
18198+ if (IS_POULSBO(dev)) {
18199+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
18200+ u32 sku_value = 0;
18201+ bool sku_bSDVOEnable = true;
18202+ if(pci_root)
18203+ {
18204+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
18205+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
18206+ sku_bSDVOEnable = (sku_value & PCI_PORT5_REG80_SDVO_DISABLE)?false : true;
18207+ DRM_INFO("intel_sdvo_init: sku_value is 0x%08x\n", sku_value);
18208+ DRM_INFO("intel_sdvo_init: sku_bSDVOEnable is %d\n", sku_bSDVOEnable);
18209+ if (sku_bSDVOEnable == false)
18210+ return;
18211+ }
18212+ }
18213+
18214+ output = drm_output_create(dev, &intel_sdvo_output_funcs, NULL);
18215+ if (!output)
18216+ return;
18217+
18218+	intel_output = kcalloc(1, sizeof(struct intel_output) + sizeof(struct intel_sdvo_priv), GFP_KERNEL);
18219+ if (!intel_output) {
18220+ drm_output_destroy(output);
18221+ return;
18222+ }
18223+
18224+ sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
18225+ intel_output->type = INTEL_OUTPUT_SDVO;
18226+ output->driver_private = intel_output;
18227+ output->interlace_allowed = 0;
18228+ output->doublescan_allowed = 0;
18229+
18230+ /* setup the DDC bus. */
18231+ if (output_device == SDVOB)
18232+ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
18233+ else
18234+ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
18235+
18236+ if (i2cbus == NULL) {
18237+ drm_output_destroy(output);
18238+ return;
18239+ }
18240+
18241+ sdvo_priv->i2c_bus = i2cbus;
18242+
18243+ if (output_device == SDVOB) {
18244+ name_suffix = "-1";
18245+ sdvo_priv->i2c_bus->slave_addr = 0x38;
18246+ sdvo_priv->byInputWiring = SDVOB_IN0;
18247+ } else {
18248+ name_suffix = "-2";
18249+ sdvo_priv->i2c_bus->slave_addr = 0x39;
18250+ }
18251+
18252+ sdvo_priv->output_device = output_device;
18253+ intel_output->i2c_bus = i2cbus;
18254+ intel_output->dev_priv = sdvo_priv;
18255+
18256+
18257+ /* Read the regs to test if we can talk to the device */
18258+ for (i = 0; i < 0x40; i++) {
18259+ if (!intel_sdvo_read_byte(output, i, &ch[i])) {
18260+ DRM_DEBUG("No SDVO device found on SDVO%c\n",
18261+ output_device == SDVOB ? 'B' : 'C');
18262+ drm_output_destroy(output);
18263+ return;
18264+ }
18265+ }
18266+
18267+ intel_sdvo_get_capabilities(output, &sdvo_priv->caps);
18268+
18269+#ifdef SII_1392_WA
18270+ if ((sdvo_priv->caps.vendor_id == 0x04) && (sdvo_priv->caps.device_id==0xAE)){
18271+ /*Leave the control of 1392 to X server*/
18272+ SII_1392=1;
18273+		printk("%s: detected 1392 card, leaving the setting to the upper layer\n", __FUNCTION__);
18274+ if (drm_psb_no_fb == 0)
18275+ intel_sdvo_dpms(output, DPMSModeOff);
18276+ sdvo_priv->active_outputs = 0;
18277+ output->subpixel_order = SubPixelHorizontalRGB;
18278+ name_prefix = "SDVO";
18279+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
18280+ strcpy(name, name_prefix);
18281+ strcat(name, name_suffix);
18282+ if (!drm_output_rename(output, name)) {
18283+ drm_output_destroy(output);
18284+ return;
18285+ }
18286+ return;
18287+ }
18288+#endif
18289+ memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs));
18290+
18291+ while (count--) {
18292+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
18293+ status = intel_sdvo_read_response(output, &response, 2);
18294+
18295+		if (status == SDVO_CMD_STATUS_SUCCESS)
18296+			break;
18297+		/* still pending or failed: wait and retry */
18298+		udelay(1000);
18301+ }
18302+ if (response[0] != 0 || response[1] != 0) {
18303+		/* Check which device types are attached: CRT/HDTV/S-Video/Composite. */
18304+		/* If a CRT and multiple TVs are attached, prefer them in this order: */
18305+		/*  1. RGB */
18306+		/*  2. HDTV */
18307+		/*  3. S-Video */
18308+		/*  4. Composite */
18309+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
18310+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
18311+ output->subpixel_order = SubPixelHorizontalRGB;
18312+ name_prefix = "TMDS";
18313+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
18314+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
18315+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
18316+ output->subpixel_order = SubPixelHorizontalRGB;
18317+ name_prefix = "TMDS";
18318+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
18319+ } else if (response[0] & SDVO_OUTPUT_RGB0) {
18320+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
18321+ output->subpixel_order = SubPixelHorizontalRGB;
18322+ name_prefix = "RGB0";
18323+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
18324+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_RGB1) {
18325+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
18326+ output->subpixel_order = SubPixelHorizontalRGB;
18327+ name_prefix = "RGB1";
18328+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
18329+ } else if (response[0] & SDVO_OUTPUT_YPRPB0) {
18330+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB0;
18331+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_YPRPB1) {
18332+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB1;
18333+ }
18334+		/* SCART is given second preference */
18335+ else if (response[0] & SDVO_OUTPUT_SCART0) {
18336+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART0;
18337+
18338+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SCART1) {
18339+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART1;
18340+ }
18341+ /* if S-Video type TV is connected along with Composite type TV give preference to S-Video */
18342+ else if (response[0] & SDVO_OUTPUT_SVID0) {
18343+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID0;
18344+
18345+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SVID1) {
18346+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID1;
18347+ }
18348+ /* Composite is given least preference */
18349+ else if (response[0] & SDVO_OUTPUT_CVBS0) {
18350+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS0;
18351+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_CVBS1) {
18352+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS1;
18353+ } else {
18354+ DRM_DEBUG("no display attached\n");
18355+
18356+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
18357+ DRM_INFO("%s: No active TMDS or RGB outputs (0x%02x%02x) 0x%08x\n",
18358+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1],
18359+ sdvo_priv->caps.output_flags);
18360+ name_prefix = "Unknown";
18361+ }
18362+
18363+		/* Initialize parameters for the TV connector */
18364+		if (sdvo_priv->active_outputs & SDVO_OUTPUT_TV0) {
18365+			DRM_INFO("TV is attached\n");
18366+			output->subpixel_order = SubPixelHorizontalRGB;
18367+			name_prefix = "TV0";
18368+			/* Initialize TV mode setting parameters */
18369+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TV;
18370+ sdvo_priv->bGetClk = TRUE;
18371+ if (sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB0 ||
18372+ sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB1) {
18373+ sdvo_priv->TVStandard = HDTV_SMPTE_274M_1080i60;
18374+ sdvo_priv->TVMode = TVMODE_HDTV;
18375+ } else {
18376+ sdvo_priv->TVStandard = TVSTANDARD_NTSC_M;
18377+ sdvo_priv->TVMode = TVMODE_SDTV;
18378+ }
18379+ /*intel_output->pDevice->TVEnabled = TRUE;*/
18380+ /*Init Display parameter for TV */
18381+ sdvo_priv->OverScanX.Value = 0xffffffff;
18382+ sdvo_priv->OverScanY.Value = 0xffffffff;
18383+ sdvo_priv->dispParams.Brightness.Value = 0x80;
18384+ sdvo_priv->dispParams.FlickerFilter.Value = 0xffffffff;
18385+ sdvo_priv->dispParams.AdaptiveFF.Value = 7;
18386+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = 0xffffffff;
18387+ sdvo_priv->dispParams.Contrast.Value = 0x40;
18388+ sdvo_priv->dispParams.PositionX.Value = 0x200;
18389+ sdvo_priv->dispParams.PositionY.Value = 0x200;
18390+ sdvo_priv->dispParams.DotCrawl.Value = 1;
18391+ sdvo_priv->dispParams.ChromaFilter.Value = 1;
18392+ sdvo_priv->dispParams.LumaFilter.Value = 2;
18393+ sdvo_priv->dispParams.Sharpness.Value = 4;
18394+ sdvo_priv->dispParams.Saturation.Value = 0x45;
18395+ sdvo_priv->dispParams.Hue.Value = 0x40;
18396+ sdvo_priv->dispParams.Dither.Value = 0;
18397+ }
18398+ else {
18399+ name_prefix = "RGB0";
18400+		DRM_INFO("a non-TV device is attached\n");
18401+ }
18402+
18403+ strcpy(name, name_prefix);
18404+ strcat(name, name_suffix);
18405+ if (!drm_output_rename(output, name)) {
18406+ drm_output_destroy(output);
18407+ return;
18408+ }
18409+ } else {
18410+ /*No SDVO display device attached */
18411+ intel_sdvo_dpms(output, DPMSModeOff);
18412+ sdvo_priv->active_outputs = 0;
18413+ output->subpixel_order = SubPixelHorizontalRGB;
18414+ name_prefix = "SDVO";
18415+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
18416+ strcpy(name, name_prefix);
18417+ strcat(name, name_suffix);
18418+ if (!drm_output_rename(output, name)) {
18419+ drm_output_destroy(output);
18420+ return;
18421+ }
18422+
18423+ }
18424+
18425+ /*(void)intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);*/
18426+
18427+ /* Set the input timing to the screen. Assume always input 0. */
18428+ intel_sdvo_set_target_input(output, true, false);
18429+
18430+ intel_sdvo_get_input_pixel_clock_range(output,
18431+ &sdvo_priv->pixel_clock_min,
18432+ &sdvo_priv->pixel_clock_max);
18433+
18434+
18435+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
18436+ "clock range %dMHz - %dMHz, "
18437+ "input 1: %c, input 2: %c, "
18438+ "output 1: %c, output 2: %c\n",
18439+ SDVO_NAME(sdvo_priv),
18440+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
18441+ sdvo_priv->caps.device_rev_id,
18442+ sdvo_priv->pixel_clock_min / 1000,
18443+ sdvo_priv->pixel_clock_max / 1000,
18444+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
18445+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
18446+ /* check currently supported outputs */
18447+ sdvo_priv->caps.output_flags &
18448+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
18449+ sdvo_priv->caps.output_flags &
18450+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
18451+
18452+ intel_output->ddc_bus = i2cbus;
18453+}
18454Index: linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo_regs.h
18455===================================================================
18456--- /dev/null 1970-01-01 00:00:00.000000000 +0000
18457+++ linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo_regs.h 2009-02-05 13:29:33.000000000 +0000
18458@@ -0,0 +1,580 @@
18459+/*
18460+ * Copyright © 2006-2007 Intel Corporation
18461+ *
18462+ * Permission is hereby granted, free of charge, to any person obtaining a
18463+ * copy of this software and associated documentation files (the "Software"),
18464+ * to deal in the Software without restriction, including without limitation
18465+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18466+ * and/or sell copies of the Software, and to permit persons to whom the
18467+ * Software is furnished to do so, subject to the following conditions:
18468+ *
18469+ * The above copyright notice and this permission notice (including the next
18470+ * paragraph) shall be included in all copies or substantial portions of the
18471+ * Software.
18472+ *
18473+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18474+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18475+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18476+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18477+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18478+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
18479+ * DEALINGS IN THE SOFTWARE.
18480+ *
18481+ * Authors:
18482+ * Eric Anholt <eric@anholt.net>
18483+ */
18484+
18485+/**
18486+ * @file SDVO command definitions and structures.
18487+ */
18488+
18489+#define SDVO_OUTPUT_FIRST (0)
18490+#define SDVO_OUTPUT_TMDS0 (1 << 0)
18491+#define SDVO_OUTPUT_RGB0 (1 << 1)
18492+#define SDVO_OUTPUT_CVBS0 (1 << 2)
18493+#define SDVO_OUTPUT_SVID0 (1 << 3)
18494+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
18495+#define SDVO_OUTPUT_SCART0 (1 << 5)
18496+#define SDVO_OUTPUT_LVDS0 (1 << 6)
18497+#define SDVO_OUTPUT_TMDS1 (1 << 8)
18498+#define SDVO_OUTPUT_RGB1 (1 << 9)
18499+#define SDVO_OUTPUT_CVBS1 (1 << 10)
18500+#define SDVO_OUTPUT_SVID1 (1 << 11)
18501+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
18502+#define SDVO_OUTPUT_SCART1 (1 << 13)
18503+#define SDVO_OUTPUT_LVDS1 (1 << 14)
18504+#define SDVO_OUTPUT_LAST (14)
18505+
18506+struct intel_sdvo_caps {
18507+ u8 vendor_id;
18508+ u8 device_id;
18509+ u8 device_rev_id;
18510+ u8 sdvo_version_major;
18511+ u8 sdvo_version_minor;
18512+ unsigned int sdvo_inputs_mask:2;
18513+ unsigned int smooth_scaling:1;
18514+ unsigned int sharp_scaling:1;
18515+ unsigned int up_scaling:1;
18516+ unsigned int down_scaling:1;
18517+ unsigned int stall_support:1;
18518+ unsigned int pad:1;
18519+ u16 output_flags;
18520+} __attribute__((packed));
18521+
18522+/** This matches the EDID DTD structure, more or less */
18523+struct intel_sdvo_dtd {
18524+ struct {
18525+ u16 clock; /**< pixel clock, in 10kHz units */
18526+ u8 h_active; /**< lower 8 bits (pixels) */
18527+ u8 h_blank; /**< lower 8 bits (pixels) */
18528+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
18529+ u8 v_active; /**< lower 8 bits (lines) */
18530+ u8 v_blank; /**< lower 8 bits (lines) */
18531+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
18532+ } part1;
18533+
18534+ struct {
18535+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
18536+ u8 h_sync_width; /**< lower 8 bits (pixels) */
18537+ /** lower 4 bits each vsync offset, vsync width */
18538+ u8 v_sync_off_width;
18539+ /**
18540+ * 2 high bits of hsync offset, 2 high bits of hsync width,
18541+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
18542+ */
18543+ u8 sync_off_width_high;
18544+ u8 dtd_flags;
18545+ u8 sdvo_flags;
18546+ /** bits 6-7 of vsync offset at bits 6-7 */
18547+ u8 v_sync_off_high;
18548+ u8 reserved;
18549+ } part2;
18550+} __attribute__((packed));
18551+
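As a sketch of how raw timing values map onto part1 of this structure (this mirrors the disabled translation block in intel_sdvo_mode_set(); the conversion the patch actually uses is i830_translate_timing2dtd(), and the helper below is hypothetical):

/* Fill part1 of a DTD from raw timing values: the clock arrives in kHz
 * and the DTD stores it in 10 kHz units, while the 12-bit active/blank
 * counts are split into a low byte plus a packed high nibble. */
static void sdvo_fill_dtd_part1(struct intel_sdvo_dtd *dtd, u16 clock_khz,
				u16 width, u16 height,
				u16 h_blank, u16 v_blank)
{
	dtd->part1.clock = clock_khz / 10;
	dtd->part1.h_active = width & 0xff;
	dtd->part1.h_blank = h_blank & 0xff;
	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
			    ((h_blank >> 8) & 0xf);
	dtd->part1.v_active = height & 0xff;
	dtd->part1.v_blank = v_blank & 0xff;
	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
			    ((v_blank >> 8) & 0xf);
}
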
18552+struct intel_sdvo_pixel_clock_range {
18553+ u16 min; /**< pixel clock, in 10kHz units */
18554+ u16 max; /**< pixel clock, in 10kHz units */
18555+} __attribute__((packed));
18556+
18557+struct intel_sdvo_preferred_input_timing_args {
18558+ u16 clock;
18559+ u16 width;
18560+ u16 height;
18561+} __attribute__((packed));
18562+
18563+/* I2C registers for SDVO */
18564+#define SDVO_I2C_ARG_0 0x07
18565+#define SDVO_I2C_ARG_1 0x06
18566+#define SDVO_I2C_ARG_2 0x05
18567+#define SDVO_I2C_ARG_3 0x04
18568+#define SDVO_I2C_ARG_4 0x03
18569+#define SDVO_I2C_ARG_5 0x02
18570+#define SDVO_I2C_ARG_6 0x01
18571+#define SDVO_I2C_ARG_7 0x00
18572+#define SDVO_I2C_OPCODE 0x08
18573+#define SDVO_I2C_CMD_STATUS 0x09
18574+#define SDVO_I2C_RETURN_0 0x0a
18575+#define SDVO_I2C_RETURN_1 0x0b
18576+#define SDVO_I2C_RETURN_2 0x0c
18577+#define SDVO_I2C_RETURN_3 0x0d
18578+#define SDVO_I2C_RETURN_4 0x0e
18579+#define SDVO_I2C_RETURN_5 0x0f
18580+#define SDVO_I2C_RETURN_6 0x10
18581+#define SDVO_I2C_RETURN_7 0x11
18582+#define SDVO_I2C_VENDOR_BEGIN 0x20
18583+
18584+/* Status results */
18585+#define SDVO_CMD_STATUS_POWER_ON 0x0
18586+#define SDVO_CMD_STATUS_SUCCESS 0x1
18587+#define SDVO_CMD_STATUS_NOTSUPP 0x2
18588+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
18589+#define SDVO_CMD_STATUS_PENDING 0x4
18590+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
18591+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
18592+
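The register map and status codes above imply the shape of every SDVO transaction: arguments go into SDVO_I2C_ARG_7..0, writing the opcode triggers execution, SDVO_I2C_CMD_STATUS is polled until the device leaves PENDING, and results come back through SDVO_I2C_RETURN_0..7. A sketch under those assumptions (intel_sdvo_write_byte() is assumed to be the write counterpart of the intel_sdvo_read_byte() used elsewhere in this patch; the real implementation lives in intel_sdvo_write_cmd() and intel_sdvo_read_response()):

static u8 sdvo_cmd_sketch(struct drm_output *output, u8 opcode,
			  const u8 *args, int nargs, u8 *ret, int nret)
{
	u8 status;
	int i, retries = 50;

	/* ARG_0 is the highest register address (0x07), so walk downwards. */
	for (i = 0; i < nargs; i++)
		intel_sdvo_write_byte(output, SDVO_I2C_ARG_0 - i, args[i]);

	/* Writing the opcode starts command execution on the device. */
	intel_sdvo_write_byte(output, SDVO_I2C_OPCODE, opcode);

	do {
		intel_sdvo_read_byte(output, SDVO_I2C_CMD_STATUS, &status);
	} while (status == SDVO_CMD_STATUS_PENDING && --retries);

	if (status == SDVO_CMD_STATUS_SUCCESS)
		for (i = 0; i < nret; i++)
			intel_sdvo_read_byte(output, SDVO_I2C_RETURN_0 + i,
					     &ret[i]);
	return status;
}
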
18593+/* SDVO commands, argument/result registers */
18594+
18595+#define SDVO_CMD_RESET 0x01
18596+
18597+/** Returns a struct intel_sdvo_caps */
18598+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
18599+
18600+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
18601+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
18602+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
18603+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
18604+
18605+/**
18606+ * Reports which inputs are trained (managed to sync).
18607+ *
18608+ * Devices must have trained within 2 vsyncs of a mode change.
18609+ */
18610+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
18611+struct intel_sdvo_get_trained_inputs_response {
18612+ unsigned int input0_trained:1;
18613+ unsigned int input1_trained:1;
18614+ unsigned int pad:6;
18615+} __attribute__((packed));
18616+
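This response is a single packed byte; intel_sdvo_dpms() above waits two vblanks after enabling the port and then checks it. A hypothetical decode, just to make the bit layout explicit:

#include <string.h>

static void sdvo_decode_trained_inputs(u8 byte, bool *input0, bool *input1)
{
	struct intel_sdvo_get_trained_inputs_response r;

	memcpy(&r, &byte, sizeof(r));	/* the struct is one packed byte */
	*input0 = r.input0_trained;
	*input1 = r.input1_trained;
}
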
18617+/** Returns a struct intel_sdvo_output_flags of active outputs. */
18618+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
18619+
18620+/**
18621+ * Sets the current set of active outputs.
18622+ *
18623+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
18624+ * on multi-output devices.
18625+ */
18626+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
18627+
18628+/**
18629+ * Returns the current mapping of SDVO inputs to outputs on the device.
18630+ *
18631+ * Returns two struct intel_sdvo_output_flags structures.
18632+ */
18633+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
18634+
18635+/**
18636+ * Sets the current mapping of SDVO inputs to outputs on the device.
18637+ *
18638+ * Takes two struct intel_sdvo_output_flags structures.
18639+ */
18640+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
18641+
18642+/**
18643+ * Returns a struct intel_sdvo_output_flags of attached displays.
18644+ */
18645+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
18646+
18647+/**
18648+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
18649+ */
18650+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
18651+
18652+/**
18653+ * Takes a struct intel_sdvo_output_flags.
18654+ */
18655+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
18656+
18657+/**
18658+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
18659+ * interrupts enabled.
18660+ */
18661+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
18662+
18663+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
18664+struct intel_sdvo_get_interrupt_event_source_response {
18665+ u16 interrupt_status;
18666+ unsigned int ambient_light_interrupt:1;
18667+ unsigned int pad:7;
18668+} __attribute__((packed));
18669+
18670+/**
18671+ * Selects which input is affected by future input commands.
18672+ *
18673+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
18674+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
18675+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
18676+ */
18677+#define SDVO_CMD_SET_TARGET_INPUT 0x10
18678+struct intel_sdvo_set_target_input_args {
18679+ unsigned int target_1:1;
18680+ unsigned int pad:7;
18681+} __attribute__((packed));
18682+
18683+/**
18684+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
18685+ * future output commands.
18686+ *
18687+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
18688+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
18689+ */
18690+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
18691+
18692+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
18693+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
18694+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
18695+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
18696+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
18697+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
18698+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
18699+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
18700+/* Part 1 */
18701+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
18702+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
18703+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
18704+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
18705+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
18706+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
18707+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
18708+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
18709+/* Part 2 */
18710+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
18711+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
18712+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
18713+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
18714+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
18715+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
18716+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
18717+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
18718+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
18719+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
18720+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
18721+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
18722+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
18723+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
18724+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
18725+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
18726+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
18727+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
18728+
18729+/**
18730+ * Generates a DTD based on the given width, height, and flags.
18731+ *
18732+ * This will be supported by any device supporting scaling or interlaced
18733+ * modes.
18734+ */
18735+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
18736+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
18737+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
18738+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
18739+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
18740+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
18741+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
18742+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
18743+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
18744+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
18745+
18746+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
18747+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
18748+
18749+/** Returns a struct intel_sdvo_pixel_clock_range */
18750+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
18751+/** Returns a struct intel_sdvo_pixel_clock_range */
18752+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
18753+
18754+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
18755+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
18756+
18757+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
18758+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
18759+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
18760+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
18761+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
18762+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
18763+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
18764+
18765+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
18766+
18767+#define SDVO_CMD_GET_TV_FORMAT 0x28
18768+
18769+#define SDVO_CMD_SET_TV_FORMAT 0x29
18770+
18771+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
18772+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
18773+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
18774+# define SDVO_ENCODER_STATE_ON (1 << 0)
18775+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
18776+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
18777+# define SDVO_ENCODER_STATE_OFF (1 << 3)
18778+
18779+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
18780+
18781+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
18782+# define SDVO_CONTROL_BUS_PROM 0x0
18783+# define SDVO_CONTROL_BUS_DDC1 0x1
18784+# define SDVO_CONTROL_BUS_DDC2 0x2
18785+# define SDVO_CONTROL_BUS_DDC3 0x3
18786+
18787+/* xiaolin, to support add-on SDVO TV Encoder */
18788+/* SDVO Bus & SDVO Inputs wiring details*/
18789+/* Bit 0: Is SDVOB connected to In0? (1 = yes, 0 = no) */
18790+/* Bit 1: Is SDVOB connected to In1? (1 = yes, 0 = no) */
18791+/* Bit 2: Is SDVOC connected to In0? (1 = yes, 0 = no) */
18792+/* Bit 3: Is SDVOC connected to In1? (1 = yes, 0 = no) */
18793+#define SDVOB_IN0 0x01
18794+#define SDVOB_IN1 0x02
18795+#define SDVOC_IN0 0x04
18796+#define SDVOC_IN1 0x08
18797+
18798+#define SDVO_OUTPUT_TV0 0x003C
18799+#define SDVO_OUTPUT_TV1 0x3C00
18800+#define SDVO_OUTPUT_LAST (14)
18801+
18802+#define SDVO_OUTPUT_CRT (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1 )
18803+#define SDVO_OUTPUT_TV (SDVO_OUTPUT_TV0 | SDVO_OUTPUT_TV1)
18804+#define SDVO_OUTPUT_LVDS (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
18805+#define SDVO_OUTPUT_TMDS (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
18806+
18807+
18808+
18809+#define SDVO_DEVICE_NONE 0x00
18810+#define SDVO_DEVICE_CRT 0x01
18811+#define SDVO_DEVICE_TV 0x02
18812+#define SDVO_DEVICE_LVDS 0x04
18813+#define SDVO_DEVICE_TMDS 0x08
18814+
18815+/* Different TV mode*/
18816+#define TVMODE_OFF 0x0000
18817+#define TVMODE_SDTV 0x0001
18818+#define TVMODE_HDTV 0x0002
18819+
18820+#define TVSTANDARD_NONE 0x00
18821+#define TVSTANDARD_NTSC_M 0x0001 // 75 IRE Setup
18822+#define TVSTANDARD_NTSC_M_J 0x0002 // Japan, 0 IRE Setup
18823+#define TVSTANDARD_PAL_B 0x0004
18824+#define TVSTANDARD_PAL_D 0x0008
18825+#define TVSTANDARD_PAL_H 0x0010
18826+#define TVSTANDARD_PAL_I 0x0020
18827+#define TVSTANDARD_PAL_M 0x0040
18828+#define TVSTANDARD_PAL_N 0x0080
18829+#define TVSTANDARD_SECAM_B 0x0100
18830+#define TVSTANDARD_SECAM_D 0x0200
18831+#define TVSTANDARD_SECAM_G 0x0400
18832+#define TVSTANDARD_SECAM_H 0x0800
18833+#define TVSTANDARD_SECAM_K 0x1000
18834+#define TVSTANDARD_SECAM_K1 0x2000
18835+#define TVSTANDARD_SECAM_L 0x4000
18836+#define TVSTANDARD_WIN_VGA 0x8000
18837+/*and the rest*/
18838+#define TVSTANDARD_NTSC_433 0x00010000
18839+#define TVSTANDARD_PAL_G 0x00020000
18840+#define TVSTANDARD_PAL_60 0x00040000
18841+#define TVSTANDARD_SECAM_L1 0x00080000
18842+#define TVSTANDARD_SDTV_ALL 0x000FFFFF
18843+
18844+
18845+/* HDTV standard definitions added using the unused upper 12 bits of dwTVStandard */
18846+#define HDTV_SMPTE_170M_480i59 0x00100000
18847+#define HDTV_SMPTE_293M_480p60 0x00200000
18848+#define HDTV_SMPTE_293M_480p59 0x00400000
18849+#define HDTV_ITURBT601_576i50 0x00800000
18850+#define HDTV_ITURBT601_576p50 0x01000000
18851+#define HDTV_SMPTE_296M_720p50 0x02000000
18852+#define HDTV_SMPTE_296M_720p59 0x04000000
18853+#define HDTV_SMPTE_296M_720p60 0x08000000
18854+#define HDTV_SMPTE_274M_1080i50 0x10000000
18855+#define HDTV_SMPTE_274M_1080i59 0x20000000
18856+#define HDTV_SMPTE_274M_1080i60 0x40000000
18857+#define HDTV_SMPTE_274M_1080p60 0x80000000
18858+#define TVSTANDARD_HDTV_ALL 0xFFF00000
18859+
18860+
18861+#define TVSTANDARD_NTSC 0x01
18862+#define TVSTANDARD_PAL 0x02
18863+
18864+#define TVOUTPUT_NONE 0x00
18865+#define TVOUTPUT_COMPOSITE 0x01
18866+#define TVOUTPUT_SVIDEO 0x02
18867+#define TVOUTPUT_RGB 0x04
18868+#define TVOUTPUT_YCBCR 0x08
18869+#define TVOUTPUT_SC 0x16
18870+
18871+/* Encoder supported TV standard bit mask per SDVO ED*/
18872+#define SDVO_NTSC_M 0x00000001
18873+#define SDVO_NTSC_M_J 0x00000002
18874+#define SDVO_NTSC_433 0x00000004
18875+#define SDVO_PAL_B 0x00000008
18876+#define SDVO_PAL_D 0x00000010
18877+#define SDVO_PAL_G 0x00000020
18878+#define SDVO_PAL_H 0x00000040
18879+#define SDVO_PAL_I 0x00000080
18880+#define SDVO_PAL_M 0x00000100
18881+#define SDVO_PAL_N 0x00000200
18882+#define SDVO_PAL_NC 0x00000400
18883+#define SDVO_PAL_60 0x00000800
18884+#define SDVO_SECAM_B 0x00001000
18885+#define SDVO_SECAM_D 0x00002000
18886+#define SDVO_SECAM_G 0x00004000
18887+#define SDVO_SECAM_K 0x00008000
18888+#define SDVO_SECAM_K1 0x00010000
18889+#define SDVO_SECAM_L 0x00020000
18890+#define SDVO_SECAM_60 0x00040000
18891+
18892+/* Number of SDTV format*/
18893+#define SDTV_NUM_STANDARDS 19
18894+
18895+/* Encoder supported HDTV standard bit mask per SDVO ED*/
18896+#define SDVO_HDTV_STD_240M_1080i59 0x00000008
18897+#define SDVO_HDTV_STD_240M_1080i60 0x00000010
18898+#define SDVO_HDTV_STD_260M_1080i59 0x00000020
18899+#define SDVO_HDTV_STD_260M_1080i60 0x00000040
18900+#define SDVO_HDTV_STD_274M_1080i50 0x00000080
18901+#define SDVO_HDTV_STD_274M_1080i59 0x00000100
18902+#define SDVO_HDTV_STD_274M_1080i60 0x00000200
18903+#define SDVO_HDTV_STD_274M_1080p23 0x00000400
18904+#define SDVO_HDTV_STD_274M_1080p24 0x00000800
18905+#define SDVO_HDTV_STD_274M_1080p25 0x00001000
18906+#define SDVO_HDTV_STD_274M_1080p29 0x00002000
18907+#define SDVO_HDTV_STD_274M_1080p30 0x00004000
18908+#define SDVO_HDTV_STD_274M_1080p50 0x00008000
18909+#define SDVO_HDTV_STD_274M_1080p59 0x00010000
18910+#define SDVO_HDTV_STD_274M_1080p60 0x00020000
18911+#define SDVO_HDTV_STD_295M_1080i50 0x00040000
18912+#define SDVO_HDTV_STD_295M_1080p50 0x00080000
18913+#define SDVO_HDTV_STD_296M_720p59 0x00100000
18914+#define SDVO_HDTV_STD_296M_720p60 0x00200000
18915+#define SDVO_HDTV_STD_296M_720p50 0x00400000
18916+#define SDVO_HDTV_STD_293M_480p59 0x00800000
18917+#define SDVO_HDTV_STD_170M_480i59 0x01000000
18918+#define SDVO_HDTV_STD_ITURBT601_576i50 0x02000000
18919+#define SDVO_HDTV_STD_ITURBT601_576p50 0x04000000
18920+#define SDVO_HDTV_STD_EIA_7702A_480i60 0x08000000
18921+#define SDVO_HDTV_STD_EIA_7702A_480p60 0x10000000
18922+
18923+/* SDTV resolution*/
18924+#define SDVO_SDTV_320x200 0x00000001
18925+#define SDVO_SDTV_320x240 0x00000002
18926+#define SDVO_SDTV_400x300 0x00000004
18927+#define SDVO_SDTV_640x350 0x00000008
18928+#define SDVO_SDTV_640x400 0x00000010
18929+#define SDVO_SDTV_640x480 0x00000020
18930+#define SDVO_SDTV_704x480 0x00000040
18931+#define SDVO_SDTV_704x576 0x00000080
18932+#define SDVO_SDTV_720x350 0x00000100
18933+#define SDVO_SDTV_720x400 0x00000200
18934+#define SDVO_SDTV_720x480 0x00000400
18935+#define SDVO_SDTV_720x540 0x00000800
18936+#define SDVO_SDTV_720x576 0x00001000
18937+#define SDVO_SDTV_768x576 0x00002000
18938+#define SDVO_SDTV_800x600 0x00004000
18939+#define SDVO_SDTV_832x624 0x00008000
18940+#define SDVO_SDTV_920x766 0x00010000
18941+#define SDVO_SDTV_1024x768 0x00020000
18942+#define SDVO_SDTV_1280x1024 0x00040000
18943+
18944+
18945+#define SDVO_HDTV_640x480 0x00000001
18946+#define SDVO_HDTV_800x600 0x00000002
18947+#define SDVO_HDTV_1024x768 0x00000004
18948+#define SDVO_HDTV_1064x600 0x00020000
18949+#define SDVO_HDTV_1280x720 0x00040000
18950+#define SDVO_HDTV_1704x960 0x00100000
18951+#define SDVO_HDTV_1864x1050 0x00200000
18952+#define SDVO_HDTV_1920x1080 0x00400000
18953+#define SDVO_HDTV_640x400 0x02000000
18954+
18955+/* Number of SDTV mode*/
18956+#define SDTV_NUM_MODES 19
18957+
18958+/* sdvo cmd for sdvo tv */
18959+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMINGS 0x1A
18960+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
18961+#define SDVO_CMD_GET_TV_FORMATS 0x28
18962+#define SDVO_CMD_SET_TV_FORMATS 0x29
18963+
18964+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
18965+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
18966+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
18967+#define SDVO_ENCODER_STATE_ON (1 << 0)
18968+#define SDVO_ENCODER_STATE_STANDBY (1 << 1)
18969+#define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
18970+#define SDVO_ENCODER_STATE_OFF (1 << 3)
18971+
18972+/* Bit mask of picture enhancement*/
18973+#define SDVO_FLICKER_FILTER 0x00000001
18974+#define SDVO_ADAPTIVE_FLICKER_FILTER 0x00000002
18975+#define SDVO_2D_FLICKER_FILTER 0x00000004
18976+#define SDVO_SATURATION 0x00000008
18977+#define SDVO_HUE 0x00000010
18978+#define SDVO_BRIGHTNESS 0x00000020
18979+#define SDVO_CONTRAST 0x00000040
18980+#define SDVO_HORIZONTAL_OVERSCAN 0x00000080
18981+#define SDVO_VERTICAL_OVERSCAN 0x00000100
18982+#define SDVO_HORIZONTAL_POSITION 0x00000200
18983+#define SDVO_VERTICAL_POSITION 0x00000400
18984+#define SDVO_SHARPNESS 0x00000800
18985+#define SDVO_DOT_CRAWL 0x00001000
18986+#define SDVO_DITHER 0x00002000
18987+#define SDVO_MAX_TV_CHROMA_FILTER 0x00004000
18988+#define SDVO_TV_MAX_LUMA_FILTER 0x00008000
18989+
18990+#define SDVO_CMD_GET_ANCILLARY_VIDEO_INFORMATION 0x3A
18991+#define SDVO_CMD_SET_ANCILLARY_VIDEO_INFORMATION 0x3B
18992+
18993+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
18994+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4D
18995+#define SDVO_CMD_GET_FLICKER_FILTER 0x4E
18996+#define SDVO_CMD_SET_FLICKER_FILTER 0x4F
18997+#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FILTER 0x50
18998+#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FILTER 0x51
18999+#define SDVO_CMD_GET_MAX_2D_FLICKER_FILTER 0x52
19000+#define SDVO_CMD_GET_2D_FLICKER_FILTER 0x53
19001+#define SDVO_CMD_SET_2D_FLICKER_FILTER 0x54
19002+#define SDVO_CMD_GET_MAX_SATURATION 0x55
19003+#define SDVO_CMD_GET_SATURATION 0x56
19004+#define SDVO_CMD_SET_SATURATION 0x57
19005+#define SDVO_CMD_GET_MAX_HUE 0x58
19006+#define SDVO_CMD_GET_HUE 0x59
19007+#define SDVO_CMD_SET_HUE 0x5A
19008+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5B
19009+#define SDVO_CMD_GET_BRIGHTNESS 0x5C
19010+#define SDVO_CMD_SET_BRIGHTNESS 0x5D
19011+#define SDVO_CMD_GET_MAX_CONTRAST 0x5E
19012+#define SDVO_CMD_GET_CONTRAST 0x5F
19013+#define SDVO_CMD_SET_CONTRAST 0x60
19014+
19015+#define SDVO_CMD_GET_MAX_HORIZONTAL_OVERSCAN 0x61
19016+#define SDVO_CMD_GET_HORIZONTAL_OVERSCAN 0x62
19017+#define SDVO_CMD_SET_HORIZONTAL_OVERSCAN 0x63
19018+#define SDVO_CMD_GET_MAX_VERTICAL_OVERSCAN 0x64
19019+#define SDVO_CMD_GET_VERTICAL_OVERSCAN 0x65
19020+#define SDVO_CMD_SET_VERTICAL_OVERSCAN 0x66
19021+#define SDVO_CMD_GET_MAX_HORIZONTAL_POSITION 0x67
19022+#define SDVO_CMD_GET_HORIZONTAL_POSITION 0x68
19023+#define SDVO_CMD_SET_HORIZONTAL_POSITION 0x69
19024+#define SDVO_CMD_GET_MAX_VERTICAL_POSITION 0x6A
19025+#define SDVO_CMD_GET_VERTICAL_POSITION 0x6B
19026+#define SDVO_CMD_SET_VERTICAL_POSITION 0x6C
19027+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6D
19028+#define SDVO_CMD_GET_SHARPNESS 0x6E
19029+#define SDVO_CMD_SET_SHARPNESS 0x6F
19030+#define SDVO_CMD_GET_DOT_CRAWL 0x70
19031+#define SDVO_CMD_SET_DOT_CRAWL 0x71
19032+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
19033+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
19034+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
19035+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
19036+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
19037+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
19038+#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FILTER 0x7B
19039Index: linux-2.6.27/drivers/gpu/drm/psb/psb_buffer.c
19040===================================================================
19041--- /dev/null 1970-01-01 00:00:00.000000000 +0000
19042+++ linux-2.6.27/drivers/gpu/drm/psb/psb_buffer.c 2009-02-05 13:29:33.000000000 +0000
19043@@ -0,0 +1,437 @@
19044+/**************************************************************************
19045+ * Copyright (c) 2007, Intel Corporation.
19046+ * All Rights Reserved.
19047+ *
19048+ * This program is free software; you can redistribute it and/or modify it
19049+ * under the terms and conditions of the GNU General Public License,
19050+ * version 2, as published by the Free Software Foundation.
19051+ *
19052+ * This program is distributed in the hope it will be useful, but WITHOUT
19053+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19054+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19055+ * more details.
19056+ *
19057+ * You should have received a copy of the GNU General Public License along with
19058+ * this program; if not, write to the Free Software Foundation, Inc.,
19059+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19060+ *
19061+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19062+ * develop this driver.
19063+ *
19064+ **************************************************************************/
19065+/*
19066+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
19067+ */
19068+#include "drmP.h"
19069+#include "psb_drv.h"
19070+#include "psb_schedule.h"
19071+
19072+struct drm_psb_ttm_backend {
19073+ struct drm_ttm_backend base;
19074+ struct page **pages;
19075+ unsigned int desired_tile_stride;
19076+ unsigned int hw_tile_stride;
19077+ int mem_type;
19078+ unsigned long offset;
19079+ unsigned long num_pages;
19080+};
19081+
19082+int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
19083+ uint32_t * type)
19084+{
19085+ switch (*class) {
19086+ case PSB_ENGINE_TA:
19087+ *type = DRM_FENCE_TYPE_EXE |
19088+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
19089+ if (bo->mem.mask & PSB_BO_FLAG_TA)
19090+ *type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
19091+ if (bo->mem.mask & PSB_BO_FLAG_SCENE)
19092+ *type |= _PSB_FENCE_TYPE_SCENE_DONE;
19093+ if (bo->mem.mask & PSB_BO_FLAG_FEEDBACK)
19094+ *type |= _PSB_FENCE_TYPE_FEEDBACK;
19095+ break;
19096+ default:
19097+ *type = DRM_FENCE_TYPE_EXE;
19098+ }
19099+ return 0;
19100+}
19101+
19102+static inline size_t drm_size_align(size_t size)
19103+{
19104+ size_t tmpSize = 4;
19105+ if (size > PAGE_SIZE)
19106+ return PAGE_ALIGN(size);
19107+ while (tmpSize < size)
19108+ tmpSize <<= 1;
19109+
19110+ return (size_t) tmpSize;
19111+}
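+
+/*
+ * A worked example of the rounding above, assuming PAGE_SIZE is 4096:
+ * drm_size_align(3) == 4, drm_size_align(24) == 32,
+ * drm_size_align(4096) == 4096 and drm_size_align(4097) == 8192;
+ * sub-page sizes round up to a power of two, larger ones to a page.
+ */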
19112+
19113+/*
19114+ * Poulsbo GPU virtual space looks like this
19115+ * (We currently use only one MMU context).
19116+ *
19117+ * gatt_start = Start of GATT aperture in bus space.
19118+ * stolen_end = End of GATT populated by stolen memory in bus space.
19119+ * gatt_end = End of GATT
19120+ * twod_end = MIN(gatt_start + 256_MEM, gatt_end)
19121+ *
19122+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling and copy operations.
19123+ * This space is not managed and is protected by the
19124+ * temp_mem mutex.
19125+ *
19126+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
19127+ *
19128+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
19129+ *
19130+ * gatt_start -> stolen_end DRM_BO_MEM_VRAM Pre-populated GATT pages.
19131+ *
19132+ * stolen_end -> twod_end DRM_BO_MEM_TT GATT memory usable by 2D engine.
19133+ *
19134+ * twod_end -> gatt_end DRM_PSB_MEM_APER GATT memory not usable by 2D engine.
19135+ *
19136+ * gatt_end -> 0xffffffff Currently unused.
19137+ */
19138+
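+/*
+ * An illustrative instance of the layout above, using hypothetical
+ * firmware values (not taken from this patch): gatt_start = 0x40000000,
+ * 32 MB of stolen memory and a 256 MB GATT give
+ * stolen_end = 0x42000000 and twod_end = gatt_end = 0x50000000, so
+ * DRM_BO_MEM_TT spans 0x42000000 -> 0x50000000 and the
+ * DRM_PSB_MEM_APER range is empty.
+ */
+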
19139+int psb_init_mem_type(struct drm_device *dev, uint32_t type,
19140+ struct drm_mem_type_manager *man)
19141+{
19142+ struct drm_psb_private *dev_priv =
19143+ (struct drm_psb_private *)dev->dev_private;
19144+ struct psb_gtt *pg = dev_priv->pg;
19145+
19146+ switch (type) {
19147+ case DRM_BO_MEM_LOCAL:
19148+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19149+ _DRM_FLAG_MEMTYPE_CACHED;
19150+ man->drm_bus_maptype = 0;
19151+ break;
19152+ case DRM_PSB_MEM_KERNEL:
19153+ man->io_offset = 0x00000000;
19154+ man->io_size = 0x00000000;
19155+ man->io_addr = NULL;
19156+ man->drm_bus_maptype = _DRM_TTM;
19157+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19158+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19159+ man->gpu_offset = PSB_MEM_KERNEL_START;
19160+ break;
19161+ case DRM_PSB_MEM_MMU:
19162+ man->io_offset = 0x00000000;
19163+ man->io_size = 0x00000000;
19164+ man->io_addr = NULL;
19165+ man->drm_bus_maptype = _DRM_TTM;
19166+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19167+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19168+ man->gpu_offset = PSB_MEM_MMU_START;
19169+ break;
19170+ case DRM_PSB_MEM_PDS:
19171+ man->io_offset = 0x00000000;
19172+ man->io_size = 0x00000000;
19173+ man->io_addr = NULL;
19174+ man->drm_bus_maptype = _DRM_TTM;
19175+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19176+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19177+ man->gpu_offset = PSB_MEM_PDS_START;
19178+ break;
19179+ case DRM_PSB_MEM_RASTGEOM:
19180+ man->io_offset = 0x00000000;
19181+ man->io_size = 0x00000000;
19182+ man->io_addr = NULL;
19183+ man->drm_bus_maptype = _DRM_TTM;
19184+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19185+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19186+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
19187+ break;
19188+ case DRM_BO_MEM_VRAM:
19189+ man->io_addr = NULL;
19190+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19191+ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
19192+#ifdef PSB_WORKING_HOST_MMU_ACCESS
19193+ man->drm_bus_maptype = _DRM_AGP;
19194+ man->io_offset = pg->gatt_start;
19195+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
19196+#else
19197+ man->drm_bus_maptype = _DRM_TTM; /* Forces uncached */
19198+ man->io_offset = pg->stolen_base;
19199+ man->io_size = pg->stolen_size;
19200+#endif
19201+ man->gpu_offset = pg->gatt_start;
19202+ break;
19203+ case DRM_BO_MEM_TT: /* Mappable GATT memory */
19204+ man->io_offset = pg->gatt_start;
19205+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
19206+ man->io_addr = NULL;
19207+#ifdef PSB_WORKING_HOST_MMU_ACCESS
19208+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19209+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
19210+ man->drm_bus_maptype = _DRM_AGP;
19211+#else
19212+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19213+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19214+ man->drm_bus_maptype = _DRM_TTM;
19215+#endif
19216+ man->gpu_offset = pg->gatt_start;
19217+ break;
19218+ case DRM_PSB_MEM_APER: /* GATT aperture memory. Mappable. Not usable for 2D. */
19219+ man->io_offset = pg->gatt_start;
19220+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
19221+ man->io_addr = NULL;
19222+#ifdef PSB_WORKING_HOST_MMU_ACCESS
19223+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19224+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
19225+ man->drm_bus_maptype = _DRM_AGP;
19226+#else
19227+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19228+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19229+ man->drm_bus_maptype = _DRM_TTM;
19230+#endif
19231+ man->gpu_offset = pg->gatt_start;
19232+ break;
19233+ default:
19234+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
19235+ return -EINVAL;
19236+ }
19237+ return 0;
19238+}
19239+
19240+uint32_t psb_evict_mask(struct drm_buffer_object * bo)
19241+{
19242+ switch (bo->mem.mem_type) {
19243+ case DRM_BO_MEM_VRAM:
19244+ return DRM_BO_FLAG_MEM_TT;
19245+ default:
19246+ return DRM_BO_FLAG_MEM_LOCAL;
19247+ }
19248+}
19249+
19250+int psb_invalidate_caches(struct drm_device *dev, uint64_t flags)
19251+{
19252+ return 0;
19253+}
19254+
19255+static int psb_move_blit(struct drm_buffer_object *bo,
19256+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
19257+{
19258+ struct drm_bo_mem_reg *old_mem = &bo->mem;
19259+ int dir = 0;
19260+
19261+ if ((old_mem->mem_type == new_mem->mem_type) &&
19262+ (new_mem->mm_node->start <
19263+ old_mem->mm_node->start + old_mem->mm_node->size)) {
19264+ dir = 1;
19265+ }
19266+
19267+ psb_emit_2d_copy_blit(bo->dev,
19268+ old_mem->mm_node->start << PAGE_SHIFT,
19269+ new_mem->mm_node->start << PAGE_SHIFT,
19270+ new_mem->num_pages, dir);
19271+
19272+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
19273+ DRM_FENCE_TYPE_EXE, 0, new_mem);
19274+}
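+
+/*
+ * Note on the dir flag above: when source and destination ranges live
+ * in the same memory type and the destination starts below the end of
+ * the source, the two may overlap, so the copy is emitted backwards
+ * (dir = 1) to avoid overwriting data before it has been read; the
+ * same reason memmove() copies backwards for overlapping regions.
+ */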
19275+
19276+/*
19277+ * Flip destination ttm into cached-coherent GATT,
19278+ * then blit and subsequently move out again.
19279+ */
19280+
19281+static int psb_move_flip(struct drm_buffer_object *bo,
19282+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
19283+{
19284+ struct drm_device *dev = bo->dev;
19285+ struct drm_bo_mem_reg tmp_mem;
19286+ int ret;
19287+
19288+ tmp_mem = *new_mem;
19289+ tmp_mem.mm_node = NULL;
19290+ tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
19291+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
19292+
19293+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
19294+ if (ret)
19295+ return ret;
19296+ ret = drm_bind_ttm(bo->ttm, &tmp_mem);
19297+ if (ret)
19298+ goto out_cleanup;
19299+ ret = psb_move_blit(bo, 1, no_wait, &tmp_mem);
19300+ if (ret)
19301+ goto out_cleanup;
19302+
19303+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
19304+ out_cleanup:
19305+ if (tmp_mem.mm_node) {
19306+ mutex_lock(&dev->struct_mutex);
19307+ if (tmp_mem.mm_node != bo->pinned_node)
19308+ drm_mm_put_block(tmp_mem.mm_node);
19309+ tmp_mem.mm_node = NULL;
19310+ mutex_unlock(&dev->struct_mutex);
19311+ }
19312+ return ret;
19313+}
19314+
19315+int psb_move(struct drm_buffer_object *bo,
19316+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
19317+{
19318+ struct drm_bo_mem_reg *old_mem = &bo->mem;
19319+
19320+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
19321+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
19322+ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
19323+ if (psb_move_flip(bo, evict, no_wait, new_mem))
19324+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
19325+ } else {
19326+ if (psb_move_blit(bo, evict, no_wait, new_mem))
19327+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
19328+ }
19329+ return 0;
19330+}
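+
+/*
+ * Summary of the strategy above: moves out of system memory use a CPU
+ * memcpy; moves into system memory bounce through a cached GATT
+ * mapping so the blit engine can do the copy (psb_move_flip); GPU to
+ * GPU moves blit directly. Every accelerated path falls back to
+ * drm_bo_move_memcpy() if it fails.
+ */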
19331+
19332+static int drm_psb_tbe_nca(struct drm_ttm_backend *backend)
19333+{
19334+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
19335+}
19336+
19337+static int drm_psb_tbe_populate(struct drm_ttm_backend *backend,
19338+ unsigned long num_pages, struct page **pages)
19339+{
19340+ struct drm_psb_ttm_backend *psb_be =
19341+ container_of(backend, struct drm_psb_ttm_backend, base);
19342+
19343+ psb_be->pages = pages;
19344+ return 0;
19345+}
19346+
19347+static int drm_psb_tbe_unbind(struct drm_ttm_backend *backend)
19348+{
19349+ struct drm_device *dev = backend->dev;
19350+ struct drm_psb_private *dev_priv =
19351+ (struct drm_psb_private *)dev->dev_private;
19352+ struct drm_psb_ttm_backend *psb_be =
19353+ container_of(backend, struct drm_psb_ttm_backend, base);
19354+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
19355+ struct drm_mem_type_manager *man = &dev->bm.man[psb_be->mem_type];
19356+
19357+ PSB_DEBUG_RENDER("MMU unbind.\n");
19358+
19359+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
19360+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
19361+ PAGE_SHIFT;
19362+
19363+ (void)psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
19364+ psb_be->num_pages,
19365+ psb_be->desired_tile_stride,
19366+ psb_be->hw_tile_stride);
19367+ }
19368+
19369+ psb_mmu_remove_pages(pd, psb_be->offset,
19370+ psb_be->num_pages,
19371+ psb_be->desired_tile_stride,
19372+ psb_be->hw_tile_stride);
19373+
19374+ return 0;
19375+}
19376+
19377+static int drm_psb_tbe_bind(struct drm_ttm_backend *backend,
19378+ struct drm_bo_mem_reg *bo_mem)
19379+{
19380+ struct drm_device *dev = backend->dev;
19381+ struct drm_psb_private *dev_priv =
19382+ (struct drm_psb_private *)dev->dev_private;
19383+ struct drm_psb_ttm_backend *psb_be =
19384+ container_of(backend, struct drm_psb_ttm_backend, base);
19385+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
19386+ struct drm_mem_type_manager *man = &dev->bm.man[bo_mem->mem_type];
19387+ int type;
19388+ int ret = 0;
19389+
19390+ psb_be->mem_type = bo_mem->mem_type;
19391+ psb_be->num_pages = bo_mem->num_pages;
19392+ psb_be->desired_tile_stride = bo_mem->desired_tile_stride;
19393+ psb_be->hw_tile_stride = bo_mem->hw_tile_stride;
19394+ psb_be->desired_tile_stride = 0;
19395+ psb_be->hw_tile_stride = 0;
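+ /*
+ * Note: the two stride assignments above are immediately overridden
+ * with 0, which looks like tiled binding is deliberately disabled
+ * here (an observation; the original patch does not say why).
+ */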
19396+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
19397+ man->gpu_offset;
19398+
19399+ type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
19400+
19401+ PSB_DEBUG_RENDER("MMU bind.\n");
19402+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
19403+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
19404+ PAGE_SHIFT;
19405+
19406+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
19407+ gatt_p_offset,
19408+ psb_be->num_pages,
19409+ psb_be->desired_tile_stride,
19410+ psb_be->hw_tile_stride, type);
19411+ }
19412+
19413+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
19414+ psb_be->offset, psb_be->num_pages,
19415+ psb_be->desired_tile_stride,
19416+ psb_be->hw_tile_stride, type);
19417+ if (ret)
19418+ goto out_err;
19419+
19420+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
19421+ DRM_BE_FLAG_BOUND_CACHED : 0, DRM_BE_FLAG_BOUND_CACHED);
19422+
19423+ return 0;
19424+ out_err:
19425+ drm_psb_tbe_unbind(backend);
19426+ return ret;
19427+
19428+}
19429+
19430+static void drm_psb_tbe_clear(struct drm_ttm_backend *backend)
19431+{
19432+ struct drm_psb_ttm_backend *psb_be =
19433+ container_of(backend, struct drm_psb_ttm_backend, base);
19434+
19435+ psb_be->pages = NULL;
19436+ return;
19437+}
19438+
19439+static void drm_psb_tbe_destroy(struct drm_ttm_backend *backend)
19440+{
19441+ struct drm_psb_ttm_backend *psb_be =
19442+ container_of(backend, struct drm_psb_ttm_backend, base);
19443+
19444+ if (backend)
19445+ drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
19446+}
19447+
19448+static struct drm_ttm_backend_func psb_ttm_backend = {
19449+ .needs_ub_cache_adjust = drm_psb_tbe_nca,
19450+ .populate = drm_psb_tbe_populate,
19451+ .clear = drm_psb_tbe_clear,
19452+ .bind = drm_psb_tbe_bind,
19453+ .unbind = drm_psb_tbe_unbind,
19454+ .destroy = drm_psb_tbe_destroy,
19455+};
19456+
19457+struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev)
19458+{
19459+ struct drm_psb_ttm_backend *psb_be;
19460+
19461+ psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
19462+ if (!psb_be)
19463+ return NULL;
19464+ psb_be->pages = NULL;
19465+ psb_be->base.func = &psb_ttm_backend;
19466+ psb_be->base.dev = dev;
19467+
19468+ return &psb_be->base;
19469+}
19470+
19471+int psb_tbe_size(struct drm_device *dev, unsigned long num_pages)
19472+{
19473+ /*
19474+ * Return the size of the structures themselves and the
19475+ * estimated size of the pagedir and pagetable entries.
19476+ */
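+ /*
+ * The 8 bytes per page is presumably one 4-byte MMU page-table
+ * entry plus about as much again for GTT entries and amortized
+ * page-directory overhead; the exact split is an assumption, not
+ * spelled out here.
+ */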
19477+
19478+ return drm_size_align(sizeof(struct drm_psb_ttm_backend)) +
19479+ 8*num_pages;
19480+}
19481Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drm.h
19482===================================================================
19483--- /dev/null 1970-01-01 00:00:00.000000000 +0000
19484+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drm.h 2009-02-05 13:29:33.000000000 +0000
19485@@ -0,0 +1,370 @@
19486+/**************************************************************************
19487+ * Copyright (c) 2007, Intel Corporation.
19488+ * All Rights Reserved.
19489+ *
19490+ * This program is free software; you can redistribute it and/or modify it
19491+ * under the terms and conditions of the GNU General Public License,
19492+ * version 2, as published by the Free Software Foundation.
19493+ *
19494+ * This program is distributed in the hope it will be useful, but WITHOUT
19495+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19496+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19497+ * more details.
19498+ *
19499+ * You should have received a copy of the GNU General Public License along with
19500+ * this program; if not, write to the Free Software Foundation, Inc.,
19501+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19502+ *
19503+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19504+ * develop this driver.
19505+ *
19506+ **************************************************************************/
19509+
19510+#ifndef _PSB_DRM_H_
19511+#define _PSB_DRM_H_
19512+
19513+#if defined(__linux__) && !defined(__KERNEL__)
19514+#include<stdint.h>
19515+#endif
19516+
19517+/*
19518+ * Intel Poulsbo driver package version.
19519+ *
19520+ */
19521+/* #define PSB_PACKAGE_VERSION "ED"__DATE__*/
19522+#define PSB_PACKAGE_VERSION "2.1.0.32L.0019"
19523+
19524+#define DRM_PSB_SAREA_MAJOR 0
19525+#define DRM_PSB_SAREA_MINOR 1
19526+#define PSB_FIXED_SHIFT 16
19527+
19528+/*
19529+ * Public memory types.
19530+ */
19531+
19532+#define DRM_PSB_MEM_MMU DRM_BO_MEM_PRIV1
19533+#define DRM_PSB_FLAG_MEM_MMU DRM_BO_FLAG_MEM_PRIV1
19534+#define DRM_PSB_MEM_PDS DRM_BO_MEM_PRIV2
19535+#define DRM_PSB_FLAG_MEM_PDS DRM_BO_FLAG_MEM_PRIV2
19536+#define DRM_PSB_MEM_APER DRM_BO_MEM_PRIV3
19537+#define DRM_PSB_FLAG_MEM_APER DRM_BO_FLAG_MEM_PRIV3
19538+#define DRM_PSB_MEM_RASTGEOM DRM_BO_MEM_PRIV4
19539+#define DRM_PSB_FLAG_MEM_RASTGEOM DRM_BO_FLAG_MEM_PRIV4
19540+#define PSB_MEM_RASTGEOM_START 0x30000000
19541+
19542+typedef int32_t psb_fixed;
19543+typedef uint32_t psb_ufixed;
19544+
19545+static inline psb_fixed psb_int_to_fixed(int a)
19546+{
19547+ return a * (1 << PSB_FIXED_SHIFT);
19548+}
19549+
19550+static inline psb_ufixed psb_unsigned_to_ufixed(unsigned int a)
19551+{
19552+ return a << PSB_FIXED_SHIFT;
19553+}
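+
+/*
+ * These are 16.16 fixed-point helpers (PSB_FIXED_SHIFT == 16): for
+ * example psb_int_to_fixed(3) == 0x00030000, and a value of 1.5 would
+ * be represented as 0x00018000.
+ */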
19554+
19555+/*Status of the command sent to the gfx device.*/
19556+typedef enum {
19557+ DRM_CMD_SUCCESS,
19558+ DRM_CMD_FAILED,
19559+ DRM_CMD_HANG
19560+} drm_cmd_status_t;
19561+
19562+struct drm_psb_scanout {
19563+ uint32_t buffer_id; /* DRM buffer object ID */
19564+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
19565+ uint32_t stride; /* Buffer stride in bytes */
19566+ uint32_t depth; /* Buffer depth in bits (NOT bpp) */
19567+ uint32_t width; /* Buffer width in pixels */
19568+ uint32_t height; /* Buffer height in lines */
19569+ psb_fixed transform[3][3]; /* Buffer composite transform */
19570+ /* (scaling, rot, reflect) */
19571+};
19572+
19573+#define DRM_PSB_SAREA_OWNERS 16
19574+#define DRM_PSB_SAREA_OWNER_2D 0
19575+#define DRM_PSB_SAREA_OWNER_3D 1
19576+
19577+#define DRM_PSB_SAREA_SCANOUTS 3
19578+
19579+struct drm_psb_sarea {
19580+ /* Track changes of this data structure */
19581+
19582+ uint32_t major;
19583+ uint32_t minor;
19584+
19585+ /* Last context to touch part of hw */
19586+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
19587+
19588+ /* Definition of front- and rotated buffers */
19589+ uint32_t num_scanouts;
19590+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
19591+
19592+ int planeA_x;
19593+ int planeA_y;
19594+ int planeA_w;
19595+ int planeA_h;
19596+ int planeB_x;
19597+ int planeB_y;
19598+ int planeB_w;
19599+ int planeB_h;
19600+ uint32_t msvdx_state;
19601+ uint32_t msvdx_context;
19602+};
19603+
19604+#define PSB_RELOC_MAGIC 0x67676767
19605+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
19606+#define PSB_RELOC_SHIFT_SHIFT 0
19607+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
19608+#define PSB_RELOC_ALSHIFT_SHIFT 16
19609+
19610+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
19611+ * buffer
19612+ */
19613+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
19614+ * buffer, relative to 2D
19615+ * base address
19616+ */
19617+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
19618+ * relative to PDS base address
19619+ */
19620+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
19621+ * buffer (for tiling)
19622+ */
19623+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
19624+ * relative to base reg
19625+ */
19626+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
19627+
19628+struct drm_psb_reloc {
19629+ uint32_t reloc_op;
19630+ uint32_t where; /* offset in destination buffer */
19631+ uint32_t buffer; /* Buffer reloc applies to */
19632+ uint32_t mask; /* Destination format: */
19633+ uint32_t shift; /* Destination format: */
19634+ uint32_t pre_add; /* Destination format: */
19635+ uint32_t background; /* Destination add */
19636+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
19637+ uint32_t arg0; /* Reloc-op dependent */
19638+ uint32_t arg1;
19639+};
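+
+/*
+ * How a reloc is presumably applied (a sketch; the actual logic lives
+ * in the command-buffer validation code, not in this header): the
+ * kernel looks up the object named by 'buffer', computes roughly
+ *
+ *	val = ((bo_offset + pre_add) >> shift) & mask;
+ *	dst[where] = background | val;
+ *
+ * and patches the result into the buffer indexed by 'dst_buffer', so
+ * user space never has to know final GPU addresses when building
+ * command streams.
+ */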
19640+
19641+#define PSB_BO_FLAG_TA (1ULL << 48)
19642+#define PSB_BO_FLAG_SCENE (1ULL << 49)
19643+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
19644+#define PSB_BO_FLAG_USSE (1ULL << 51)
19645+
19646+#define PSB_ENGINE_2D 0
19647+#define PSB_ENGINE_VIDEO 1
19648+#define PSB_ENGINE_RASTERIZER 2
19649+#define PSB_ENGINE_TA 3
19650+#define PSB_ENGINE_HPRAST 4
19651+
19652+/*
19653+ * For this fence class we have a couple of
19654+ * fence types.
19655+ */
19656+
19657+#define _PSB_FENCE_EXE_SHIFT 0
19658+#define _PSB_FENCE_TA_DONE_SHIFT 1
19659+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
19660+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
19661+#define _PSB_FENCE_FEEDBACK_SHIFT 4
19662+
19663+#define _PSB_ENGINE_TA_FENCE_TYPES 5
19664+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
19665+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
19666+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
19667+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
19668+
19669+#define PSB_ENGINE_HPRAST 4
19670+#define PSB_NUM_ENGINES 5
19671+
19672+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
19673+#define PSB_TA_FLAG_LASTPASS (1 << 1)
19674+
19675+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
19676+
19677+struct drm_psb_scene {
19678+ int handle_valid;
19679+ uint32_t handle;
19680+ uint32_t w;
19681+ uint32_t h;
19682+ uint32_t num_buffers;
19683+};
19684+
19685+struct drm_psb_hw_info {
19687+ uint32_t rev_id;
19688+ uint32_t caps;
19689+};
19690+
19691+typedef struct drm_psb_cmdbuf_arg {
19692+ uint64_t buffer_list; /* List of buffers to validate */
19693+ uint64_t clip_rects; /* See i915 counterpart */
19694+ uint64_t scene_arg;
19695+ uint64_t fence_arg;
19696+
19697+ uint32_t ta_flags;
19698+
19699+ uint32_t ta_handle; /* TA reg-value pairs */
19700+ uint32_t ta_offset;
19701+ uint32_t ta_size;
19702+
19703+ uint32_t oom_handle;
19704+ uint32_t oom_offset;
19705+ uint32_t oom_size;
19706+
19707+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
19708+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
19709+ uint32_t cmdbuf_size;
19710+
19711+ uint32_t reloc_handle; /* Reloc buffer object */
19712+ uint32_t reloc_offset;
19713+ uint32_t num_relocs;
19714+
19715+ int32_t damage; /* Damage front buffer with cliprects */
19716+ /* Not implemented yet */
19717+ uint32_t fence_flags;
19718+ uint32_t engine;
19719+
19720+ /*
19721+ * Feedback:
19722+ */
19723+
19724+ uint32_t feedback_ops;
19725+ uint32_t feedback_handle;
19726+ uint32_t feedback_offset;
19727+ uint32_t feedback_breakpoints;
19728+ uint32_t feedback_size;
19729+} drm_psb_cmdbuf_arg_t;
19730+
19731+struct drm_psb_xhw_init_arg {
19732+ uint32_t operation;
19733+ uint32_t buffer_handle;
19734+};
19735+
19736+/*
19737+ * Feedback components:
19738+ */
19739+
19740+/*
19741+ * Vistest component. The number of these in the feedback buffer
19742+ * equals the number of vistest breakpoints + 1.
19743+ * This is currently the only feedback component.
19744+ */
19745+
19746+struct drm_psb_vistest {
19747+ uint32_t vt[8];
19748+};
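+
+/*
+ * Sizing note: with feedback_breakpoints vistest breakpoints requested
+ * in drm_psb_cmdbuf_arg, the feedback buffer is expected to hold
+ * feedback_breakpoints + 1 of these 32-byte records.
+ */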
19749+
19750+#define PSB_HW_COOKIE_SIZE 16
19751+#define PSB_HW_FEEDBACK_SIZE 8
19752+#define PSB_HW_OOM_CMD_SIZE 6
19753+
19754+struct drm_psb_xhw_arg {
19755+ uint32_t op;
19756+ int ret;
19757+ uint32_t irq_op;
19758+ uint32_t issue_irq;
19759+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
19760+ union {
19761+ struct {
19762+ uint32_t w;
19763+ uint32_t h;
19764+ uint32_t size;
19765+ uint32_t clear_p_start;
19766+ uint32_t clear_num_pages;
19767+ } si;
19768+ struct {
19769+ uint32_t fire_flags;
19770+ uint32_t hw_context;
19771+ uint32_t offset;
19772+ uint32_t engine;
19773+ uint32_t flags;
19774+ uint32_t rca;
19775+ uint32_t num_oom_cmds;
19776+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
19777+ } sb;
19778+ struct {
19779+ uint32_t pages;
19780+ uint32_t size;
19781+ } bi;
19782+ struct {
19783+ uint32_t bca;
19784+ uint32_t rca;
19785+ uint32_t flags;
19786+ } oom;
19787+ struct {
19788+ uint32_t pt_offset;
19789+ uint32_t param_offset;
19790+ uint32_t flags;
19791+ } bl;
19792+ struct {
19793+ uint32_t value;
19794+ } cl;
19795+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
19796+ } arg;
19797+};
19798+
19799+#define DRM_PSB_CMDBUF 0x00
19800+#define DRM_PSB_XHW_INIT 0x01
19801+#define DRM_PSB_XHW 0x02
19802+#define DRM_PSB_SCENE_UNREF 0x03
19803+/* Controlling the kernel modesetting buffers */
19804+#define DRM_PSB_KMS_OFF 0x04
19805+#define DRM_PSB_KMS_ON 0x05
19806+#define DRM_PSB_HW_INFO 0x06
19807+
19808+#define PSB_XHW_INIT 0x00
19809+#define PSB_XHW_TAKEDOWN 0x01
19810+
19811+#define PSB_XHW_FIRE_RASTER 0x00
19812+#define PSB_XHW_SCENE_INFO 0x01
19813+#define PSB_XHW_SCENE_BIND_FIRE 0x02
19814+#define PSB_XHW_TA_MEM_INFO 0x03
19815+#define PSB_XHW_RESET_DPM 0x04
19816+#define PSB_XHW_OOM 0x05
19817+#define PSB_XHW_TERMINATE 0x06
19818+#define PSB_XHW_VISTEST 0x07
19819+#define PSB_XHW_RESUME 0x08
19820+#define PSB_XHW_TA_MEM_LOAD 0x09
19821+#define PSB_XHW_CHECK_LOCKUP 0x0a
19822+
19823+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
19824+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
19825+#define PSB_SCENE_FLAG_SETUP (1 << 2)
19826+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
19827+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
19828+
19829+#define PSB_TA_MEM_FLAG_TA (1 << 0)
19830+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
19831+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
19832+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
19833+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
19834+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
19835+
19836+/*Raster fire will deallocate memory */
19837+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
19838+/*Isp reset needed due to change in ZLS format */
19839+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
19840+/*These are set by Xpsb. */
19841+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
19842+/*The task has had at least one OOM and Xpsb will
19843+ send back messages on each fire. */
19844+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
19845+
19846+#define PSB_SCENE_ENGINE_TA 0
19847+#define PSB_SCENE_ENGINE_RASTER 1
19848+#define PSB_SCENE_NUM_ENGINES 2
19849+
19850+struct drm_psb_dev_info_arg {
19851+ uint32_t num_use_attribute_registers;
19852+};
19853+#define DRM_PSB_DEVINFO 0x01
19854+
19855+#endif
19856Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drv.c
19857===================================================================
19858--- /dev/null 1970-01-01 00:00:00.000000000 +0000
19859+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drv.c 2009-02-05 13:29:33.000000000 +0000
19860@@ -0,0 +1,1006 @@
19861+/**************************************************************************
19862+ * Copyright (c) 2007, Intel Corporation.
19863+ * All Rights Reserved.
19864+ *
19865+ * This program is free software; you can redistribute it and/or modify it
19866+ * under the terms and conditions of the GNU General Public License,
19867+ * version 2, as published by the Free Software Foundation.
19868+ *
19869+ * This program is distributed in the hope it will be useful, but WITHOUT
19870+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19871+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19872+ * more details.
19873+ *
19874+ * You should have received a copy of the GNU General Public License along with
19875+ * this program; if not, write to the Free Software Foundation, Inc.,
19876+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19877+ *
19878+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19879+ * develop this driver.
19880+ *
19881+ **************************************************************************/
19884+
19885+#include "drmP.h"
19886+#include "drm.h"
19887+#include "psb_drm.h"
19888+#include "psb_drv.h"
19889+#include "psb_reg.h"
19890+#include "i915_reg.h"
19891+#include "psb_msvdx.h"
19892+#include "drm_pciids.h"
19893+#include "psb_scene.h"
19894+#include <linux/cpu.h>
19895+#include <linux/notifier.h>
19896+#include <linux/fb.h>
19897+
19898+int drm_psb_debug = 0;
19899+EXPORT_SYMBOL(drm_psb_debug);
19900+static int drm_psb_trap_pagefaults = 0;
19901+static int drm_psb_clock_gating = 0;
19902+static int drm_psb_ta_mem_size = 32 * 1024;
19903+int drm_psb_disable_vsync = 1;
19904+int drm_psb_no_fb = 0;
19905+int drm_psb_force_pipeb = 0;
19906+char *psb_init_mode;
19907+int psb_init_xres;
19908+int psb_init_yres;
19912+#define SII_1392_WA
19913+#ifdef SII_1392_WA
19914+extern int SII_1392;
19915+#endif
19916+
19917+MODULE_PARM_DESC(debug, "Enable debug output");
19918+MODULE_PARM_DESC(clock_gating, "SGX clock gating (1 = force off, 2 = automatic)");
19919+MODULE_PARM_DESC(no_fb, "Disable FBdev");
19920+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
19921+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
19922+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
19923+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
19924+MODULE_PARM_DESC(mode, "initial mode name");
19925+MODULE_PARM_DESC(xres, "initial mode width");
19926+MODULE_PARM_DESC(yres, "initial mode height");
19927+
19928+module_param_named(debug, drm_psb_debug, int, 0600);
19929+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
19930+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
19931+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
19932+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
19933+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
19934+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
19935+module_param_named(mode, psb_init_mode, charp, 0600);
19936+module_param_named(xres, psb_init_xres, int, 0600);
19937+module_param_named(yres, psb_init_yres, int, 0600);
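+
+/*
+ * Example invocation with hypothetical values: "modprobe psb debug=1
+ * no_fb=1 xres=1024 yres=600" enables debug output, disables the
+ * fbdev layer and requests a 1024x600 initial mode.
+ */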
19938+
19939+static struct pci_device_id pciidlist[] = {
19940+ psb_PCI_IDS
19941+};
19942+
19943+#define DRM_PSB_CMDBUF_IOCTL DRM_IOW(DRM_PSB_CMDBUF, \
19944+ struct drm_psb_cmdbuf_arg)
19945+#define DRM_PSB_XHW_INIT_IOCTL DRM_IOR(DRM_PSB_XHW_INIT, \
19946+ struct drm_psb_xhw_init_arg)
19947+#define DRM_PSB_XHW_IOCTL DRM_IO(DRM_PSB_XHW)
19948+
19949+#define DRM_PSB_SCENE_UNREF_IOCTL DRM_IOWR(DRM_PSB_SCENE_UNREF, \
19950+ struct drm_psb_scene)
19951+#define DRM_PSB_HW_INFO_IOCTL DRM_IOR(DRM_PSB_HW_INFO, \
19952+ struct drm_psb_hw_info)
19953+
19954+#define DRM_PSB_KMS_OFF_IOCTL DRM_IO(DRM_PSB_KMS_OFF)
19955+#define DRM_PSB_KMS_ON_IOCTL DRM_IO(DRM_PSB_KMS_ON)
19956+
19957+static struct drm_ioctl_desc psb_ioctls[] = {
19958+ DRM_IOCTL_DEF(DRM_PSB_CMDBUF_IOCTL, psb_cmdbuf_ioctl, DRM_AUTH),
19959+ DRM_IOCTL_DEF(DRM_PSB_XHW_INIT_IOCTL, psb_xhw_init_ioctl,
19960+ DRM_ROOT_ONLY),
19961+ DRM_IOCTL_DEF(DRM_PSB_XHW_IOCTL, psb_xhw_ioctl, DRM_ROOT_ONLY),
19962+ DRM_IOCTL_DEF(DRM_PSB_SCENE_UNREF_IOCTL, drm_psb_scene_unref_ioctl,
19963+ DRM_AUTH),
19964+ DRM_IOCTL_DEF(DRM_PSB_KMS_OFF_IOCTL, psbfb_kms_off_ioctl,
19965+ DRM_ROOT_ONLY),
19966+ DRM_IOCTL_DEF(DRM_PSB_KMS_ON_IOCTL, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
19967+ DRM_IOCTL_DEF(DRM_PSB_HW_INFO_IOCTL, psb_hw_info_ioctl, DRM_AUTH),
19968+};
19969+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
19970+
19971+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
19972+
19973+#ifdef USE_PAT_WC
19974+#warning Init pat
19975+static int __cpuinit psb_cpu_callback(struct notifier_block *nfb,
19976+ unsigned long action,
19977+ void *hcpu)
19978+{
19979+ if (action == CPU_ONLINE)
19980+ drm_init_pat();
19981+
19982+ return 0;
19983+}
19984+
19985+static struct notifier_block __cpuinitdata psb_nb = {
19986+ .notifier_call = psb_cpu_callback,
19987+ .priority = 1
19988+};
19989+#endif
19990+
19991+static int dri_library_name(struct drm_device *dev, char *buf)
19992+{
19993+ return snprintf(buf, PAGE_SIZE, "psb\n");
19994+}
19995+
19996+static void psb_set_uopt(struct drm_psb_uopt *uopt)
19997+{
19998+ uopt->clock_gating = drm_psb_clock_gating;
19999+}
20000+
20001+static void psb_lastclose(struct drm_device *dev)
20002+{
20003+ struct drm_psb_private *dev_priv =
20004+ (struct drm_psb_private *)dev->dev_private;
20005+
20006+ if (!dev->dev_private)
20007+ return;
20008+
20009+ mutex_lock(&dev->struct_mutex);
20010+ if (dev_priv->ta_mem)
20011+ psb_ta_mem_unref_devlocked(&dev_priv->ta_mem);
20012+ mutex_unlock(&dev->struct_mutex);
20013+ mutex_lock(&dev_priv->cmdbuf_mutex);
20014+ if (dev_priv->buffers) {
20015+ vfree(dev_priv->buffers);
20016+ dev_priv->buffers = NULL;
20017+ }
20018+ mutex_unlock(&dev_priv->cmdbuf_mutex);
20019+}
20020+
20021+static void psb_do_takedown(struct drm_device *dev)
20022+{
20023+ struct drm_psb_private *dev_priv =
20024+ (struct drm_psb_private *)dev->dev_private;
20025+
20026+ mutex_lock(&dev->struct_mutex);
20027+ if (dev->bm.initialized) {
20028+ if (dev_priv->have_mem_rastgeom) {
20029+ drm_bo_clean_mm(dev, DRM_PSB_MEM_RASTGEOM);
20030+ dev_priv->have_mem_rastgeom = 0;
20031+ }
20032+ if (dev_priv->have_mem_mmu) {
20033+ drm_bo_clean_mm(dev, DRM_PSB_MEM_MMU);
20034+ dev_priv->have_mem_mmu = 0;
20035+ }
20036+ if (dev_priv->have_mem_aper) {
20037+ drm_bo_clean_mm(dev, DRM_PSB_MEM_APER);
20038+ dev_priv->have_mem_aper = 0;
20039+ }
20040+ if (dev_priv->have_tt) {
20041+ drm_bo_clean_mm(dev, DRM_BO_MEM_TT);
20042+ dev_priv->have_tt = 0;
20043+ }
20044+ if (dev_priv->have_vram) {
20045+ drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM);
20046+ dev_priv->have_vram = 0;
20047+ }
20048+ }
20049+ mutex_unlock(&dev->struct_mutex);
20050+
20051+ if (dev_priv->has_msvdx)
20052+ psb_msvdx_uninit(dev);
20053+
20054+ if (dev_priv->comm) {
20055+ kunmap(dev_priv->comm_page);
20056+ dev_priv->comm = NULL;
20057+ }
20058+ if (dev_priv->comm_page) {
20059+ __free_page(dev_priv->comm_page);
20060+ dev_priv->comm_page = NULL;
20061+ }
20062+}
20063+
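+/*
+ * Interpretation of the clock_gating option below: 1 forces clock
+ * gating off for all SGX units, 2 selects automatic gating everywhere,
+ * and any other value keeps whatever PSB_CR_CLKGATECTL already holds.
+ */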
20064+void psb_clockgating(struct drm_psb_private *dev_priv)
20065+{
20066+ uint32_t clock_gating;
20067+
20068+ if (dev_priv->uopt.clock_gating == 1) {
20069+ PSB_DEBUG_INIT("Disabling clock gating.\n");
20070+
20071+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20072+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
20073+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20074+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
20075+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20076+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
20077+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20078+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
20079+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20080+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
20081+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20082+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
20083+
20084+ } else if (dev_priv->uopt.clock_gating == 2) {
20085+ PSB_DEBUG_INIT("Enabling clock gating.\n");
20086+
20087+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20088+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
20089+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20090+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
20091+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20092+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
20093+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20094+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
20095+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20096+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
20097+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20098+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
20099+ } else
20100+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
20101+
20102+#ifdef FIX_TG_2D_CLOCKGATE
20103+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
20104+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20105+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
20106+#endif
20107+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
20108+ (void)PSB_RSGX32(PSB_CR_CLKGATECTL);
20109+}
20110+
20111+static int psb_do_init(struct drm_device *dev)
20112+{
20113+ struct drm_psb_private *dev_priv =
20114+ (struct drm_psb_private *)dev->dev_private;
20115+ struct psb_gtt *pg = dev_priv->pg;
20116+
20117+ uint32_t stolen_gtt;
20118+ uint32_t tt_start;
20119+ uint32_t tt_pages;
20120+
20121+ int ret = -ENOMEM;
20122+
20123+ DRM_ERROR("Debug is 0x%08x\n", drm_psb_debug);
20124+
20125+ dev_priv->ta_mem_pages =
20126+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, PAGE_SIZE) >> PAGE_SHIFT;
20127+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
20128+ if (!dev_priv->comm_page)
20129+ goto out_err;
20130+
20131+ dev_priv->comm = kmap(dev_priv->comm_page);
20132+ memset((void *)dev_priv->comm, 0, PAGE_SIZE);
20133+
20134+ dev_priv->has_msvdx = 1;
20135+ if (psb_msvdx_init(dev))
20136+ dev_priv->has_msvdx = 0;
20137+
20138+ /*
20139+ * Initialize sequence numbers for the different command
20140+ * submission mechanisms.
20141+ */
20142+
20143+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
20144+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
20145+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
20146+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
20147+
20148+ if (pg->gatt_start & 0x0FFFFFFF) {
20149+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
20150+ ret = -EINVAL;
20151+ goto out_err;
20152+ }
20153+
20154+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
20155+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
20156+ stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
20157+
20158+ dev_priv->gatt_free_offset = pg->gatt_start +
20159+ (stolen_gtt << PAGE_SHIFT) * 1024;
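+ /*
+ * A worked instance of the arithmetic above, assuming 4 kiB pages
+ * and 4-byte GTT entries: 32 MB of stolen memory covers 8192
+ * pages, needing 32 kiB of entries, i.e. 8 GTT table pages; each
+ * table page maps 1024 pages (4 MB), so gatt_free_offset starts
+ * 8 * 4 MB = 32 MB into the aperture, just past the stolen area
+ * (rounded up to table-page granularity).
+ */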
20160+
20161+ /*
20162+ * Insert a cache-coherent communications page in mmu space
20163+ * just after the stolen area. Will be used for fencing etc.
20164+ */
20165+
20166+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
20167+ dev_priv->gatt_free_offset += PAGE_SIZE;
20168+
20169+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
20170+ &dev_priv->comm_page,
20171+ dev_priv->comm_mmu_offset, 1, 0, 0,
20172+ PSB_MMU_CACHED_MEMORY);
20173+
20174+ if (ret)
20175+ goto out_err;
20176+
20177+ if (1 || drm_debug) {
20178+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
20179+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
20180+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
20181+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
20182+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
20183+ _PSB_CC_REVISION_MAJOR_SHIFT,
20184+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
20185+ _PSB_CC_REVISION_MINOR_SHIFT);
20186+ DRM_INFO
20187+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
20188+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
20189+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
20190+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
20191+ _PSB_CC_REVISION_DESIGNER_SHIFT);
20192+ }
20193+
20194+ dev_priv->irqmask_lock = SPIN_LOCK_UNLOCKED;
20195+ dev_priv->fence0_irq_on = 0;
20196+
20197+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
20198+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
20199+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
20200+ tt_pages -= tt_start >> PAGE_SHIFT;
20201+
20202+ mutex_lock(&dev->struct_mutex);
20203+
20204+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
20205+ pg->stolen_size >> PAGE_SHIFT)) {
20206+ dev_priv->have_vram = 1;
20207+ }
20208+
20209+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_TT, tt_start >> PAGE_SHIFT,
20210+ tt_pages)) {
20211+ dev_priv->have_tt = 1;
20212+ }
20213+
20214+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_MMU, 0x00000000,
20215+ (pg->gatt_start -
20216+ PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
20217+ dev_priv->have_mem_mmu = 1;
20218+ }
20219+
20220+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
20221+ (PSB_MEM_MMU_START -
20222+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
20223+ dev_priv->have_mem_rastgeom = 1;
20224+ }
20225+#if 0
20226+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
20227+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
20228+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT)) {
20229+ dev_priv->have_mem_aper = 1;
20230+ }
20231+ }
20232+#endif
20233+
20234+ mutex_unlock(&dev->struct_mutex);
20235+
20236+ return 0;
20237+ out_err:
20238+ psb_do_takedown(dev);
20239+ return ret;
20240+}
20241+
20242+static int psb_driver_unload(struct drm_device *dev)
20243+{
20244+ struct drm_psb_private *dev_priv =
20245+ (struct drm_psb_private *)dev->dev_private;
20246+
20247+ intel_modeset_cleanup(dev);
20248+
20249+ if (dev_priv) {
20250+ psb_watchdog_takedown(dev_priv);
20251+ psb_do_takedown(dev);
20252+ psb_xhw_takedown(dev_priv);
20253+ psb_scheduler_takedown(&dev_priv->scheduler);
20254+
20255+ mutex_lock(&dev->struct_mutex);
20256+ if (dev_priv->have_mem_pds) {
20257+ drm_bo_clean_mm(dev, DRM_PSB_MEM_PDS);
20258+ dev_priv->have_mem_pds = 0;
20259+ }
20260+ if (dev_priv->have_mem_kernel) {
20261+ drm_bo_clean_mm(dev, DRM_PSB_MEM_KERNEL);
20262+ dev_priv->have_mem_kernel = 0;
20263+ }
20264+ mutex_unlock(&dev->struct_mutex);
20265+
20266+ (void)drm_bo_driver_finish(dev);
20267+
20268+ if (dev_priv->pf_pd) {
20269+ psb_mmu_free_pagedir(dev_priv->pf_pd);
20270+ dev_priv->pf_pd = NULL;
20271+ }
20272+ if (dev_priv->mmu) {
20273+ struct psb_gtt *pg = dev_priv->pg;
20274+
20275+ down_read(&pg->sem);
20276+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
20277+ (dev_priv->mmu),
20278+ pg->gatt_start,
20279+ pg->stolen_size >> PAGE_SHIFT);
20281+ up_read(&pg->sem);
20282+ psb_mmu_driver_takedown(dev_priv->mmu);
20283+ dev_priv->mmu = NULL;
20284+ }
20285+ psb_gtt_takedown(dev_priv->pg, 1);
20286+ if (dev_priv->scratch_page) {
20287+ __free_page(dev_priv->scratch_page);
20288+ dev_priv->scratch_page = NULL;
20289+ }
20290+ psb_takedown_use_base(dev_priv);
20291+ if (dev_priv->vdc_reg) {
20292+ iounmap(dev_priv->vdc_reg);
20293+ dev_priv->vdc_reg = NULL;
20294+ }
20295+ if (dev_priv->sgx_reg) {
20296+ iounmap(dev_priv->sgx_reg);
20297+ dev_priv->sgx_reg = NULL;
20298+ }
20299+ if (dev_priv->msvdx_reg) {
20300+ iounmap(dev_priv->msvdx_reg);
20301+ dev_priv->msvdx_reg = NULL;
20302+ }
20303+
20304+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
20305+ dev->dev_private = NULL;
20306+ }
20307+ return 0;
20308+}
20309+
20310+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
20311+extern int drm_pick_crtcs(struct drm_device *dev);
20312+extern char drm_init_mode[32];
20313+extern int drm_init_xres;
20314+extern int drm_init_yres;
20315+
20316+static int psb_initial_config(struct drm_device *dev, bool can_grow)
20317+{
20318+ struct drm_psb_private *dev_priv = dev->dev_private;
20319+ struct drm_output *output;
20320+ struct drm_crtc *crtc;
20321+ int ret = 0;
20322+
20323+ mutex_lock(&dev->mode_config.mutex);
20324+
20325+ drm_crtc_probe_output_modes(dev, 2048, 2048);
20326+
20327+ /* strncpy(drm_init_mode, psb_init_mode, strlen(psb_init_mode)); */
20328+ drm_init_xres = psb_init_xres;
20329+ drm_init_yres = psb_init_yres;
20330+
20331+ drm_pick_crtcs(dev);
20332+
20333+ if ((I915_READ(PIPEACONF) & PIPEACONF_ENABLE) && !drm_psb_force_pipeb)
20334+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
20335+ if (!crtc->desired_mode)
20336+ continue;
20337+
20338+ dev->driver->fb_probe(dev, crtc);
20339+ } else
20340+ list_for_each_entry_reverse(crtc, &dev->mode_config.crtc_list,
20341+ head) {
20342+ if (!crtc->desired_mode)
20343+ continue;
20344+
20345+ dev->driver->fb_probe(dev, crtc);
20346+ }
20347+
20348+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
20349+
20350+ if (!output->crtc || !output->crtc->desired_mode)
20351+ continue;
20352+
20353+ if (output->crtc->fb)
20354+ drm_crtc_set_mode(output->crtc,
20355+ output->crtc->desired_mode, 0, 0);
20356+ }
20357+
20358+#ifdef SII_1392_WA
20359+ if((SII_1392 != 1) || (drm_psb_no_fb==0))
20360+ drm_disable_unused_functions(dev);
20361+#else
20362+ drm_disable_unused_functions(dev);
20363+#endif
20364+
20365+
20366+ mutex_unlock(&dev->mode_config.mutex);
20367+
20368+ return ret;
20369+
20370+}
20371+
20372+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
20373+{
20374+ struct drm_psb_private *dev_priv;
20375+ unsigned long resource_start;
20376+ struct psb_gtt *pg;
20377+ int ret = -ENOMEM;
20378+
20379+ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
20380+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
20381+ if (dev_priv == NULL)
20382+ return -ENOMEM;
20383+
20384+ mutex_init(&dev_priv->temp_mem);
20385+ mutex_init(&dev_priv->cmdbuf_mutex);
20386+ mutex_init(&dev_priv->reset_mutex);
20387+ psb_init_disallowed();
20388+
20389+ atomic_set(&dev_priv->msvdx_mmu_invaldc, 0);
20390+
20391+#ifdef FIX_TG_16
20392+ atomic_set(&dev_priv->lock_2d, 0);
20393+ atomic_set(&dev_priv->ta_wait_2d, 0);
20394+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
20395+ atomic_set(&dev_priv->waiters_2d, 0);
20396+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
20397+#else
20398+ mutex_init(&dev_priv->mutex_2d);
20399+#endif
20400+
20401+ spin_lock_init(&dev_priv->reloc_lock);
20402+
20403+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
20404+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
20405+
20406+ dev->dev_private = (void *)dev_priv;
20407+ dev_priv->chipset = chipset;
20408+ psb_set_uopt(&dev_priv->uopt);
20409+
20410+ psb_watchdog_init(dev_priv);
20411+ psb_scheduler_init(dev, &dev_priv->scheduler);
20412+
20413+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
20414+
20415+ dev_priv->msvdx_reg =
20416+ ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
20417+ if (!dev_priv->msvdx_reg)
20418+ goto out_err;
20419+
20420+ dev_priv->vdc_reg =
20421+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
20422+ if (!dev_priv->vdc_reg)
20423+ goto out_err;
20424+
20425+ dev_priv->sgx_reg =
20426+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
20427+ if (!dev_priv->sgx_reg)
20428+ goto out_err;
20429+
20430+ psb_clockgating(dev_priv);
20431+ if (psb_init_use_base(dev_priv, 3, 13))
20432+ goto out_err;
20433+
20434+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
20435+ if (!dev_priv->scratch_page)
20436+ goto out_err;
20437+
20438+ dev_priv->pg = psb_gtt_alloc(dev);
20439+ if (!dev_priv->pg)
20440+ goto out_err;
20441+
20442+ ret = psb_gtt_init(dev_priv->pg, 0);
20443+ if (ret)
20444+ goto out_err;
20445+
20446+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
20447+ drm_psb_trap_pagefaults, 0,
20448+ &dev_priv->msvdx_mmu_invaldc);
20449+ if (!dev_priv->mmu)
20450+ goto out_err;
20451+
20452+ pg = dev_priv->pg;
20453+
20454+ /*
20455+ * Make sgx MMU aware of the stolen memory area we call VRAM.
20456+ */
20457+
20458+ down_read(&pg->sem);
20459+ ret =
20460+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
20461+ pg->stolen_base >> PAGE_SHIFT,
20462+ pg->gatt_start,
20463+ pg->stolen_size >> PAGE_SHIFT, 0);
20464+ up_read(&pg->sem);
20465+ if (ret)
20466+ goto out_err;
20467+
20468+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
20469+ if (!dev_priv->pf_pd)
20470+ goto out_err;
20471+
20472+ /*
20473+ * Make all presumably unused requestors page-fault by making them
20474+ * use context 1 which does not have any valid mappings.
20475+ */
20476+
20477+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
20478+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
20479+ PSB_RSGX32(PSB_CR_BIF_BANK1);
20480+
20481+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
20482+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
20483+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
20484+
20485+ psb_init_2d(dev_priv);
20486+
20487+ ret = drm_bo_driver_init(dev);
20488+ if (ret)
20489+ goto out_err;
20490+
20491+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_KERNEL, 0x00000000,
20492+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
20493+ >> PAGE_SHIFT);
20494+ if (ret)
20495+ goto out_err;
20496+ dev_priv->have_mem_kernel = 1;
20497+
20498+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_PDS, 0x00000000,
20499+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
20500+ >> PAGE_SHIFT);
20501+ if (ret)
20502+ goto out_err;
20503+ dev_priv->have_mem_pds = 1;
20504+
20505+ ret = psb_do_init(dev);
20506+ if (ret)
20507+ return ret;
20508+
20509+ ret = psb_xhw_init(dev);
20510+ if (ret)
20511+ return ret;
20512+
20513+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
20514+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
20515+
20516+ intel_modeset_init(dev);
20517+ psb_initial_config(dev, false);
20518+
20519+#ifdef USE_PAT_WC
20520+#warning Init pat
20521+ register_cpu_notifier(&psb_nb);
20522+#endif
20523+
20524+ return 0;
20525+ out_err:
20526+ psb_driver_unload(dev);
20527+ return ret;
20528+}
20529+
20530+int psb_driver_device_is_agp(struct drm_device *dev)
20531+{
20532+ return 0;
20533+}
20534+
20535+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
20536+{
20537+ struct drm_psb_private *dev_priv =
20538+ (struct drm_psb_private *)dev->dev_private;
20539+ struct drm_fence_manager *fm = &dev->fm;
20540+ struct drm_fence_class_manager *fc = &fm->fence_class[PSB_ENGINE_VIDEO];
20541+ struct drm_fence_object *fence;
20542+ int ret = 0;
20543+ int signaled = 0;
20544+ int count = 0;
20545+ unsigned long _end = jiffies + 3 * DRM_HZ;
20546+
20547+ PSB_DEBUG_GENERAL("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
20548+
20549+ /* Set the MSVDX reset flag here. */
20550+ dev_priv->msvdx_needs_reset = 1;
20551+
20552+ /* Ensure that all pending IRQs are serviced. */
20553+ list_for_each_entry(fence, &fc->ring, ring) {
20554+ count++;
20555+ do {
20556+ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
20557+ (signaled =
20558+ drm_fence_object_signaled(fence,
20559+ DRM_FENCE_TYPE_EXE)));
20560+ if (signaled)
20561+ break;
20562+ if (time_after_eq(jiffies, _end))
20563+ PSB_DEBUG_GENERAL
20564+ ("MSVDXACPI: fence 0x%x didn't get signaled for 3 secs; we will suspend anyways\n",
20565+ (unsigned int)fence);
20566+ } while (ret == -EINTR);
20567+
20568+ }
20569+
20570+ /* Issue software reset */
20571+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
20572+
20573+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0,
20574+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
20575+
20576+ PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
20577+ count);
20578+ return 0;
20579+}
20580+
20581+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
20582+{
20583+ struct drm_device *dev = pci_get_drvdata(pdev);
20584+ struct drm_psb_private *dev_priv =
20585+ (struct drm_psb_private *)dev->dev_private;
20586+ struct drm_output *output;
20587+
20588+ if (drm_psb_no_fb == 0)
20589+ psbfb_suspend(dev);
20590+#ifdef WA_NO_FB_GARBAGE_DISPLAY
20591+ else {
20592+ if(num_registered_fb)
20593+ {
20594+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
20595+ if(output->crtc != NULL)
20596+ intel_crtc_mode_save(output->crtc);
20597+ //if(output->funcs->save)
20598+ // output->funcs->save(output);
20599+ }
20600+ }
20601+ }
20602+#endif
20603+
20604+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
20605+ (void)psb_idle_3d(dev);
20606+ (void)psb_idle_2d(dev);
20607+ flush_scheduled_work();
20608+
20609+ psb_takedown_use_base(dev_priv);
20610+
20611+ if (dev_priv->has_msvdx)
20612+ psb_prepare_msvdx_suspend(dev);
20613+
20614+ pci_save_state(pdev);
20615+ pci_disable_device(pdev);
20616+ pci_set_power_state(pdev, PCI_D3hot);
20617+
20618+ return 0;
20619+}
20620+
20621+static int psb_resume(struct pci_dev *pdev)
20622+{
20623+ struct drm_device *dev = pci_get_drvdata(pdev);
20624+ struct drm_psb_private *dev_priv =
20625+ (struct drm_psb_private *)dev->dev_private;
20626+ struct psb_gtt *pg = dev_priv->pg;
20627+ struct drm_output *output;
20628+ int ret;
20629+
20630+ pci_set_power_state(pdev, PCI_D0);
20631+ pci_restore_state(pdev);
20632+ ret = pci_enable_device(pdev);
20633+ if (ret)
20634+ return ret;
20635+
20636+#ifdef USE_PAT_WC
20637+#warning Init pat
20638+ /* For a single CPU we do it here; for more than one CPU we
20639+ * use the CPU notifier to reinit PAT on those CPUs.
20640+ */
20641+ drm_init_pat();
20642+#endif
20643+
20644+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
20645+ dev_priv->msvdx_needs_reset = 1;
20646+
20647+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
20648+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
20649+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
20650+
20651+ /*
20652+ * The GTT page tables are probably not saved.
20653+ * However, TT and VRAM are empty at this point.
20654+ */
20655+
20656+ psb_gtt_init(dev_priv->pg, 1);
20657+
20658+ /*
20659+ * The SGX loses its register contents.
20660+ * Restore BIF registers. The MMU page tables are
20661+ * "normal" pages, so their contents should be kept.
20662+ */
20663+
20664+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
20665+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
20666+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
20667+ PSB_RSGX32(PSB_CR_BIF_BANK1);
20668+
20669+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
20670+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
20671+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
20672+
20673+ /*
20674+ * 2D base registers.
20675+ */
20676+ psb_init_2d(dev_priv);
20677+
20678+ if (drm_psb_no_fb == 0) {
20679+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
20680+ if(output->crtc != NULL)
20681+ drm_crtc_set_mode(output->crtc, &output->crtc->mode,
20682+ output->crtc->x, output->crtc->y);
20683+ }
20684+ }
20685+
20686+ /*
20687+ * Persistent 3D base registers and USSE base registers.
20688+ */
20689+
20690+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
20691+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
20692+ psb_init_use_base(dev_priv, 3, 13);
20693+
20694+ /*
20695+ * Now, re-initialize the 3D engine.
20696+ */
20697+
20698+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
20699+
20700+ psb_scheduler_ta_mem_check(dev_priv);
20701+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
20702+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
20703+ PSB_TA_MEM_FLAG_TA |
20704+ PSB_TA_MEM_FLAG_RASTER |
20705+ PSB_TA_MEM_FLAG_HOSTA |
20706+ PSB_TA_MEM_FLAG_HOSTD |
20707+ PSB_TA_MEM_FLAG_INIT,
20708+ dev_priv->ta_mem->ta_memory->offset,
20709+ dev_priv->ta_mem->hw_data->offset,
20710+ dev_priv->ta_mem->hw_cookie);
20711+ }
20712+
20713+ if (drm_psb_no_fb == 0)
20714+ psbfb_resume(dev);
20715+#ifdef WA_NO_FB_GARBAGE_DISPLAY
20716+	else if (num_registered_fb) {
20717+		struct fb_info *fb_info = registered_fb[0];
20718+
20719+		list_for_each_entry(output, &dev->mode_config.output_list,
20720+				    head) {
20721+			if (output->crtc != NULL)
20722+				intel_crtc_mode_restore(output->crtc);
20723+		}
20724+		if (fb_info) {
20725+			fb_set_suspend(fb_info, 0);
20726+			printk(KERN_DEBUG "psbfb: resumed framebuffer\n");
20727+		}
20728+	}
20731+#endif
20732+
20733+ return 0;
20734+}
20735+
20736+/* always available as we are SIGIO'd */
20737+static unsigned int psb_poll(struct file *filp, struct poll_table_struct *wait)
20738+{
20739+ return (POLLIN | POLLRDNORM);
20740+}
20741+
20742+static int psb_release(struct inode *inode, struct file *filp)
20743+{
20744+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
20745+ struct drm_device *dev = file_priv->minor->dev;
20746+ struct drm_psb_private *dev_priv =
20747+ (struct drm_psb_private *)dev->dev_private;
20748+
20749+ if (dev_priv && dev_priv->xhw_file) {
20750+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
20751+ }
20752+ return drm_release(inode, filp);
20753+}
20754+
20755+extern struct drm_fence_driver psb_fence_driver;
20756+
20757+/*
20758+ * Use this memory type priority if no eviction is needed.
20759+ */
20760+static uint32_t psb_mem_prios[] = { DRM_BO_MEM_VRAM,
20761+ DRM_BO_MEM_TT,
20762+ DRM_PSB_MEM_KERNEL,
20763+ DRM_PSB_MEM_MMU,
20764+ DRM_PSB_MEM_RASTGEOM,
20765+ DRM_PSB_MEM_PDS,
20766+ DRM_PSB_MEM_APER,
20767+ DRM_BO_MEM_LOCAL
20768+};
20769+
20770+/*
20771+ * Use this memory type priority if eviction is needed.
20772+ */
20773+static uint32_t psb_busy_prios[] = { DRM_BO_MEM_TT,
20774+ DRM_BO_MEM_VRAM,
20775+ DRM_PSB_MEM_KERNEL,
20776+ DRM_PSB_MEM_MMU,
20777+ DRM_PSB_MEM_RASTGEOM,
20778+ DRM_PSB_MEM_PDS,
20779+ DRM_PSB_MEM_APER,
20780+ DRM_BO_MEM_LOCAL
20781+};
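+
+/*
+ * Editor's note (an assumption about intent, not from the original
+ * patch): listing DRM_BO_MEM_TT before DRM_BO_MEM_VRAM in the busy
+ * list plausibly steers evictions into TT first, so the small
+ * VRAM/stolen aperture is disturbed only as a last resort, while the
+ * idle-path order prefers VRAM for scanout-friendly placement.
+ */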
20782+
20783+static struct drm_bo_driver psb_bo_driver = {
20784+ .mem_type_prio = psb_mem_prios,
20785+ .mem_busy_prio = psb_busy_prios,
20786+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
20787+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
20788+ .create_ttm_backend_entry = drm_psb_tbe_init,
20789+ .fence_type = psb_fence_types,
20790+ .invalidate_caches = psb_invalidate_caches,
20791+ .init_mem_type = psb_init_mem_type,
20792+ .evict_mask = psb_evict_mask,
20793+ .move = psb_move,
20794+ .backend_size = psb_tbe_size,
20795+ .command_stream_barrier = NULL,
20796+};
20797+
20798+static struct drm_driver driver = {
20799+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
20800+ DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
20801+ .load = psb_driver_load,
20802+ .unload = psb_driver_unload,
20803+ .dri_library_name = dri_library_name,
20804+ .get_reg_ofs = drm_core_get_reg_ofs,
20805+ .ioctls = psb_ioctls,
20806+ .device_is_agp = psb_driver_device_is_agp,
20807+ .vblank_wait = psb_vblank_wait,
20808+ .vblank_wait2 = psb_vblank_wait2,
20809+ .irq_preinstall = psb_irq_preinstall,
20810+ .irq_postinstall = psb_irq_postinstall,
20811+ .irq_uninstall = psb_irq_uninstall,
20812+ .irq_handler = psb_irq_handler,
20813+ .fb_probe = psbfb_probe,
20814+ .fb_remove = psbfb_remove,
20815+ .firstopen = NULL,
20816+ .lastclose = psb_lastclose,
20817+ .fops = {
20818+ .owner = THIS_MODULE,
20819+ .open = drm_open,
20820+ .release = psb_release,
20821+ .ioctl = drm_ioctl,
20822+ .mmap = drm_mmap,
20823+ .poll = psb_poll,
20824+ .fasync = drm_fasync,
20825+ },
20826+ .pci_driver = {
20827+ .name = DRIVER_NAME,
20828+ .id_table = pciidlist,
20829+ .probe = probe,
20830+ .remove = __devexit_p(drm_cleanup_pci),
20831+ .resume = psb_resume,
20832+ .suspend = psb_suspend,
20833+ },
20834+ .fence_driver = &psb_fence_driver,
20835+ .bo_driver = &psb_bo_driver,
20836+ .name = DRIVER_NAME,
20837+ .desc = DRIVER_DESC,
20838+ .date = PSB_DRM_DRIVER_DATE,
20839+ .major = PSB_DRM_DRIVER_MAJOR,
20840+ .minor = PSB_DRM_DRIVER_MINOR,
20841+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
20842+};
20843+
20844+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
20845+{
20846+ return drm_get_dev(pdev, ent, &driver);
20847+}
20848+
20849+static int __init psb_init(void)
20850+{
20851+ driver.num_ioctls = psb_max_ioctl;
20852+
20853+ return drm_init(&driver, pciidlist);
20854+}
20855+
20856+static void __exit psb_exit(void)
20857+{
20858+ drm_exit(&driver);
20859+}
20860+
20861+module_init(psb_init);
20862+module_exit(psb_exit);
20863+
20864+MODULE_AUTHOR(DRIVER_AUTHOR);
20865+MODULE_DESCRIPTION(DRIVER_DESC);
20866+MODULE_LICENSE("GPL");
20867Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drv.h
20868===================================================================
20869--- /dev/null 1970-01-01 00:00:00.000000000 +0000
20870+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drv.h 2009-02-05 13:29:33.000000000 +0000
20871@@ -0,0 +1,775 @@
20872+/**************************************************************************
20873+ * Copyright (c) 2007, Intel Corporation.
20874+ * All Rights Reserved.
20875+ *
20876+ * This program is free software; you can redistribute it and/or modify it
20877+ * under the terms and conditions of the GNU General Public License,
20878+ * version 2, as published by the Free Software Foundation.
20879+ *
20880+ * This program is distributed in the hope it will be useful, but WITHOUT
20881+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20882+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20883+ * more details.
20884+ *
20885+ * You should have received a copy of the GNU General Public License along with
20886+ * this program; if not, write to the Free Software Foundation, Inc.,
20887+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20888+ *
20889+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
20890+ * develop this driver.
20891+ *
20892+ **************************************************************************/
20895+#ifndef _PSB_DRV_H_
20896+#define _PSB_DRV_H_
20897+
20898+#include "drmP.h"
20899+#include "psb_drm.h"
20900+#include "psb_reg.h"
20901+#include "psb_schedule.h"
20902+#include "intel_drv.h"
20903+
20904+enum {
20905+ CHIP_PSB_8108 = 0,
20906+ CHIP_PSB_8109 = 1
20907+};
20908+
20909+/*
20910+ * Hardware bugfixes
20911+ */
20912+
20913+#define FIX_TG_16
20914+#define FIX_TG_2D_CLOCKGATE
20915+
20916+#define DRIVER_NAME "psb"
20917+#define DRIVER_DESC "drm driver for the Intel GMA500"
20918+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
20919+
20920+#define PSB_DRM_DRIVER_DATE "20080613"
20921+#define PSB_DRM_DRIVER_MAJOR 4
20922+#define PSB_DRM_DRIVER_MINOR 12
20923+#define PSB_DRM_DRIVER_PATCHLEVEL 0
20924+
20925+#define PSB_VDC_OFFSET 0x00000000
20926+#define PSB_VDC_SIZE 0x00080000
20927+#define PSB_SGX_SIZE 0x8000
20928+#define PSB_SGX_OFFSET 0x00040000
20929+#define PSB_MMIO_RESOURCE 0
20930+#define PSB_GATT_RESOURCE 2
20931+#define PSB_GTT_RESOURCE 3
20932+#define PSB_GMCH_CTRL 0x52
20933+#define PSB_BSM 0x5C
20934+#define _PSB_GMCH_ENABLED 0x4
20935+#define PSB_PGETBL_CTL 0x2020
20936+#define _PSB_PGETBL_ENABLED 0x00000001
20937+#define PSB_SGX_2D_SLAVE_PORT 0x4000
20938+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
20939+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
20940+#define PSB_NUM_VALIDATE_BUFFERS 1024
20941+#define PSB_MEM_KERNEL_START 0x10000000
20942+#define PSB_MEM_PDS_START 0x20000000
20943+#define PSB_MEM_MMU_START 0x40000000
20944+
20945+#define DRM_PSB_MEM_KERNEL DRM_BO_MEM_PRIV0
20946+#define DRM_PSB_FLAG_MEM_KERNEL DRM_BO_FLAG_MEM_PRIV0
20947+
20948+/*
20949+ * Flags for external memory type field.
20950+ */
20951+
20952+#define PSB_MSVDX_OFFSET 0x50000 /* MSVDX base offset */
20953+#define PSB_MSVDX_SIZE 0x8000 /* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
20954+
20955+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
20956+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
20957+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
20958+
20959+/*
20960+ * PTE's and PDE's
20961+ */
20962+
20963+#define PSB_PDE_MASK 0x003FFFFF
20964+#define PSB_PDE_SHIFT 22
20965+#define PSB_PTE_SHIFT 12
20966+
20967+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
20968+#define PSB_PTE_WO 0x0002 /* Write only */
20969+#define PSB_PTE_RO 0x0004 /* Read only */
20970+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
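+
+/*
+ * Editor's sketch (not part of the original patch): with the constants
+ * above, a 32-bit device virtual address plausibly decomposes as
+ *
+ *	pde_idx = addr >> PSB_PDE_SHIFT;                   4MB per PDE
+ *	pte_idx = (addr & PSB_PDE_MASK) >> PSB_PTE_SHIFT;  4KB per PTE
+ *
+ * e.g. addr 0x00C03000 -> pde_idx 3, pte_idx 3. The exact walk is an
+ * assumption here; the authoritative code lives in psb_mmu.c.
+ */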
20971+
20972+/*
20973+ * VDC registers and bits
20974+ */
20975+#define PSB_HWSTAM 0x2098
20976+#define PSB_INSTPM 0x20C0
20977+#define PSB_INT_IDENTITY_R 0x20A4
20978+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
20979+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
20980+#define _PSB_IRQ_SGX_FLAG (1<<18)
20981+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
20982+#define PSB_INT_MASK_R 0x20A8
20983+#define PSB_INT_ENABLE_R 0x20A0
20984+#define PSB_PIPEASTAT 0x70024
20985+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
20986+#define _PSB_VBLANK_CLEAR (1 << 1)
20987+#define PSB_PIPEBSTAT 0x71024
20988+
20989+#define _PSB_MMU_ER_MASK 0x0001FF00
20990+#define _PSB_MMU_ER_HOST (1 << 16)
20991+#define GPIOA 0x5010
20992+#define GPIOB 0x5014
20993+#define GPIOC 0x5018
20994+#define GPIOD 0x501c
20995+#define GPIOE 0x5020
20996+#define GPIOF 0x5024
20997+#define GPIOG 0x5028
20998+#define GPIOH 0x502c
20999+#define GPIO_CLOCK_DIR_MASK (1 << 0)
21000+#define GPIO_CLOCK_DIR_IN (0 << 1)
21001+#define GPIO_CLOCK_DIR_OUT (1 << 1)
21002+#define GPIO_CLOCK_VAL_MASK (1 << 2)
21003+#define GPIO_CLOCK_VAL_OUT (1 << 3)
21004+#define GPIO_CLOCK_VAL_IN (1 << 4)
21005+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
21006+#define GPIO_DATA_DIR_MASK (1 << 8)
21007+#define GPIO_DATA_DIR_IN (0 << 9)
21008+#define GPIO_DATA_DIR_OUT (1 << 9)
21009+#define GPIO_DATA_VAL_MASK (1 << 10)
21010+#define GPIO_DATA_VAL_OUT (1 << 11)
21011+#define GPIO_DATA_VAL_IN (1 << 12)
21012+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
21013+
21014+#define VCLK_DIVISOR_VGA0 0x6000
21015+#define VCLK_DIVISOR_VGA1 0x6004
21016+#define VCLK_POST_DIV 0x6010
21017+
21018+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
21019+#define I915_WRITE(_offs, _val) \
21020+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
21021+#define I915_READ(_offs) \
21022+ ioread32(dev_priv->vdc_reg + (_offs))
21023+
21024+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
21025+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
21026+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
21027+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
21028+#define PSB_COMM_USER_IRQ (1024 >> 2)
21029+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
21030+#define PSB_COMM_FW (2048 >> 2)
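+
+/*
+ * Editor's note (an assumption, not from the original patch): these
+ * values appear to be uint32_t indices into the 4K communication page
+ * (dev_priv->comm), e.g. PSB_COMM_USER_IRQ = 1024 >> 2 is the dword at
+ * byte offset 1024, and each engine gets a 16-dword slot via << 4.
+ */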
21031+
21032+#define PSB_UIRQ_VISTEST 1
21033+#define PSB_UIRQ_OOM_REPLY 2
21034+#define PSB_UIRQ_FIRE_TA_REPLY 3
21035+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
21036+
21037+#define PSB_2D_SIZE (256*1024*1024)
21038+#define PSB_MAX_RELOC_PAGES 1024
21039+
21040+#define PSB_LOW_REG_OFFS 0x0204
21041+#define PSB_HIGH_REG_OFFS 0x0600
21042+
21043+#define PSB_NUM_VBLANKS 2
21044+
21058+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
21059+
21060+/*
21061+ * User options.
21062+ */
21063+
21064+struct drm_psb_uopt {
21065+ int clock_gating;
21066+};
21067+
21068+struct psb_gtt {
21069+ struct drm_device *dev;
21070+ int initialized;
21071+ uint32_t gatt_start;
21072+ uint32_t gtt_start;
21073+ uint32_t gtt_phys_start;
21074+ unsigned gtt_pages;
21075+ unsigned gatt_pages;
21076+ uint32_t stolen_base;
21077+ uint32_t pge_ctl;
21078+ u16 gmch_ctrl;
21079+ unsigned long stolen_size;
21080+ uint32_t *gtt_map;
21081+ struct rw_semaphore sem;
21082+};
21083+
21084+struct psb_use_base {
21085+ struct list_head head;
21086+ struct drm_fence_object *fence;
21087+ unsigned int reg;
21088+ unsigned long offset;
21089+ unsigned int dm;
21090+};
21091+
21092+struct psb_buflist_item;
21093+
21094+struct psb_msvdx_cmd_queue {
21095+ struct list_head head;
21096+ void *cmd;
21097+ unsigned long cmd_size;
21098+ uint32_t sequence;
21099+};
21100+
21101+struct drm_psb_private {
21102+ unsigned long chipset;
21103+ uint8_t psb_rev_id;
21104+
21105+ struct psb_xhw_buf resume_buf;
21106+ struct drm_psb_dev_info_arg dev_info;
21107+ struct drm_psb_uopt uopt;
21108+
21109+ struct psb_gtt *pg;
21110+
21111+ struct page *scratch_page;
21112+ struct page *comm_page;
21113+
21114+ volatile uint32_t *comm;
21115+ uint32_t comm_mmu_offset;
21116+ uint32_t mmu_2d_offset;
21117+ uint32_t sequence[PSB_NUM_ENGINES];
21118+ uint32_t last_sequence[PSB_NUM_ENGINES];
21119+ int idle[PSB_NUM_ENGINES];
21120+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
21121+ int engine_lockup_2d;
21122+
21123+ struct psb_mmu_driver *mmu;
21124+ struct psb_mmu_pd *pf_pd;
21125+
21126+ uint8_t *sgx_reg;
21127+ uint8_t *vdc_reg;
21128+ uint8_t *msvdx_reg;
21129+
21130+ /*
21131+ * MSVDX
21132+ */
21133+ int msvdx_needs_reset;
21134+ int has_msvdx;
21135+ uint32_t gatt_free_offset;
21136+ atomic_t msvdx_mmu_invaldc;
21137+
21138+ /*
21139+ * Fencing / irq.
21140+ */
21141+
21142+ uint32_t sgx_irq_mask;
21143+ uint32_t sgx2_irq_mask;
21144+ uint32_t vdc_irq_mask;
21145+
21146+ spinlock_t irqmask_lock;
21147+ spinlock_t sequence_lock;
21148+ int fence0_irq_on;
21149+ int irq_enabled;
21150+ unsigned int irqen_count_2d;
21151+ wait_queue_head_t event_2d_queue;
21152+
21153+#ifdef FIX_TG_16
21154+ wait_queue_head_t queue_2d;
21155+ atomic_t lock_2d;
21156+ atomic_t ta_wait_2d;
21157+ atomic_t ta_wait_2d_irq;
21158+ atomic_t waiters_2d;
21159+#else
21160+ struct mutex mutex_2d;
21161+#endif
21162+ uint32_t msvdx_current_sequence;
21163+ uint32_t msvdx_last_sequence;
21164+#define MSVDX_MAX_IDELTIME (HZ * 30)
21165+ uint32_t msvdx_finished_sequence;
21166+ uint32_t msvdx_start_idle;
21167+ unsigned long msvdx_idle_start_jiffies;
21168+
21169+ int fence2_irq_on;
21170+
21171+ /*
21172+ * MSVDX Rendec Memory
21173+ */
21174+ struct drm_buffer_object *ccb0;
21175+ uint32_t base_addr0;
21176+ struct drm_buffer_object *ccb1;
21177+ uint32_t base_addr1;
21178+
21179+ /*
21180+ * Memory managers
21181+ */
21182+
21183+ int have_vram;
21184+ int have_tt;
21185+ int have_mem_mmu;
21186+ int have_mem_aper;
21187+ int have_mem_kernel;
21188+ int have_mem_pds;
21189+ int have_mem_rastgeom;
21190+ struct mutex temp_mem;
21191+
21192+ /*
21193+ * Relocation buffer mapping.
21194+ */
21195+
21196+ spinlock_t reloc_lock;
21197+ unsigned int rel_mapped_pages;
21198+ wait_queue_head_t rel_mapped_queue;
21199+
21200+ /*
21201+ * SAREA
21202+ */
21203+ struct drm_psb_sarea *sarea_priv;
21204+
21205+ /*
21206+ * LVDS info
21207+ */
21208+ int backlight_duty_cycle; /* restore backlight to this value */
21209+ bool panel_wants_dither;
21210+ struct drm_display_mode *panel_fixed_mode;
21211+
21212+ /*
21213+ * Register state
21214+ */
21215+ uint32_t saveDSPACNTR;
21216+ uint32_t saveDSPBCNTR;
21217+ uint32_t savePIPEACONF;
21218+ uint32_t savePIPEBCONF;
21219+ uint32_t savePIPEASRC;
21220+ uint32_t savePIPEBSRC;
21221+ uint32_t saveFPA0;
21222+ uint32_t saveFPA1;
21223+ uint32_t saveDPLL_A;
21224+ uint32_t saveDPLL_A_MD;
21225+ uint32_t saveHTOTAL_A;
21226+ uint32_t saveHBLANK_A;
21227+ uint32_t saveHSYNC_A;
21228+ uint32_t saveVTOTAL_A;
21229+ uint32_t saveVBLANK_A;
21230+ uint32_t saveVSYNC_A;
21231+ uint32_t saveDSPASTRIDE;
21232+ uint32_t saveDSPASIZE;
21233+ uint32_t saveDSPAPOS;
21234+ uint32_t saveDSPABASE;
21235+ uint32_t saveDSPASURF;
21236+ uint32_t saveFPB0;
21237+ uint32_t saveFPB1;
21238+ uint32_t saveDPLL_B;
21239+ uint32_t saveDPLL_B_MD;
21240+ uint32_t saveHTOTAL_B;
21241+ uint32_t saveHBLANK_B;
21242+ uint32_t saveHSYNC_B;
21243+ uint32_t saveVTOTAL_B;
21244+ uint32_t saveVBLANK_B;
21245+ uint32_t saveVSYNC_B;
21246+ uint32_t saveDSPBSTRIDE;
21247+ uint32_t saveDSPBSIZE;
21248+ uint32_t saveDSPBPOS;
21249+ uint32_t saveDSPBBASE;
21250+ uint32_t saveDSPBSURF;
21251+ uint32_t saveVCLK_DIVISOR_VGA0;
21252+ uint32_t saveVCLK_DIVISOR_VGA1;
21253+ uint32_t saveVCLK_POST_DIV;
21254+ uint32_t saveVGACNTRL;
21255+ uint32_t saveADPA;
21256+ uint32_t saveLVDS;
21257+ uint32_t saveDVOA;
21258+ uint32_t saveDVOB;
21259+ uint32_t saveDVOC;
21260+ uint32_t savePP_ON;
21261+ uint32_t savePP_OFF;
21262+ uint32_t savePP_CONTROL;
21263+ uint32_t savePP_CYCLE;
21264+ uint32_t savePFIT_CONTROL;
21265+ uint32_t savePaletteA[256];
21266+ uint32_t savePaletteB[256];
21267+ uint32_t saveBLC_PWM_CTL;
21268+ uint32_t saveCLOCKGATING;
21269+
21270+ /*
21271+ * USE code base register management.
21272+ */
21273+
21274+ struct drm_reg_manager use_manager;
21275+
21276+ /*
21277+ * Xhw
21278+ */
21279+
21280+ uint32_t *xhw;
21281+ struct drm_buffer_object *xhw_bo;
21282+ struct drm_bo_kmap_obj xhw_kmap;
21283+ struct list_head xhw_in;
21284+ spinlock_t xhw_lock;
21285+ atomic_t xhw_client;
21286+ struct drm_file *xhw_file;
21287+ wait_queue_head_t xhw_queue;
21288+ wait_queue_head_t xhw_caller_queue;
21289+ struct mutex xhw_mutex;
21290+ struct psb_xhw_buf *xhw_cur_buf;
21291+ int xhw_submit_ok;
21292+ int xhw_on;
21293+
21294+ /*
21295+ * Scheduling.
21296+ */
21297+
21298+ struct mutex reset_mutex;
21299+ struct mutex cmdbuf_mutex;
21300+ struct psb_scheduler scheduler;
21301+ struct psb_buflist_item *buffers;
21302+ uint32_t ta_mem_pages;
21303+ struct psb_ta_mem *ta_mem;
21304+ int force_ta_mem_load;
21305+
21306+ /*
21307+ * Watchdog
21308+ */
21309+
21310+ spinlock_t watchdog_lock;
21311+ struct timer_list watchdog_timer;
21312+ struct work_struct watchdog_wq;
21313+ struct work_struct msvdx_watchdog_wq;
21314+ int timer_available;
21315+
21316+ /*
21317+ * msvdx command queue
21318+ */
21319+ spinlock_t msvdx_lock;
21320+ struct mutex msvdx_mutex;
21321+ struct list_head msvdx_queue;
21322+ int msvdx_busy;
21323+
21324+};
21325+
21326+struct psb_mmu_driver;
21327+
21328+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
21329+ int trap_pagefaults,
21330+ int invalid_type,
21331+ atomic_t *msvdx_mmu_invaldc);
21332+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
21333+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver);
21334+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
21335+ uint32_t gtt_start, uint32_t gtt_pages);
21336+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
21337+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
21338+ int trap_pagefaults,
21339+ int invalid_type);
21340+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
21341+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
21342+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
21343+ unsigned long address,
21344+ uint32_t num_pages);
21345+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
21346+ uint32_t start_pfn,
21347+ unsigned long address,
21348+ uint32_t num_pages, int type);
21349+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
21350+ unsigned long *pfn);
21351+
21352+/*
21353+ * Enable / disable MMU for different requestors.
21354+ */
21355+
21356+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
21357+ uint32_t mask);
21358+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
21359+ uint32_t mask);
21360+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
21361+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
21362+ unsigned long address, uint32_t num_pages,
21363+ uint32_t desired_tile_stride,
21364+ uint32_t hw_tile_stride, int type);
21365+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
21366+ uint32_t num_pages,
21367+ uint32_t desired_tile_stride,
21368+ uint32_t hw_tile_stride);
21369+/*
21370+ * psb_sgx.c
21371+ */
21372+
21373+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
21374+ uint32_t sequence);
21375+extern void psb_init_2d(struct drm_psb_private *dev_priv);
21376+extern int psb_idle_2d(struct drm_device *dev);
21377+extern int psb_idle_3d(struct drm_device *dev);
21378+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
21379+ uint32_t src_offset,
21380+ uint32_t dst_offset, uint32_t pages,
21381+ int direction);
21382+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
21383+ struct drm_file *file_priv);
21384+extern int psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
21385+ unsigned int cmds);
21386+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
21387+ struct drm_buffer_object *cmd_buffer,
21388+ unsigned long cmd_offset,
21389+ unsigned long cmd_size, int engine,
21390+ uint32_t * copy_buffer);
21391+extern void psb_fence_or_sync(struct drm_file *priv,
21392+ int engine,
21393+ struct drm_psb_cmdbuf_arg *arg,
21394+ struct drm_fence_arg *fence_arg,
21395+ struct drm_fence_object **fence_p);
21396+extern void psb_init_disallowed(void);
21397+
21398+/*
21399+ * psb_irq.c
21400+ */
21401+
21402+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
21403+extern void psb_irq_preinstall(struct drm_device *dev);
21404+extern int psb_irq_postinstall(struct drm_device *dev);
21405+extern void psb_irq_uninstall(struct drm_device *dev);
21406+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
21407+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
21408+
21409+/*
21410+ * psb_fence.c
21411+ */
21412+
21413+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
21414+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
21415+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
21416+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
21417+ uint32_t class);
21418+extern int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
21419+ uint32_t flags, uint32_t * sequence,
21420+ uint32_t * native_type);
21421+extern void psb_fence_error(struct drm_device *dev,
21422+ uint32_t class,
21423+ uint32_t sequence, uint32_t type, int error);
21424+
21425+/* MSVDX stuff */
21426+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
21427+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
21428+extern int psb_hw_info_ioctl(struct drm_device *dev, void *data,
21429+ struct drm_file *file_priv);
21430+
21431+/*
21432+ * psb_buffer.c
21433+ */
21434+extern struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev);
21435+extern int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
21436+ uint32_t * type);
21437+extern uint32_t psb_evict_mask(struct drm_buffer_object *bo);
21438+extern int psb_invalidate_caches(struct drm_device *dev, uint64_t flags);
21439+extern int psb_init_mem_type(struct drm_device *dev, uint32_t type,
21440+ struct drm_mem_type_manager *man);
21441+extern int psb_move(struct drm_buffer_object *bo,
21442+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
21443+extern int psb_tbe_size(struct drm_device *dev, unsigned long num_pages);
21444+
21445+/*
21446+ * psb_gtt.c
21447+ */
21448+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
21449+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
21450+ unsigned offset_pages, unsigned num_pages,
21451+ unsigned desired_tile_stride,
21452+ unsigned hw_tile_stride, int type);
21453+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
21454+ unsigned num_pages,
21455+ unsigned desired_tile_stride,
21456+ unsigned hw_tile_stride);
21457+
21458+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
21459+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
21460+
21461+/*
21462+ * psb_fb.c
21463+ */
21464+extern int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
21465+extern int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
21466+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
21467+ struct drm_file *file_priv);
21468+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
21469+ struct drm_file *file_priv);
21470+extern void psbfb_suspend(struct drm_device *dev);
21471+extern void psbfb_resume(struct drm_device *dev);
21472+
21473+/*
21474+ * psb_reset.c
21475+ */
21476+
21477+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
21478+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
21479+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
21480+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
21481+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
21482+
21483+/*
21484+ * psb_regman.c
21485+ */
21486+
21487+extern void psb_takedown_use_base(struct drm_psb_private *dev_priv);
21488+extern int psb_grab_use_base(struct drm_psb_private *dev_priv,
21489+ unsigned long dev_virtual,
21490+ unsigned long size,
21491+ unsigned int data_master,
21492+ uint32_t fence_class,
21493+ uint32_t fence_type,
21494+ int no_wait,
21495+ int ignore_signals,
21496+ int *r_reg, uint32_t * r_offset);
21497+extern int psb_init_use_base(struct drm_psb_private *dev_priv,
21498+ unsigned int reg_start, unsigned int reg_num);
21499+
21500+/*
21501+ * psb_xhw.c
21502+ */
21503+
21504+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
21505+ struct drm_file *file_priv);
21506+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
21507+ struct drm_file *file_priv);
21508+extern int psb_xhw_init(struct drm_device *dev);
21509+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
21510+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
21511+ struct drm_file *file_priv, int closing);
21512+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
21513+ struct psb_xhw_buf *buf,
21514+ uint32_t fire_flags,
21515+ uint32_t hw_context,
21516+ uint32_t * cookie,
21517+ uint32_t * oom_cmds,
21518+ uint32_t num_oom_cmds,
21519+ uint32_t offset,
21520+ uint32_t engine, uint32_t flags);
21521+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
21522+ struct psb_xhw_buf *buf, uint32_t fire_flags);
21523+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
21524+ struct psb_xhw_buf *buf,
21525+ uint32_t w,
21526+ uint32_t h,
21527+ uint32_t * hw_cookie,
21528+ uint32_t * bo_size,
21529+ uint32_t * clear_p_start,
21530+ uint32_t * clear_num_pages);
21531+
21532+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
21533+ struct psb_xhw_buf *buf);
21534+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
21535+ struct psb_xhw_buf *buf, uint32_t * value);
21536+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
21537+ struct psb_xhw_buf *buf,
21538+ uint32_t pages,
21539+ uint32_t * hw_cookie, uint32_t * size);
21540+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
21541+ struct psb_xhw_buf *buf, uint32_t * cookie);
21542+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
21543+ struct psb_xhw_buf *buf,
21544+ uint32_t * cookie,
21545+ uint32_t * bca,
21546+ uint32_t * rca, uint32_t * flags);
21547+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
21548+ struct psb_xhw_buf *buf);
21549+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
21550+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
21551+ struct psb_xhw_buf *buf);
21552+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
21553+ struct psb_xhw_buf *buf, uint32_t * cookie);
21554+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
21555+ struct psb_xhw_buf *buf,
21556+ uint32_t flags,
21557+ uint32_t param_offset,
21558+ uint32_t pt_offset, uint32_t * hw_cookie);
21559+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
21560+ struct psb_xhw_buf *buf);
21561+
21562+/*
21563+ * psb_schedule.c: HW bug fixing.
21564+ */
21565+
21566+#ifdef FIX_TG_16
21567+
21568+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
21569+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
21570+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
21571+
21572+#else
21573+
21574+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
21575+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
21576+
21577+#endif
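+
+/*
+ * Editor's sketch (illustrative usage): both variants give the same
+ * critical-section shape around 2D command submission, e.g.
+ *
+ *	psb_2d_lock(dev_priv);
+ *	ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
+ *	psb_2d_unlock(dev_priv);
+ */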
21578+
21579+/*
21580+ * Utilities
21581+ */
21582+
21583+#define PSB_ALIGN_TO(_val, _align) \
21584+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
21585+#define PSB_WVDC32(_val, _offs) \
21586+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
21587+#define PSB_RVDC32(_offs) \
21588+ ioread32(dev_priv->vdc_reg + (_offs))
21589+#define PSB_WSGX32(_val, _offs) \
21590+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
21591+#define PSB_RSGX32(_offs) \
21592+ ioread32(dev_priv->sgx_reg + (_offs))
21593+#define PSB_WMSVDX32(_val, _offs) \
21594+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
21595+#define PSB_RMSVDX32(_offs) \
21596+ ioread32(dev_priv->msvdx_reg + (_offs))
21597+
21598+#define PSB_ALPL(_val, _base) \
21599+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
21600+#define PSB_ALPLM(_val, _base) \
21601+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
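+
+/*
+ * Editor's sketch (illustrative only; the FOO_* names are hypothetical):
+ *	PSB_ALIGN_TO(0x1001, 0x1000) == 0x2000 (round up to 4K)
+ * PSB_ALPL/PSB_ALPLM pack an aligned value into a register field via
+ * token pasting; given
+ *	#define FOO_ALIGNSHIFT 4
+ *	#define FOO_SHIFT      8
+ *	#define FOO_MASK       0x0000ff00
+ * PSB_ALPLM(0x30, FOO) expands to (((0x30 >> 4) << 8) & 0xff00) == 0x300.
+ */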
21602+
21603+#define PSB_D_RENDER (1 << 16)
21604+
21605+#define PSB_D_GENERAL (1 << 0)
21606+#define PSB_D_INIT (1 << 1)
21607+#define PSB_D_IRQ (1 << 2)
21608+#define PSB_D_FW (1 << 3)
21609+#define PSB_D_PERF (1 << 4)
21610+#define PSB_D_TMP (1 << 5)
21611+#define PSB_D_RELOC (1 << 6)
21612+
21613+extern int drm_psb_debug;
21614+extern int drm_psb_no_fb;
21615+extern int drm_psb_disable_vsync;
21616+
21617+#define PSB_DEBUG_FW(_fmt, _arg...) \
21618+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
21619+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
21620+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
21621+#define PSB_DEBUG_INIT(_fmt, _arg...) \
21622+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
21623+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
21624+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
21625+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
21626+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
21627+#define PSB_DEBUG_PERF(_fmt, _arg...) \
21628+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
21629+#define PSB_DEBUG_TMP(_fmt, _arg...) \
21630+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
21631+#define PSB_DEBUG_RELOC(_fmt, _arg...) \
21632+ PSB_DEBUG(PSB_D_RELOC, _fmt, ##_arg)
21633+
21634+#if DRM_DEBUG_CODE
21635+#define PSB_DEBUG(_flag, _fmt, _arg...) \
21636+ do { \
21637+ if (unlikely((_flag) & drm_psb_debug)) \
21638+ printk(KERN_DEBUG \
21639+			       "[psb:0x%02x:%s] " _fmt, _flag, \
21640+			       __func__, ##_arg); \
21641+ } while (0)
21642+#else
21643+#define PSB_DEBUG(_flag, _fmt, _arg...) do { } while (0)
21644+#endif
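+
+/*
+ * Editor's sketch (illustrative usage, not part of the original patch):
+ * callers pick the wrapper for their subsystem, and output is gated at
+ * runtime by the drm_psb_debug bitmask declared above, e.g.
+ *
+ *	PSB_DEBUG_IRQ("vdc_stat 0x%08x\n", vdc_stat);
+ *
+ * prints only when drm_psb_debug has PSB_D_IRQ (bit 2) set; vdc_stat is
+ * a hypothetical local variable.
+ */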
21645+
21646+#endif
21647Index: linux-2.6.27/drivers/gpu/drm/psb/psb_fb.c
21648===================================================================
21649--- /dev/null 1970-01-01 00:00:00.000000000 +0000
21650+++ linux-2.6.27/drivers/gpu/drm/psb/psb_fb.c 2009-02-05 13:29:33.000000000 +0000
21651@@ -0,0 +1,1330 @@
21652+/**************************************************************************
21653+ * Copyright (c) 2007, Intel Corporation.
21654+ * All Rights Reserved.
21655+ *
21656+ * This program is free software; you can redistribute it and/or modify it
21657+ * under the terms and conditions of the GNU General Public License,
21658+ * version 2, as published by the Free Software Foundation.
21659+ *
21660+ * This program is distributed in the hope it will be useful, but WITHOUT
21661+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21662+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21663+ * more details.
21664+ *
21665+ * You should have received a copy of the GNU General Public License along with
21666+ * this program; if not, write to the Free Software Foundation, Inc.,
21667+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21668+ *
21669+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
21670+ * develop this driver.
21671+ *
21672+ **************************************************************************/
21673+
21674+#include <linux/module.h>
21675+#include <linux/kernel.h>
21676+#include <linux/errno.h>
21677+#include <linux/string.h>
21678+#include <linux/mm.h>
21679+#include <linux/tty.h>
21680+#include <linux/slab.h>
21681+#include <linux/delay.h>
21682+#include <linux/fb.h>
21683+#include <linux/init.h>
21684+#include <linux/console.h>
21685+
21686+#include "drmP.h"
21687+#include "drm.h"
21688+#include "drm_crtc.h"
21689+#include "psb_drv.h"
21690+
21691+#define SII_1392_WA
21692+#ifdef SII_1392_WA
21693+extern int SII_1392;
21694+#endif
21695+
21696+struct psbfb_vm_info {
21697+ struct drm_buffer_object *bo;
21698+ struct address_space *f_mapping;
21699+ struct mutex vm_mutex;
21700+ atomic_t refcount;
21701+};
21702+
21703+struct psbfb_par {
21704+ struct drm_device *dev;
21705+ struct drm_crtc *crtc;
21706+ struct drm_output *output;
21707+ struct psbfb_vm_info *vi;
21708+ int dpms_state;
21709+};
21710+
21711+static void psbfb_vm_info_deref(struct psbfb_vm_info **vi)
21712+{
21713+ struct psbfb_vm_info *tmp = *vi;
21714+ *vi = NULL;
21715+ if (atomic_dec_and_test(&tmp->refcount)) {
21716+ drm_bo_usage_deref_unlocked(&tmp->bo);
21717+ drm_free(tmp, sizeof(*tmp), DRM_MEM_MAPS);
21718+ }
21719+}
21720+
21721+static struct psbfb_vm_info *psbfb_vm_info_ref(struct psbfb_vm_info *vi)
21722+{
21723+ atomic_inc(&vi->refcount);
21724+ return vi;
21725+}
21726+
21727+static struct psbfb_vm_info *psbfb_vm_info_create(void)
21728+{
21729+ struct psbfb_vm_info *vi;
21730+
21731+ vi = drm_calloc(1, sizeof(*vi), DRM_MEM_MAPS);
21732+ if (!vi)
21733+ return NULL;
21734+
21735+ mutex_init(&vi->vm_mutex);
21736+ atomic_set(&vi->refcount, 1);
21737+ return vi;
21738+}
21739+
21740+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
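+
+/*
+ * Editor's note (worked example, not part of the original patch):
+ * CMAP_TOHW rescales a 16-bit colormap entry to a _width-bit hardware
+ * field with rounding: CMAP_TOHW(0xFFFF, 5) == 0x1F and
+ * CMAP_TOHW(0x0000, 5) == 0, so full scale maps to full scale.
+ */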
21741+
21742+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
21743+ unsigned blue, unsigned transp, struct fb_info *info)
21744+{
21745+ struct psbfb_par *par = info->par;
21746+ struct drm_crtc *crtc = par->crtc;
21747+ uint32_t v;
21748+
21749+ if (!crtc->fb)
21750+ return -ENOMEM;
21751+
21752+ if (regno > 255)
21753+ return 1;
21754+
21755+ if (crtc->funcs->gamma_set)
21756+ crtc->funcs->gamma_set(crtc, red, green, blue, regno);
21757+
21758+ red = CMAP_TOHW(red, info->var.red.length);
21759+ blue = CMAP_TOHW(blue, info->var.blue.length);
21760+ green = CMAP_TOHW(green, info->var.green.length);
21761+ transp = CMAP_TOHW(transp, info->var.transp.length);
21762+
21763+ v = (red << info->var.red.offset) |
21764+ (green << info->var.green.offset) |
21765+ (blue << info->var.blue.offset) |
21766+ (transp << info->var.transp.offset);
21767+
21768+ switch (crtc->fb->bits_per_pixel) {
21769+ case 16:
21770+ ((uint32_t *) info->pseudo_palette)[regno] = v;
21771+ break;
21772+ case 24:
21773+ case 32:
21774+ ((uint32_t *) info->pseudo_palette)[regno] = v;
21775+ break;
21776+ }
21777+
21778+ return 0;
21779+}
21780+
21781+static int psbfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
21782+{
21783+ struct psbfb_par *par = info->par;
21784+ struct drm_device *dev = par->dev;
21785+ struct drm_framebuffer *fb = par->crtc->fb;
21786+ struct drm_display_mode *drm_mode;
21787+ struct drm_output *output;
21788+ int depth;
21789+ int pitch;
21790+ int bpp = var->bits_per_pixel;
21791+
21792+ if (!fb)
21793+ return -ENOMEM;
21794+
21795+ if (!var->pixclock)
21796+ return -EINVAL;
21797+
21798+ /* don't support virtuals for now */
21799+ if (var->xres_virtual > var->xres)
21800+ return -EINVAL;
21801+
21802+ if (var->yres_virtual > var->yres)
21803+ return -EINVAL;
21804+
21805+ switch (bpp) {
21806+ case 8:
21807+ depth = 8;
21808+ break;
21809+ case 16:
21810+ depth = (var->green.length == 6) ? 16 : 15;
21811+ break;
21812+ case 24: /* assume this is 32bpp / depth 24 */
21813+ bpp = 32;
21814+ /* fallthrough */
21815+ case 32:
21816+ depth = (var->transp.length > 0) ? 32 : 24;
21817+ break;
21818+ default:
21819+ return -EINVAL;
21820+ }
21821+
21822+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
21823+
21824+ /* Check that we can resize */
21825+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
21826+#if 1
21827+ /* Need to resize the fb object.
21828+ * But the generic fbdev code doesn't really understand
21829+ * that we can do this. So disable for now.
21830+ */
21831+ DRM_INFO("Can't support requested size, too big!\n");
21832+ return -EINVAL;
21833+#else
21834+ int ret;
21835+ struct drm_buffer_object *fbo = NULL;
21836+ struct drm_bo_kmap_obj tmp_kmap;
21837+
21838+ /* a temporary BO to check if we could resize in setpar.
21839+ * Therefore no need to set NO_EVICT.
21840+ */
21841+ ret = drm_buffer_object_create(dev,
21842+ pitch * var->yres,
21843+ drm_bo_type_kernel,
21844+ DRM_BO_FLAG_READ |
21845+ DRM_BO_FLAG_WRITE |
21846+ DRM_BO_FLAG_MEM_TT |
21847+ DRM_BO_FLAG_MEM_VRAM,
21848+ DRM_BO_HINT_DONT_FENCE,
21849+ 0, 0, &fbo);
21850+ if (ret || !fbo)
21851+ return -ENOMEM;
21852+
21853+ ret = drm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
21854+ if (ret) {
21855+ drm_bo_usage_deref_unlocked(&fbo);
21856+ return -EINVAL;
21857+ }
21858+
21859+ drm_bo_kunmap(&tmp_kmap);
21860+ /* destroy our current fbo! */
21861+ drm_bo_usage_deref_unlocked(&fbo);
21862+#endif
21863+ }
21864+
21865+ switch (depth) {
21866+ case 8:
21867+ var->red.offset = 0;
21868+ var->green.offset = 0;
21869+ var->blue.offset = 0;
21870+ var->red.length = 8;
21871+ var->green.length = 8;
21872+ var->blue.length = 8;
21873+ var->transp.length = 0;
21874+ var->transp.offset = 0;
21875+ break;
21876+ case 15:
21877+ var->red.offset = 10;
21878+ var->green.offset = 5;
21879+ var->blue.offset = 0;
21880+ var->red.length = 5;
21881+ var->green.length = 5;
21882+ var->blue.length = 5;
21883+ var->transp.length = 1;
21884+ var->transp.offset = 15;
21885+ break;
21886+ case 16:
21887+ var->red.offset = 11;
21888+ var->green.offset = 5;
21889+ var->blue.offset = 0;
21890+ var->red.length = 5;
21891+ var->green.length = 6;
21892+ var->blue.length = 5;
21893+ var->transp.length = 0;
21894+ var->transp.offset = 0;
21895+ break;
21896+ case 24:
21897+ var->red.offset = 16;
21898+ var->green.offset = 8;
21899+ var->blue.offset = 0;
21900+ var->red.length = 8;
21901+ var->green.length = 8;
21902+ var->blue.length = 8;
21903+ var->transp.length = 0;
21904+ var->transp.offset = 0;
21905+ break;
21906+ case 32:
21907+ var->red.offset = 16;
21908+ var->green.offset = 8;
21909+ var->blue.offset = 0;
21910+ var->red.length = 8;
21911+ var->green.length = 8;
21912+ var->blue.length = 8;
21913+ var->transp.length = 8;
21914+ var->transp.offset = 24;
21915+ break;
21916+ default:
21917+ return -EINVAL;
21918+ }
21919+
21920+#if 0
21921+ /* Here we walk the output mode list and look for modes. If we haven't
21922+ * got it, then bail. Not very nice, so this is disabled.
21923+ * In the set_par code, we create our mode based on the incoming
21924+ * parameters. Nicer, but may not be desired by some.
21925+ */
21926+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
21927+ if (output->crtc == par->crtc)
21928+ break;
21929+ }
21930+
21931+ list_for_each_entry(drm_mode, &output->modes, head) {
21932+ if (drm_mode->hdisplay == var->xres &&
21933+ drm_mode->vdisplay == var->yres && drm_mode->clock != 0)
21934+ break;
21935+ }
21936+
21937+ if (!drm_mode)
21938+ return -EINVAL;
21939+#else
21940+ (void)dev; /* silence warnings */
21941+ (void)output;
21942+ (void)drm_mode;
21943+#endif
21944+
21945+ return 0;
21946+}
21947+
21948+static int psbfb_move_fb_bo(struct fb_info *info, struct drm_buffer_object *bo,
21949+ uint64_t mem_type_flags)
21950+{
21951+ struct psbfb_par *par;
21952+ loff_t holelen;
21953+ int ret;
21954+
21955+ /*
21956+ * Kill all user-space mappings of this device. They will be
21957+ * faulted back using nopfn when accessed.
21958+ */
21959+
21960+ par = info->par;
21961+ holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
21962+ mutex_lock(&par->vi->vm_mutex);
21963+ if (par->vi->f_mapping) {
21964+ unmap_mapping_range(par->vi->f_mapping, 0, holelen, 1);
21965+ }
21966+
21967+ ret = drm_bo_do_validate(bo,
21968+ mem_type_flags,
21969+ DRM_BO_MASK_MEM |
21970+ DRM_BO_FLAG_NO_EVICT,
21971+ DRM_BO_HINT_DONT_FENCE, 0, 1, NULL);
21972+
21973+ mutex_unlock(&par->vi->vm_mutex);
21974+ return ret;
21975+}
21976+
21977+/* this will let fbcon do the mode init */
21978+static int psbfb_set_par(struct fb_info *info)
21979+{
21980+ struct psbfb_par *par = info->par;
21981+ struct drm_framebuffer *fb = par->crtc->fb;
21982+ struct drm_device *dev = par->dev;
21983+ struct drm_display_mode *drm_mode;
21984+ struct fb_var_screeninfo *var = &info->var;
21985+ struct drm_psb_private *dev_priv = dev->dev_private;
21986+ struct drm_output *output;
21987+ int pitch;
21988+ int depth;
21989+ int bpp = var->bits_per_pixel;
21990+
21991+ if (!fb)
21992+ return -ENOMEM;
21993+
21994+ switch (bpp) {
21995+ case 8:
21996+ depth = 8;
21997+ break;
21998+ case 16:
21999+ depth = (var->green.length == 6) ? 16 : 15;
22000+ break;
22001+ case 24: /* assume this is 32bpp / depth 24 */
22002+ bpp = 32;
22003+ /* fallthrough */
22004+ case 32:
22005+ depth = (var->transp.length > 0) ? 32 : 24;
22006+ break;
22007+ default:
22008+ return -EINVAL;
22009+ }
22010+
22011+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
22012+
22013+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
22014+#if 1
22015+ /* Need to resize the fb object.
22016+ * But the generic fbdev code doesn't really understand
22017+ * that we can do this. So disable for now.
22018+ */
22019+ DRM_INFO("Can't support requested size, too big!\n");
22020+ return -EINVAL;
22021+#else
22022+ int ret;
22023+ struct drm_buffer_object *fbo = NULL, *tfbo;
22024+ struct drm_bo_kmap_obj tmp_kmap, tkmap;
22025+
22026+ ret = drm_buffer_object_create(dev,
22027+ pitch * var->yres,
22028+ drm_bo_type_kernel,
22029+ DRM_BO_FLAG_READ |
22030+ DRM_BO_FLAG_WRITE |
22031+ DRM_BO_FLAG_MEM_TT |
22032+ DRM_BO_FLAG_MEM_VRAM |
22033+ DRM_BO_FLAG_NO_EVICT,
22034+ DRM_BO_HINT_DONT_FENCE,
22035+ 0, 0, &fbo);
22036+ if (ret || !fbo) {
22037+ DRM_ERROR
22038+ ("failed to allocate new resized framebuffer\n");
22039+ return -ENOMEM;
22040+ }
22041+
22042+ ret = drm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
22043+ if (ret) {
22044+ DRM_ERROR("failed to kmap framebuffer.\n");
22045+ drm_bo_usage_deref_unlocked(&fbo);
22046+ return -EINVAL;
22047+ }
22048+
22049+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
22050+ fb->height, fb->offset, fbo);
22051+
22052+ /* set new screen base */
22053+ info->screen_base = tmp_kmap.virtual;
22054+
22055+ tkmap = fb->kmap;
22056+ fb->kmap = tmp_kmap;
22057+ drm_bo_kunmap(&tkmap);
22058+
22059+ tfbo = fb->bo;
22060+ fb->bo = fbo;
22061+ drm_bo_usage_deref_unlocked(&tfbo);
22062+#endif
22063+ }
22064+
22065+ fb->offset = fb->bo->offset - dev_priv->pg->gatt_start;
22066+ fb->width = var->xres;
22067+ fb->height = var->yres;
22068+ fb->bits_per_pixel = bpp;
22069+ fb->pitch = pitch;
22070+ fb->depth = depth;
22071+
22072+ info->fix.line_length = fb->pitch;
22073+ info->fix.visual =
22074+ (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
22075+
22076+	/* some fbdev apps don't want these to change */
22077+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
22078+
22079+	/* We have to update the screen base address because the fb->bo
22080+	 * may have been moved by the previous drm_bo_do_validate().
22081+	 * Otherwise the outputs may go black when leaving X and
22082+	 * re-entering the console. */
22083+ info->screen_base = fb->kmap.virtual;
22084+
22085+#if 0
22086+ /* relates to resize - disable */
22087+ info->fix.smem_len = info->fix.line_length * var->yres;
22088+ info->screen_size = info->fix.smem_len; /* ??? */
22089+#endif
22090+
22091+	/* Should we walk the output's mode list or just create our own?
22092+	 * For now, we create and destroy a mode based on the incoming
22093+	 * parameters. The commented-out code below scans the output
22094+	 * list instead.
22095+	 */
22096+#if 0
22097+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
22098+ if (output->crtc == par->crtc)
22099+ break;
22100+ }
22101+
22102+ list_for_each_entry(drm_mode, &output->modes, head) {
22103+ if (drm_mode->hdisplay == var->xres &&
22104+ drm_mode->vdisplay == var->yres && drm_mode->clock != 0)
22105+ break;
22106+ }
22107+#else
22108+ (void)output; /* silence warning */
22109+
22110+ drm_mode = drm_mode_create(dev);
22111+ drm_mode->hdisplay = var->xres;
22112+ drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
22113+ drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
22114+ drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
22115+ drm_mode->vdisplay = var->yres;
22116+ drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
22117+ drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
22118+ drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
22119+ drm_mode->clock = PICOS2KHZ(var->pixclock);
22120+ drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
22121+ drm_mode_set_name(drm_mode);
22122+ drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
22123+#endif
22124+
22125+ if (!drm_crtc_set_mode(par->crtc, drm_mode, 0, 0))
22126+ return -EINVAL;
22127+
22128+ /* Have to destroy our created mode if we're not searching the mode
22129+ * list for it.
22130+ */
22132+	drm_mode_destroy(dev, drm_mode);
22134+
22135+ return 0;
22136+}
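+
+/*
+ * Editor's note (worked example, not part of the original patch): the
+ * fb_var_screeninfo -> drm_display_mode arithmetic above, for a standard
+ * 1024x768-60 timing (right_margin 24, hsync_len 136, left_margin 160,
+ * pixclock 15384 ps), gives
+ *	hsync_start = 1024 + 24  = 1048
+ *	hsync_end   = 1048 + 136 = 1184
+ *	htotal      = 1184 + 160 = 1344
+ * and clock = PICOS2KHZ(15384) ~= 65002 kHz, the usual 65 MHz dot clock.
+ */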
22137+
22138+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);
22139+
22140+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
22141+ uint32_t dst_offset, uint32_t dst_stride,
22142+ uint32_t dst_format, uint16_t dst_x,
22143+ uint16_t dst_y, uint16_t size_x,
22144+ uint16_t size_y, uint32_t fill)
22145+{
22146+ uint32_t buffer[10];
22147+ uint32_t *buf;
22148+ int ret;
22149+
22150+ buf = buffer;
22151+
22152+ *buf++ = PSB_2D_FENCE_BH;
22153+
22154+	*buf++ = PSB_2D_DST_SURF_BH | dst_format |
22155+		 (dst_stride << PSB_2D_DST_STRIDE_SHIFT);
22156+	*buf++ = dst_offset;
22157+
22158+	*buf++ = PSB_2D_BLIT_BH |
22159+		 PSB_2D_ROT_NONE |
22160+		 PSB_2D_COPYORDER_TL2BR |
22161+		 PSB_2D_DSTCK_DISABLE |
22162+		 PSB_2D_SRCCK_DISABLE |
22163+		 PSB_2D_USE_FILL |
22164+		 PSB_2D_ROP3_PATCOPY;
22165+
22166+	*buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
22167+	*buf++ = (dst_x << PSB_2D_DST_XSTART_SHIFT) |
22168+		 (dst_y << PSB_2D_DST_YSTART_SHIFT);
22169+	*buf++ = (size_x << PSB_2D_DST_XSIZE_SHIFT) |
22170+		 (size_y << PSB_2D_DST_YSIZE_SHIFT);
22173+ *buf++ = PSB_2D_FLUSH_BH;
22174+
22175+ psb_2d_lock(dev_priv);
22176+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
22177+ psb_2d_unlock(dev_priv);
22178+
22179+ return ret;
22180+}
22181+
22182+static void psbfb_fillrect_accel(struct fb_info *info,
22183+ const struct fb_fillrect *r)
22184+{
22185+ struct psbfb_par *par = info->par;
22186+ struct drm_framebuffer *fb = par->crtc->fb;
22187+ struct drm_psb_private *dev_priv = par->dev->dev_private;
22188+ uint32_t offset;
22189+ uint32_t stride;
22190+ uint32_t format;
22191+
22192+ if (!fb)
22193+ return;
22194+
22195+ offset = fb->offset;
22196+ stride = fb->pitch;
22197+
22198+ switch (fb->depth) {
22199+ case 8:
22200+ format = PSB_2D_DST_332RGB;
22201+ break;
22202+ case 15:
22203+ format = PSB_2D_DST_555RGB;
22204+ break;
22205+ case 16:
22206+ format = PSB_2D_DST_565RGB;
22207+ break;
22208+ case 24:
22209+ case 32:
22210+		/* this is wrong, but since we don't do blending it's okay */
22211+ format = PSB_2D_DST_8888ARGB;
22212+ break;
22213+ default:
22214+ /* software fallback */
22215+ cfb_fillrect(info, r);
22216+ return;
22217+ }
22218+
22219+ psb_accel_2d_fillrect(dev_priv,
22220+ offset, stride, format,
22221+ r->dx, r->dy, r->width, r->height, r->color);
22222+}
22223+
22224+static void psbfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
22225+{
22226+ if (info->state != FBINFO_STATE_RUNNING)
22227+ return;
22228+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
22229+ cfb_fillrect(info, rect);
22230+ return;
22231+ }
22232+ if (in_interrupt() || in_atomic()) {
22233+ /*
22234+ * Catch case when we're shutting down.
22235+ */
22236+ cfb_fillrect(info, rect);
22237+ return;
22238+ }
22239+ psbfb_fillrect_accel(info, rect);
22240+}
22241+
22242+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
22243+{
22244+	if (xdir < 0)
22245+		return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
22246+				    PSB_2D_COPYORDER_TR2BL;
22247+	else
22248+		return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
22249+				    PSB_2D_COPYORDER_TL2BR;
22250+}
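+
+/*
+ * Editor's note (illustrative, not part of the original patch): xdir/ydir
+ * are (src - dst) deltas, and the copy order starts at the corner that
+ * will not be overwritten, so overlapping blits stay correct:
+ *
+ *	xdir < 0, ydir < 0   -> BR2TL
+ *	xdir < 0, ydir >= 0  -> TR2BL
+ *	xdir >= 0, ydir < 0  -> BL2TR
+ *	xdir >= 0, ydir >= 0 -> TL2BR
+ */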
22251+
22252+/*
22253+ * @srcOffset in bytes
22254+ * @srcStride in bytes
22255+ * @srcFormat psb 2D format defines
22256+ * @dstOffset in bytes
22257+ * @dstStride in bytes
22258+ * @dstFormat psb 2D format defines
22259+ * @srcX offset in pixels
22260+ * @srcY offset in pixels
22261+ * @dstX offset in pixels
22262+ * @dstY offset in pixels
22263+ * @sizeX of the copied area
22264+ * @sizeY of the copied area
22265+ */
22266+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
22267+ uint32_t src_offset, uint32_t src_stride,
22268+ uint32_t src_format, uint32_t dst_offset,
22269+ uint32_t dst_stride, uint32_t dst_format,
22270+ uint16_t src_x, uint16_t src_y, uint16_t dst_x,
22271+ uint16_t dst_y, uint16_t size_x, uint16_t size_y)
22272+{
22273+ uint32_t blit_cmd;
22274+ uint32_t buffer[10];
22275+ uint32_t *buf;
22276+ uint32_t direction;
22277+ int ret;
22278+
22279+ buf = buffer;
22280+
22281+ direction = psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
22282+
22283+ if (direction == PSB_2D_COPYORDER_BR2TL ||
22284+ direction == PSB_2D_COPYORDER_TR2BL) {
22285+ src_x += size_x - 1;
22286+ dst_x += size_x - 1;
22287+ }
22288+ if (direction == PSB_2D_COPYORDER_BR2TL ||
22289+ direction == PSB_2D_COPYORDER_BL2TR) {
22290+ src_y += size_y - 1;
22291+ dst_y += size_y - 1;
22292+ }
22293+
22294+ blit_cmd =
22295+ PSB_2D_BLIT_BH |
22296+ PSB_2D_ROT_NONE |
22297+ PSB_2D_DSTCK_DISABLE |
22298+ PSB_2D_SRCCK_DISABLE |
22299+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
22300+
22301+ *buf++ = PSB_2D_FENCE_BH;
22302+	*buf++ = PSB_2D_DST_SURF_BH | dst_format |
22303+		 (dst_stride << PSB_2D_DST_STRIDE_SHIFT);
22304+	*buf++ = dst_offset;
22305+	*buf++ = PSB_2D_SRC_SURF_BH | src_format |
22306+		 (src_stride << PSB_2D_SRC_STRIDE_SHIFT);
22307+	*buf++ = src_offset;
22308+	*buf++ = PSB_2D_SRC_OFF_BH |
22309+		 (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
22310+		 (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
22311+	*buf++ = blit_cmd;
22312+	*buf++ = (dst_x << PSB_2D_DST_XSTART_SHIFT) |
22313+		 (dst_y << PSB_2D_DST_YSTART_SHIFT);
22314+	*buf++ = (size_x << PSB_2D_DST_XSIZE_SHIFT) |
22315+		 (size_y << PSB_2D_DST_YSIZE_SHIFT);
22321+ *buf++ = PSB_2D_FLUSH_BH;
22322+
22323+ psb_2d_lock(dev_priv);
22324+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
22325+ psb_2d_unlock(dev_priv);
22326+ return ret;
22327+}
22328+
22329+static void psbfb_copyarea_accel(struct fb_info *info,
22330+ const struct fb_copyarea *a)
22331+{
22332+ struct psbfb_par *par = info->par;
22333+ struct drm_framebuffer *fb = par->crtc->fb;
22334+ struct drm_psb_private *dev_priv = par->dev->dev_private;
22335+ uint32_t offset;
22336+ uint32_t stride;
22337+ uint32_t src_format;
22338+ uint32_t dst_format;
22339+
22340+ if (!fb)
22341+ return;
22342+
22343+ offset = fb->offset;
22344+ stride = fb->pitch;
22345+
22346+ if (a->width == 8 || a->height == 8) {
22347+ psb_2d_lock(dev_priv);
22348+ psb_idle_2d(par->dev);
22349+ psb_2d_unlock(dev_priv);
22350+ cfb_copyarea(info, a);
22351+ return;
22352+ }
22353+
22354+ switch (fb->depth) {
22355+ case 8:
22356+ src_format = PSB_2D_SRC_332RGB;
22357+ dst_format = PSB_2D_DST_332RGB;
22358+ break;
22359+ case 15:
22360+ src_format = PSB_2D_SRC_555RGB;
22361+ dst_format = PSB_2D_DST_555RGB;
22362+ break;
22363+ case 16:
22364+ src_format = PSB_2D_SRC_565RGB;
22365+ dst_format = PSB_2D_DST_565RGB;
22366+ break;
22367+ case 24:
22368+ case 32:
22369+		/* this is wrong, but since we don't do blending it's okay */
22370+ src_format = PSB_2D_SRC_8888ARGB;
22371+ dst_format = PSB_2D_DST_8888ARGB;
22372+ break;
22373+ default:
22374+ /* software fallback */
22375+ cfb_copyarea(info, a);
22376+ return;
22377+ }
22378+
22379+ psb_accel_2d_copy(dev_priv,
22380+ offset, stride, src_format,
22381+ offset, stride, dst_format,
22382+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
22383+}
22384+
22385+static void psbfb_copyarea(struct fb_info *info,
22386+ const struct fb_copyarea *region)
22387+{
22388+ if (info->state != FBINFO_STATE_RUNNING)
22389+ return;
22390+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
22391+ cfb_copyarea(info, region);
22392+ return;
22393+ }
22394+ if (in_interrupt() || in_atomic()) {
22395+ /*
22396+ * Catch case when we're shutting down.
22397+ */
22398+ cfb_copyarea(info, region);
22399+ return;
22400+ }
22401+
22402+ psbfb_copyarea_accel(info, region);
22403+}
22404+
22405+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
22406+{
22407+ if (info->state != FBINFO_STATE_RUNNING)
22408+ return;
22409+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
22410+ cfb_imageblit(info, image);
22411+ return;
22412+ }
22413+ if (in_interrupt() || in_atomic()) {
22414+ cfb_imageblit(info, image);
22415+ return;
22416+ }
22417+
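+	/* No accelerated imageblit path is implemented; every branch
+	 * above falls back to the generic cfb routine. */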
22418+ cfb_imageblit(info, image);
22419+}
22420+
22421+static int psbfb_blank(int blank_mode, struct fb_info *info)
22422+{
22423+ int dpms_mode;
22424+ struct psbfb_par *par = info->par;
22425+ struct drm_output *output;
22426+
22427+ par->dpms_state = blank_mode;
22428+
22429+	switch (blank_mode) {
22430+ case FB_BLANK_UNBLANK:
22431+ dpms_mode = DPMSModeOn;
22432+ break;
22433+ case FB_BLANK_NORMAL:
22434+ if (!par->crtc)
22435+ return 0;
22436+ (*par->crtc->funcs->dpms)(par->crtc, DPMSModeStandby);
22437+ return 0;
22438+ case FB_BLANK_HSYNC_SUSPEND:
22439+ default:
22440+ dpms_mode = DPMSModeStandby;
22441+ break;
22442+ case FB_BLANK_VSYNC_SUSPEND:
22443+ dpms_mode = DPMSModeSuspend;
22444+ break;
22445+ case FB_BLANK_POWERDOWN:
22446+ dpms_mode = DPMSModeOff;
22447+ break;
22448+ }
22449+
22450+ if (!par->crtc)
22451+ return 0;
22452+
22453+ list_for_each_entry(output, &par->dev->mode_config.output_list, head) {
22454+ if (output->crtc == par->crtc)
22455+ (*output->funcs->dpms)(output, dpms_mode);
22456+ }
22457+
22458+ (*par->crtc->funcs->dpms)(par->crtc, dpms_mode);
22459+ return 0;
22460+}
22461+
22462+
22463+static int psbfb_kms_off(struct drm_device *dev, int suspend)
22464+{
22465+	struct drm_framebuffer *fb = NULL;
22466+	struct drm_buffer_object *bo = NULL;
22467+ struct drm_psb_private *dev_priv = dev->dev_private;
22468+ int ret = 0;
22469+
22470+	DRM_DEBUG("psbfb_kms_off\n");
22471+
22472+ mutex_lock(&dev->mode_config.mutex);
22473+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
22474+ struct fb_info *info = fb->fbdev;
22475+ struct psbfb_par *par = info->par;
22476+ int save_dpms_state;
22477+
22478+ if (suspend)
22479+ fb_set_suspend(info, 1);
22480+ else
22481+ info->state &= ~FBINFO_STATE_RUNNING;
22482+
22483+ info->screen_base = NULL;
22484+
22485+ bo = fb->bo;
22486+
22487+ if (!bo)
22488+ continue;
22489+
22490+ drm_bo_kunmap(&fb->kmap);
22491+
22492+ /*
22493+ * We don't take the 2D lock here as we assume that the
22494+ * 2D engine will eventually idle anyway.
22495+ */
22496+
22497+ if (!suspend) {
22498+ uint32_t dummy2 = 0;
22499+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
22500+ &dummy2, &dummy2);
22501+ psb_2d_lock(dev_priv);
22502+ (void)psb_idle_2d(dev);
22503+ psb_2d_unlock(dev_priv);
22504+ } else
22505+ psb_idle_2d(dev);
22506+
22507+ save_dpms_state = par->dpms_state;
22508+ psbfb_blank(FB_BLANK_NORMAL, info);
22509+ par->dpms_state = save_dpms_state;
22510+
22511+ ret = psbfb_move_fb_bo(info, bo, DRM_BO_FLAG_MEM_LOCAL);
22512+
22513+ if (ret)
22514+ goto out_err;
22515+ }
22516+ out_err:
22517+ mutex_unlock(&dev->mode_config.mutex);
22518+
22519+ return ret;
22520+}
22521+
22522+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
22523+ struct drm_file *file_priv)
22524+{
22525+ int ret;
22526+
22527+ acquire_console_sem();
22528+ ret = psbfb_kms_off(dev, 0);
22529+ release_console_sem();
22530+
22531+ return ret;
22532+}
22533+
22534+static int psbfb_kms_on(struct drm_device *dev, int resume)
22535+{
22536+	struct drm_framebuffer *fb = NULL;
22537+	struct drm_buffer_object *bo = NULL;
22538+ struct drm_psb_private *dev_priv = dev->dev_private;
22539+ int ret = 0;
22540+ int dummy;
22541+
22542+ DRM_DEBUG("psbfb_kms_on_ioctl\n");
22543+
22544+ if (!resume) {
22545+ uint32_t dummy2 = 0;
22546+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
22547+ &dummy2, &dummy2);
22548+ psb_2d_lock(dev_priv);
22549+ (void)psb_idle_2d(dev);
22550+ psb_2d_unlock(dev_priv);
22551+ } else
22552+ psb_idle_2d(dev);
22553+
22554+ mutex_lock(&dev->mode_config.mutex);
22555+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
22556+ struct fb_info *info = fb->fbdev;
22557+ struct psbfb_par *par = info->par;
22558+
22559+ bo = fb->bo;
22560+ if (!bo)
22561+ continue;
22562+
22563+ ret = psbfb_move_fb_bo(info, bo,
22564+ DRM_BO_FLAG_MEM_TT |
22565+ DRM_BO_FLAG_MEM_VRAM |
22566+ DRM_BO_FLAG_NO_EVICT);
22567+ if (ret)
22568+ goto out_err;
22569+
22570+ ret = drm_bo_kmap(bo, 0, bo->num_pages, &fb->kmap);
22571+ if (ret)
22572+ goto out_err;
22573+
22574+ info->screen_base = drm_bmo_virtual(&fb->kmap, &dummy);
22575+ fb->offset = bo->offset - dev_priv->pg->gatt_start;
22576+
22577+ if (ret)
22578+ goto out_err;
22579+
22580+ if (resume)
22581+ fb_set_suspend(info, 0);
22582+ else
22583+ info->state |= FBINFO_STATE_RUNNING;
22584+
22585+ /*
22586+ * Re-run modesetting here, since the VDS scanout offset may
22587+ * have changed.
22588+ */
22589+
22590+ if (par->crtc->enabled) {
22591+ psbfb_set_par(info);
22592+ psbfb_blank(par->dpms_state, info);
22593+ }
22594+ }
22595+ out_err:
22596+ mutex_unlock(&dev->mode_config.mutex);
22597+
22598+ return ret;
22599+}
22600+
22601+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
22602+ struct drm_file *file_priv)
22603+{
22604+ int ret;
22605+
22606+ acquire_console_sem();
22607+ ret = psbfb_kms_on(dev, 0);
22608+ release_console_sem();
22609+#ifdef SII_1392_WA
22610+	if ((SII_1392 != 1) || (drm_psb_no_fb == 0))
22611+ drm_disable_unused_functions(dev);
22612+#else
22613+ drm_disable_unused_functions(dev);
22614+#endif
22615+ return ret;
22616+}
22617+
22618+void psbfb_suspend(struct drm_device *dev)
22619+{
22620+ acquire_console_sem();
22621+ psbfb_kms_off(dev, 1);
22622+ release_console_sem();
22623+}
22624+
22625+void psbfb_resume(struct drm_device *dev)
22626+{
22627+ acquire_console_sem();
22628+ psbfb_kms_on(dev, 1);
22629+ release_console_sem();
22630+#ifdef SII_1392_WA
22631+	if ((SII_1392 != 1) || (drm_psb_no_fb == 0))
22632+ drm_disable_unused_functions(dev);
22633+#else
22634+ drm_disable_unused_functions(dev);
22635+#endif
22636+}
22637+
22638+/*
22639+ * FIXME: Before kernel inclusion, migrate nopfn to fault.
22640+ * Also, these should be the default vm ops for buffer object type fbs.
22641+ */
22642+
22643+extern unsigned long drm_bo_vm_fault(struct vm_area_struct *vma,
22644+ struct vm_fault *vmf);
22645+
22646+/*
22647+ * This wrapper is a bit ugly and is here because we need access to a mutex
22648+ * that we can lock both around nopfn and around unmap_mapping_range + move.
22649+ * Normally, this would've been done using the bo mutex, but unfortunately
22650+ * we cannot lock it around drm_bo_do_validate(), since that would imply
22651+ * recursive locking.
22652+ */
22653+
22654+static int psbfb_fault(struct vm_area_struct *vma,
22655+ struct vm_fault *vmf)
22656+{
22657+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
22658+ struct vm_area_struct tmp_vma;
22659+ int ret;
22660+
22661+ mutex_lock(&vi->vm_mutex);
22662+ tmp_vma = *vma;
22663+ tmp_vma.vm_private_data = vi->bo;
22664+ ret = drm_bo_vm_fault(&tmp_vma, vmf);
22665+ mutex_unlock(&vi->vm_mutex);
22666+ return ret;
22667+}
22668+
22669+static void psbfb_vm_open(struct vm_area_struct *vma)
22670+{
22671+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
22672+
22673+ atomic_inc(&vi->refcount);
22674+}
22675+
22676+static void psbfb_vm_close(struct vm_area_struct *vma)
22677+{
22678+ psbfb_vm_info_deref((struct psbfb_vm_info **)&vma->vm_private_data);
22679+}
22680+
22681+static struct vm_operations_struct psbfb_vm_ops = {
22682+ .fault = psbfb_fault,
22683+ .open = psbfb_vm_open,
22684+ .close = psbfb_vm_close,
22685+};
22686+
22687+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
22688+{
22689+ struct psbfb_par *par = info->par;
22690+ struct drm_framebuffer *fb = par->crtc->fb;
22691+ struct drm_buffer_object *bo = fb->bo;
22692+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
22693+ unsigned long offset = vma->vm_pgoff;
22694+
22695+ if (vma->vm_pgoff != 0)
22696+ return -EINVAL;
22697+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
22698+ return -EINVAL;
22699+ if (offset + size > bo->num_pages)
22700+ return -EINVAL;
22701+
22702+ mutex_lock(&par->vi->vm_mutex);
22703+ if (!par->vi->f_mapping)
22704+ par->vi->f_mapping = vma->vm_file->f_mapping;
22705+ mutex_unlock(&par->vi->vm_mutex);
22706+
22707+ vma->vm_private_data = psbfb_vm_info_ref(par->vi);
22708+
22709+ vma->vm_ops = &psbfb_vm_ops;
22710+ vma->vm_flags |= VM_PFNMAP;
22711+
22712+ return 0;
22713+}
22714+
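Because psbfb_mmap rejects any non-zero vm_pgoff and bounds the mapping against the buffer object, a userspace mapping must start at offset 0. A sketch (using /dev/fb0 and fix.smem_len, both assumptions about the runtime setup):

#include <fcntl.h>
#include <linux/fb.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	void *fbmem;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
		return 1;
	/* offset must be 0: the driver's mmap hook rejects anything else */
	fbmem = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
		     MAP_SHARED, fd, 0);
	if (fbmem == MAP_FAILED)
		return 1;
	memset(fbmem, 0xff, fix.smem_len);	/* fill the screen white */
	munmap(fbmem, fix.smem_len);
	close(fd);
	return 0;
}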
22715+int psbfb_sync(struct fb_info *info)
22716+{
22717+ struct psbfb_par *par = info->par;
22718+ struct drm_psb_private *dev_priv = par->dev->dev_private;
22719+
22720+ psb_2d_lock(dev_priv);
22721+ psb_idle_2d(par->dev);
22722+ psb_2d_unlock(dev_priv);
22723+
22724+ return 0;
22725+}
22726+
22727+static struct fb_ops psbfb_ops = {
22728+ .owner = THIS_MODULE,
22729+ .fb_check_var = psbfb_check_var,
22730+ .fb_set_par = psbfb_set_par,
22731+ .fb_setcolreg = psbfb_setcolreg,
22732+ .fb_fillrect = psbfb_fillrect,
22733+ .fb_copyarea = psbfb_copyarea,
22734+ .fb_imageblit = psbfb_imageblit,
22735+ .fb_mmap = psbfb_mmap,
22736+ .fb_sync = psbfb_sync,
22737+ .fb_blank = psbfb_blank,
22738+};
22739+
22740+int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
22741+{
22742+ struct fb_info *info;
22743+ struct psbfb_par *par;
22744+ struct device *device = &dev->pdev->dev;
22745+ struct drm_framebuffer *fb;
22746+ struct drm_display_mode *mode = crtc->desired_mode;
22747+ struct drm_psb_private *dev_priv =
22748+ (struct drm_psb_private *)dev->dev_private;
22749+ struct drm_buffer_object *fbo = NULL;
22750+ int ret;
22751+ int is_iomem;
22752+
22753+ if (drm_psb_no_fb) {
22754+ /* need to do this as the DRM will disable the output */
22755+ crtc->enabled = 1;
22756+ return 0;
22757+ }
22758+
22759+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
22760+ if (!info) {
22761+ return -ENOMEM;
22762+ }
22763+
22764+ fb = drm_framebuffer_create(dev);
22765+ if (!fb) {
22766+ framebuffer_release(info);
22767+ DRM_ERROR("failed to allocate fb.\n");
22768+ return -ENOMEM;
22769+ }
22770+ crtc->fb = fb;
22771+
22772+ fb->width = mode->hdisplay;
22773+ fb->height = mode->vdisplay;
22774+
22775+ fb->bits_per_pixel = 32;
22776+ fb->depth = 24;
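	/* bytes per scanline, rounded up to a 64-byte boundary */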
22777+ fb->pitch =
22778+ ((fb->width * ((fb->bits_per_pixel + 1) / 8)) + 0x3f) & ~0x3f;
22779+
22780+ ret = drm_buffer_object_create(dev,
22781+ fb->pitch * fb->height,
22782+ drm_bo_type_kernel,
22783+ DRM_BO_FLAG_READ |
22784+ DRM_BO_FLAG_WRITE |
22785+ DRM_BO_FLAG_MEM_TT |
22786+ DRM_BO_FLAG_MEM_VRAM |
22787+ DRM_BO_FLAG_NO_EVICT,
22788+ DRM_BO_HINT_DONT_FENCE, 0, 0, &fbo);
22789+ if (ret || !fbo) {
22790+ DRM_ERROR("failed to allocate framebuffer\n");
22791+ goto out_err0;
22792+ }
22793+
22794+ fb->offset = fbo->offset - dev_priv->pg->gatt_start;
22795+ fb->bo = fbo;
22796+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
22797+ fb->height, fb->offset, fbo);
22798+
22799+ fb->fbdev = info;
22800+
22801+ par = info->par;
22802+
22803+ par->dev = dev;
22804+ par->crtc = crtc;
22805+ par->vi = psbfb_vm_info_create();
22806+ if (!par->vi)
22807+ goto out_err1;
22808+
22809+ mutex_lock(&dev->struct_mutex);
22810+ par->vi->bo = fbo;
22811+ atomic_inc(&fbo->usage);
22812+ mutex_unlock(&dev->struct_mutex);
22813+
22814+ par->vi->f_mapping = NULL;
22815+ info->fbops = &psbfb_ops;
22816+
22817+ strcpy(info->fix.id, "psbfb");
22818+ info->fix.type = FB_TYPE_PACKED_PIXELS;
22819+ info->fix.visual = FB_VISUAL_DIRECTCOLOR;
22820+ info->fix.type_aux = 0;
22821+ info->fix.xpanstep = 1;
22822+ info->fix.ypanstep = 1;
22823+ info->fix.ywrapstep = 0;
22824+ info->fix.accel = FB_ACCEL_NONE; /* ??? */
22826+ info->fix.mmio_start = 0;
22827+ info->fix.mmio_len = 0;
22828+ info->fix.line_length = fb->pitch;
22829+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
22830+ info->fix.smem_len = info->fix.line_length * fb->height;
22831+
22832+ info->flags = FBINFO_DEFAULT |
22833+ FBINFO_PARTIAL_PAN_OK /*| FBINFO_MISC_ALWAYS_SETPAR */ ;
22834+
22835+ ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
22836+ if (ret) {
22837+ DRM_ERROR("error mapping fb: %d\n", ret);
22838+ goto out_err2;
22839+ }
22840+
22841+ info->screen_base = drm_bmo_virtual(&fb->kmap, &is_iomem);
22842+ memset(info->screen_base, 0x00, fb->pitch*fb->height);
22843+ info->screen_size = info->fix.smem_len; /* FIXME */
22844+ info->pseudo_palette = fb->pseudo_palette;
22845+ info->var.xres_virtual = fb->width;
22846+ info->var.yres_virtual = fb->height;
22847+ info->var.bits_per_pixel = fb->bits_per_pixel;
22848+ info->var.xoffset = 0;
22849+ info->var.yoffset = 0;
22850+ info->var.activate = FB_ACTIVATE_NOW;
22851+ info->var.height = -1;
22852+ info->var.width = -1;
22853+ info->var.vmode = FB_VMODE_NONINTERLACED;
22854+
22855+ info->var.xres = mode->hdisplay;
22856+ info->var.right_margin = mode->hsync_start - mode->hdisplay;
22857+ info->var.hsync_len = mode->hsync_end - mode->hsync_start;
22858+ info->var.left_margin = mode->htotal - mode->hsync_end;
22859+ info->var.yres = mode->vdisplay;
22860+ info->var.lower_margin = mode->vsync_start - mode->vdisplay;
22861+ info->var.vsync_len = mode->vsync_end - mode->vsync_start;
22862+ info->var.upper_margin = mode->vtotal - mode->vsync_end;
22863+	/* pixclock is in picoseconds; stage the arithmetic to avoid
22864+	 * 32-bit overflow */
22865+	info->var.pixclock = 10000000 / mode->htotal * 1000 /
22866+	    mode->vtotal * 100;
22867+	info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
22867+
22868+ info->pixmap.size = 64 * 1024;
22869+ info->pixmap.buf_align = 8;
22870+ info->pixmap.access_align = 32;
22871+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
22872+ info->pixmap.scan_align = 1;
22873+
22874+ DRM_DEBUG("fb depth is %d\n", fb->depth);
22875+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
22876+ switch (fb->depth) {
22877+ case 8:
22878+ info->var.red.offset = 0;
22879+ info->var.green.offset = 0;
22880+ info->var.blue.offset = 0;
22881+ info->var.red.length = 8; /* 8bit DAC */
22882+ info->var.green.length = 8;
22883+ info->var.blue.length = 8;
22884+ info->var.transp.offset = 0;
22885+ info->var.transp.length = 0;
22886+ break;
22887+ case 15:
22888+ info->var.red.offset = 10;
22889+ info->var.green.offset = 5;
22890+ info->var.blue.offset = 0;
22891+ info->var.red.length = info->var.green.length =
22892+ info->var.blue.length = 5;
22893+ info->var.transp.offset = 15;
22894+ info->var.transp.length = 1;
22895+ break;
22896+ case 16:
22897+ info->var.red.offset = 11;
22898+ info->var.green.offset = 5;
22899+ info->var.blue.offset = 0;
22900+ info->var.red.length = 5;
22901+ info->var.green.length = 6;
22902+ info->var.blue.length = 5;
22903+ info->var.transp.offset = 0;
22904+ break;
22905+ case 24:
22906+ info->var.red.offset = 16;
22907+ info->var.green.offset = 8;
22908+ info->var.blue.offset = 0;
22909+ info->var.red.length = info->var.green.length =
22910+ info->var.blue.length = 8;
22911+ info->var.transp.offset = 0;
22912+ info->var.transp.length = 0;
22913+ break;
22914+ case 32:
22915+ info->var.red.offset = 16;
22916+ info->var.green.offset = 8;
22917+ info->var.blue.offset = 0;
22918+ info->var.red.length = info->var.green.length =
22919+ info->var.blue.length = 8;
22920+ info->var.transp.offset = 24;
22921+ info->var.transp.length = 8;
22922+ break;
22923+ default:
22924+ break;
22925+ }
22926+
22927+ if (register_framebuffer(info) < 0)
22928+ goto out_err3;
22929+
22930+ if (psbfb_check_var(&info->var, info) < 0)
22931+ goto out_err4;
22932+
22933+ psbfb_set_par(info);
22934+
22935+ DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id);
22936+
22937+ return 0;
22938+ out_err4:
22939+ unregister_framebuffer(info);
22940+ out_err3:
22941+ drm_bo_kunmap(&fb->kmap);
22942+ out_err2:
22943+ psbfb_vm_info_deref(&par->vi);
22944+ out_err1:
22945+ drm_bo_usage_deref_unlocked(&fb->bo);
22946+ out_err0:
22947+ drm_framebuffer_destroy(fb);
22948+ framebuffer_release(info);
22949+ crtc->fb = NULL;
22950+ return -EINVAL;
22951+}
22952+
22953+EXPORT_SYMBOL(psbfb_probe);
22954+
22955+int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
22956+{
22957+ struct drm_framebuffer *fb;
22958+ struct fb_info *info;
22959+ struct psbfb_par *par;
22960+
22961+ if (drm_psb_no_fb)
22962+ return 0;
22963+
22964+ fb = crtc->fb;
22965+ info = fb->fbdev;
22966+
22967+ if (info) {
22968+ unregister_framebuffer(info);
22969+ drm_bo_kunmap(&fb->kmap);
22970+ par = info->par;
22971+ if (par)
22972+ psbfb_vm_info_deref(&par->vi);
22973+ drm_bo_usage_deref_unlocked(&fb->bo);
22974+ drm_framebuffer_destroy(fb);
22975+ framebuffer_release(info);
22976+ }
22977+ return 0;
22978+}
22979+
22980+EXPORT_SYMBOL(psbfb_remove);
22981+
22982Index: linux-2.6.27/drivers/gpu/drm/psb/psb_fence.c
22983===================================================================
22984--- /dev/null 1970-01-01 00:00:00.000000000 +0000
22985+++ linux-2.6.27/drivers/gpu/drm/psb/psb_fence.c 2009-02-05 13:29:33.000000000 +0000
22986@@ -0,0 +1,285 @@
22987+/**************************************************************************
22988+ * Copyright (c) 2007, Intel Corporation.
22989+ * All Rights Reserved.
22990+ *
22991+ * This program is free software; you can redistribute it and/or modify it
22992+ * under the terms and conditions of the GNU General Public License,
22993+ * version 2, as published by the Free Software Foundation.
22994+ *
22995+ * This program is distributed in the hope it will be useful, but WITHOUT
22996+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22997+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22998+ * more details.
22999+ *
23000+ * You should have received a copy of the GNU General Public License along with
23001+ * this program; if not, write to the Free Software Foundation, Inc.,
23002+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23003+ *
23004+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
23005+ * develop this driver.
23006+ *
23007+ **************************************************************************/
23008+/*
23009+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
23010+ */
23011+
23012+#include "drmP.h"
23013+#include "psb_drv.h"
23014+
23015+static void psb_poll_ta(struct drm_device *dev, uint32_t waiting_types)
23016+{
23017+ struct drm_psb_private *dev_priv =
23018+ (struct drm_psb_private *)dev->dev_private;
23019+ struct drm_fence_driver *driver = dev->driver->fence_driver;
23020+ uint32_t cur_flag = 1;
23021+ uint32_t flags = 0;
23022+ uint32_t sequence = 0;
23023+ uint32_t remaining = 0xFFFFFFFF;
23024+ uint32_t diff;
23025+
23026+ struct psb_scheduler *scheduler;
23027+ struct psb_scheduler_seq *seq;
23028+ struct drm_fence_class_manager *fc =
23029+ &dev->fm.fence_class[PSB_ENGINE_TA];
23030+
23031+ if (unlikely(!dev_priv))
23032+ return;
23033+
23034+ scheduler = &dev_priv->scheduler;
23035+ seq = scheduler->seq;
23036+
23037+ while (likely(waiting_types & remaining)) {
23038+ if (!(waiting_types & cur_flag))
23039+ goto skip;
23040+ if (seq->reported)
23041+ goto skip;
23042+ if (flags == 0)
23043+ sequence = seq->sequence;
23044+ else if (sequence != seq->sequence) {
23045+ drm_fence_handler(dev, PSB_ENGINE_TA,
23046+ sequence, flags, 0);
23047+ sequence = seq->sequence;
23048+ flags = 0;
23049+ }
23050+ flags |= cur_flag;
23051+
23052+ /*
23053+ * Sequence may not have ended up on the ring yet.
23054+ * In that case, report it but don't mark it as
23055+ * reported. A subsequent poll will report it again.
23056+ */
23057+
23058+ diff = (fc->latest_queued_sequence - sequence) &
23059+ driver->sequence_mask;
23060+ if (diff < driver->wrap_diff)
23061+ seq->reported = 1;
23062+
23063+ skip:
23064+ cur_flag <<= 1;
23065+ remaining <<= 1;
23066+ seq++;
23067+ }
23068+
23069+ if (flags) {
23070+ drm_fence_handler(dev, PSB_ENGINE_TA, sequence, flags, 0);
23071+ }
23072+}
23073+
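The `diff < driver->wrap_diff` test above is the usual wraparound-safe comparison on 32-bit sequence numbers. Reduced to its essentials (a standalone sketch, not driver code):

#include <stdint.h>

/* Nonzero if 'seq' is no more than wrap_diff steps behind 'latest'
 * under wrapping 32-bit arithmetic. This driver uses
 * mask = 0xFFFFFFFF and wrap_diff = 1 << 30. */
static int seq_queued(uint32_t latest, uint32_t seq,
		      uint32_t mask, uint32_t wrap_diff)
{
	return ((latest - seq) & mask) < wrap_diff;
}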
23074+static void psb_poll_other(struct drm_device *dev, uint32_t fence_class,
23075+ uint32_t waiting_types)
23076+{
23077+ struct drm_psb_private *dev_priv =
23078+ (struct drm_psb_private *)dev->dev_private;
23079+ struct drm_fence_manager *fm = &dev->fm;
23080+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
23081+ uint32_t sequence;
23082+
23083+ if (unlikely(!dev_priv))
23084+ return;
23085+
23086+ if (waiting_types) {
23087+ if (fence_class == PSB_ENGINE_VIDEO)
23088+ sequence = dev_priv->msvdx_current_sequence;
23089+ else
23090+ sequence = dev_priv->comm[fence_class << 4];
23091+
23092+ drm_fence_handler(dev, fence_class, sequence,
23093+ DRM_FENCE_TYPE_EXE, 0);
23094+
23095+ switch (fence_class) {
23096+ case PSB_ENGINE_2D:
23097+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
23098+ psb_2D_irq_off(dev_priv);
23099+ dev_priv->fence0_irq_on = 0;
23100+ } else if (!dev_priv->fence0_irq_on
23101+ && fc->waiting_types) {
23102+ psb_2D_irq_on(dev_priv);
23103+ dev_priv->fence0_irq_on = 1;
23104+ }
23105+ break;
23106+#if 0
23107+ /*
23108+ * FIXME: MSVDX irq switching
23109+ */
23110+
23111+ case PSB_ENGINE_VIDEO:
23112+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
23113+ psb_msvdx_irq_off(dev_priv);
23114+ dev_priv->fence2_irq_on = 0;
23115+ } else if (!dev_priv->fence2_irq_on
23116+ && fc->pending_exe_flush) {
23117+ psb_msvdx_irq_on(dev_priv);
23118+ dev_priv->fence2_irq_on = 1;
23119+ }
23120+ break;
23121+#endif
23122+ default:
23123+ return;
23124+ }
23125+ }
23126+}
23127+
23128+static void psb_fence_poll(struct drm_device *dev,
23129+ uint32_t fence_class, uint32_t waiting_types)
23130+{
23131+ switch (fence_class) {
23132+ case PSB_ENGINE_TA:
23133+ psb_poll_ta(dev, waiting_types);
23134+ break;
23135+ default:
23136+ psb_poll_other(dev, fence_class, waiting_types);
23137+ break;
23138+ }
23139+}
23140+
23141+void psb_fence_error(struct drm_device *dev,
23142+ uint32_t fence_class,
23143+ uint32_t sequence, uint32_t type, int error)
23144+{
23145+ struct drm_fence_manager *fm = &dev->fm;
23146+ unsigned long irq_flags;
23147+
23148+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
23149+ write_lock_irqsave(&fm->lock, irq_flags);
23150+ drm_fence_handler(dev, fence_class, sequence, type, error);
23151+ write_unlock_irqrestore(&fm->lock, irq_flags);
23152+}
23153+
23154+int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
23155+ uint32_t flags, uint32_t * sequence,
23156+ uint32_t * native_type)
23157+{
23158+ struct drm_psb_private *dev_priv =
23159+ (struct drm_psb_private *)dev->dev_private;
23160+ uint32_t seq = 0;
23161+ int ret;
23162+
23163+ if (!dev_priv)
23164+ return -EINVAL;
23165+
23166+ if (fence_class >= PSB_NUM_ENGINES)
23167+ return -EINVAL;
23168+
23169+ switch (fence_class) {
23170+ case PSB_ENGINE_2D:
23171+ spin_lock(&dev_priv->sequence_lock);
23172+ seq = ++dev_priv->sequence[fence_class];
23173+ spin_unlock(&dev_priv->sequence_lock);
23174+ ret = psb_blit_sequence(dev_priv, seq);
23175+ if (ret)
23176+ return ret;
23177+ break;
23178+ case PSB_ENGINE_VIDEO:
23179+ spin_lock(&dev_priv->sequence_lock);
23180+ seq = ++dev_priv->sequence[fence_class];
23181+ spin_unlock(&dev_priv->sequence_lock);
23182+ break;
23183+ default:
23184+ spin_lock(&dev_priv->sequence_lock);
23185+ seq = dev_priv->sequence[fence_class];
23186+ spin_unlock(&dev_priv->sequence_lock);
23187+ }
23188+
23189+ *sequence = seq;
23190+ *native_type = DRM_FENCE_TYPE_EXE;
23191+
23192+ return 0;
23193+}
23194+
23195+uint32_t psb_fence_advance_sequence(struct drm_device * dev,
23196+ uint32_t fence_class)
23197+{
23198+ struct drm_psb_private *dev_priv =
23199+ (struct drm_psb_private *)dev->dev_private;
23200+ uint32_t sequence;
23201+
23202+ spin_lock(&dev_priv->sequence_lock);
23203+ sequence = ++dev_priv->sequence[fence_class];
23204+ spin_unlock(&dev_priv->sequence_lock);
23205+
23206+ return sequence;
23207+}
23208+
23209+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
23210+{
23211+ struct drm_fence_manager *fm = &dev->fm;
23212+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
23213+
23214+#ifdef FIX_TG_16
23215+ if (fence_class == 0) {
23216+ struct drm_psb_private *dev_priv =
23217+ (struct drm_psb_private *)dev->dev_private;
23218+
23219+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
23220+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
23221+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
23222+ _PSB_C2B_STATUS_BUSY) == 0))
23223+ psb_resume_ta_2d_idle(dev_priv);
23224+ }
23225+#endif
23226+ write_lock(&fm->lock);
23227+ psb_fence_poll(dev, fence_class, fc->waiting_types);
23228+ write_unlock(&fm->lock);
23229+}
23230+
23231+static int psb_fence_wait(struct drm_fence_object *fence,
23232+ int lazy, int interruptible, uint32_t mask)
23233+{
23234+ struct drm_device *dev = fence->dev;
23235+ struct drm_fence_class_manager *fc =
23236+ &dev->fm.fence_class[fence->fence_class];
23237+ int ret = 0;
23238+ unsigned long timeout = DRM_HZ *
23239+ ((fence->fence_class == PSB_ENGINE_TA) ? 30 : 3);
23240+
23241+ drm_fence_object_flush(fence, mask);
23242+ if (interruptible)
23243+ ret = wait_event_interruptible_timeout
23244+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
23245+ timeout);
23246+ else
23247+ ret = wait_event_timeout
23248+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
23249+ timeout);
23250+
23251+ if (unlikely(ret == -ERESTARTSYS))
23252+ return -EAGAIN;
23253+
23254+ if (unlikely(ret == 0))
23255+ return -EBUSY;
23256+
23257+ return 0;
23258+}
23259+
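Note the return-value mapping: a signal-interrupted wait becomes -EAGAIN so the caller can restart, and a timeout becomes -EBUSY after 3 s (30 s for the TA engine). psb_fence_wait is static and normally reached through the fence driver vtable; a purely conceptual caller sketch, with 'fence' assumed to exist:

	int ret;

	do {
		ret = psb_fence_wait(fence, 0, 1, DRM_FENCE_TYPE_EXE);
	} while (ret == -EAGAIN);	/* restart after signals */

	if (ret == -EBUSY)
		DRM_ERROR("fence wait timed out; engine likely hung\n");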
23260+struct drm_fence_driver psb_fence_driver = {
23261+ .num_classes = PSB_NUM_ENGINES,
23262+ .wrap_diff = (1 << 30),
23263+ .flush_diff = (1 << 29),
23264+ .sequence_mask = 0xFFFFFFFFU,
23265+ .has_irq = NULL,
23266+ .emit = psb_fence_emit_sequence,
23267+ .flush = NULL,
23268+ .poll = psb_fence_poll,
23269+ .needed_flush = NULL,
23270+ .wait = psb_fence_wait
23271+};
23272Index: linux-2.6.27/drivers/gpu/drm/psb/psb_gtt.c
23273===================================================================
23274--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23275+++ linux-2.6.27/drivers/gpu/drm/psb/psb_gtt.c 2009-02-05 13:29:33.000000000 +0000
23276@@ -0,0 +1,233 @@
23277+/**************************************************************************
23278+ * Copyright (c) 2007, Intel Corporation.
23279+ * All Rights Reserved.
23280+ *
23281+ * This program is free software; you can redistribute it and/or modify it
23282+ * under the terms and conditions of the GNU General Public License,
23283+ * version 2, as published by the Free Software Foundation.
23284+ *
23285+ * This program is distributed in the hope it will be useful, but WITHOUT
23286+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23287+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
23288+ * more details.
23289+ *
23290+ * You should have received a copy of the GNU General Public License along with
23291+ * this program; if not, write to the Free Software Foundation, Inc.,
23292+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23293+ *
23294+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
23295+ * develop this driver.
23296+ *
23297+ **************************************************************************/
23298+/*
23299+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
23300+ */
23301+#include "drmP.h"
23302+#include "psb_drv.h"
23303+
23304+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
23305+{
23306+ uint32_t mask = PSB_PTE_VALID;
23307+
23308+ if (type & PSB_MMU_CACHED_MEMORY)
23309+ mask |= PSB_PTE_CACHED;
23310+ if (type & PSB_MMU_RO_MEMORY)
23311+ mask |= PSB_PTE_RO;
23312+ if (type & PSB_MMU_WO_MEMORY)
23313+ mask |= PSB_PTE_WO;
23314+
23315+ return (pfn << PAGE_SHIFT) | mask;
23316+}
23317+
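A GTT entry is simply the page's physical address with flag bits folded into the low-order bits freed up by page alignment. For instance (pfn is a hypothetical value):

	uint32_t pte = psb_gtt_mask_pte(pfn, PSB_MMU_CACHED_MEMORY);
	/* == (pfn << PAGE_SHIFT) | PSB_PTE_CACHED | PSB_PTE_VALID */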
23318+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
23319+{
23320+ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
23321+
23322+ if (!tmp)
23323+ return NULL;
23324+
23325+ init_rwsem(&tmp->sem);
23326+ tmp->dev = dev;
23327+
23328+ return tmp;
23329+}
23330+
23331+void psb_gtt_takedown(struct psb_gtt *pg, int free)
23332+{
23333+	struct drm_psb_private *dev_priv;
23334+
23335+	if (!pg)
23336+		return;
23337+	dev_priv = pg->dev->dev_private;
23338+ if (pg->gtt_map) {
23339+ iounmap(pg->gtt_map);
23340+ pg->gtt_map = NULL;
23341+ }
23342+ if (pg->initialized) {
23343+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
23344+ pg->gmch_ctrl);
23345+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
23346+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
23347+ }
23348+ if (free)
23349+ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
23350+}
23351+
23352+int psb_gtt_init(struct psb_gtt *pg, int resume)
23353+{
23354+ struct drm_device *dev = pg->dev;
23355+ struct drm_psb_private *dev_priv = dev->dev_private;
23356+ unsigned gtt_pages;
23357+ unsigned long stolen_size;
23358+ unsigned i, num_pages;
23359+ unsigned pfn_base;
23360+
23361+ int ret = 0;
23362+ uint32_t pte;
23363+
23364+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
23365+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
23366+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
23367+
23368+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
23369+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
23370+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
23371+
23372+ pg->initialized = 1;
23373+
23374+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
23375+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
23376+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
23377+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
23378+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
23379+ >> PAGE_SHIFT;
23380+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
23381+ stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
23382+
23383+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
23384+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
23385+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
23386+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
23387+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
23388+
23389+	if (resume && ((gtt_pages != pg->gtt_pages) ||
23390+	    (stolen_size != pg->stolen_size))) {
23391+ DRM_ERROR("GTT resume error.\n");
23392+ ret = -EINVAL;
23393+ goto out_err;
23394+ }
23395+
23396+ pg->gtt_pages = gtt_pages;
23397+ pg->stolen_size = stolen_size;
23398+ pg->gtt_map =
23399+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
23400+ if (!pg->gtt_map) {
23401+ DRM_ERROR("Failure to map gtt.\n");
23402+ ret = -ENOMEM;
23403+ goto out_err;
23404+ }
23405+
23406+ /*
23407+ * insert stolen pages.
23408+ */
23409+
23410+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
23411+ num_pages = stolen_size >> PAGE_SHIFT;
23412+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
23413+ num_pages, pfn_base);
23414+ for (i = 0; i < num_pages; ++i) {
23415+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
23416+ iowrite32(pte, pg->gtt_map + i);
23417+ }
23418+
23419+ /*
23420+ * Init rest of gtt.
23421+ */
23422+
23423+ pfn_base = page_to_pfn(dev_priv->scratch_page);
23424+ pte = psb_gtt_mask_pte(pfn_base, 0);
23425+ PSB_DEBUG_INIT("Initializing the rest of a total "
23426+ "of %d gtt pages.\n", pg->gatt_pages);
23427+
23428+ for (; i < pg->gatt_pages; ++i)
23429+ iowrite32(pte, pg->gtt_map + i);
23430+ (void)ioread32(pg->gtt_map + i - 1);
23431+
23432+ return 0;
23433+
23434+ out_err:
23435+ psb_gtt_takedown(pg, 0);
23436+ return ret;
23437+}
23438+
23439+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
23440+ unsigned offset_pages, unsigned num_pages,
23441+ unsigned desired_tile_stride, unsigned hw_tile_stride,
23442+ int type)
23443+{
23444+ unsigned rows = 1;
23445+ unsigned add;
23446+ unsigned row_add;
23447+ unsigned i;
23448+ unsigned j;
23449+ uint32_t *cur_page = NULL;
23450+ uint32_t pte;
23451+
23452+ if (hw_tile_stride)
23453+ rows = num_pages / desired_tile_stride;
23454+ else
23455+ desired_tile_stride = num_pages;
23456+
23457+ add = desired_tile_stride;
23458+ row_add = hw_tile_stride;
23459+
23460+ down_read(&pg->sem);
23461+ for (i = 0; i < rows; ++i) {
23462+ cur_page = pg->gtt_map + offset_pages;
23463+ for (j = 0; j < desired_tile_stride; ++j) {
23464+ pte = psb_gtt_mask_pte(page_to_pfn(*pages++), type);
23465+ iowrite32(pte, cur_page++);
23466+ }
23467+ offset_pages += add;
23468+ }
23469+ (void)ioread32(cur_page - 1);
23470+ up_read(&pg->sem);
23471+
23472+ return 0;
23473+}
23474+
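For an ordinary linear buffer the caller passes 0 for both tile strides, which collapses the loop to a single row covering num_pages. A hypothetical caller (bo_pages, first_page and bo_num_pages are made-up names):

	ret = psb_gtt_insert_pages(dev_priv->pg, bo_pages, first_page,
				   bo_num_pages, 0, 0, 0);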
23475+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
23476+ unsigned num_pages, unsigned desired_tile_stride,
23477+ unsigned hw_tile_stride)
23478+{
23479+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
23480+ unsigned rows = 1;
23481+ unsigned add;
23482+ unsigned row_add;
23483+ unsigned i;
23484+ unsigned j;
23485+ uint32_t *cur_page = NULL;
23486+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
23487+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
23488+
23489+ if (hw_tile_stride)
23490+ rows = num_pages / desired_tile_stride;
23491+ else
23492+ desired_tile_stride = num_pages;
23493+
23494+ add = desired_tile_stride;
23495+ row_add = hw_tile_stride;
23496+
23497+ down_read(&pg->sem);
23498+ for (i = 0; i < rows; ++i) {
23499+ cur_page = pg->gtt_map + offset_pages;
23500+ for (j = 0; j < desired_tile_stride; ++j) {
23501+ iowrite32(pte, cur_page++);
23502+ }
23503+ offset_pages += add;
23504+ }
23505+ (void)ioread32(cur_page - 1);
23506+ up_read(&pg->sem);
23507+
23508+ return 0;
23509+}
23510Index: linux-2.6.27/drivers/gpu/drm/psb/psb_i2c.c
23511===================================================================
23512--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23513+++ linux-2.6.27/drivers/gpu/drm/psb/psb_i2c.c 2009-02-05 13:29:33.000000000 +0000
23514@@ -0,0 +1,179 @@
23515+/*
23516+ * Copyright © 2006-2007 Intel Corporation
23517+ *
23518+ * Permission is hereby granted, free of charge, to any person obtaining a
23519+ * copy of this software and associated documentation files (the "Software"),
23520+ * to deal in the Software without restriction, including without limitation
23521+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
23522+ * and/or sell copies of the Software, and to permit persons to whom the
23523+ * Software is furnished to do so, subject to the following conditions:
23524+ *
23525+ * The above copyright notice and this permission notice (including the next
23526+ * paragraph) shall be included in all copies or substantial portions of the
23527+ * Software.
23528+ *
23529+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23530+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23531+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23532+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23533+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23534+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23535+ * DEALINGS IN THE SOFTWARE.
23536+ *
23537+ * Authors:
23538+ * Eric Anholt <eric@anholt.net>
23539+ */
23540+/*
23541+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
23542+ * Jesse Barnes <jesse.barnes@intel.com>
23543+ */
23544+
23545+#include <linux/i2c.h>
23546+#include <linux/i2c-id.h>
23547+#include <linux/i2c-algo-bit.h>
23548+#include "drmP.h"
23549+#include "drm.h"
23550+#include "intel_drv.h"
23551+#include "psb_drv.h"
23552+
23553+/*
23554+ * Intel GPIO access functions
23555+ */
23556+
23557+#define I2C_RISEFALL_TIME 20
23558+
23559+static int get_clock(void *data)
23560+{
23561+ struct intel_i2c_chan *chan = data;
23562+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
23563+ uint32_t val;
23564+
23565+ val = PSB_RVDC32(chan->reg);
23566+ return ((val & GPIO_CLOCK_VAL_IN) != 0);
23567+}
23568+
23569+static int get_data(void *data)
23570+{
23571+ struct intel_i2c_chan *chan = data;
23572+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
23573+ uint32_t val;
23574+
23575+ val = PSB_RVDC32(chan->reg);
23576+ return ((val & GPIO_DATA_VAL_IN) != 0);
23577+}
23578+
23579+static void set_clock(void *data, int state_high)
23580+{
23581+ struct intel_i2c_chan *chan = data;
23582+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
23583+ uint32_t reserved = 0, clock_bits;
23584+
23585+ /* On most chips, these bits must be preserved in software. */
23586+ reserved = PSB_RVDC32(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
23587+ GPIO_CLOCK_PULLUP_DISABLE);
23588+
23589+ if (state_high)
23590+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
23591+ else
23592+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
23593+ GPIO_CLOCK_VAL_MASK;
23594+ PSB_WVDC32(reserved | clock_bits, chan->reg);
23595+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
23596+}
23597+
23598+static void set_data(void *data, int state_high)
23599+{
23600+ struct intel_i2c_chan *chan = data;
23601+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
23602+ uint32_t reserved = 0, data_bits;
23603+
23604+ /* On most chips, these bits must be preserved in software. */
23605+ reserved = PSB_RVDC32(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
23606+ GPIO_CLOCK_PULLUP_DISABLE);
23607+
23608+ if (state_high)
23609+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
23610+ else
23611+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
23612+ GPIO_DATA_VAL_MASK;
23613+
23614+	PSB_WVDC32(reserved | data_bits, chan->reg);
23615+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
23616+}
23617+
23618+/**
23619+ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
23620+ * @dev: DRM device
23621+ * @output: driver specific output device
23622+ * @reg: GPIO reg to use
23623+ * @name: name for this bus
23624+ *
23625+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
23626+ * in output probing and control (e.g. DDC or SDVO control functions).
23627+ *
23628+ * Possible values for @reg include:
23629+ * %GPIOA
23630+ * %GPIOB
23631+ * %GPIOC
23632+ * %GPIOD
23633+ * %GPIOE
23634+ * %GPIOF
23635+ * %GPIOG
23636+ * %GPIOH
23637+ * see PRM for details on how these different busses are used.
23638+ */
23639+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev,
23640+ const uint32_t reg, const char *name)
23641+{
23642+ struct intel_i2c_chan *chan;
23643+
23644+ chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
23645+ if (!chan)
23646+ goto out_free;
23647+
23648+ chan->drm_dev = dev;
23649+ chan->reg = reg;
23650+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
23651+ chan->adapter.owner = THIS_MODULE;
23652+ chan->adapter.id = I2C_HW_B_INTELFB;
23653+ chan->adapter.algo_data = &chan->algo;
23654+ chan->adapter.dev.parent = &dev->pdev->dev;
23655+ chan->algo.setsda = set_data;
23656+ chan->algo.setscl = set_clock;
23657+ chan->algo.getsda = get_data;
23658+ chan->algo.getscl = get_clock;
23659+ chan->algo.udelay = 20;
23660+ chan->algo.timeout = usecs_to_jiffies(2200);
23661+ chan->algo.data = chan;
23662+
23663+ i2c_set_adapdata(&chan->adapter, chan);
23664+
23665+ if (i2c_bit_add_bus(&chan->adapter))
23666+ goto out_free;
23667+
23668+ /* JJJ: raise SCL and SDA? */
23669+ set_data(chan, 1);
23670+ set_clock(chan, 1);
23671+ udelay(20);
23672+
23673+ return chan;
23674+
23675+ out_free:
23676+ kfree(chan);
23677+ return NULL;
23678+}
23679+
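Following the kerneldoc above, a typical caller creates one bus per GPIO pair at output-probe time, e.g. for DDC (the GPIOA choice and the surrounding error handling are illustrative; 'dev' is assumed in scope):

	struct intel_i2c_chan *ddc_bus;

	ddc_bus = intel_i2c_create(dev, GPIOA, "DDCA");
	if (!ddc_bus)
		return -ENODEV;
	/* ... run EDID transfers over &ddc_bus->adapter ... */
	intel_i2c_destroy(ddc_bus);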
23680+/**
23681+ * intel_i2c_destroy - unregister and free i2c bus resources
23682+ * @output: channel to free
23683+ *
23684+ * Unregister the adapter from the i2c layer, then free the structure.
23685+ */
23686+void intel_i2c_destroy(struct intel_i2c_chan *chan)
23687+{
23688+ if (!chan)
23689+ return;
23690+
23691+ i2c_del_adapter(&chan->adapter);
23692+ kfree(chan);
23693+}
23694Index: linux-2.6.27/drivers/gpu/drm/psb/psb_irq.c
23695===================================================================
23696--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23697+++ linux-2.6.27/drivers/gpu/drm/psb/psb_irq.c 2009-02-05 13:29:33.000000000 +0000
23698@@ -0,0 +1,382 @@
23699+/**************************************************************************
23700+ * Copyright (c) 2007, Intel Corporation.
23701+ * All Rights Reserved.
23702+ *
23703+ * This program is free software; you can redistribute it and/or modify it
23704+ * under the terms and conditions of the GNU General Public License,
23705+ * version 2, as published by the Free Software Foundation.
23706+ *
23707+ * This program is distributed in the hope it will be useful, but WITHOUT
23708+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23709+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
23710+ * more details.
23711+ *
23712+ * You should have received a copy of the GNU General Public License along with
23713+ * this program; if not, write to the Free Software Foundation, Inc.,
23714+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23715+ *
23716+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
23717+ * develop this driver.
23718+ *
23719+ **************************************************************************/
23720+/*
23721+ */
23722+
23723+#include "drmP.h"
23724+#include "psb_drv.h"
23725+#include "psb_reg.h"
23726+#include "psb_msvdx.h"
23727+
23728+/*
23729+ * Video display controller interrupt.
23730+ */
23731+
23732+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
23733+{
23734+ struct drm_psb_private *dev_priv =
23735+ (struct drm_psb_private *)dev->dev_private;
23736+ uint32_t pipe_stats;
23737+ int wake = 0;
23738+
23739+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
23740+ pipe_stats = PSB_RVDC32(PSB_PIPEASTAT);
23741+ atomic_inc(&dev->vbl_received);
23742+ wake = 1;
23743+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
23744+ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
23745+ }
23746+
23747+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
23748+ pipe_stats = PSB_RVDC32(PSB_PIPEBSTAT);
23749+ atomic_inc(&dev->vbl_received2);
23750+ wake = 1;
23751+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
23752+ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
23753+ }
23754+
23755+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
23756+ (void)PSB_RVDC32(PSB_INT_IDENTITY_R);
23757+ DRM_READMEMORYBARRIER();
23758+
23759+ if (wake) {
23760+ DRM_WAKEUP(&dev->vbl_queue);
23761+ drm_vbl_send_signals(dev);
23762+ }
23763+}
23764+
23765+/*
23766+ * SGX interrupt source 1.
23767+ */
23768+
23769+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
23770+ uint32_t sgx_stat2)
23771+{
23772+ struct drm_psb_private *dev_priv =
23773+ (struct drm_psb_private *)dev->dev_private;
23774+
23775+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
23776+ DRM_WAKEUP(&dev_priv->event_2d_queue);
23777+ psb_fence_handler(dev, 0);
23778+ }
23779+
23780+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
23781+ psb_print_pagefault(dev_priv);
23782+
23783+ psb_scheduler_handler(dev_priv, sgx_stat);
23784+}
23785+
23786+/*
23787+ * MSVDX interrupt.
23788+ */
23789+static void psb_msvdx_interrupt(struct drm_device *dev, uint32_t msvdx_stat)
23790+{
23791+ struct drm_psb_private *dev_priv =
23792+ (struct drm_psb_private *)dev->dev_private;
23793+
23794+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
23795+		/* Ideally we should never get here */
23796+ PSB_DEBUG_GENERAL
23797+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MMU FAULT)\n",
23798+ msvdx_stat, dev_priv->fence2_irq_on);
23799+
23800+ /* Pause MMU */
23801+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
23802+ MSVDX_MMU_CONTROL0);
23803+ DRM_WRITEMEMORYBARRIER();
23804+
23805+		/* Clear this interrupt bit only */
23806+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
23807+ MSVDX_INTERRUPT_CLEAR);
23808+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
23809+ DRM_READMEMORYBARRIER();
23810+
23811+ dev_priv->msvdx_needs_reset = 1;
23812+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
23813+ PSB_DEBUG_GENERAL
23814+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MTX)\n",
23815+ msvdx_stat, dev_priv->fence2_irq_on);
23816+
23817+		/* Clear all interrupt bits */
23818+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
23819+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
23820+ DRM_READMEMORYBARRIER();
23821+
23822+ psb_msvdx_mtx_interrupt(dev);
23823+ }
23824+}
23825+
23826+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
23827+{
23828+ struct drm_device *dev = (struct drm_device *)arg;
23829+ struct drm_psb_private *dev_priv =
23830+ (struct drm_psb_private *)dev->dev_private;
23831+
23832+ uint32_t vdc_stat;
23833+ uint32_t sgx_stat;
23834+ uint32_t sgx_stat2;
23835+ uint32_t msvdx_stat;
23836+ int handled = 0;
23837+
23838+ spin_lock(&dev_priv->irqmask_lock);
23839+
23840+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
23841+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
23842+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
23843+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
23844+
23845+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
23846+ sgx_stat &= dev_priv->sgx_irq_mask;
23847+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
23848+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
23849+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
23850+
23851+ vdc_stat &= dev_priv->vdc_irq_mask;
23852+ spin_unlock(&dev_priv->irqmask_lock);
23853+
23854+ if (msvdx_stat) {
23855+ psb_msvdx_interrupt(dev, msvdx_stat);
23856+ handled = 1;
23857+ }
23858+
23859+ if (vdc_stat) {
23860+ /* MSVDX IRQ status is part of vdc_irq_mask */
23861+ psb_vdc_interrupt(dev, vdc_stat);
23862+ handled = 1;
23863+ }
23864+
23865+ if (sgx_stat || sgx_stat2) {
23866+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
23867+ handled = 1;
23868+ }
23869+
23870+ if (!handled) {
23871+ return IRQ_NONE;
23872+ }
23873+
23874+ return IRQ_HANDLED;
23875+}
23876+
23877+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
23878+{
23879+ unsigned long mtx_int = 0;
23880+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
23881+
23882+	/* Clear MTX interrupt */
23883+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
23884+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
23885+}
23886+
23887+void psb_irq_preinstall(struct drm_device *dev)
23888+{
23889+ struct drm_psb_private *dev_priv =
23890+ (struct drm_psb_private *)dev->dev_private;
23891+ spin_lock(&dev_priv->irqmask_lock);
23892+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
23893+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
23894+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
23895+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
23896+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
23897+
23898+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
23899+ _PSB_CE_DPM_3D_MEM_FREE |
23900+ _PSB_CE_TA_FINISHED |
23901+ _PSB_CE_DPM_REACHED_MEM_THRESH |
23902+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
23903+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
23904+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
23905+
23906+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
23907+
23908+ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG;
23909+
23910+ if (!drm_psb_disable_vsync)
23911+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
23912+ _PSB_VSYNC_PIPEB_FLAG;
23913+
23914+	/* Clear MTX interrupt */
23915+ {
23916+ unsigned long mtx_int = 0;
23917+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
23918+ CR_MTX_IRQ, 1);
23919+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
23920+ }
23921+ spin_unlock(&dev_priv->irqmask_lock);
23922+}
23923+
23924+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
23925+{
23926+	/* Enable MTX interrupt to host */
23927+ unsigned long enables = 0;
23928+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
23929+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
23930+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
23931+}
23932+
23933+int psb_irq_postinstall(struct drm_device *dev)
23934+{
23935+ struct drm_psb_private *dev_priv =
23936+ (struct drm_psb_private *)dev->dev_private;
23937+ unsigned long irqflags;
23938+
23939+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
23940+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
23941+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
23942+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
23943+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
23944+ /****MSVDX IRQ Setup...*****/
23945+	/* Enable MTX interrupt to host */
23946+ {
23947+ unsigned long enables = 0;
23948+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
23949+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
23950+ CR_MTX_IRQ, 1);
23951+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
23952+ }
23953+ dev_priv->irq_enabled = 1;
23954+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
23955+ return 0;
23956+}
23957+
23958+void psb_irq_uninstall(struct drm_device *dev)
23959+{
23960+ struct drm_psb_private *dev_priv =
23961+ (struct drm_psb_private *)dev->dev_private;
23962+ unsigned long irqflags;
23963+
23964+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
23965+
23966+ dev_priv->sgx_irq_mask = 0x00000000;
23967+ dev_priv->sgx2_irq_mask = 0x00000000;
23968+ dev_priv->vdc_irq_mask = 0x00000000;
23969+
23970+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
23971+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
23972+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
23973+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
23974+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
23975+ wmb();
23976+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
23977+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), PSB_CR_EVENT_HOST_CLEAR);
23978+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), PSB_CR_EVENT_HOST_CLEAR2);
23979+
23980+ /****MSVDX IRQ Setup...*****/
23981+ /* Clear interrupt enabled flag */
23982+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
23983+
23984+ dev_priv->irq_enabled = 0;
23985+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
23986+
23987+}
23988+
23989+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
23990+{
23991+ unsigned long irqflags;
23992+ uint32_t old_mask;
23993+ uint32_t cleared_mask;
23994+
23995+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
23996+ --dev_priv->irqen_count_2d;
23997+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
23998+
23999+ old_mask = dev_priv->sgx_irq_mask;
24000+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
24001+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
24002+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
24003+
24004+ cleared_mask = (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
24005+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
24006+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
24007+ }
24008+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24009+}
24010+
24011+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
24012+{
24013+ unsigned long irqflags;
24014+
24015+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24016+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
24017+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
24018+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
24019+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
24020+ }
24021+ ++dev_priv->irqen_count_2d;
24022+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24023+}
24024+
24025+static int psb_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
24026+ atomic_t * counter)
24027+{
24028+ unsigned int cur_vblank;
24029+ int ret = 0;
24030+
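	/* Wait until the counter reaches *sequence: while it is still
	 * behind, the unsigned difference wraps to a huge value, so the
	 * <= 2^23 test only passes once the sequence has been reached. */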
24031+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
24032+ (((cur_vblank = atomic_read(counter))
24033+ - *sequence) <= (1 << 23)));
24034+
24035+ *sequence = cur_vblank;
24036+
24037+ return ret;
24038+}
24039+
24040+int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence)
24041+{
24042+ int ret;
24043+
24044+ ret = psb_vblank_do_wait(dev, sequence, &dev->vbl_received);
24045+ return ret;
24046+}
24047+
24048+int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
24049+{
24050+ int ret;
24051+
24052+ ret = psb_vblank_do_wait(dev, sequence, &dev->vbl_received2);
24053+ return ret;
24054+}
24055+
24056+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
24057+{
24058+ unsigned long irqflags;
24059+
24060+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24061+ if (dev_priv->irq_enabled) {
24062+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
24063+		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
24064+		(void)PSB_RVDC32(PSB_INT_ENABLE_R);
24065+ }
24066+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24067+}
24068+
24069+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
24070+{
24071+ unsigned long irqflags;
24072+
24073+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24074+ if (dev_priv->irq_enabled) {
24075+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
24076+		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
24077+		(void)PSB_RVDC32(PSB_INT_ENABLE_R);
24078+ }
24079+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24080+}
24081Index: linux-2.6.27/drivers/gpu/drm/psb/psb_mmu.c
24082===================================================================
24083--- /dev/null 1970-01-01 00:00:00.000000000 +0000
24084+++ linux-2.6.27/drivers/gpu/drm/psb/psb_mmu.c 2009-02-05 13:29:33.000000000 +0000
24085@@ -0,0 +1,1037 @@
24086+/**************************************************************************
24087+ * Copyright (c) 2007, Intel Corporation.
24088+ * All Rights Reserved.
24089+ *
24090+ * This program is free software; you can redistribute it and/or modify it
24091+ * under the terms and conditions of the GNU General Public License,
24092+ * version 2, as published by the Free Software Foundation.
24093+ *
24094+ * This program is distributed in the hope it will be useful, but WITHOUT
24095+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24096+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24097+ * more details.
24098+ *
24099+ * You should have received a copy of the GNU General Public License along with
24100+ * this program; if not, write to the Free Software Foundation, Inc.,
24101+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24102+ *
24103+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
24104+ * develop this driver.
24105+ *
24106+ **************************************************************************/
24107+#include "drmP.h"
24108+#include "psb_drv.h"
24109+#include "psb_reg.h"
24110+
24111+/*
24112+ * Code for the SGX MMU:
24113+ */
24114+
24115+/*
24116+ * clflush on one processor only:
24117+ * clflush should apparently flush the cache line on all processors in an
24118+ * SMP system.
24119+ */
24120+
24121+/*
24122+ * kmap atomic:
24123+ * The usage of the slots must be completely encapsulated within a spinlock, and
24124+ * no other functions that may be using the locks for other purposes may be
24125+ * called from within the locked region.
24126+ * Since the slots are per processor, this will guarantee that we are the only
24127+ * user.
24128+ */
24129+
24130+/*
24131+ * TODO: Inserting ptes from an interrupt handler:
24132+ * This may be desirable for some SGX functionality where the GPU can fault in
24133+ * needed pages. For that, we need to make an atomic insert_pages function, that
24134+ * may fail.
24135+ * If it fails, the caller needs to insert the page using a workqueue function,
24136+ * but on average it should be fast.
24137+ */
24138+
24139+struct psb_mmu_driver {
24140+ /* protects driver- and pd structures. Always take in read mode
24141+ * before taking the page table spinlock.
24142+ */
24143+ struct rw_semaphore sem;
24144+
24145+ /* protects page tables, directory tables and pt tables.
24146+ * and pt structures.
24147+ */
24148+ spinlock_t lock;
24149+
24150+ atomic_t needs_tlbflush;
24151+ atomic_t *msvdx_mmu_invaldc;
24152+ uint8_t __iomem *register_map;
24153+ struct psb_mmu_pd *default_pd;
24154+ uint32_t bif_ctrl;
24155+ int has_clflush;
24156+ int clflush_add;
24157+ unsigned long clflush_mask;
24158+};
24159+
24160+struct psb_mmu_pd;
24161+
24162+struct psb_mmu_pt {
24163+ struct psb_mmu_pd *pd;
24164+ uint32_t index;
24165+ uint32_t count;
24166+ struct page *p;
24167+ uint32_t *v;
24168+};
24169+
24170+struct psb_mmu_pd {
24171+ struct psb_mmu_driver *driver;
24172+ int hw_context;
24173+ struct psb_mmu_pt **tables;
24174+ struct page *p;
24175+ struct page *dummy_pt;
24176+ struct page *dummy_page;
24177+ uint32_t pd_mask;
24178+ uint32_t invalid_pde;
24179+ uint32_t invalid_pte;
24180+};
24181+
24182+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
24183+{
24184+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
24185+}
24186+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
24187+{
24188+ return (offset >> PSB_PDE_SHIFT);
24189+}
24190+
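These helpers split a 32-bit device virtual address into its two table indices: with 4 KiB pages, a 10-bit directory index, a 10-bit table index and a 12-bit page offset. A worked example (assuming PSB_PDE_SHIFT is 22 and PSB_PTE_SHIFT is 12, which the 0x3FF mask implies):

	uint32_t addr = 0x12345678;		/* hypothetical address */
	uint32_t pde = psb_mmu_pd_index(addr);	/* addr >> 22 == 0x048 */
	uint32_t pte = psb_mmu_pt_index(addr);	/* (addr >> 12) & 0x3FF == 0x345 */
	uint32_t off = addr & (PAGE_SIZE - 1);	/* 0x678 */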
24191+#if defined(CONFIG_X86)
24192+static inline void psb_clflush(void *addr)
24193+{
24194+ __asm__ __volatile__("clflush (%0)\n"::"r"(addr):"memory");
24195+}
24196+
24197+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
24198+{
24199+ if (!driver->has_clflush)
24200+ return;
24201+
24202+ mb();
24203+ psb_clflush(addr);
24204+ mb();
24205+}
24206+#else
24207+
24208+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
24209+{
24210+}
24211+
24212+#endif
24213+
24214+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
24215+ uint32_t val, uint32_t offset)
24216+{
24217+ iowrite32(val, d->register_map + offset);
24218+}
24219+
24220+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
24221+ uint32_t offset)
24222+{
24223+ return ioread32(d->register_map + offset);
24224+}
24225+
24226+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
24227+{
24228+ if (atomic_read(&driver->needs_tlbflush) || force) {
24229+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
24230+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
24231+ PSB_CR_BIF_CTRL);
24232+ wmb();
24233+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
24234+ PSB_CR_BIF_CTRL);
24235+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
24236+ if (driver->msvdx_mmu_invaldc)
24237+ atomic_set(driver->msvdx_mmu_invaldc, 1);
24238+ }
24239+ atomic_set(&driver->needs_tlbflush, 0);
24240+}
24241+
24242+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
24243+{
24244+ down_write(&driver->sem);
24245+ psb_mmu_flush_pd_locked(driver, force);
24246+ up_write(&driver->sem);
24247+}
24248+
24249+void psb_mmu_flush(struct psb_mmu_driver *driver)
24250+{
24251+ uint32_t val;
24252+
24253+ down_write(&driver->sem);
24254+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
24255+ if (atomic_read(&driver->needs_tlbflush))
24256+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
24257+ PSB_CR_BIF_CTRL);
24258+ else
24259+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
24260+ PSB_CR_BIF_CTRL);
24261+ wmb();
24262+ psb_iowrite32(driver,
24263+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
24264+ PSB_CR_BIF_CTRL);
24265+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
24266+ atomic_set(&driver->needs_tlbflush, 0);
24267+ if (driver->msvdx_mmu_invaldc)
24268+ atomic_set(driver->msvdx_mmu_invaldc, 1);
24269+ up_write(&driver->sem);
24270+}
24271+
24272+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
24273+{
24274+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
24275+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
24276+
24277+ drm_ttm_cache_flush();
24278+ down_write(&pd->driver->sem);
24279+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), offset);
24280+ wmb();
24281+ psb_mmu_flush_pd_locked(pd->driver, 1);
24282+ pd->hw_context = hw_context;
24283+ up_write(&pd->driver->sem);
24284+
24285+}
24286+
24287+static inline unsigned long psb_pd_addr_end(unsigned long addr,
24288+ unsigned long end)
24289+{
24290+
24291+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
24292+ return (addr < end) ? addr : end;
24293+}
24294+
24295+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
24296+{
24297+ uint32_t mask = PSB_PTE_VALID;
24298+
24299+ if (type & PSB_MMU_CACHED_MEMORY)
24300+ mask |= PSB_PTE_CACHED;
24301+ if (type & PSB_MMU_RO_MEMORY)
24302+ mask |= PSB_PTE_RO;
24303+ if (type & PSB_MMU_WO_MEMORY)
24304+ mask |= PSB_PTE_WO;
24305+
24306+ return (pfn << PAGE_SHIFT) | mask;
24307+}
24308+
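The PTE built by psb_mmu_mask_pte() is simply the page frame number shifted up by PAGE_SHIFT with permission bits OR'd into the low bits, which is why the lookup paths later recover the pfn with a plain right shift. A standalone pack/unpack sketch, with placeholder flag values since the real PSB_PTE_* constants are defined elsewhere in the driver:

    /* Hypothetical PTE pack/unpack demo; DEMO_PTE_* bit values are assumptions,
     * only the layout (pfn << PAGE_SHIFT | low flag bits) mirrors the code above. */
    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PAGE_SHIFT 12
    #define DEMO_PTE_VALID  0x1  /* placeholder flag values */
    #define DEMO_PTE_CACHED 0x2
    #define DEMO_PTE_RO     0x4

    static uint32_t mask_pte(uint32_t pfn, uint32_t flags)
    {
        return (pfn << DEMO_PAGE_SHIFT) | flags;
    }

    int main(void)
    {
        uint32_t pte = mask_pte(0x1a2b3, DEMO_PTE_VALID | DEMO_PTE_CACHED);

        printf("pte 0x%08x -> pfn 0x%05x, valid=%d cached=%d ro=%d\n",
               pte, pte >> DEMO_PAGE_SHIFT,
               !!(pte & DEMO_PTE_VALID), !!(pte & DEMO_PTE_CACHED),
               !!(pte & DEMO_PTE_RO));
        return 0;
    }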
24309+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
24310+ int trap_pagefaults, int invalid_type)
24311+{
24312+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
24313+ uint32_t *v;
24314+ int i;
24315+
24316+ if (!pd)
24317+ return NULL;
24318+
24319+ pd->p = alloc_page(GFP_DMA32);
24320+ if (!pd->p)
24321+ goto out_err1;
24322+ pd->dummy_pt = alloc_page(GFP_DMA32);
24323+ if (!pd->dummy_pt)
24324+ goto out_err2;
24325+ pd->dummy_page = alloc_page(GFP_DMA32);
24326+ if (!pd->dummy_page)
24327+ goto out_err3;
24328+
24329+ if (!trap_pagefaults) {
24330+ pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
24331+ invalid_type |
24332+ PSB_MMU_CACHED_MEMORY);
24333+ pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
24334+ invalid_type |
24335+ PSB_MMU_CACHED_MEMORY);
24336+ } else {
24337+ pd->invalid_pde = 0;
24338+ pd->invalid_pte = 0;
24339+ }
24340+
24341+ v = kmap(pd->dummy_pt);
24342+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
24343+ v[i] = pd->invalid_pte;
24344+ }
24345+ kunmap(pd->dummy_pt);
24346+
24347+ v = kmap(pd->p);
24348+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
24349+ v[i] = pd->invalid_pde;
24350+ }
24351+ kunmap(pd->p);
24352+
24353+ clear_page(kmap(pd->dummy_page));
24354+ kunmap(pd->dummy_page);
24355+
24356+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
24357+ if (!pd->tables)
24358+ goto out_err4;
24359+
24360+ pd->hw_context = -1;
24361+ pd->pd_mask = PSB_PTE_VALID;
24362+ pd->driver = driver;
24363+
24364+ return pd;
24365+
24366+ out_err4:
24367+ __free_page(pd->dummy_page);
24368+ out_err3:
24369+ __free_page(pd->dummy_pt);
24370+ out_err2:
24371+ __free_page(pd->p);
24372+ out_err1:
24373+ kfree(pd);
24374+ return NULL;
24375+}
24376+
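When trap_pagefaults is off, psb_mmu_alloc_pd() points every unused PDE and PTE at dummy scratch pages, so a stray device access resolves to harmless memory instead of raising a fault. A toy model of that default-fill, with a made-up scratch pfn and flag bits:

    /* Sketch of the "point everything at a scratch page" default in
     * psb_mmu_alloc_pd(); the pfn and flag encoding are invented for the demo. */
    #include <stdio.h>
    #include <stdint.h>

    #define ENTRIES 1024

    int main(void)
    {
        uint32_t table[ENTRIES];
        uint32_t invalid_pte = (0xDEAD << 12) | 0x3;  /* scratch pfn + assumed flags */
        int i;

        for (i = 0; i < ENTRIES; ++i)  /* same fill loop as the kmap'd pages above */
            table[i] = invalid_pte;

        /* any lookup now resolves to the scratch page instead of faulting */
        printf("entry 512 -> pfn 0x%x\n", table[512] >> 12);
        return 0;
    }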
24377+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
24378+{
24379+ __free_page(pt->p);
24380+ kfree(pt);
24381+}
24382+
24383+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
24384+{
24385+ struct psb_mmu_driver *driver = pd->driver;
24386+ struct psb_mmu_pt *pt;
24387+ int i;
24388+
24389+ down_write(&driver->sem);
24390+ if (pd->hw_context != -1) {
24391+ psb_iowrite32(driver, 0,
24392+ PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
24393+ psb_mmu_flush_pd_locked(driver, 1);
24394+ }
24395+
24396+ /* Should take the spinlock here, but we don't need to do that
24397+ since we have the semaphore in write mode. */
24398+
24399+ for (i = 0; i < 1024; ++i) {
24400+ pt = pd->tables[i];
24401+ if (pt)
24402+ psb_mmu_free_pt(pt);
24403+ }
24404+
24405+ vfree(pd->tables);
24406+ __free_page(pd->dummy_page);
24407+ __free_page(pd->dummy_pt);
24408+ __free_page(pd->p);
24409+ kfree(pd);
24410+ up_write(&driver->sem);
24411+}
24412+
24413+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
24414+{
24415+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
24416+ void *v;
24417+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
24418+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
24419+ spinlock_t *lock = &pd->driver->lock;
24420+ uint8_t *clf;
24421+ uint32_t *ptes;
24422+ int i;
24423+
24424+ if (!pt)
24425+ return NULL;
24426+
24427+ pt->p = alloc_page(GFP_DMA32);
24428+ if (!pt->p) {
24429+ kfree(pt);
24430+ return NULL;
24431+ }
24432+
24433+ spin_lock(lock);
24434+
24435+ v = kmap_atomic(pt->p, KM_USER0);
24436+ clf = (uint8_t *) v;
24437+ ptes = (uint32_t *) v;
24438+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
24439+ *ptes++ = pd->invalid_pte;
24440+ }
24441+
24442+#if defined(CONFIG_X86)
24443+ if (pd->driver->has_clflush && pd->hw_context != -1) {
24444+ mb();
24445+ for (i = 0; i < clflush_count; ++i) {
24446+ psb_clflush(clf);
24447+ clf += clflush_add;
24448+ }
24449+ mb();
24450+ }
24451+#endif
24452+ kunmap_atomic(v, KM_USER0);
24453+ spin_unlock(lock);
24454+
24455+ pt->count = 0;
24456+ pt->pd = pd;
24457+ pt->index = 0;
24458+
24459+ return pt;
24460+}
24461+
24462+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
24463+ unsigned long addr)
24464+{
24465+ uint32_t index = psb_mmu_pd_index(addr);
24466+ struct psb_mmu_pt *pt;
24467+ volatile uint32_t *v;
24468+ spinlock_t *lock = &pd->driver->lock;
24469+
24470+ spin_lock(lock);
24471+ pt = pd->tables[index];
24472+ while (!pt) {
24473+ spin_unlock(lock);
24474+ pt = psb_mmu_alloc_pt(pd);
24475+ if (!pt)
24476+ return NULL;
24477+ spin_lock(lock);
24478+
24479+ if (pd->tables[index]) {
24480+ spin_unlock(lock);
24481+ psb_mmu_free_pt(pt);
24482+ spin_lock(lock);
24483+ pt = pd->tables[index];
24484+ continue;
24485+ }
24486+
24487+ v = kmap_atomic(pd->p, KM_USER0);
24488+ pd->tables[index] = pt;
24489+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
24490+ pt->index = index;
24491+ kunmap_atomic((void *)v, KM_USER0);
24492+
24493+ if (pd->hw_context != -1) {
24494+ psb_mmu_clflush(pd->driver, (void *)&v[index]);
24495+ atomic_set(&pd->driver->needs_tlbflush, 1);
24496+ }
24497+ }
24498+ pt->v = kmap_atomic(pt->p, KM_USER0);
24499+ return pt;
24500+}
24501+
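psb_mmu_pt_alloc_map_lock() cannot allocate a page while holding the spinlock, so it drops the lock, allocates, retakes the lock, and re-checks whether a racing thread filled the slot first. A generic userspace rendition of that pattern using pthreads; note that, unlike the driver (which returns with the lock held until psb_mmu_pt_unmap_unlock() drops it), this sketch releases the lock before returning:

    /* Drop-lock/allocate/recheck pattern from psb_mmu_pt_alloc_map_lock(),
     * modelled with a pthread mutex standing in for the kernel spinlock. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int *slot;   /* pd->tables[index] analogue */

    static int *get_or_alloc(void)
    {
        int *fresh;

        pthread_mutex_lock(&lock);
        while (!slot) {
            pthread_mutex_unlock(&lock);  /* allocation may block: do it unlocked */
            fresh = malloc(sizeof(*fresh));
            if (!fresh)
                return NULL;
            *fresh = 42;
            pthread_mutex_lock(&lock);
            if (slot) {                   /* somebody beat us to it */
                pthread_mutex_unlock(&lock);
                free(fresh);
                pthread_mutex_lock(&lock);
                continue;
            }
            slot = fresh;
        }
        fresh = slot;
        pthread_mutex_unlock(&lock);
        return fresh;
    }

    int main(void)
    {
        printf("value: %d\n", *get_or_alloc());
        return 0;
    }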
24502+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
24503+ unsigned long addr)
24504+{
24505+ uint32_t index = psb_mmu_pd_index(addr);
24506+ struct psb_mmu_pt *pt;
24507+ spinlock_t *lock = &pd->driver->lock;
24508+
24509+ spin_lock(lock);
24510+ pt = pd->tables[index];
24511+ if (!pt) {
24512+ spin_unlock(lock);
24513+ return NULL;
24514+ }
24515+ pt->v = kmap_atomic(pt->p, KM_USER0);
24516+ return pt;
24517+}
24518+
24519+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
24520+{
24521+ struct psb_mmu_pd *pd = pt->pd;
24522+ volatile uint32_t *v;
24523+
24524+ kunmap_atomic(pt->v, KM_USER0);
24525+ if (pt->count == 0) {
24526+ v = kmap_atomic(pd->p, KM_USER0);
24527+ v[pt->index] = pd->invalid_pde;
24528+ pd->tables[pt->index] = NULL;
24529+
24530+ if (pd->hw_context != -1) {
24531+ psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
24532+ atomic_set(&pd->driver->needs_tlbflush, 1);
24533+ }
24534+ kunmap_atomic(pt->v, KM_USER0);
24535+ spin_unlock(&pd->driver->lock);
24536+ psb_mmu_free_pt(pt);
24537+ return;
24538+ }
24539+ spin_unlock(&pd->driver->lock);
24540+}
24541+
24542+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
24543+ uint32_t pte)
24544+{
24545+ pt->v[psb_mmu_pt_index(addr)] = pte;
24546+}
24547+
24548+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
24549+ unsigned long addr)
24550+{
24551+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
24552+}
24553+
24554+#if 0
24555+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
24556+ uint32_t mmu_offset)
24557+{
24558+ uint32_t *v;
24559+ uint32_t pfn;
24560+
24561+ v = kmap_atomic(pd->p, KM_USER0);
24562+ if (!v) {
24563+ printk(KERN_INFO "Could not kmap pde page.\n");
24564+ return 0;
24565+ }
24566+ pfn = v[psb_mmu_pd_index(mmu_offset)];
24567+ // printk(KERN_INFO "pde is 0x%08x\n",pfn);
24568+ kunmap_atomic(v, KM_USER0);
24569+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
24570+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
24571+ mmu_offset, pfn);
24572+ }
24573+ v = ioremap(pfn & 0xFFFFF000, 4096);
24574+ if (!v) {
24575+ printk(KERN_INFO "Could not kmap pte page.\n");
24576+ return 0;
24577+ }
24578+ pfn = v[psb_mmu_pt_index(mmu_offset)];
24579+ // printk(KERN_INFO "pte is 0x%08x\n",pfn);
24580+ iounmap(v);
24581+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
24582+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
24583+ mmu_offset, pfn);
24584+ }
24585+ return pfn >> PAGE_SHIFT;
24586+}
24587+
24588+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
24589+ uint32_t mmu_offset, uint32_t gtt_pages)
24590+{
24591+ uint32_t start;
24592+ uint32_t next;
24593+
24594+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
24595+ mmu_offset, gtt_pages);
24596+ down_read(&pd->driver->sem);
24597+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
24598+ mmu_offset += PAGE_SIZE;
24599+ gtt_pages -= 1;
24600+ while (gtt_pages--) {
24601+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
24602+ if (next != start + 1) {
24603+ printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n",
24604+ start, next);
24605+ }
24606+ start = next;
24607+ mmu_offset += PAGE_SIZE;
24608+ }
24609+ up_read(&pd->driver->sem);
24610+}
24611+
24612+#endif
24613+
24614+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
24615+ uint32_t mmu_offset, uint32_t gtt_start,
24616+ uint32_t gtt_pages)
24617+{
24618+ uint32_t *v;
24619+ uint32_t start = psb_mmu_pd_index(mmu_offset);
24620+ struct psb_mmu_driver *driver = pd->driver;
24621+
24622+ down_read(&driver->sem);
24623+ spin_lock(&driver->lock);
24624+
24625+ v = kmap_atomic(pd->p, KM_USER0);
24626+ v += start;
24627+
24628+ while (gtt_pages--) {
24629+ *v++ = gtt_start | pd->pd_mask;
24630+ gtt_start += PAGE_SIZE;
24631+ }
24632+
24633+ drm_ttm_cache_flush();
24634+ kunmap_atomic(v, KM_USER0);
24635+ spin_unlock(&driver->lock);
24636+
24637+ if (pd->hw_context != -1)
24638+ atomic_set(&pd->driver->needs_tlbflush, 1);
24639+
24640+ up_read(&pd->driver->sem);
24641+ psb_mmu_flush_pd(pd->driver, 0);
24642+}
24643+
24644+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
24645+{
24646+ struct psb_mmu_pd *pd;
24647+
24648+ down_read(&driver->sem);
24649+ pd = driver->default_pd;
24650+ up_read(&driver->sem);
24651+
24652+ return pd;
24653+}
24654+
24655+/* Returns the physical address of the PD shared by sgx/msvdx */
24656+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver * driver)
24657+{
24658+ struct psb_mmu_pd *pd;
24659+
24660+ pd = psb_mmu_get_default_pd(driver);
24661+ return ((page_to_pfn(pd->p) << PAGE_SHIFT));
24662+}
24663+
24664+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
24665+{
24666+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
24667+ psb_mmu_free_pagedir(driver->default_pd);
24668+ kfree(driver);
24669+}
24670+
24671+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
24672+ int trap_pagefaults,
24673+ int invalid_type,
24674+ atomic_t *msvdx_mmu_invaldc)
24675+{
24676+ struct psb_mmu_driver *driver;
24677+
24678+ driver = (struct psb_mmu_driver *)kmalloc(sizeof(*driver), GFP_KERNEL);
24679+
24680+ if (!driver)
24681+ return NULL;
24682+
24683+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
24684+ invalid_type);
24685+ if (!driver->default_pd)
24686+ goto out_err1;
24687+
24688+ spin_lock_init(&driver->lock);
24689+ init_rwsem(&driver->sem);
24690+ down_write(&driver->sem);
24691+ driver->register_map = registers;
24692+ atomic_set(&driver->needs_tlbflush, 1);
24693+ driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
24694+
24695+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
24696+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
24697+ PSB_CR_BIF_CTRL);
24698+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
24699+ PSB_CR_BIF_CTRL);
24700+
24701+ driver->has_clflush = 0;
24702+
24703+#if defined(CONFIG_X86)
24704+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
24705+ uint32_t tfms, misc, cap0, cap4, clflush_size;
24706+
24707+ /*
24708+ * clflush size is determined at kernel setup for x86_64 but not for
24709+ * i386. We have to do it here.
24710+ */
24711+
24712+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
24713+ clflush_size = ((misc >> 8) & 0xff) * 8;
24714+ driver->has_clflush = 1;
24715+ driver->clflush_add =
24716+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
24717+ driver->clflush_mask = driver->clflush_add - 1;
24718+ driver->clflush_mask = ~driver->clflush_mask;
24719+ }
24720+#endif
24721+
24722+ up_write(&driver->sem);
24723+ return driver;
24724+
24725+ out_err1:
24726+ kfree(driver);
24727+ return NULL;
24728+}
24729+
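The CPUID arithmetic above converts the cache-line flush size (EBX bits 15:8 of leaf 1, in 8-byte units) into a GPU-VA stride: one cache line holds clflush_size/4 PTEs and each PTE maps PAGE_SIZE bytes. A demo of just that arithmetic, with the CPUID word hard-coded rather than queried:

    /* clflush stride arithmetic from psb_mmu_driver_init(); the CPUID value is
     * assumed (EBX[15:8] = 8, i.e. 64-byte lines), the math follows the code. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        uint32_t misc = 0x00080800;  /* pretend CPUID.1:EBX */
        uint32_t clflush_size = ((misc >> 8) & 0xff) * 8;
        uint32_t clflush_add = PAGE_SIZE * clflush_size / sizeof(uint32_t);
        uint32_t clflush_mask = ~(clflush_add - 1);

        /* one 64-byte line holds 16 PTEs, i.e. covers 64 KiB of GPU VA */
        printf("line %u bytes -> VA stride 0x%x, mask 0x%08x\n",
               clflush_size, clflush_add, clflush_mask);
        return 0;
    }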
24730+#if defined(CONFIG_X86)
24731+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
24732+ uint32_t num_pages, uint32_t desired_tile_stride,
24733+ uint32_t hw_tile_stride)
24734+{
24735+ struct psb_mmu_pt *pt;
24736+ uint32_t rows = 1;
24737+ uint32_t i;
24738+ unsigned long addr;
24739+ unsigned long end;
24740+ unsigned long next;
24741+ unsigned long add;
24742+ unsigned long row_add;
24743+ unsigned long clflush_add = pd->driver->clflush_add;
24744+ unsigned long clflush_mask = pd->driver->clflush_mask;
24745+
24746+ if (!pd->driver->has_clflush) {
24747+ drm_ttm_cache_flush();
24748+ return;
24749+ }
24750+
24751+ if (hw_tile_stride)
24752+ rows = num_pages / desired_tile_stride;
24753+ else
24754+ desired_tile_stride = num_pages;
24755+
24756+ add = desired_tile_stride << PAGE_SHIFT;
24757+ row_add = hw_tile_stride << PAGE_SHIFT;
24758+ mb();
24759+ for (i = 0; i < rows; ++i) {
24760+
24761+ addr = address;
24762+ end = addr + add;
24763+
24764+ do {
24765+ next = psb_pd_addr_end(addr, end);
24766+ pt = psb_mmu_pt_map_lock(pd, addr);
24767+ if (!pt)
24768+ continue;
24769+ do {
24770+ psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
24771+ } while (addr += clflush_add,
24772+ (addr & clflush_mask) < next);
24773+
24774+ psb_mmu_pt_unmap_unlock(pt);
24775+ } while (addr = next, next != end);
24776+ address += row_add;
24777+ }
24778+ mb();
24779+}
24780+#else
24781+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
24782+ uint32_t num_pages, uint32_t desired_tile_stride,
24783+ uint32_t hw_tile_stride)
24784+{
24785+ drm_ttm_cache_flush();
24786+}
24787+#endif
24788+
24789+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
24790+ unsigned long address, uint32_t num_pages)
24791+{
24792+ struct psb_mmu_pt *pt;
24793+ unsigned long addr;
24794+ unsigned long end;
24795+ unsigned long next;
24796+ unsigned long f_address = address;
24797+
24798+ down_read(&pd->driver->sem);
24799+
24800+ addr = address;
24801+ end = addr + (num_pages << PAGE_SHIFT);
24802+
24803+ do {
24804+ next = psb_pd_addr_end(addr, end);
24805+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
24806+ if (!pt)
24807+ goto out;
24808+ do {
24809+ psb_mmu_invalidate_pte(pt, addr);
24810+ --pt->count;
24811+ } while (addr += PAGE_SIZE, addr < next);
24812+ psb_mmu_pt_unmap_unlock(pt);
24813+
24814+ } while (addr = next, next != end);
24815+
24816+ out:
24817+ if (pd->hw_context != -1)
24818+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
24819+
24820+ up_read(&pd->driver->sem);
24821+
24822+ if (pd->hw_context != -1)
24823+ psb_mmu_flush(pd->driver);
24824+
24825+ return;
24826+}
24827+
24828+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
24829+ uint32_t num_pages, uint32_t desired_tile_stride,
24830+ uint32_t hw_tile_stride)
24831+{
24832+ struct psb_mmu_pt *pt;
24833+ uint32_t rows = 1;
24834+ uint32_t i;
24835+ unsigned long addr;
24836+ unsigned long end;
24837+ unsigned long next;
24838+ unsigned long add;
24839+ unsigned long row_add;
24840+ unsigned long f_address = address;
24841+
24842+ if (hw_tile_stride)
24843+ rows = num_pages / desired_tile_stride;
24844+ else
24845+ desired_tile_stride = num_pages;
24846+
24847+ add = desired_tile_stride << PAGE_SHIFT;
24848+ row_add = hw_tile_stride << PAGE_SHIFT;
24849+
24850+ down_read(&pd->driver->sem);
24851+
24852+ /* Make sure we only need to flush this processor's cache */
24853+
24854+ for (i = 0; i < rows; ++i) {
24855+
24856+ addr = address;
24857+ end = addr + add;
24858+
24859+ do {
24860+ next = psb_pd_addr_end(addr, end);
24861+ pt = psb_mmu_pt_map_lock(pd, addr);
24862+ if (!pt)
24863+ continue;
24864+ do {
24865+ psb_mmu_invalidate_pte(pt, addr);
24866+ --pt->count;
24867+
24868+ } while (addr += PAGE_SIZE, addr < next);
24869+ psb_mmu_pt_unmap_unlock(pt);
24870+
24871+ } while (addr = next, next != end);
24872+ address += row_add;
24873+ }
24874+ if (pd->hw_context != -1)
24875+ psb_mmu_flush_ptes(pd, f_address, num_pages,
24876+ desired_tile_stride, hw_tile_stride);
24877+
24878+ up_read(&pd->driver->sem);
24879+
24880+ if (pd->hw_context != -1)
24881+ psb_mmu_flush(pd->driver);
24882+}
24883+
24884+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
24885+ unsigned long address, uint32_t num_pages,
24886+ int type)
24887+{
24888+ struct psb_mmu_pt *pt;
24889+ uint32_t pte;
24890+ unsigned long addr;
24891+ unsigned long end;
24892+ unsigned long next;
24893+ unsigned long f_address = address;
24894+ int ret = -ENOMEM;
24895+
24896+ down_read(&pd->driver->sem);
24897+
24898+ addr = address;
24899+ end = addr + (num_pages << PAGE_SHIFT);
24900+
24901+ do {
24902+ next = psb_pd_addr_end(addr, end);
24903+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
24904+ if (!pt) {
24905+ ret = -ENOMEM;
24906+ goto out;
24907+ }
24908+ do {
24909+ pte = psb_mmu_mask_pte(start_pfn++, type);
24910+ psb_mmu_set_pte(pt, addr, pte);
24911+ pt->count++;
24912+ } while (addr += PAGE_SIZE, addr < next);
24913+ psb_mmu_pt_unmap_unlock(pt);
24914+
24915+ } while (addr = next, next != end);
24916+ ret = 0;
24917+
24918+ out:
24919+ if (pd->hw_context != -1)
24920+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
24921+
24922+ up_read(&pd->driver->sem);
24923+
24924+ if (pd->hw_context != -1)
24925+ psb_mmu_flush(pd->driver);
24926+
24927+	return ret;
24928+}
24929+
24930+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
24931+ unsigned long address, uint32_t num_pages,
24932+ uint32_t desired_tile_stride, uint32_t hw_tile_stride,
24933+ int type)
24934+{
24935+ struct psb_mmu_pt *pt;
24936+ uint32_t rows = 1;
24937+ uint32_t i;
24938+ uint32_t pte;
24939+ unsigned long addr;
24940+ unsigned long end;
24941+ unsigned long next;
24942+ unsigned long add;
24943+ unsigned long row_add;
24944+ unsigned long f_address = address;
24945+ int ret = -ENOMEM;
24946+
24947+ if (hw_tile_stride) {
24948+ if (num_pages % desired_tile_stride != 0)
24949+ return -EINVAL;
24950+ rows = num_pages / desired_tile_stride;
24951+ } else {
24952+ desired_tile_stride = num_pages;
24953+ }
24954+
24955+ add = desired_tile_stride << PAGE_SHIFT;
24956+ row_add = hw_tile_stride << PAGE_SHIFT;
24957+
24958+ down_read(&pd->driver->sem);
24959+
24960+ for (i = 0; i < rows; ++i) {
24961+
24962+ addr = address;
24963+ end = addr + add;
24964+
24965+ do {
24966+ next = psb_pd_addr_end(addr, end);
24967+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
24968+ if (!pt)
24969+ goto out;
24970+ do {
24971+ pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
24972+ type);
24973+ psb_mmu_set_pte(pt, addr, pte);
24974+ pt->count++;
24975+ } while (addr += PAGE_SIZE, addr < next);
24976+ psb_mmu_pt_unmap_unlock(pt);
24977+
24978+ } while (addr = next, next != end);
24979+
24980+ address += row_add;
24981+ }
24982+ ret = 0;
24983+ out:
24984+ if (pd->hw_context != -1)
24985+ psb_mmu_flush_ptes(pd, f_address, num_pages,
24986+ desired_tile_stride, hw_tile_stride);
24987+
24988+ up_read(&pd->driver->sem);
24989+
24990+ if (pd->hw_context != -1)
24991+ psb_mmu_flush(pd->driver);
24992+
24993+	return ret;
24994+}
24995+
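The insert/remove paths all share the same nested walk: an outer loop per tile row (advancing by hw_tile_stride pages) and an inner do/while in which psb_pd_addr_end() splits each row at page-table boundaries, so each table is mapped and unmapped at most once per row. A runnable trace of the spans such a walk visits; the PSB_PDE_MASK value (4 MiB sections) is an assumption:

    /* Traces the row/PD-section nesting used by psb_mmu_insert_pages() and
     * friends; PSB_PDE_MASK is assumed, the loop structure mirrors the code. */
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PSB_PDE_MASK 0x3FFFFFUL  /* assumed: each page table spans 4 MiB */

    static unsigned long pd_addr_end(unsigned long addr, unsigned long end)
    {
        addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
        return (addr < end) ? addr : end;
    }

    int main(void)
    {
        unsigned long address = 0x3ff000, addr, end, next;
        unsigned long num_pages = 4, tile = 2, hw_stride = 2;
        unsigned long rows = num_pages / tile;
        unsigned long add = tile << PAGE_SHIFT, row_add = hw_stride << PAGE_SHIFT;
        unsigned long i;

        for (i = 0; i < rows; ++i) {
            addr = address;
            end = addr + add;
            do {  /* one iteration per page table crossed */
                next = pd_addr_end(addr, end);
                printf("row %lu: pt span 0x%lx..0x%lx\n", i, addr, next);
            } while (addr = next, next != end);
            address += row_add;
        }
        return 0;
    }

The first row straddles a 4 MiB boundary, so it prints two spans; the second stays inside one table and prints one.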
24996+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
24997+{
24998+ mask &= _PSB_MMU_ER_MASK;
24999+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
25000+ PSB_CR_BIF_CTRL);
25001+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
25002+}
25003+
25004+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
25005+{
25006+ mask &= _PSB_MMU_ER_MASK;
25007+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
25008+ PSB_CR_BIF_CTRL);
25009+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
25010+}
25011+
25012+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
25013+ unsigned long *pfn)
25014+{
25015+ int ret;
25016+ struct psb_mmu_pt *pt;
25017+ uint32_t tmp;
25018+ spinlock_t *lock = &pd->driver->lock;
25019+
25020+ down_read(&pd->driver->sem);
25021+ pt = psb_mmu_pt_map_lock(pd, virtual);
25022+ if (!pt) {
25023+ uint32_t *v;
25024+
25025+ spin_lock(lock);
25026+ v = kmap_atomic(pd->p, KM_USER0);
25027+ tmp = v[psb_mmu_pd_index(virtual)];
25028+ kunmap_atomic(v, KM_USER0);
25029+ spin_unlock(lock);
25030+
25031+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
25032+ !(pd->invalid_pte & PSB_PTE_VALID)) {
25033+ ret = -EINVAL;
25034+ goto out;
25035+ }
25036+ ret = 0;
25037+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
25038+ goto out;
25039+ }
25040+ tmp = pt->v[psb_mmu_pt_index(virtual)];
25041+ if (!(tmp & PSB_PTE_VALID)) {
25042+ ret = -EINVAL;
25043+ } else {
25044+ ret = 0;
25045+ *pfn = tmp >> PAGE_SHIFT;
25046+ }
25047+ psb_mmu_pt_unmap_unlock(pt);
25048+ out:
25049+ up_read(&pd->driver->sem);
25050+ return ret;
25051+}
25052+
25053+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
25054+{
25055+ struct page *p;
25056+ unsigned long pfn;
25057+ int ret = 0;
25058+ struct psb_mmu_pd *pd;
25059+ uint32_t *v;
25060+ uint32_t *vmmu;
25061+
25062+ pd = driver->default_pd;
25063+	if (!pd) {
25064+		printk(KERN_WARNING "Could not get default pd\n"); return;
25065+	}
25066+
25067+ p = alloc_page(GFP_DMA32);
25068+
25069+ if (!p) {
25070+ printk(KERN_WARNING "Failed allocating page\n");
25071+ return;
25072+ }
25073+
25074+ v = kmap(p);
25075+ memset(v, 0x67, PAGE_SIZE);
25076+
25077+ pfn = (offset >> PAGE_SHIFT);
25078+
25079+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0,
25080+ PSB_MMU_CACHED_MEMORY);
25081+ if (ret) {
25082+ printk(KERN_WARNING "Failed inserting mmu page\n");
25083+ goto out_err1;
25084+ }
25085+
25086+ /* Ioremap the page through the GART aperture */
25087+
25088+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
25089+ if (!vmmu) {
25090+ printk(KERN_WARNING "Failed ioremapping page\n");
25091+ goto out_err2;
25092+ }
25093+
25094+ /* Read from the page with mmu disabled. */
25095+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
25096+
25097+ /* Enable the mmu for host accesses and read again. */
25098+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
25099+
25100+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
25101+ ioread32(vmmu));
25102+ *v = 0x15243705;
25103+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
25104+ ioread32(vmmu));
25105+ iowrite32(0x16243355, vmmu);
25106+ (void)ioread32(vmmu);
25107+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
25108+
25109+ printk(KERN_INFO "Int stat is 0x%08x\n",
25110+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
25111+ printk(KERN_INFO "Fault is 0x%08x\n",
25112+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
25113+
25114+ /* Disable MMU for host accesses and clear page fault register */
25115+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
25116+ iounmap(vmmu);
25117+ out_err2:
25118+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
25119+ out_err1:
25120+ kunmap(p);
25121+ __free_page(p);
25122+}
25123Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.c
25124===================================================================
25125--- /dev/null 1970-01-01 00:00:00.000000000 +0000
25126+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.c 2009-02-05 13:29:33.000000000 +0000
25127@@ -0,0 +1,676 @@
25128+/**
25129+ * file psb_msvdx.c
25130+ * MSVDX I/O operations and IRQ handling
25131+ *
25132+ */
25133+
25134+/**************************************************************************
25135+ *
25136+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
25137+ * Copyright (c) Imagination Technologies Limited, UK
25138+ * All Rights Reserved.
25139+ *
25140+ * Permission is hereby granted, free of charge, to any person obtaining a
25141+ * copy of this software and associated documentation files (the
25142+ * "Software"), to deal in the Software without restriction, including
25143+ * without limitation the rights to use, copy, modify, merge, publish,
25144+ * distribute, sub license, and/or sell copies of the Software, and to
25145+ * permit persons to whom the Software is furnished to do so, subject to
25146+ * the following conditions:
25147+ *
25148+ * The above copyright notice and this permission notice (including the
25149+ * next paragraph) shall be included in all copies or substantial portions
25150+ * of the Software.
25151+ *
25152+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25153+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25154+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25155+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
25156+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25157+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25158+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
25159+ *
25160+ **************************************************************************/
25161+
25162+#include "drmP.h"
25163+#include "drm_os_linux.h"
25164+#include "psb_drv.h"
25165+#include "psb_drm.h"
25166+#include "psb_msvdx.h"
25167+
25168+#include <asm/io.h>
25169+#include <linux/delay.h>
25170+
25171+#ifndef list_first_entry
25172+#define list_first_entry(ptr, type, member) \
25173+ list_entry((ptr)->next, type, member)
25174+#endif
25175+
25176+static int psb_msvdx_send (struct drm_device *dev, void *cmd,
25177+ unsigned long cmd_size);
25178+
25179+int
25180+psb_msvdx_dequeue_send (struct drm_device *dev)
25181+{
25182+ struct drm_psb_private *dev_priv = dev->dev_private;
25183+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
25184+ int ret = 0;
25185+
25186+ if (list_empty (&dev_priv->msvdx_queue))
25187+ {
25188+ PSB_DEBUG_GENERAL ("MSVDXQUE: msvdx list empty.\n");
25189+ dev_priv->msvdx_busy = 0;
25190+ return -EINVAL;
25191+ }
25192+ msvdx_cmd =
25193+ list_first_entry (&dev_priv->msvdx_queue, struct psb_msvdx_cmd_queue,
25194+ head);
25195+ PSB_DEBUG_GENERAL ("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
25196+ ret = psb_msvdx_send (dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
25197+ if (ret)
25198+ {
25199+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
25200+ ret = -EINVAL;
25201+ }
25202+ list_del (&msvdx_cmd->head);
25203+ kfree (msvdx_cmd->cmd);
25204+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
25205+ return ret;
25206+}
25207+
25208+int
25209+psb_msvdx_map_command (struct drm_device *dev,
25210+ struct drm_buffer_object *cmd_buffer,
25211+ unsigned long cmd_offset, unsigned long cmd_size,
25212+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
25213+{
25214+ struct drm_psb_private *dev_priv = dev->dev_private;
25215+ int ret = 0;
25216+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
25217+ unsigned long cmd_size_remaining;
25218+ struct drm_bo_kmap_obj cmd_kmap;
25219+ void *cmd, *tmp, *cmd_start;
25220+ int is_iomem;
25221+
25222+ /* command buffers may not exceed page boundary */
25223+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
25224+ return -EINVAL;
25225+
25226+ ret = drm_bo_kmap (cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
25227+
25228+ if (ret)
25229+ {
25230+ PSB_DEBUG_GENERAL ("MSVDXQUE:ret:%d\n", ret);
25231+ return ret;
25232+ }
25233+
25234+ cmd_start =
25235+ (void *) drm_bmo_virtual (&cmd_kmap, &is_iomem) + cmd_page_offset;
25236+ cmd = cmd_start;
25237+ cmd_size_remaining = cmd_size;
25238+
25239+ while (cmd_size_remaining > 0)
25240+ {
25241+ uint32_t mmu_ptd;
25242+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
25243+ uint32_t cur_cmd_id = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_ID);
25244+ PSB_DEBUG_GENERAL
25245+ ("cmd start at %08x cur_cmd_size = %d cur_cmd_id = %02x fence = %08x\n",
25246+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
25247+ if ((cur_cmd_size % sizeof (uint32_t))
25248+ || (cur_cmd_size > cmd_size_remaining))
25249+ {
25250+ ret = -EINVAL;
25251+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25252+ goto out;
25253+ }
25254+
25255+ switch (cur_cmd_id)
25256+ {
25257+ case VA_MSGID_RENDER:
25258+ /* Fence ID */
25259+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_FENCE_VALUE, sequence);
25260+
25261+ mmu_ptd = psb_get_default_pd_addr (dev_priv->mmu);
25262+ if (atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc, 1, 0) == 1)
25263+ {
25264+ mmu_ptd |= 1;
25265+ PSB_DEBUG_GENERAL ("MSVDX: Setting MMU invalidate flag\n");
25266+ }
25267+ /* PTD */
25268+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
25269+ break;
25270+
25271+ default:
25272+ /* Msg not supported */
25273+ ret = -EINVAL;
25274+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25275+ goto out;
25276+ }
25277+
25278+ cmd += cur_cmd_size;
25279+ cmd_size_remaining -= cur_cmd_size;
25280+ }
25281+
25282+ if (copy_cmd)
25283+ {
25284+ PSB_DEBUG_GENERAL
25285+ ("MSVDXQUE: psb_msvdx_map_command copying command...\n");
25286+ tmp = drm_calloc (1, cmd_size, DRM_MEM_DRIVER);
25287+ if (tmp == NULL)
25288+ {
25289+ ret = -ENOMEM;
25290+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25291+ goto out;
25292+ }
25293+ memcpy (tmp, cmd_start, cmd_size);
25294+ *msvdx_cmd = tmp;
25295+ }
25296+ else
25297+ {
25298+ PSB_DEBUG_GENERAL
25299+ ("MSVDXQUE: psb_msvdx_map_command did NOT copy command...\n");
25300+ ret = psb_msvdx_send (dev, cmd_start, cmd_size);
25301+ if (ret)
25302+ {
25303+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
25304+ ret = -EINVAL;
25305+ }
25306+ }
25307+
25308+out:
25309+ drm_bo_kunmap (&cmd_kmap);
25310+
25311+ return ret;
25312+}
25313+
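psb_msvdx_map_command() treats the command buffer as a packed stream of variable-size firmware messages: read the size field from each header, sanity-check it, process, advance. A simplified standalone walk; the two-word header here merely stands in for the FWRK_GENMSG_* field layout:

    /* Simplified walk of a packed message stream, after psb_msvdx_map_command();
     * the 2-word (size, id) header is an assumption made for the demo. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct demo_msg_hdr {
        uint32_t size;  /* total message size in bytes */
        uint32_t id;
    };

    int main(void)
    {
        uint8_t buf[64];
        struct demo_msg_hdr h1 = { 16, 0xA1 }, h2 = { 24, 0xB2 };
        size_t remaining = h1.size + h2.size, off = 0;

        memcpy(buf, &h1, sizeof(h1));
        memcpy(buf + h1.size, &h2, sizeof(h2));

        while (remaining > 0) {
            struct demo_msg_hdr hdr;

            memcpy(&hdr, buf + off, sizeof(hdr));
            if (hdr.size % 4 || hdr.size > remaining)  /* same sanity checks */
                return 1;
            printf("msg id 0x%02x, %u bytes\n", hdr.id, hdr.size);
            off += hdr.size;
            remaining -= hdr.size;
        }
        return 0;
    }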
25314+int
25315+psb_submit_video_cmdbuf (struct drm_device *dev,
25316+ struct drm_buffer_object *cmd_buffer,
25317+ unsigned long cmd_offset, unsigned long cmd_size,
25318+ struct drm_fence_object *fence)
25319+{
25320+ struct drm_psb_private *dev_priv = dev->dev_private;
25321+ uint32_t sequence = fence->sequence;
25322+ unsigned long irq_flags;
25323+ int ret = 0;
25324+
25325+ mutex_lock (&dev_priv->msvdx_mutex);
25326+ psb_schedule_watchdog (dev_priv);
25327+
25328+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
25329+ if (dev_priv->msvdx_needs_reset)
25330+ {
25331+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25332+ PSB_DEBUG_GENERAL ("MSVDX: Needs reset\n");
25333+ if (psb_msvdx_reset (dev_priv))
25334+ {
25335+ mutex_unlock (&dev_priv->msvdx_mutex);
25336+ ret = -EBUSY;
25337+ PSB_DEBUG_GENERAL ("MSVDX: Reset failed\n");
25338+ return ret;
25339+ }
25340+ PSB_DEBUG_GENERAL ("MSVDX: Reset ok\n");
25341+ dev_priv->msvdx_needs_reset = 0;
25342+ dev_priv->msvdx_busy = 0;
25343+ dev_priv->msvdx_start_idle = 0;
25344+
25345+ psb_msvdx_init (dev);
25346+ psb_msvdx_irq_preinstall (dev_priv);
25347+ psb_msvdx_irq_postinstall (dev_priv);
25348+ PSB_DEBUG_GENERAL ("MSVDX: Init ok\n");
25349+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
25350+ }
25351+
25352+ if (!dev_priv->msvdx_busy)
25353+ {
25354+ dev_priv->msvdx_busy = 1;
25355+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25356+ PSB_DEBUG_GENERAL
25357+ ("MSVDXQUE: nothing in the queue sending sequence:%08x..\n",
25358+ sequence);
25359+ ret =
25360+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
25361+ NULL, sequence, 0);
25362+ if (ret)
25363+ {
25364+ mutex_unlock (&dev_priv->msvdx_mutex);
25365+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
25366+ return ret;
25367+ }
25368+ }
25369+ else
25370+ {
25371+ struct psb_msvdx_cmd_queue *msvdx_cmd;
25372+ void *cmd = NULL;
25373+
25374+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25375+ /*queue the command to be sent when the h/w is ready */
25376+ PSB_DEBUG_GENERAL ("MSVDXQUE: queueing sequence:%08x..\n", sequence);
25377+ msvdx_cmd =
25378+ drm_calloc (1, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
25379+ if (msvdx_cmd == NULL)
25380+ {
25381+ mutex_unlock (&dev_priv->msvdx_mutex);
25382+ PSB_DEBUG_GENERAL ("MSVDXQUE: Out of memory...\n");
25383+ return -ENOMEM;
25384+ }
25385+
25386+ ret =
25387+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
25388+ &cmd, sequence, 1);
25389+ if (ret)
25390+ {
25391+ mutex_unlock (&dev_priv->msvdx_mutex);
25392+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
25393+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue),
25394+ DRM_MEM_DRIVER);
25395+ return ret;
25396+ }
25397+ msvdx_cmd->cmd = cmd;
25398+ msvdx_cmd->cmd_size = cmd_size;
25399+ msvdx_cmd->sequence = sequence;
25400+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
25401+ list_add_tail (&msvdx_cmd->head, &dev_priv->msvdx_queue);
25402+ if (!dev_priv->msvdx_busy)
25403+ {
25404+ dev_priv->msvdx_busy = 1;
25405+ PSB_DEBUG_GENERAL ("MSVDXQUE: Need immediate dequeue\n");
25406+ psb_msvdx_dequeue_send (dev);
25407+ }
25408+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25409+ }
25410+ mutex_unlock (&dev_priv->msvdx_mutex);
25411+ return ret;
25412+}
25413+
25414+int
25415+psb_msvdx_send (struct drm_device *dev, void *cmd, unsigned long cmd_size)
25416+{
25417+ int ret = 0;
25418+ struct drm_psb_private *dev_priv = dev->dev_private;
25419+
25420+ while (cmd_size > 0)
25421+ {
25422+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
25423+ if (cur_cmd_size > cmd_size)
25424+ {
25425+ ret = -EINVAL;
25426+ PSB_DEBUG_GENERAL
25427+ ("MSVDX: cmd_size = %d cur_cmd_size = %d\n",
25428+ (int) cmd_size, cur_cmd_size);
25429+ goto out;
25430+ }
25431+ /* Send the message to h/w */
25432+ ret = psb_mtx_send (dev_priv, cmd);
25433+ if (ret)
25434+ {
25435+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25436+ goto out;
25437+ }
25438+ cmd += cur_cmd_size;
25439+ cmd_size -= cur_cmd_size;
25440+ }
25441+
25442+out:
25443+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25444+ return ret;
25445+}
25446+
25447+/***********************************************************************************
25448+ * Function Name : psb_mtx_send
25449+ * Inputs : dev_priv - psb private data; pvMsg - message to copy into the MTX buffer
25450+ * Outputs : none
25451+ * Returns : 0 on success, -EINVAL on a malformed or oversized message
25452+ * Description : Copies one message into the to-MTX ring (padding on wrap) and kicks the MTX
25453+ ************************************************************************************/
25454+int
25455+psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg)
25456+{
25457+
25458+ static uint32_t padMessage[FWRK_PADMSG_SIZE];
25459+
25460+ const uint32_t *pui32Msg = (uint32_t *) pvMsg;
25461+ uint32_t msgNumWords, wordsFree, readIndex, writeIndex;
25462+ int ret = 0;
25463+
25464+ PSB_DEBUG_GENERAL ("MSVDX: psb_mtx_send\n");
25465+
25466+ /* we need clocks enabled before we touch VEC local ram */
25467+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25468+
25469+ msgNumWords = (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_SIZE) + 3) / 4;
25470+
25471+ if (msgNumWords > NUM_WORDS_MTX_BUF)
25472+ {
25473+ ret = -EINVAL;
25474+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25475+ goto out;
25476+ }
25477+
25478+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_RD_INDEX);
25479+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
25480+
25481+ if (writeIndex + msgNumWords > NUM_WORDS_MTX_BUF)
25482+ { /* message would wrap, need to send a pad message */
25483+ BUG_ON (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_ID) == FWRK_MSGID_PADDING); /* Shouldn't happen for a PAD message itself */
25484+      /* if the read pointer is at zero then we must wait for it to change, otherwise the write
25485+       * pointer will equal the read pointer, which should only happen when the buffer is empty
25486+       *
25487+       * This will only happen if we try to overfill the queue; queue management should make
25488+       * sure this never happens in the first place.
25489+       */
25490+ BUG_ON (0 == readIndex);
25491+ if (0 == readIndex)
25492+ {
25493+ ret = -EINVAL;
25494+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25495+ goto out;
25496+ }
25497+ /* Send a pad message */
25498+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_SIZE,
25499+ (NUM_WORDS_MTX_BUF - writeIndex) << 2);
25500+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_ID, FWRK_MSGID_PADDING);
25501+ psb_mtx_send (dev_priv, padMessage);
25502+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
25503+ }
25504+
25505+ wordsFree =
25506+ (writeIndex >=
25507+ readIndex) ? NUM_WORDS_MTX_BUF - (writeIndex -
25508+ readIndex) : readIndex - writeIndex;
25509+
25510+ BUG_ON (msgNumWords > wordsFree);
25511+ if (msgNumWords > wordsFree)
25512+ {
25513+ ret = -EINVAL;
25514+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25515+ goto out;
25516+ }
25517+
25518+ while (msgNumWords > 0)
25519+ {
25520+ PSB_WMSVDX32 (*pui32Msg++, MSVDX_COMMS_TO_MTX_BUF + (writeIndex << 2));
25521+ msgNumWords--;
25522+ writeIndex++;
25523+ if (NUM_WORDS_MTX_BUF == writeIndex)
25524+ {
25525+ writeIndex = 0;
25526+ }
25527+ }
25528+ PSB_WMSVDX32 (writeIndex, MSVDX_COMMS_TO_MTX_WRT_INDEX);
25529+
25530+ /* Make sure clocks are enabled before we kick */
25531+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25532+
25533+ /* signal an interrupt to let the mtx know there is a new message */
25534+ PSB_WMSVDX32 (1, MSVDX_MTX_KICKI);
25535+
25536+out:
25537+ return ret;
25538+}
25539+
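The to-MTX channel is a ring of 32-bit words in which a message may never wrap: if it would, a pad fills the tail first, and free space is computed from the read/write indices. A userspace model of that discipline; the pad here is a bare marker word rather than a real FWRK_MSGID_PADDING message, and the ring is shrunk for the demo:

    /* Userspace model of the to-MTX ring discipline in psb_mtx_send(). */
    #include <stdio.h>
    #include <stdint.h>

    #define NWORDS 16u  /* tiny ring for the demo; the driver uses 100 words */

    static uint32_t ring[NWORDS], rd, wr;

    static uint32_t words_free(void)
    {
        return (wr >= rd) ? NWORDS - (wr - rd) : rd - wr;
    }

    static int send_msg(const uint32_t *msg, uint32_t n)
    {
        uint32_t i;

        if (wr + n > NWORDS) {  /* message would wrap: pad out the tail first */
            if (rd == 0)        /* the driver BUG_ON()s here: pad would catch the reader */
                return -1;
            while (wr < NWORDS)
                ring[wr++] = 0xFFFFFFFF;  /* stands in for the pad message */
            wr = 0;
        }
        if (n > words_free())
            return -1;
        for (i = 0; i < n; ++i)
            ring[wr++] = msg[i];
        return 0;
    }

    int main(void)
    {
        uint32_t a[10] = { 0xA }, b[1] = { 0xB };

        rd = wr = 12;  /* pretend the MTX already consumed 12 words */
        printf("send a: %d, free %u\n", send_msg(a, 10), words_free());
        printf("send b: %d, free %u\n", send_msg(b, 1), words_free());
        return 0;
    }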
25540+/*
25541+ * MSVDX MTX interrupt
25542+ */
25543+void
25544+psb_msvdx_mtx_interrupt (struct drm_device *dev)
25545+{
25546+ static uint32_t msgBuffer[128];
25547+ uint32_t readIndex, writeIndex;
25548+ uint32_t msgNumWords, msgWordOffset;
25549+ struct drm_psb_private *dev_priv =
25550+ (struct drm_psb_private *) dev->dev_private;
25551+
25552+ /* Are clocks enabled - If not enable before attempting to read from VLR */
25553+ if (PSB_RMSVDX32 (MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
25554+ {
25555+ PSB_DEBUG_GENERAL
25556+	("MSVDX: Warning - Clocks disabled when Interrupt set\n");
25557+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25558+ }
25559+
25560+ for (;;)
25561+ {
25562+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_RD_INDEX);
25563+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_WRT_INDEX);
25564+
25565+ if (readIndex != writeIndex)
25566+ {
25567+ msgWordOffset = 0;
25568+
25569+ msgBuffer[msgWordOffset] =
25570+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
25571+
25572+	  msgNumWords = (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_SIZE) + 3) / 4;	/* round up to whole words */
25573+
25574+ /*ASSERT(msgNumWords <= sizeof(msgBuffer) / sizeof(uint32_t)); */
25575+
25576+ if (++readIndex >= NUM_WORDS_HOST_BUF)
25577+ readIndex = 0;
25578+
25579+ for (msgWordOffset++; msgWordOffset < msgNumWords; msgWordOffset++)
25580+ {
25581+ msgBuffer[msgWordOffset] =
25582+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
25583+
25584+ if (++readIndex >= NUM_WORDS_HOST_BUF)
25585+ {
25586+ readIndex = 0;
25587+ }
25588+ }
25589+
25590+ /* Update the Read index */
25591+ PSB_WMSVDX32 (readIndex, MSVDX_COMMS_TO_HOST_RD_INDEX);
25592+
25593+ if (!dev_priv->msvdx_needs_reset)
25594+ switch (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID))
25595+ {
25596+ case VA_MSGID_CMD_HW_PANIC:
25597+ case VA_MSGID_CMD_FAILED:
25598+ {
25599+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
25600+ FW_VA_CMD_FAILED_FENCE_VALUE);
25601+ uint32_t ui32FaultStatus = MEMIO_READ_FIELD (msgBuffer,
25602+ FW_VA_CMD_FAILED_IRQSTATUS);
25603+
25604+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC )
25605+ PSB_DEBUG_GENERAL
25606+ ("MSVDX: VA_MSGID_CMD_HW_PANIC: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
25607+ ui32Fence, ui32FaultStatus);
25608+ else
25609+ PSB_DEBUG_GENERAL
25610+ ("MSVDX: VA_MSGID_CMD_FAILED: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
25611+ ui32Fence, ui32FaultStatus);
25612+
25613+ dev_priv->msvdx_needs_reset = 1;
25614+
25615+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC)
25616+ {
25617+		  if (dev_priv->msvdx_current_sequence -
25618+		      dev_priv->sequence[PSB_ENGINE_VIDEO]
25619+		      > 0x0FFFFFFF)
25620+		    dev_priv->msvdx_current_sequence++;
25621+ PSB_DEBUG_GENERAL
25622+ ("MSVDX: Fence ID missing, assuming %08x\n",
25623+ dev_priv->msvdx_current_sequence);
25624+ }
25625+ else
25626+ dev_priv->msvdx_current_sequence = ui32Fence;
25627+
25628+	      psb_fence_error (dev,
25629+			       PSB_ENGINE_VIDEO,
25630+			       dev_priv->msvdx_current_sequence,
25631+			       DRM_FENCE_TYPE_EXE,
25632+			       DRM_CMD_FAILED);
25633+
25634+ /* Flush the command queue */
25635+ psb_msvdx_flush_cmd_queue (dev);
25636+
25637+ goto isrExit;
25638+ break;
25639+ }
25640+ case VA_MSGID_CMD_COMPLETED:
25641+ {
25642+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
25643+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
25644+ uint32_t ui32Flags =
25645+ MEMIO_READ_FIELD (msgBuffer, FW_VA_CMD_COMPLETED_FLAGS);
25646+
25647+ PSB_DEBUG_GENERAL
25648+ ("msvdx VA_MSGID_CMD_COMPLETED: FenceID: %08x, flags: 0x%x\n",
25649+ ui32Fence, ui32Flags);
25650+ dev_priv->msvdx_current_sequence = ui32Fence;
25651+
25652+ psb_fence_handler (dev, PSB_ENGINE_VIDEO);
25653+
25654+
25655+ if (ui32Flags & FW_VA_RENDER_HOST_INT)
25656+ {
25657+ /*Now send the next command from the msvdx cmd queue */
25658+ psb_msvdx_dequeue_send (dev);
25659+ goto isrExit;
25660+ }
25661+ break;
25662+ }
25663+ case VA_MSGID_ACK:
25664+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_ACK\n");
25665+ break;
25666+
25667+ case VA_MSGID_TEST1:
25668+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST1\n");
25669+ break;
25670+
25671+ case VA_MSGID_TEST2:
25672+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST2\n");
25673+ break;
25674+ /* Don't need to do anything with these messages */
25675+
25676+ case VA_MSGID_DEBLOCK_REQUIRED:
25677+ {
25678+ uint32_t ui32ContextId = MEMIO_READ_FIELD (msgBuffer,
25679+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
25680+
25681+	      /* The BE will now be locked. */
25682+
25683+ /* Unblock rendec by reading the mtx2mtx end of slice */
25684+ (void) PSB_RMSVDX32 (MSVDX_RENDEC_READ_DATA);
25685+
25686+ PSB_DEBUG_GENERAL
25687+ ("msvdx VA_MSGID_DEBLOCK_REQUIRED Context=%08x\n",
25688+ ui32ContextId);
25689+ goto isrExit;
25690+ break;
25691+ }
25692+
25693+ default:
25694+ {
25695+ PSB_DEBUG_GENERAL
25696+ ("ERROR: msvdx Unknown message from MTX \n");
25697+ }
25698+ break;
25699+
25700+ }
25701+ }
25702+ else
25703+ {
25704+ /* Get out of here if nothing */
25705+ break;
25706+ }
25707+ }
25708+isrExit:
25709+
25710+#if 1
25711+  if (!dev_priv->msvdx_busy)
25712+    {
25713+      uint32_t ui32FWStatus, ui32CCBRoff, ui32CCBWoff;
25714+
25715+      /* check that clocks are enabled before reading VLR */
25716+      if (PSB_RMSVDX32 (MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
25717+	PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25718+      /* If the firmware says the hardware is idle and the CCB is empty then we can power down */
25719+      ui32FWStatus = PSB_RMSVDX32 (MSVDX_COMMS_FW_STATUS);
25720+      ui32CCBRoff = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_RD_INDEX);
25721+      ui32CCBWoff = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
25722+      if ((ui32FWStatus & MSVDX_FW_STATUS_HW_IDLE) && (ui32CCBRoff == ui32CCBWoff))
25723+	{
25724+	  PSB_DEBUG_GENERAL ("MSVDX_CLOCK: Setting clock to minimal...\n");
25725+	  PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
25726+	}
25727+    }
25728+#endif
25729+ DRM_MEMORYBARRIER ();
25730+}
25731+
25732+void
25733+psb_msvdx_lockup (struct drm_psb_private *dev_priv,
25734+ int *msvdx_lockup, int *msvdx_idle)
25735+{
25736+ unsigned long irq_flags;
25737+// struct psb_scheduler *scheduler = &dev_priv->scheduler;
25738+
25739+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
25740+ *msvdx_lockup = 0;
25741+ *msvdx_idle = 1;
25742+
25743+ if (!dev_priv->has_msvdx)
25744+ {
25745+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25746+ return;
25747+ }
25748+#if 0
25749+ PSB_DEBUG_GENERAL ("MSVDXTimer: current_sequence:%d "
25750+ "last_sequence:%d and last_submitted_sequence :%d\n",
25751+ dev_priv->msvdx_current_sequence,
25752+ dev_priv->msvdx_last_sequence,
25753+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
25754+#endif
25755+ if (dev_priv->msvdx_current_sequence -
25756+ dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
25757+ {
25758+
25759+ if (dev_priv->msvdx_current_sequence == dev_priv->msvdx_last_sequence)
25760+ {
25761+ PSB_DEBUG_GENERAL
25762+ ("MSVDXTimer: msvdx locked-up for sequence:%d\n",
25763+ dev_priv->msvdx_current_sequence);
25764+ *msvdx_lockup = 1;
25765+ }
25766+ else
25767+ {
25768+ PSB_DEBUG_GENERAL ("MSVDXTimer: msvdx responded fine so far...\n");
25769+ dev_priv->msvdx_last_sequence = dev_priv->msvdx_current_sequence;
25770+ *msvdx_idle = 0;
25771+ }
25772+ if (dev_priv->msvdx_start_idle)
25773+ dev_priv->msvdx_start_idle = 0;
25774+ }
25775+ else
25776+ {
25777+ if (dev_priv->msvdx_needs_reset == 0)
25778+ {
25779+ if (dev_priv->msvdx_start_idle && (dev_priv->msvdx_finished_sequence == dev_priv->msvdx_current_sequence))
25780+ {
25781+ //if (dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME >= jiffies)
25782+ if (time_after_eq(jiffies, dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME))
25783+ {
25784+ printk("set the msvdx clock to 0 in the %s\n", __FUNCTION__);
25785+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
25786+ dev_priv->msvdx_needs_reset = 1;
25787+ }
25788+ else
25789+ {
25790+ *msvdx_idle = 0;
25791+ }
25792+ }
25793+ else
25794+ {
25795+ dev_priv->msvdx_start_idle = 1;
25796+ dev_priv->msvdx_idle_start_jiffies = jiffies;
25797+ dev_priv->msvdx_finished_sequence = dev_priv->msvdx_current_sequence;
25798+ *msvdx_idle = 0;
25799+ }
25800+ }
25801+ }
25802+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25803+}
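The watchdog above compares fence sequences as wrapping 32-bit counters: an unsigned difference above 0x0FFFFFFF means the decoder's current sequence is still behind what was submitted, even across a counter wrap. A three-case sketch of that wrap-safe test:

    /* Wrap-safe "is the decoder behind?" check, as used by psb_msvdx_lockup(). */
    #include <stdio.h>
    #include <stdint.h>

    static int behind(uint32_t cur, uint32_t submitted)
    {
        return (uint32_t)(cur - submitted) > 0x0FFFFFFF;
    }

    int main(void)
    {
        printf("%d\n", behind(0x00000005, 0x00000007));  /* 1: work outstanding */
        printf("%d\n", behind(0x00000007, 0x00000007));  /* 0: caught up */
        printf("%d\n", behind(0xFFFFFFFE, 0x00000001));  /* 1: behind across wrap */
        return 0;
    }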
25804Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.h
25805===================================================================
25806--- /dev/null 1970-01-01 00:00:00.000000000 +0000
25807+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.h 2009-02-05 13:29:33.000000000 +0000
25808@@ -0,0 +1,564 @@
25809+/**************************************************************************
25810+ *
25811+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
25812+ * Copyright (c) Imagination Technologies Limited, UK
25813+ * All Rights Reserved.
25814+ *
25815+ * Permission is hereby granted, free of charge, to any person obtaining a
25816+ * copy of this software and associated documentation files (the
25817+ * "Software"), to deal in the Software without restriction, including
25818+ * without limitation the rights to use, copy, modify, merge, publish,
25819+ * distribute, sub license, and/or sell copies of the Software, and to
25820+ * permit persons to whom the Software is furnished to do so, subject to
25821+ * the following conditions:
25822+ *
25823+ * The above copyright notice and this permission notice (including the
25824+ * next paragraph) shall be included in all copies or substantial portions
25825+ * of the Software.
25826+ *
25827+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25828+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25829+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25830+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
25831+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25832+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25833+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
25834+ *
25835+ **************************************************************************/
25836+
25837+#ifndef _PSB_MSVDX_H_
25838+#define _PSB_MSVDX_H_
25839+
25840+#define assert(expr) \
25841+ do { if (unlikely(!(expr))) { \
25842+ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
25843+ #expr, __FILE__, __FUNCTION__, __LINE__); \
25844+ } } while (0)
25845+
25846+#define PSB_ASSERT(x) assert (x)
25847+#define IMG_ASSERT(x) assert (x)
25848+
25849+#include "psb_drv.h"
25850+int
25851+psb_wait_for_register (struct drm_psb_private *dev_priv,
25852+ uint32_t ui32Offset,
25853+ uint32_t ui32Value, uint32_t ui32Enable);
25854+
25855+void psb_msvdx_mtx_interrupt (struct drm_device *dev);
25856+int psb_msvdx_init (struct drm_device *dev);
25857+int psb_msvdx_uninit (struct drm_device *dev);
25858+int psb_msvdx_reset (struct drm_psb_private *dev_priv);
25859+uint32_t psb_get_default_pd_addr (struct psb_mmu_driver *driver);
25860+int psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg);
25861+void psb_msvdx_irq_preinstall (struct drm_psb_private *dev_priv);
25862+void psb_msvdx_irq_postinstall (struct drm_psb_private *dev_priv);
25863+void psb_msvdx_flush_cmd_queue (struct drm_device *dev);
25864+extern void psb_msvdx_lockup (struct drm_psb_private *dev_priv,
25865+ int *msvdx_lockup, int *msvdx_idle);
25866+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2 /* Non-Optimal Invalidation is not default */
25867+#define FW_VA_RENDER_HOST_INT 0x00004000
25868+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
25869+
25870+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
25871+
25872+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
25873+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
25874+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
25875+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
25876+
25877+
25878+#define POULSBO_D0 0x5
25879+#define POULSBO_D1 0x6
25880+#define PSB_REVID_OFFSET 0x8
25881+
25882+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 /* There is no work currently underway on the hardware*/
25883+
25884+#define clk_enable_all MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
25885+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
25886+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
25887+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
25888+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
25889+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
25890+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
25891+
25892+#define clk_enable_minimal MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
25893+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
25894+
25895+#define clk_enable_auto MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
25896+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
25897+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
25898+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
25899+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
25900+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
25901+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
25902+
25903+#define msvdx_sw_reset_all MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
25904+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
25905+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
25906+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
25907+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK
25908+
25909+
25910+#define PCI_PORT5_REG80_FFUSE 0xD0058000
25911+#define MTX_CODE_BASE (0x80900000)
25912+#define MTX_DATA_BASE (0x82880000)
25913+#define PC_START_ADDRESS (0x80900000)
25914+
25915+#define MTX_CORE_CODE_MEM (0x10 )
25916+#define MTX_CORE_DATA_MEM (0x18 )
25917+
25918+#define MTX_INTERNAL_REG( R_SPECIFIER , U_SPECIFIER ) ( ((R_SPECIFIER)<<4) | (U_SPECIFIER) )
25919+#define MTX_PC MTX_INTERNAL_REG( 0 , 5 )
25920+
25921+#define RENDEC_A_SIZE ( 2 * 1024 * 1024 )
25922+#define RENDEC_B_SIZE ( RENDEC_A_SIZE / 4 )
25923+
25924+#define MEMIO_READ_FIELD(vpMem, field) \
25925+ ((uint32_t)(((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & field##_MASK) >> field##_SHIFT))
25926+
25927+#define MEMIO_WRITE_FIELD(vpMem, field, ui32Value) \
25928+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
25929+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & (field##_TYPE)~field##_MASK) | \
25930+ (field##_TYPE)(( (uint32_t) (ui32Value) << field##_SHIFT) & field##_MASK);
25931+
25932+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, ui32Value) \
25933+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
25934+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) | \
25935+ (field##_TYPE) (( (uint32_t) (ui32Value) << field##_SHIFT)) );
25936+
25937+#define REGIO_READ_FIELD(ui32RegValue, reg, field) \
25938+ ((ui32RegValue & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
25939+
25940+#define REGIO_WRITE_FIELD(ui32RegValue, reg, field, ui32Value) \
25941+ (ui32RegValue) = \
25942+ ((ui32RegValue) & ~(reg##_##field##_MASK)) | \
25943+ (((ui32Value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
25944+
25945+#define REGIO_WRITE_FIELD_LITE(ui32RegValue, reg, field, ui32Value) \
25946+ (ui32RegValue) = \
25947+ ( (ui32RegValue) | ( (ui32Value) << (reg##_##field##_SHIFT) ) );
25948+
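The REGIO_* accessors above derive everything from token-pasted reg##_##field##_MASK/_SHIFT names, so adding a register field is just two #defines. A toy, expression-form restatement with an invented register field (the driver's own macros are statement-form and end in a semicolon):

    /* Toy version of the REGIO_* token-pasting accessors; the register and
     * field names are invented for the demo. */
    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_REG_BURST_MASK  0x000C0000
    #define DEMO_REG_BURST_SHIFT 18

    #define REGIO_READ_FIELD(v, reg, field) \
        (((v) & reg##_##field##_MASK) >> reg##_##field##_SHIFT)

    #define REGIO_WRITE_FIELD(v, reg, field, val) \
        ((v) = ((v) & ~(reg##_##field##_MASK)) | \
               (((val) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK)))

    int main(void)
    {
        uint32_t reg = 0;

        REGIO_WRITE_FIELD(reg, DEMO_REG, BURST, 3);
        printf("reg 0x%08x, burst %u\n",
               reg, REGIO_READ_FIELD(reg, DEMO_REG, BURST));
        return 0;
    }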
25949+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK (0x00000001)
25950+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK (0x00000002)
25951+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK (0x00000004)
25952+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK (0x00000008)
25953+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK (0x00000010)
25954+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK (0x00000020)
25955+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK (0x00000040)
25956+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK (0x00040000)
25957+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK (0x00080000)
25958+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK (0x00100000)
25959+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK (0x00200000)
25960+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
25961+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK (0x00010000)
25962+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK (0x00100000)
25963+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK (0x01000000)
25964+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK (0x10000000)
25965+
25966+/* MTX registers */
25967+#define MSVDX_MTX_ENABLE (0x0000)
25968+#define MSVDX_MTX_KICKI (0x0088)
25969+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
25970+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
25971+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
25972+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
25973+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
25974+#define MSVDX_MTX_SOFT_RESET (0x0200)
25975+
25976+/* MSVDX registers */
25977+#define MSVDX_CONTROL (0x0600)
25978+#define MSVDX_INTERRUPT_CLEAR (0x060C)
25979+#define MSVDX_INTERRUPT_STATUS (0x0608)
25980+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
25981+#define MSVDX_MMU_CONTROL0 (0x0680)
25982+#define MSVDX_MTX_RAM_BANK (0x06F0)
25983+#define MSVDX_MAN_CLK_ENABLE (0x0620)
25984+
25985+/* RENDEC registers */
25986+#define MSVDX_RENDEC_CONTROL0 (0x0868)
25987+#define MSVDX_RENDEC_CONTROL1 (0x086C)
25988+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
25989+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
25990+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
25991+#define MSVDX_RENDEC_READ_DATA (0x0898)
25992+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
25993+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
25994+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
25995+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
25996+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
25997+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
25998+
25999+/*
26000+ * This defines the MSVDX communication buffer
26001+ */
26002+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
26003+#define NUM_WORDS_HOST_BUF (100) /*!< Host buffer size (in 32-bit words) */
26004+#define NUM_WORDS_MTX_BUF (100) /*!< MTX buffer size (in 32-bit words) */
26005+
26006+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
26007+
26008+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
26009+#define MSVDX_COMMS_SCRATCH (MSVDX_COMMS_AREA_ADDR - 0x08)
26010+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
26011+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
26012+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
26013+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
26014+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
26015+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
26016+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
26017+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
26018+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
26019+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
26020+#define MSVDX_COMMS_TO_MTX_BUF (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
26021+
26022+#define MSVDX_COMMS_AREA_END (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
26023+
26024+#if (MSVDX_COMMS_AREA_END != 0x03000)
26025+#error "MSVDX_COMMS_AREA_END must equal 0x03000"
26026+#endif
26027+
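Taken together, the RD/WRT index pairs above implement two circular message rings in MSVDX register space, one Host-to-MTX and one MTX-to-Host. A minimal host-side sketch of pushing one 32-bit word into the Host-to-MTX ring, assuming a hypothetical word to send, a buffer-size register that holds the ring length in words, and the PSB_RMSVDX32/PSB_WMSVDX32 accessors used later in this patch:

    uint32_t buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE);
    uint32_t wrt_idx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);

    /* Store the word at the current write slot, then advance and wrap. */
    PSB_WMSVDX32(word, MSVDX_COMMS_TO_MTX_BUF + (wrt_idx << 2));
    wrt_idx = (wrt_idx + 1) % buf_size;
    PSB_WMSVDX32(wrt_idx, MSVDX_COMMS_TO_MTX_WRT_INDEX);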
26028+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
26029+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
26030+
26031+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
26032+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
26033+
26034+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
26035+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
26036+
26037+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
26038+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
26039+
26040+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
26041+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
26042+
26043+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
26044+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
26045+
26046+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
26047+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
26048+
26049+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
26050+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
26051+
26052+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
26053+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
26054+
26055+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
26056+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
26057+
26058+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
26059+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
26060+
26061+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
26062+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
26063+
26064+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
26065+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
26066+
26067+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
26068+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
26069+
26070+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
26071+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
26072+
26073+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
26074+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
26075+
26076+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
26077+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
26078+
26079+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
26080+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
26081+
26082+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
26083+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
26084+
26085+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
26086+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
26087+
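Every field in the register maps above comes as a MASK/SHIFT pair, which is the convention the REGIO_* helpers in this patch rely on. A minimal sketch of the underlying read-modify-write, here setting the RENDEC_INITIALISE field (the value 1 is only an example):

    uint32_t reg = PSB_RMSVDX32(MSVDX_RENDEC_CONTROL0);

    /* Clear the field, then insert the new value under its mask. */
    reg &= ~MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK;
    reg |= (1 << MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT) &
           MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK;
    PSB_WMSVDX32(reg, MSVDX_RENDEC_CONTROL0);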
26088+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80) /*!< Start of parser specific Host->MTX messages. */
26089+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0) /*!< Start of parser specific MTX->Host messages. */
26090+#define FWRK_MSGID_PADDING (0)
26091+
26092+#define FWRK_GENMSG_SIZE_TYPE uint8_t
26093+#define FWRK_GENMSG_SIZE_MASK (0xFF)
26094+#define FWRK_GENMSG_SIZE_SHIFT (0)
26095+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
26096+#define FWRK_GENMSG_ID_TYPE uint8_t
26097+#define FWRK_GENMSG_ID_MASK (0xFF)
26098+#define FWRK_GENMSG_ID_SHIFT (0)
26099+#define FWRK_GENMSG_ID_OFFSET (0x0001)
26100+#define FWRK_PADMSG_SIZE (2)
26101+
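The FWRK_GENMSG defines describe a two-byte generic header shared by all firmware messages: the total size at offset 0 and the message ID at offset 1. A sketch of peeking that header, with msg as a hypothetical byte pointer to a received message:

    uint8_t size = (msg[FWRK_GENMSG_SIZE_OFFSET] >> FWRK_GENMSG_SIZE_SHIFT) &
                   FWRK_GENMSG_SIZE_MASK;
    uint8_t id = (msg[FWRK_GENMSG_ID_OFFSET] >> FWRK_GENMSG_ID_SHIFT) &
                 FWRK_GENMSG_ID_MASK;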
26102+/*!
26103+******************************************************************************
26104+ This type defines the framework specified message ids
26105+******************************************************************************/
26106+enum
26107+{
26108+ /*! Sent by the DXVA driver on the host to the mtx firmware.
26109+ */
26110+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
26111+ VA_MSGID_RENDER,
26112+ VA_MSGID_DEBLOCK,
26113+ VA_MSGID_OOLD,
26114+
26115+ /* Test Messages */
26116+ VA_MSGID_TEST1,
26117+ VA_MSGID_TEST2,
26118+
26119+ /*! Sent by the mtx firmware to itself.
26120+ */
26121+ VA_MSGID_RENDER_MC_INTERRUPT,
26122+
26123+ /*! Sent by the DXVA firmware on the MTX to the host.
26124+ */
26125+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
26126+ VA_MSGID_CMD_COMPLETED_BATCH,
26127+ VA_MSGID_DEBLOCK_REQUIRED,
26128+ VA_MSGID_TEST_RESPONCE,
26129+ VA_MSGID_ACK,
26130+
26131+ VA_MSGID_CMD_FAILED,
26132+ VA_MSGID_CMD_UNSUPPORTED,
26133+ VA_MSGID_CMD_HW_PANIC,
26134+};
26135+
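Because Host-to-MTX IDs start at 0x80 and MTX-to-Host IDs at 0xC0, the two ranges are disjoint and a receiver can dispatch on the ID byte alone. A hypothetical host-side dispatch sketch (the handling in each arm is left abstract):

    switch (id) {
    case VA_MSGID_CMD_COMPLETED:
        /* retire the fence value carried in the message */
        break;
    case VA_MSGID_CMD_FAILED:
    case VA_MSGID_CMD_HW_PANIC:
        /* log the IRQ status from the message and reset the core */
        break;
    default:
        break;
    }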
26136+/* MSVDX Firmware interface */
26137+
26138+#define FW_VA_RENDER_SIZE (32)
26139+
26140+// FW_VA_RENDER MSG_SIZE
26141+#define FW_VA_RENDER_MSG_SIZE_ALIGNMENT (1)
26142+#define FW_VA_RENDER_MSG_SIZE_TYPE uint8_t
26143+#define FW_VA_RENDER_MSG_SIZE_MASK (0xFF)
26144+#define FW_VA_RENDER_MSG_SIZE_LSBMASK (0xFF)
26145+#define FW_VA_RENDER_MSG_SIZE_OFFSET (0x0000)
26146+#define FW_VA_RENDER_MSG_SIZE_SHIFT (0)
26147+
26148+// FW_VA_RENDER ID
26149+#define FW_VA_RENDER_ID_ALIGNMENT (1)
26150+#define FW_VA_RENDER_ID_TYPE uint8_t
26151+#define FW_VA_RENDER_ID_MASK (0xFF)
26152+#define FW_VA_RENDER_ID_LSBMASK (0xFF)
26153+#define FW_VA_RENDER_ID_OFFSET (0x0001)
26154+#define FW_VA_RENDER_ID_SHIFT (0)
26155+
26156+// FW_VA_RENDER BUFFER_SIZE
26157+#define FW_VA_RENDER_BUFFER_SIZE_ALIGNMENT (2)
26158+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
26159+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
26160+#define FW_VA_RENDER_BUFFER_SIZE_LSBMASK (0x0FFF)
26161+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
26162+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
26163+
26164+// FW_VA_RENDER MMUPTD
26165+#define FW_VA_RENDER_MMUPTD_ALIGNMENT (4)
26166+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
26167+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
26168+#define FW_VA_RENDER_MMUPTD_LSBMASK (0xFFFFFFFF)
26169+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
26170+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
26171+
26172+// FW_VA_RENDER LLDMA_ADDRESS
26173+#define FW_VA_RENDER_LLDMA_ADDRESS_ALIGNMENT (4)
26174+#define FW_VA_RENDER_LLDMA_ADDRESS_TYPE uint32_t
26175+#define FW_VA_RENDER_LLDMA_ADDRESS_MASK (0xFFFFFFFF)
26176+#define FW_VA_RENDER_LLDMA_ADDRESS_LSBMASK (0xFFFFFFFF)
26177+#define FW_VA_RENDER_LLDMA_ADDRESS_OFFSET (0x0008)
26178+#define FW_VA_RENDER_LLDMA_ADDRESS_SHIFT (0)
26179+
26180+// FW_VA_RENDER CONTEXT
26181+#define FW_VA_RENDER_CONTEXT_ALIGNMENT (4)
26182+#define FW_VA_RENDER_CONTEXT_TYPE uint32_t
26183+#define FW_VA_RENDER_CONTEXT_MASK (0xFFFFFFFF)
26184+#define FW_VA_RENDER_CONTEXT_LSBMASK (0xFFFFFFFF)
26185+#define FW_VA_RENDER_CONTEXT_OFFSET (0x000C)
26186+#define FW_VA_RENDER_CONTEXT_SHIFT (0)
26187+
26188+// FW_VA_RENDER FENCE_VALUE
26189+#define FW_VA_RENDER_FENCE_VALUE_ALIGNMENT (4)
26190+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
26191+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
26192+#define FW_VA_RENDER_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
26193+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
26194+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
26195+
26196+// FW_VA_RENDER OPERATING_MODE
26197+#define FW_VA_RENDER_OPERATING_MODE_ALIGNMENT (4)
26198+#define FW_VA_RENDER_OPERATING_MODE_TYPE uint32_t
26199+#define FW_VA_RENDER_OPERATING_MODE_MASK (0xFFFFFFFF)
26200+#define FW_VA_RENDER_OPERATING_MODE_LSBMASK (0xFFFFFFFF)
26201+#define FW_VA_RENDER_OPERATING_MODE_OFFSET (0x0014)
26202+#define FW_VA_RENDER_OPERATING_MODE_SHIFT (0)
26203+
26204+// FW_VA_RENDER FIRST_MB_IN_SLICE
26205+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_ALIGNMENT (2)
26206+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_TYPE uint16_t
26207+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_MASK (0xFFFF)
26208+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_LSBMASK (0xFFFF)
26209+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_OFFSET (0x0018)
26210+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_SHIFT (0)
26211+
26212+// FW_VA_RENDER LAST_MB_IN_FRAME
26213+#define FW_VA_RENDER_LAST_MB_IN_FRAME_ALIGNMENT (2)
26214+#define FW_VA_RENDER_LAST_MB_IN_FRAME_TYPE uint16_t
26215+#define FW_VA_RENDER_LAST_MB_IN_FRAME_MASK (0xFFFF)
26216+#define FW_VA_RENDER_LAST_MB_IN_FRAME_LSBMASK (0xFFFF)
26217+#define FW_VA_RENDER_LAST_MB_IN_FRAME_OFFSET (0x001A)
26218+#define FW_VA_RENDER_LAST_MB_IN_FRAME_SHIFT (0)
26219+
26220+// FW_VA_RENDER FLAGS
26221+#define FW_VA_RENDER_FLAGS_ALIGNMENT (4)
26222+#define FW_VA_RENDER_FLAGS_TYPE uint32_t
26223+#define FW_VA_RENDER_FLAGS_MASK (0xFFFFFFFF)
26224+#define FW_VA_RENDER_FLAGS_LSBMASK (0xFFFFFFFF)
26225+#define FW_VA_RENDER_FLAGS_OFFSET (0x001C)
26226+#define FW_VA_RENDER_FLAGS_SHIFT (0)
26227+
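The ALIGNMENT/TYPE/OFFSET tables above fully describe the byte layout of each message; FW_VA_RENDER, for instance, is a 32-byte message with its fence value at offset 0x10. A sketch of writing one field, assuming fence_value is a hypothetical value to store:

    uint8_t msg[FW_VA_RENDER_SIZE];

    /* The 4-byte alignment recorded for this field makes the cast safe. */
    *(FW_VA_RENDER_FENCE_VALUE_TYPE *)
        (msg + FW_VA_RENDER_FENCE_VALUE_OFFSET) = fence_value;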
26228+#define FW_VA_CMD_COMPLETED_SIZE (12)
26229+
26230+// FW_VA_CMD_COMPLETED MSG_SIZE
26231+#define FW_VA_CMD_COMPLETED_MSG_SIZE_ALIGNMENT (1)
26232+#define FW_VA_CMD_COMPLETED_MSG_SIZE_TYPE uint8_t
26233+#define FW_VA_CMD_COMPLETED_MSG_SIZE_MASK (0xFF)
26234+#define FW_VA_CMD_COMPLETED_MSG_SIZE_LSBMASK (0xFF)
26235+#define FW_VA_CMD_COMPLETED_MSG_SIZE_OFFSET (0x0000)
26236+#define FW_VA_CMD_COMPLETED_MSG_SIZE_SHIFT (0)
26237+
26238+// FW_VA_CMD_COMPLETED ID
26239+#define FW_VA_CMD_COMPLETED_ID_ALIGNMENT (1)
26240+#define FW_VA_CMD_COMPLETED_ID_TYPE uint8_t
26241+#define FW_VA_CMD_COMPLETED_ID_MASK (0xFF)
26242+#define FW_VA_CMD_COMPLETED_ID_LSBMASK (0xFF)
26243+#define FW_VA_CMD_COMPLETED_ID_OFFSET (0x0001)
26244+#define FW_VA_CMD_COMPLETED_ID_SHIFT (0)
26245+
26246+// FW_VA_CMD_COMPLETED FENCE_VALUE
26247+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_ALIGNMENT (4)
26248+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
26249+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
26250+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
26251+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
26252+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
26253+
26254+// FW_VA_CMD_COMPLETED FLAGS
26255+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
26256+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
26257+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
26258+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
26259+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
26260+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
26261+
26262+#define FW_VA_CMD_FAILED_SIZE (12)
26263+
26264+// FW_VA_CMD_FAILED MSG_SIZE
26265+#define FW_VA_CMD_FAILED_MSG_SIZE_ALIGNMENT (1)
26266+#define FW_VA_CMD_FAILED_MSG_SIZE_TYPE uint8_t
26267+#define FW_VA_CMD_FAILED_MSG_SIZE_MASK (0xFF)
26268+#define FW_VA_CMD_FAILED_MSG_SIZE_LSBMASK (0xFF)
26269+#define FW_VA_CMD_FAILED_MSG_SIZE_OFFSET (0x0000)
26270+#define FW_VA_CMD_FAILED_MSG_SIZE_SHIFT (0)
26271+
26272+// FW_VA_CMD_FAILED ID
26273+#define FW_VA_CMD_FAILED_ID_ALIGNMENT (1)
26274+#define FW_VA_CMD_FAILED_ID_TYPE uint8_t
26275+#define FW_VA_CMD_FAILED_ID_MASK (0xFF)
26276+#define FW_VA_CMD_FAILED_ID_LSBMASK (0xFF)
26277+#define FW_VA_CMD_FAILED_ID_OFFSET (0x0001)
26278+#define FW_VA_CMD_FAILED_ID_SHIFT (0)
26279+
26280+// FW_VA_CMD_FAILED FLAGS
26281+#define FW_VA_CMD_FAILED_FLAGS_ALIGNMENT (2)
26282+#define FW_VA_CMD_FAILED_FLAGS_TYPE uint16_t
26283+#define FW_VA_CMD_FAILED_FLAGS_MASK (0xFFFF)
26284+#define FW_VA_CMD_FAILED_FLAGS_LSBMASK (0xFFFF)
26285+#define FW_VA_CMD_FAILED_FLAGS_OFFSET (0x0002)
26286+#define FW_VA_CMD_FAILED_FLAGS_SHIFT (0)
26287+
26288+// FW_VA_CMD_FAILED FENCE_VALUE
26289+#define FW_VA_CMD_FAILED_FENCE_VALUE_ALIGNMENT (4)
26290+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
26291+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
26292+#define FW_VA_CMD_FAILED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
26293+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
26294+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
26295+
26296+// FW_VA_CMD_FAILED IRQSTATUS
26297+#define FW_VA_CMD_FAILED_IRQSTATUS_ALIGNMENT (4)
26298+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
26299+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
26300+#define FW_VA_CMD_FAILED_IRQSTATUS_LSBMASK (0xFFFFFFFF)
26301+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
26302+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
26303+
26304+#define FW_VA_DEBLOCK_REQUIRED_SIZE (8)
26305+
26306+// FW_VA_DEBLOCK_REQUIRED MSG_SIZE
26307+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_ALIGNMENT (1)
26308+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_TYPE uint8_t
26309+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_MASK (0xFF)
26310+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_LSBMASK (0xFF)
26311+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_OFFSET (0x0000)
26312+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_SHIFT (0)
26313+
26314+// FW_VA_DEBLOCK_REQUIRED ID
26315+#define FW_VA_DEBLOCK_REQUIRED_ID_ALIGNMENT (1)
26316+#define FW_VA_DEBLOCK_REQUIRED_ID_TYPE uint8_t
26317+#define FW_VA_DEBLOCK_REQUIRED_ID_MASK (0xFF)
26318+#define FW_VA_DEBLOCK_REQUIRED_ID_LSBMASK (0xFF)
26319+#define FW_VA_DEBLOCK_REQUIRED_ID_OFFSET (0x0001)
26320+#define FW_VA_DEBLOCK_REQUIRED_ID_SHIFT (0)
26321+
26322+// FW_VA_DEBLOCK_REQUIRED CONTEXT
26323+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_ALIGNMENT (4)
26324+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
26325+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
26326+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_LSBMASK (0xFFFFFFFF)
26327+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
26328+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
26329+
26330+#define FW_VA_HW_PANIC_SIZE (12)
26331+
26332+// FW_VA_HW_PANIC FLAGS
26333+#define FW_VA_HW_PANIC_FLAGS_ALIGNMENT (2)
26334+#define FW_VA_HW_PANIC_FLAGS_TYPE uint16_t
26335+#define FW_VA_HW_PANIC_FLAGS_MASK (0xFFFF)
26336+#define FW_VA_HW_PANIC_FLAGS_LSBMASK (0xFFFF)
26337+#define FW_VA_HW_PANIC_FLAGS_OFFSET (0x0002)
26338+#define FW_VA_HW_PANIC_FLAGS_SHIFT (0)
26339+
26340+// FW_VA_HW_PANIC MSG_SIZE
26341+#define FW_VA_HW_PANIC_MSG_SIZE_ALIGNMENT (1)
26342+#define FW_VA_HW_PANIC_MSG_SIZE_TYPE uint8_t
26343+#define FW_VA_HW_PANIC_MSG_SIZE_MASK (0xFF)
26344+#define FW_VA_HW_PANIC_MSG_SIZE_LSBMASK (0xFF)
26345+#define FW_VA_HW_PANIC_MSG_SIZE_OFFSET (0x0000)
26346+#define FW_VA_HW_PANIC_MSG_SIZE_SHIFT (0)
26347+
26348+// FW_VA_HW_PANIC ID
26349+#define FW_VA_HW_PANIC_ID_ALIGNMENT (1)
26350+#define FW_VA_HW_PANIC_ID_TYPE uint8_t
26351+#define FW_VA_HW_PANIC_ID_MASK (0xFF)
26352+#define FW_VA_HW_PANIC_ID_LSBMASK (0xFF)
26353+#define FW_VA_HW_PANIC_ID_OFFSET (0x0001)
26354+#define FW_VA_HW_PANIC_ID_SHIFT (0)
26355+
26356+// FW_VA_HW_PANIC FENCE_VALUE
26357+#define FW_VA_HW_PANIC_FENCE_VALUE_ALIGNMENT (4)
26358+#define FW_VA_HW_PANIC_FENCE_VALUE_TYPE uint32_t
26359+#define FW_VA_HW_PANIC_FENCE_VALUE_MASK (0xFFFFFFFF)
26360+#define FW_VA_HW_PANIC_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
26361+#define FW_VA_HW_PANIC_FENCE_VALUE_OFFSET (0x0004)
26362+#define FW_VA_HW_PANIC_FENCE_VALUE_SHIFT (0)
26363+
26364+// FW_VA_HW_PANIC IRQSTATUS
26365+#define FW_VA_HW_PANIC_IRQSTATUS_ALIGNMENT (4)
26366+#define FW_VA_HW_PANIC_IRQSTATUS_TYPE uint32_t
26367+#define FW_VA_HW_PANIC_IRQSTATUS_MASK (0xFFFFFFFF)
26368+#define FW_VA_HW_PANIC_IRQSTATUS_LSBMASK (0xFFFFFFFF)
26369+#define FW_VA_HW_PANIC_IRQSTATUS_OFFSET (0x0008)
26370+#define FW_VA_HW_PANIC_IRQSTATUS_SHIFT (0)
26371+
26372+#endif
26373Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdxinit.c
26374===================================================================
26375--- /dev/null 1970-01-01 00:00:00.000000000 +0000
26376+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-02-05 13:29:33.000000000 +0000
26377@@ -0,0 +1,625 @@
26378+/**
26379+ * file psb_msvdxinit.c
26380+ * MSVDX initialization and mtx-firmware upload
26381+ *
26382+ */
26383+
26384+/**************************************************************************
26385+ *
26386+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
26387+ * Copyright (c) Imagination Technologies Limited, UK
26388+ * All Rights Reserved.
26389+ *
26390+ * Permission is hereby granted, free of charge, to any person obtaining a
26391+ * copy of this software and associated documentation files (the
26392+ * "Software"), to deal in the Software without restriction, including
26393+ * without limitation the rights to use, copy, modify, merge, publish,
26394+ * distribute, sub license, and/or sell copies of the Software, and to
26395+ * permit persons to whom the Software is furnished to do so, subject to
26396+ * the following conditions:
26397+ *
26398+ * The above copyright notice and this permission notice (including the
26399+ * next paragraph) shall be included in all copies or substantial portions
26400+ * of the Software.
26401+ *
26402+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26403+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26404+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26405+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
26406+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
26407+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26408+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
26409+ *
26410+ **************************************************************************/
26411+
26412+#include "drmP.h"
26413+#include "drm.h"
26414+#include "psb_drv.h"
26415+#include "psb_msvdx.h"
26416+#include <linux/firmware.h>
26417+
26418+/*MSVDX FW header*/
26419+struct msvdx_fw
26420+{
26421+ uint32_t ver;
26422+ uint32_t text_size;
26423+ uint32_t data_size;
26424+ uint32_t data_location;
26425+};
26426+
26427+int
26428+psb_wait_for_register (struct drm_psb_private *dev_priv,
26429+ uint32_t ui32Offset,
26430+ uint32_t ui32Value, uint32_t ui32Enable)
26431+{
26432+ uint32_t ui32Temp;
26433+ uint32_t ui32PollCount = 1000;
26434+ while (ui32PollCount)
26435+ {
26436+ ui32Temp = PSB_RMSVDX32 (ui32Offset);
26437+ if (ui32Value == (ui32Temp & ui32Enable)) /* Masked bits match the required value */
26438+ return 0; /* So exit */
26439+
26440+ /* Wait a bit */
26441+ DRM_UDELAY (100);
26442+ ui32PollCount--;
26443+ }
26444+ PSB_DEBUG_GENERAL
26445+ ("MSVDX: Timeout while waiting for register %08x: expecting %08x (mask %08x), got %08x\n",
26446+ ui32Offset, ui32Value, ui32Enable, ui32Temp);
26447+ return 1;
26448+}
26449+
26450+int
26451+psb_poll_mtx_irq (struct drm_psb_private *dev_priv)
26452+{
26453+ int ret = 0;
26454+ uint32_t MtxInt = 0;
26455+ REGIO_WRITE_FIELD_LITE (MtxInt, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
26456+
26457+ ret = psb_wait_for_register (dev_priv, MSVDX_INTERRUPT_STATUS, MtxInt, /* Required value */
26458+ MtxInt /* Enabled bits */ );
26459+ if (ret)
26460+ {
26461+ PSB_DEBUG_GENERAL
26462+ ("MSVDX: Error Mtx did not return int within a resonable time\n");
26463+
26464+ return ret;
26465+ }
26466+
26467+ PSB_DEBUG_GENERAL ("MSVDX: Got MTX Int\n");
26468+
26469+ /* Got it so clear the bit */
26470+ PSB_WMSVDX32 (MtxInt, MSVDX_INTERRUPT_CLEAR);
26471+
26472+ return ret;
26473+}
26474+
26475+void
26476+psb_write_mtx_core_reg (struct drm_psb_private *dev_priv,
26477+ const uint32_t ui32CoreRegister,
26478+ const uint32_t ui32Val)
26479+{
26480+ uint32_t ui32Reg = 0;
26481+
26482+ /* Put data in MTX_RW_DATA */
26483+ PSB_WMSVDX32 (ui32Val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
26484+
26485+ /* Clear DREADY and request a write */
26486+ ui32Reg = ui32CoreRegister;
26487+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
26488+ MTX_RNW, 0);
26489+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
26490+ MTX_DREADY, 0);
26491+ PSB_WMSVDX32 (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
26492+
26493+ psb_wait_for_register (dev_priv, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK, /* Required Value */
26494+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
26495+}
26496+
26497+void
26498+psb_upload_fw (struct drm_psb_private *dev_priv, const uint32_t ui32DataMem,
26499+ uint32_t ui32RamBankSize, uint32_t ui32Address,
26500+ const unsigned int uiWords, const uint32_t * const pui32Data)
26501+{
26502+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
26503+ (uint32_t) ~ 0;
26504+ uint32_t ui32AccessControl;
26505+
26506+ /* Save the access control register... */
26507+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
26508+
26509+ /* Wait for MCMSTAT to signal idle (value 1) */
26510+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
26511+ 0xffffffff /* Enables */ );
26512+
26513+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
26514+ {
26515+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
26516+
26517+ if (ui32RamId != ui32CurrBank)
26518+ {
26519+ ui32Addr = ui32Address >> 2;
26520+
26521+ ui32Ctrl = 0;
26522+
26523+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26524+ MSVDX_MTX_RAM_ACCESS_CONTROL,
26525+ MTX_MCMID, ui32RamId);
26526+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26527+ MSVDX_MTX_RAM_ACCESS_CONTROL,
26528+ MTX_MCM_ADDR, ui32Addr);
26529+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26530+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
26531+
26532+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
26533+
26534+ ui32CurrBank = ui32RamId;
26535+ }
26536+ ui32Address += 4;
26537+
26538+ PSB_WMSVDX32 (pui32Data[ui32Loop], MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
26539+
26540+ /* Wait for MCMSTAT to signal idle (value 1) */
26541+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
26542+ 0xffffffff /* Enables */ );
26543+ }
26544+ PSB_DEBUG_GENERAL ("MSVDX: Upload done\n");
26545+
26546+ /* Restore the access control register... */
26547+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
26548+}
26549+
26550+static int
26551+psb_verify_fw (struct drm_psb_private *dev_priv,
26552+ const uint32_t ui32RamBankSize,
26553+ const uint32_t ui32DataMem, uint32_t ui32Address,
26554+ const uint32_t uiWords, const uint32_t * const pui32Data)
26555+{
26556+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
26557+ (uint32_t) ~ 0;
26558+ uint32_t ui32AccessControl;
26559+ int ret = 0;
26560+
26561+ /* Save the access control register... */
26562+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
26563+
26564+ /* Wait for MCMSTAT to signal idle (value 1) */
26565+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
26566+ 0xffffffff /* Enables */ );
26567+
26568+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
26569+ {
26570+ uint32_t ui32ReadBackVal;
26571+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
26572+
26573+ if (ui32RamId != ui32CurrBank)
26574+ {
26575+ ui32Addr = ui32Address >> 2;
26576+ ui32Ctrl = 0;
26577+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26578+ MSVDX_MTX_RAM_ACCESS_CONTROL,
26579+ MTX_MCMID, ui32RamId);
26580+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26581+ MSVDX_MTX_RAM_ACCESS_CONTROL,
26582+ MTX_MCM_ADDR, ui32Addr);
26583+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26584+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
26585+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26586+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMR, 1);
26587+
26588+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
26589+
26590+ ui32CurrBank = ui32RamId;
26591+ }
26592+ ui32Address += 4;
26593+
26594+ /* Wait for MCMSTAT to signal idle (value 1) */
26595+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
26596+ 0xffffffff /* Enables */ );
26597+
26598+ ui32ReadBackVal = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
26599+ if (pui32Data[ui32Loop] != ui32ReadBackVal)
26600+ {
26601+ DRM_ERROR
26602+ ("psb: Firmware validation fails at index=%08x\n", ui32Loop);
26603+ ret = 1;
26604+ break;
26605+ }
26606+ }
26607+
26608+ /* Restore the access control register... */
26609+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
26610+
26611+ return ret;
26612+}
26613+
26614+static uint32_t *
26615+msvdx_get_fw (struct drm_device *dev,
26616+ const struct firmware **raw, uint8_t * name)
26617+{
26618+ int rc;
26619+ int *ptr = NULL;
26620+
26621+ rc = request_firmware (raw, name, &dev->pdev->dev);
26622+ if (rc < 0)
26623+ {
26624+ DRM_ERROR ("MSVDX: %s request_firmware failed: Reason %d\n", name, rc);
26625+ return NULL;
26626+ }
26627+
26628+ if ((*raw)->size < sizeof (struct msvdx_fw))
26629+ {
26630+ PSB_DEBUG_GENERAL ("MSVDX: %s is not the correct size (%zd)\n",
26631+ name, (*raw)->size);
26632+ return NULL;
26633+ }
26634+
26635+ ptr = (int *) ((*raw))->data;
26636+
26637+ if (!ptr)
26638+ {
26639+ PSB_DEBUG_GENERAL ("MSVDX: Failed to load %s\n", name);
26640+ return NULL;
26641+ }
26642+ /*another sanity check... */
26643+ if ((*raw)->size !=
26644+ (sizeof (struct msvdx_fw) +
26645+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
26646+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->data_size))
26647+ {
26648+ PSB_DEBUG_GENERAL ("MSVDX: %s is not the correct size (%zd)\n",
26649+ name, (*raw)->size);
26650+ return NULL;
26651+ }
26652+ return ptr;
26653+}
26654+
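The sanity check above implies the firmware image layout: a struct msvdx_fw header followed immediately by text_size 32-bit words of code and then data_size words of data. As a one-line sketch, the expected file size is therefore:

    size_t expected = sizeof(struct msvdx_fw) +
                      sizeof(uint32_t) * (fw->text_size + fw->data_size);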
26655+static int
26656+psb_setup_fw (struct drm_device *dev)
26657+{
26658+ struct drm_psb_private *dev_priv = dev->dev_private;
26659+ int ret = 0;
26660+
26661+ uint32_t ram_bank_size;
26662+ struct msvdx_fw *fw;
26663+ uint32_t *fw_ptr = NULL;
26664+ uint32_t *text_ptr = NULL;
26665+ uint32_t *data_ptr = NULL;
26666+ const struct firmware *raw = NULL;
26667+ /* TODO: assert the clock is on - if not, turn it on to upload code */
26668+
26669+ PSB_DEBUG_GENERAL ("MSVDX: psb_setup_fw\n");
26670+
26671+ /* Reset MTX */
26672+ PSB_WMSVDX32 (MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK, MSVDX_MTX_SOFT_RESET);
26673+
26674+ /* Initialise the communication control area to 0 */
26675+ if(dev_priv->psb_rev_id >= POULSBO_D1)
26676+ {
26677+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1 or later revision.\n");
26678+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1, MSVDX_COMMS_OFFSET_FLAGS);
26679+ }
26680+ else
26681+ {
26682+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0 or earlier revision.\n");
26683+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0, MSVDX_COMMS_OFFSET_FLAGS);
26684+ }
26685+
26686+ PSB_WMSVDX32 (0, MSVDX_COMMS_MSG_COUNTER);
26687+ PSB_WMSVDX32 (0, MSVDX_COMMS_SIGNATURE);
26688+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_RD_INDEX);
26689+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
26690+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_RD_INDEX);
26691+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
26692+ PSB_WMSVDX32 (0, MSVDX_COMMS_FW_STATUS);
26693+
26694+ /* read register bank size */
26695+ {
26696+ uint32_t ui32BankSize, ui32Reg;
26697+ ui32Reg = PSB_RMSVDX32 (MSVDX_MTX_RAM_BANK);
26698+ ui32BankSize =
26699+ REGIO_READ_FIELD (ui32Reg, MSVDX_MTX_RAM_BANK, CR_MTX_RAM_BANK_SIZE);
26700+ ram_bank_size = (uint32_t) (1 << (ui32BankSize + 2));
26701+ }
26702+
26703+ PSB_DEBUG_GENERAL ("MSVDX: RAM bank size = %d bytes\n", ram_bank_size);
26704+
26705+ fw_ptr = msvdx_get_fw (dev, &raw, "msvdx_fw.bin");
26706+
26707+ if (!fw_ptr)
26708+ {
26709+ DRM_ERROR ("psb: No valid msvdx_fw.bin firmware found.\n");
26710+ ret = 1;
26711+ goto out;
26712+ }
26713+
26714+ fw = (struct msvdx_fw *) fw_ptr;
26715+ if (fw->ver != 0x02)
26716+ {
26717+ DRM_ERROR
26718+ ("psb: msvdx_fw.bin firmware version mismatch, got version=%02x expected version=%02x\n",
26719+ fw->ver, 0x02);
26720+ ret = 1;
26721+ goto out;
26722+ }
26723+
26724+ text_ptr = (uint32_t *) ((uint8_t *) fw_ptr + sizeof (struct msvdx_fw));
26725+ data_ptr = text_ptr + fw->text_size;
26726+
26727+ PSB_DEBUG_GENERAL ("MSVDX: Retrieved pointers for firmware\n");
26728+ PSB_DEBUG_GENERAL ("MSVDX: text_size: %d\n", fw->text_size);
26729+ PSB_DEBUG_GENERAL ("MSVDX: data_size: %d\n", fw->data_size);
26730+ PSB_DEBUG_GENERAL ("MSVDX: data_location: 0x%x\n", fw->data_location);
26731+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of text: 0x%x\n", *text_ptr);
26732+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of data: 0x%x\n", *data_ptr);
26733+
26734+ PSB_DEBUG_GENERAL ("MSVDX: Uploading firmware\n");
26735+ psb_upload_fw (dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
26736+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr);
26737+ psb_upload_fw (dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
26738+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr);
26739+
26740+ /* TODO: verify the code upload, possibly only in debug builds */
26741+ if (psb_verify_fw
26742+ (dev_priv, ram_bank_size, MTX_CORE_CODE_MEM,
26743+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr))
26744+ {
26745+ /* Firmware code upload failed */
26746+ ret = 1;
26747+ goto out;
26748+ }
26749+ if (psb_verify_fw
26750+ (dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
26751+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr))
26752+ {
26753+ /* Firmware data upload failed */
26754+ ret = 1;
26755+ goto out;
26756+ }
26757+
26758+ /* -- Set starting PC address */
26759+ psb_write_mtx_core_reg (dev_priv, MTX_PC, PC_START_ADDRESS);
26760+
26761+ /* -- Turn on the thread */
26762+ PSB_WMSVDX32 (MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
26763+
26764+ /* Wait for the signature value to be written back */
26765+ ret = psb_wait_for_register (dev_priv, MSVDX_COMMS_SIGNATURE, MSVDX_COMMS_SIGNATURE_VALUE, /* Required value */
26766+ 0xffffffff /* Enabled bits */ );
26767+ if (ret)
26768+ {
26769+ DRM_ERROR ("psb: MSVDX firmware fails to initialize.\n");
26770+ goto out;
26771+ }
26772+
26773+ PSB_DEBUG_GENERAL ("MSVDX: MTX Initial indications OK\n");
26774+ PSB_DEBUG_GENERAL ("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
26775+ MSVDX_COMMS_AREA_ADDR);
26776+out:
26777+ if (raw)
26778+ {
26779+ PSB_DEBUG_GENERAL ("MSVDX: Releasing firmware resources\n");
26780+ release_firmware (raw);
26781+ }
26782+ return ret;
26783+}
26784+
26785+static void
26786+psb_free_ccb (struct drm_buffer_object **ccb)
26787+{
26788+ drm_bo_usage_deref_unlocked (ccb);
26789+ *ccb = NULL;
26790+}
26791+
26792+/*******************************************************************************
26793+
26794+ @Function psb_msvdx_reset
26795+
26796+ @Description
26797+
26798+ Reset chip and disable interrupts.
26799+
26800+ @Input dev_priv - private device info structure
26801+
26802+ @Return 0 - Success
26803+ 1 - Failure
26804+
26805+******************************************************************************/
26806+int
26807+psb_msvdx_reset (struct drm_psb_private *dev_priv)
26808+{
26809+ int ret = 0;
26810+
26811+ /* Issue software reset */
26812+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
26813+
26814+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0, /* Required value */
26815+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK
26816+ /* Enabled bits */ );
26817+
26818+ if (!ret)
26819+ {
26820+ /* Clear interrupt enabled flag */
26821+ PSB_WMSVDX32 (0, MSVDX_HOST_INTERRUPT_ENABLE);
26822+
26823+ /* Clear any pending interrupt flags */
26824+ PSB_WMSVDX32 (0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
26825+ }
26826+
26827+ mutex_destroy (&dev_priv->msvdx_mutex);
26828+
26829+ return ret;
26830+}
26831+
26832+static int
26833+psb_allocate_ccb (struct drm_device *dev,
26834+ struct drm_buffer_object **ccb,
26835+ uint32_t * base_addr, int size)
26836+{
26837+ int ret;
26838+ struct drm_bo_kmap_obj tmp_kmap;
26839+ int is_iomem;
26840+
26841+ ret = drm_buffer_object_create (dev, size,
26842+ drm_bo_type_kernel,
26843+ DRM_BO_FLAG_READ |
26844+ DRM_PSB_FLAG_MEM_KERNEL |
26845+ DRM_BO_FLAG_NO_EVICT,
26846+ DRM_BO_HINT_DONT_FENCE, 0, 0, ccb);
26847+ if (ret)
26848+ {
26849+ PSB_DEBUG_GENERAL ("Failed to allocate CCB.\n");
26850+ *ccb = NULL;
26851+ return 1;
26852+ }
26853+
26854+ ret = drm_bo_kmap (*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
26855+ if (ret)
26856+ {
26857+ PSB_DEBUG_GENERAL ("drm_bo_kmap failed ret: %d\n", ret);
26858+ drm_bo_usage_deref_unlocked (ccb);
26859+ *ccb = NULL;
26860+ return 1;
26861+ }
26862+
26863+ memset (drm_bmo_virtual (&tmp_kmap, &is_iomem), 0, size);
26864+ drm_bo_kunmap (&tmp_kmap);
26865+
26866+ *base_addr = (*ccb)->offset;
26867+ return 0;
26868+}
26869+
26870+int
26871+psb_msvdx_init (struct drm_device *dev)
26872+{
26873+ struct drm_psb_private *dev_priv = dev->dev_private;
26874+ uint32_t ui32Cmd;
26875+ int ret;
26876+
26877+ PSB_DEBUG_GENERAL ("MSVDX: psb_msvdx_init\n");
26878+
26879+ /* Initialize MSVDX command queueing */
26880+ INIT_LIST_HEAD (&dev_priv->msvdx_queue);
26881+ mutex_init (&dev_priv->msvdx_mutex);
26882+ spin_lock_init (&dev_priv->msvdx_lock);
26883+ dev_priv->msvdx_busy = 0;
26884+
26885+ /*figure out the stepping*/
26886+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &dev_priv->psb_rev_id );
26887+
26888+ /* Enable Clocks */
26889+ PSB_DEBUG_GENERAL ("Enabling clocks\n");
26890+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
26891+
26892+ /* Enable MMU by removing all bypass bits */
26893+ PSB_WMSVDX32 (0, MSVDX_MMU_CONTROL0);
26894+
26895+ PSB_DEBUG_GENERAL ("MSVDX: Setting up RENDEC\n");
26896+ /* Allocate device virtual memory as required by rendec.... */
26897+ if (!dev_priv->ccb0)
26898+ {
26899+ ret =
26900+ psb_allocate_ccb (dev, &dev_priv->ccb0,
26901+ &dev_priv->base_addr0, RENDEC_A_SIZE);
26902+ if (ret)
26903+ goto err_exit;
26904+ }
26905+
26906+ if (!dev_priv->ccb1)
26907+ {
26908+ ret =
26909+ psb_allocate_ccb (dev, &dev_priv->ccb1,
26910+ &dev_priv->base_addr1, RENDEC_B_SIZE);
26911+ if (ret)
26912+ goto err_exit;
26913+ }
26914+
26915+ PSB_DEBUG_GENERAL ("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
26916+ dev_priv->base_addr0, dev_priv->base_addr1);
26917+
26918+ PSB_WMSVDX32 (dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
26919+ PSB_WMSVDX32 (dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
26920+
26921+ ui32Cmd = 0;
26922+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
26923+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
26924+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
26925+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
26926+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE);
26927+
26928+ ui32Cmd = 0;
26929+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
26930+ RENDEC_DECODE_START_SIZE, 0);
26931+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_W, 1);
26932+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_R, 1);
26933+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
26934+ RENDEC_EXTERNAL_MEMORY, 1);
26935+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL1);
26936+
26937+ ui32Cmd = 0x00101010;
26938+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT0);
26939+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT1);
26940+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT2);
26941+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT3);
26942+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT4);
26943+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT5);
26944+
26945+ ui32Cmd = 0;
26946+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, 1);
26947+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL0);
26948+
26949+ ret = psb_setup_fw (dev);
26950+ if (ret)
26951+ goto err_exit;
26952+
26953+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
26954+
26955+ return 0;
26956+
26957+err_exit:
26958+ if (dev_priv->ccb0)
26959+ psb_free_ccb (&dev_priv->ccb0);
26960+ if (dev_priv->ccb1)
26961+ psb_free_ccb (&dev_priv->ccb1);
26962+
26963+ return 1;
26964+}
26965+
26966+int
26967+psb_msvdx_uninit (struct drm_device *dev)
26968+{
26969+ struct drm_psb_private *dev_priv = dev->dev_private;
26970+
26971+ /*Reset MSVDX chip */
26972+ psb_msvdx_reset (dev_priv);
26973+
26974+// PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
26975+ printk(KERN_INFO "MSVDX: setting clock to 0 in %s\n", __func__);
26976+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
26977+
26978+ /*Clean up resources...*/
26979+ if (dev_priv->ccb0)
26980+ psb_free_ccb (&dev_priv->ccb0);
26981+ if (dev_priv->ccb1)
26982+ psb_free_ccb (&dev_priv->ccb1);
26983+
26984+ return 0;
26985+}
26986+
26987+int psb_hw_info_ioctl(struct drm_device *dev, void *data,
26988+ struct drm_file *file_priv)
26989+{
26990+ struct drm_psb_private *dev_priv = dev->dev_private;
26991+ struct drm_psb_hw_info *hw_info = data;
26992+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
26993+
26994+ hw_info->rev_id = dev_priv->psb_rev_id;
26995+
26996+ /*read the fuse info to determine the caps*/
26997+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
26998+ pci_read_config_dword(pci_root, 0xD4, &hw_info->caps);
26999+
27000+ PSB_DEBUG_GENERAL("MSVDX: PSB caps: 0x%x\n", hw_info->caps);
27001+ return 0;
27002+}
27003Index: linux-2.6.27/drivers/gpu/drm/psb/psb_reg.h
27004===================================================================
27005--- /dev/null 1970-01-01 00:00:00.000000000 +0000
27006+++ linux-2.6.27/drivers/gpu/drm/psb/psb_reg.h 2009-02-05 13:29:33.000000000 +0000
27007@@ -0,0 +1,562 @@
27008+/**************************************************************************
27009+ *
27010+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
27011+ * Copyright (c) 2007, Intel Corporation.
27012+ * All Rights Reserved.
27013+ *
27014+ * This program is free software; you can redistribute it and/or modify it
27015+ * under the terms and conditions of the GNU General Public License,
27016+ * version 2, as published by the Free Software Foundation.
27017+ *
27018+ * This program is distributed in the hope it will be useful, but WITHOUT
27019+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27020+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27021+ * more details.
27022+ *
27023+ * You should have received a copy of the GNU General Public License along with
27024+ * this program; if not, write to the Free Software Foundation, Inc.,
27025+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27026+ *
27027+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27028+ * develop this driver.
27029+ *
27030+ **************************************************************************/
27031+/*
27032+ */
27033+#ifndef _PSB_REG_H_
27034+#define _PSB_REG_H_
27035+
27036+#define PSB_CR_CLKGATECTL 0x0000
27037+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
27038+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
27039+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
27040+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
27041+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
27042+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
27043+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
27044+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
27045+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
27046+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
27047+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
27048+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
27049+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
27050+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
27051+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
27052+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
27053+
27054+#define PSB_CR_CORE_ID 0x0010
27055+#define _PSB_CC_ID_ID_SHIFT (16)
27056+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
27057+#define _PSB_CC_ID_CONFIG_SHIFT (0)
27058+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
27059+
27060+#define PSB_CR_CORE_REVISION 0x0014
27061+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
27062+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
27063+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
27064+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
27065+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
27066+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
27067+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
27068+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
27069+
27070+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
27071+
27072+#define PSB_CR_SOFT_RESET 0x0080
27073+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
27074+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
27075+#define _PSB_CS_RESET_USE_RESET (1 << 4)
27076+#define _PSB_CS_RESET_TA_RESET (1 << 3)
27077+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
27078+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
27079+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
27080+
27081+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
27082+
27083+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
27084+
27085+#define PSB_CR_EVENT_STATUS2 0x0118
27086+
27087+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
27088+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
27089+
27090+#define PSB_CR_EVENT_STATUS 0x012C
27091+
27092+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
27093+
27094+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
27095+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
27096+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
27097+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
27098+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
27099+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
27100+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
27101+#define _PSB_CE_SW_EVENT (1 << 14)
27102+#define _PSB_CE_TA_FINISHED (1 << 13)
27103+#define _PSB_CE_TA_TERMINATE (1 << 12)
27104+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
27105+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
27106+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
27107+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
27108+
27109+
27110+#define PSB_USE_OFFSET_MASK 0x0007FFFF
27111+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
27112+#define PSB_CR_USE_CODE_BASE0 0x0A0C
27113+#define PSB_CR_USE_CODE_BASE1 0x0A10
27114+#define PSB_CR_USE_CODE_BASE2 0x0A14
27115+#define PSB_CR_USE_CODE_BASE3 0x0A18
27116+#define PSB_CR_USE_CODE_BASE4 0x0A1C
27117+#define PSB_CR_USE_CODE_BASE5 0x0A20
27118+#define PSB_CR_USE_CODE_BASE6 0x0A24
27119+#define PSB_CR_USE_CODE_BASE7 0x0A28
27120+#define PSB_CR_USE_CODE_BASE8 0x0A2C
27121+#define PSB_CR_USE_CODE_BASE9 0x0A30
27122+#define PSB_CR_USE_CODE_BASE10 0x0A34
27123+#define PSB_CR_USE_CODE_BASE11 0x0A38
27124+#define PSB_CR_USE_CODE_BASE12 0x0A3C
27125+#define PSB_CR_USE_CODE_BASE13 0x0A40
27126+#define PSB_CR_USE_CODE_BASE14 0x0A44
27127+#define PSB_CR_USE_CODE_BASE15 0x0A48
27128+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
27129+#define _PSB_CUC_BASE_DM_SHIFT (25)
27130+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
27131+#define _PSB_CUC_BASE_ADDR_SHIFT (0) // 1024-bit (128-byte) aligned address
27132+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
27133+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
27134+#define _PSB_CUC_DM_VERTEX (0)
27135+#define _PSB_CUC_DM_PIXEL (1)
27136+#define _PSB_CUC_DM_RESERVED (2)
27137+#define _PSB_CUC_DM_EDM (3)
27138+
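The indexed PSB_CR_USE_CODE_BASE(_i) form above is equivalent to the sixteen enumerated registers. A sketch of packing one base register value, assuming addr is a 128-byte-aligned device address, dm is one of the _PSB_CUC_DM_* codes, and an SGX register write accessor named PSB_WSGX32 (the SGX analogue of PSB_WMSVDX32) exists in the driver:

    uint32_t val = ((addr >> _PSB_CUC_BASE_ADDR_ALIGNSHIFT) <<
                    _PSB_CUC_BASE_ADDR_SHIFT) & _PSB_CUC_BASE_ADDR_MASK;

    /* Select the data master and program base register i. */
    val |= (dm << _PSB_CUC_BASE_DM_SHIFT) & _PSB_CUC_BASE_DM_MASK;
    PSB_WSGX32(val, PSB_CR_USE_CODE_BASE(i));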
27139+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
27140+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) // 1MB aligned address
27141+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
27142+
27143+#define PSB_CR_EVENT_KICKER 0x0AC4
27144+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) // 128-bit aligned address
27145+
27146+#define PSB_CR_EVENT_KICK 0x0AC8
27147+#define _PSB_CE_KICK_NOW (1 << 0)
27148+
27149+
27150+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
27151+
27152+#define PSB_CR_BIF_CTRL 0x0C00
27153+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
27154+#define _PSB_CB_CTRL_INVALDC (1 << 3)
27155+#define _PSB_CB_CTRL_FLUSH (1 << 2)
27156+
27157+#define PSB_CR_BIF_INT_STAT 0x0C04
27158+
27159+#define PSB_CR_BIF_FAULT 0x0C08
27160+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
27161+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
27162+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
27163+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
27164+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
27165+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
27166+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
27167+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
27168+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
27169+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
27170+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
27171+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
27172+
27173+#define PSB_CR_BIF_BANK0 0x0C78
27174+
27175+#define PSB_CR_BIF_BANK1 0x0C7C
27176+
27177+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
27178+
27179+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
27180+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
27181+
27182+#define PSB_CR_2D_SOCIF 0x0E18
27183+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
27184+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
27185+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
27186+
27187+#define PSB_CR_2D_BLIT_STATUS 0x0E04
27188+#define _PSB_C2B_STATUS_BUSY (1 << 24)
27189+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
27190+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
27191+
27192+/*
27193+ * 2D defs.
27194+ */
27195+
27196+/*
27197+ * 2D Slave Port Data : Block Header's Object Type
27198+ */
27199+
27200+#define PSB_2D_CLIP_BH (0x00000000)
27201+#define PSB_2D_PAT_BH (0x10000000)
27202+#define PSB_2D_CTRL_BH (0x20000000)
27203+#define PSB_2D_SRC_OFF_BH (0x30000000)
27204+#define PSB_2D_MASK_OFF_BH (0x40000000)
27205+#define PSB_2D_RESERVED1_BH (0x50000000)
27206+#define PSB_2D_RESERVED2_BH (0x60000000)
27207+#define PSB_2D_FENCE_BH (0x70000000)
27208+#define PSB_2D_BLIT_BH (0x80000000)
27209+#define PSB_2D_SRC_SURF_BH (0x90000000)
27210+#define PSB_2D_DST_SURF_BH (0xA0000000)
27211+#define PSB_2D_PAT_SURF_BH (0xB0000000)
27212+#define PSB_2D_SRC_PAL_BH (0xC0000000)
27213+#define PSB_2D_PAT_PAL_BH (0xD0000000)
27214+#define PSB_2D_MASK_SURF_BH (0xE0000000)
27215+#define PSB_2D_FLUSH_BH (0xF0000000)
27216+
27217+/*
27218+ * Clip Definition block (PSB_2D_CLIP_BH)
27219+ */
27220+#define PSB_2D_CLIPCOUNT_MAX (1)
27221+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
27222+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
27223+#define PSB_2D_CLIPCOUNT_SHIFT (0)
27224+// clip rectangle X min & max
27225+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
27226+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
27227+#define PSB_2D_CLIP_XMAX_SHIFT (12)
27228+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
27229+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
27230+#define PSB_2D_CLIP_XMIN_SHIFT (0)
27231+// clip rectangle Y min & max
27232+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
27233+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
27234+#define PSB_2D_CLIP_YMAX_SHIFT (12)
27235+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
27236+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
27237+#define PSB_2D_CLIP_YMIN_SHIFT (0)
27238+
27239+/*
27240+ * Pattern Control (PSB_2D_PAT_BH)
27241+ */
27242+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
27243+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
27244+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
27245+#define PSB_2D_PAT_WIDTH_SHIFT (5)
27246+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
27247+#define PSB_2D_PAT_YSTART_SHIFT (10)
27248+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
27249+#define PSB_2D_PAT_XSTART_SHIFT (15)
27250+
27251+/*
27252+ * 2D Control block (PSB_2D_CTRL_BH)
27253+ */
27254+// Present Flags
27255+#define PSB_2D_SRCCK_CTRL (0x00000001)
27256+#define PSB_2D_DSTCK_CTRL (0x00000002)
27257+#define PSB_2D_ALPHA_CTRL (0x00000004)
27258+// Colour Key Colour (SRC/DST)
27259+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
27260+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
27261+#define PSB_2D_CK_COL_SHIFT (0)
27262+// Colour Key Mask (SRC/DST)
27263+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
27264+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
27265+#define PSB_2D_CK_MASK_SHIFT (0)
27266+// Alpha Control (Alpha/RGB)
27267+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
27268+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
27269+#define PSB_2D_GBLALPHA_SHIFT (12)
27270+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
27271+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
27272+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
27273+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
27274+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
27275+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
27276+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
27277+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
27278+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
27279+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
27280+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
27281+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
27282+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
27283+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
27284+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
27285+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
27286+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
27287+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
27288+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
27289+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
27290+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
27291+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
27292+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
27293+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
27294+
27295+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
27296+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
27297+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
27298+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
27299+
27300+/*
27301+ *Source Offset (PSB_2D_SRC_OFF_BH)
27302+ */
27303+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
27304+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
27305+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
27306+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
27307+
27308+/*
27309+ * Mask Offset (PSB_2D_MASK_OFF_BH)
27310+ */
27311+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
27312+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
27313+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
27314+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
27315+
27316+/*
27317+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
27318+ */
27319+
27320+/*
27321+ *Blit Rectangle (PSB_2D_BLIT_BH)
27322+ */
27323+
27324+#define PSB_2D_ROT_MASK (3<<25)
27325+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
27326+#define PSB_2D_ROT_NONE (0<<25)
27327+#define PSB_2D_ROT_90DEGS (1<<25)
27328+#define PSB_2D_ROT_180DEGS (2<<25)
27329+#define PSB_2D_ROT_270DEGS (3<<25)
27330+
27331+#define PSB_2D_COPYORDER_MASK (3<<23)
27332+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
27333+#define PSB_2D_COPYORDER_TL2BR (0<<23)
27334+#define PSB_2D_COPYORDER_BR2TL (1<<23)
27335+#define PSB_2D_COPYORDER_TR2BL (2<<23)
27336+#define PSB_2D_COPYORDER_BL2TR (3<<23)
27337+
27338+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
27339+#define PSB_2D_DSTCK_DISABLE (0x00000000)
27340+#define PSB_2D_DSTCK_PASS (0x00200000)
27341+#define PSB_2D_DSTCK_REJECT (0x00400000)
27342+
27343+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
27344+#define PSB_2D_SRCCK_DISABLE (0x00000000)
27345+#define PSB_2D_SRCCK_PASS (0x00080000)
27346+#define PSB_2D_SRCCK_REJECT (0x00100000)
27347+
27348+#define PSB_2D_CLIP_ENABLE (0x00040000)
27349+
27350+#define PSB_2D_ALPHA_ENABLE (0x00020000)
27351+
27352+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
27353+#define PSB_2D_PAT_MASK (0x00010000)
27354+#define PSB_2D_USE_PAT (0x00010000)
27355+#define PSB_2D_USE_FILL (0x00000000)
27356+/*
27357+ * Tungsten Graphics note on rop codes: If rop A and rop B are
27358+ * identical, the mask surface will not be read and need not be
27359+ * set up.
27360+ */
27361+
27362+#define PSB_2D_ROP3B_MASK (0x0000FF00)
27363+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
27364+#define PSB_2D_ROP3B_SHIFT (8)
27365+// rop code A
27366+#define PSB_2D_ROP3A_MASK (0x000000FF)
27367+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
27368+#define PSB_2D_ROP3A_SHIFT (0)
27369+
27370+#define PSB_2D_ROP4_MASK (0x0000FFFF)
27371+/*
27372+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
27373+ * Fill Colour RGBA8888
27374+ */
27375+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
27376+#define PSB_2D_FILLCOLOUR_SHIFT (0)
27377+/*
27378+ * DWORD1: (Always Present)
27379+ * X Start (Dest)
27380+ * Y Start (Dest)
27381+ */
27382+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
27383+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
27384+#define PSB_2D_DST_XSTART_SHIFT (12)
27385+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
27386+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
27387+#define PSB_2D_DST_YSTART_SHIFT (0)
27388+/*
27389+ * DWORD2: (Always Present)
27390+ * X Size (Dest)
27391+ * Y Size (Dest)
27392+ */
27393+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
27394+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
27395+#define PSB_2D_DST_XSIZE_SHIFT (12)
27396+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
27397+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
27398+#define PSB_2D_DST_YSIZE_SHIFT (0)
27399+
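Putting the blit defines together: a 2D command is a block-header word carrying packed control bits, followed by the DWORDs that header declares (the fill colour only when pattern control selects Use Fill, then destination start and size). A hypothetical solid-fill command, with x, y, w and h as example coordinates; actual submission through the 2D slave port is omitted:

    uint32_t cmd[4];

    /* Header: blit, fill colour as pattern, top-left to bottom-right, PATCOPY rop. */
    cmd[0] = PSB_2D_BLIT_BH | PSB_2D_USE_FILL | PSB_2D_COPYORDER_TL2BR |
             (PSB_2D_ROP3_PATCOPY & PSB_2D_ROP4_MASK);
    cmd[1] = 0xFF0000FF;    /* RGBA8888 fill colour */
    cmd[2] = (x << PSB_2D_DST_XSTART_SHIFT) | (y << PSB_2D_DST_YSTART_SHIFT);
    cmd[3] = (w << PSB_2D_DST_XSIZE_SHIFT) | (h << PSB_2D_DST_YSIZE_SHIFT);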
27400+/*
27401+ * Source Surface (PSB_2D_SRC_SURF_BH)
27402+ */
27403+/*
27404+ * WORD 0
27405+ */
27406+
27407+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
27408+#define PSB_2D_SRC_1_PAL (0x00000000)
27409+#define PSB_2D_SRC_2_PAL (0x00008000)
27410+#define PSB_2D_SRC_4_PAL (0x00010000)
27411+#define PSB_2D_SRC_8_PAL (0x00018000)
27412+#define PSB_2D_SRC_8_ALPHA (0x00020000)
27413+#define PSB_2D_SRC_4_ALPHA (0x00028000)
27414+#define PSB_2D_SRC_332RGB (0x00030000)
27415+#define PSB_2D_SRC_4444ARGB (0x00038000)
27416+#define PSB_2D_SRC_555RGB (0x00040000)
27417+#define PSB_2D_SRC_1555ARGB (0x00048000)
27418+#define PSB_2D_SRC_565RGB (0x00050000)
27419+#define PSB_2D_SRC_0888ARGB (0x00058000)
27420+#define PSB_2D_SRC_8888ARGB (0x00060000)
27421+#define PSB_2D_SRC_8888UYVY (0x00068000)
27422+#define PSB_2D_SRC_RESERVED (0x00070000)
27423+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
27424+
27425+
27426+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
27427+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
27428+#define PSB_2D_SRC_STRIDE_SHIFT (0)
27429+/*
27430+ * WORD 1 - Base Address
27431+ */
27432+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
27433+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
27434+#define PSB_2D_SRC_ADDR_SHIFT (2)
27435+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
27436+
27437+/*
27438+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
27439+ */
27440+/*
27441+ * WORD 0
27442+ */
27443+
27444+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
27445+#define PSB_2D_PAT_1_PAL (0x00000000)
27446+#define PSB_2D_PAT_2_PAL (0x00008000)
27447+#define PSB_2D_PAT_4_PAL (0x00010000)
27448+#define PSB_2D_PAT_8_PAL (0x00018000)
27449+#define PSB_2D_PAT_8_ALPHA (0x00020000)
27450+#define PSB_2D_PAT_4_ALPHA (0x00028000)
27451+#define PSB_2D_PAT_332RGB (0x00030000)
27452+#define PSB_2D_PAT_4444ARGB (0x00038000)
27453+#define PSB_2D_PAT_555RGB (0x00040000)
27454+#define PSB_2D_PAT_1555ARGB (0x00048000)
27455+#define PSB_2D_PAT_565RGB (0x00050000)
27456+#define PSB_2D_PAT_0888ARGB (0x00058000)
27457+#define PSB_2D_PAT_8888ARGB (0x00060000)
27458+
27459+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
27460+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
27461+#define PSB_2D_PAT_STRIDE_SHIFT (0)
27462+/*
27463+ * WORD 1 - Base Address
27464+ */
27465+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
27466+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
27467+#define PSB_2D_PAT_ADDR_SHIFT (2)
27468+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
27469+
27470+/*
27471+ * Destination Surface (PSB_2D_DST_SURF_BH)
27472+ */
27473+/*
27474+ * WORD 0
27475+ */
27476+
27477+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
27478+#define PSB_2D_DST_332RGB (0x00030000)
27479+#define PSB_2D_DST_4444ARGB (0x00038000)
27480+#define PSB_2D_DST_555RGB (0x00040000)
27481+#define PSB_2D_DST_1555ARGB (0x00048000)
27482+#define PSB_2D_DST_565RGB (0x00050000)
27483+#define PSB_2D_DST_0888ARGB (0x00058000)
27484+#define PSB_2D_DST_8888ARGB (0x00060000)
27485+#define PSB_2D_DST_8888AYUV (0x00070000)
27486+
27487+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
27488+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
27489+#define PSB_2D_DST_STRIDE_SHIFT (0)
27490+/*
27491+ * WORD 1 - Base Address
27492+ */
27493+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
27494+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
27495+#define PSB_2D_DST_ADDR_SHIFT (2)
27496+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
27497+
27498+/*
27499+ * Mask Surface (PSB_2D_MASK_SURF_BH)
27500+ */
27501+/*
27502+ * WORD 0
27503+ */
27504+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
27505+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
27506+#define PSB_2D_MASK_STRIDE_SHIFT (0)
27507+/*
27508+ * WORD 1 - Base Address
27509+ */
27510+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
27511+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
27512+#define PSB_2D_MASK_ADDR_SHIFT (2)
27513+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
27514+
27515+/*
27516+ * Source Palette (PSB_2D_SRC_PAL_BH)
27517+ */
27518+
27519+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
27520+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
27521+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
27522+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
27523+
27524+/*
27525+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
27526+ */
27527+
27528+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
27529+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
27530+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
27531+#define PSB_2D_PATPAL_BYTEALIGN (1024)
27532+
27533+/*
27534+ * Rop3 Codes (2 LS bytes)
27535+ */
27536+
27537+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
27538+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
27539+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
27540+#define PSB_2D_ROP3_BLACKNESS (0x0000)
27541+#define PSB_2D_ROP3_SRC (0xCC)
27542+#define PSB_2D_ROP3_PAT (0xF0)
27543+#define PSB_2D_ROP3_DST (0xAA)
27544+
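/*
 * A short sketch of how the single-byte ROP3 operands above compose:
 * PSB_2D_ROP3_SRC, PSB_2D_ROP3_PAT and PSB_2D_ROP3_DST (0xCC, 0xF0, 0xAA)
 * are the three truth-table columns, so bitwise-combining them yields the
 * ROP3 code for the corresponding pixel operation, e.g. SRC & PAT == 0xC0
 * and SRC ^ DST == 0x66 (the classic XOR rop). The hardware takes two such
 * bytes ("2 LS bytes" above); replicating one byte, as PSB_2D_ROP3_SRCCOPY
 * (0xCCCC) does, presumably makes the choice between the two irrelevant.
 */
#define PSB_2D_ROP3_EXAMPLE_SRC_AND_PAT \
	(((PSB_2D_ROP3_SRC & PSB_2D_ROP3_PAT) << 8) | \
	 (PSB_2D_ROP3_SRC & PSB_2D_ROP3_PAT))	/* 0xC0C0 */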
27545+
27546+/*
27547+ * Sizes.
27548+ */
27549+
27550+#define PSB_SCENE_HW_COOKIE_SIZE 16
27551+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
27552+
27553+/*
27554+ * Scene constants.
27555+ */
27556+
27557+#define PSB_NUM_HW_SCENES 2
27558+
27559+/*
27560+ * Scheduler completion actions.
27561+ */
27562+
27563+#define PSB_RASTER_BLOCK 0
27564+#define PSB_RASTER 1
27565+#define PSB_RETURN 2
27566+#define PSB_TA 3
27567+
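/*
 * As used by the psb_ta_done() / psb_raster_done() handlers in
 * psb_schedule.c later in this patch: PSB_RASTER_BLOCK queues the task for
 * rasterization while blocking further TA work, PSB_RASTER queues it for
 * rasterization, PSB_RETURN completes the task and recycles its hw scene,
 * and PSB_TA requeues it for another TA pass.
 */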
27568+
27569+#endif
27570Index: linux-2.6.27/drivers/gpu/drm/psb/psb_regman.c
27571===================================================================
27572--- /dev/null 1970-01-01 00:00:00.000000000 +0000
27573+++ linux-2.6.27/drivers/gpu/drm/psb/psb_regman.c 2009-02-05 13:29:33.000000000 +0000
27574@@ -0,0 +1,175 @@
27575+/**************************************************************************
27576+ * Copyright (c) 2007, Intel Corporation.
27577+ * All Rights Reserved.
27578+ *
27579+ * This program is free software; you can redistribute it and/or modify it
27580+ * under the terms and conditions of the GNU General Public License,
27581+ * version 2, as published by the Free Software Foundation.
27582+ *
27583+ * This program is distributed in the hope it will be useful, but WITHOUT
27584+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27585+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27586+ * more details.
27587+ *
27588+ * You should have received a copy of the GNU General Public License along with
27589+ * this program; if not, write to the Free Software Foundation, Inc.,
27590+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27591+ *
27592+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27593+ * develop this driver.
27594+ *
27595+ **************************************************************************/
27598+
27599+#include "drmP.h"
27600+#include "psb_drv.h"
27601+
27602+struct psb_use_reg {
27603+ struct drm_reg reg;
27604+ struct drm_psb_private *dev_priv;
27605+ uint32_t reg_seq;
27606+ uint32_t base;
27607+ uint32_t data_master;
27608+};
27609+
27610+struct psb_use_reg_data {
27611+ uint32_t base;
27612+ uint32_t size;
27613+ uint32_t data_master;
27614+};
27615+
27616+static int psb_use_reg_reusable(const struct drm_reg *reg, const void *data)
27617+{
27618+ struct psb_use_reg *use_reg =
27619+ container_of(reg, struct psb_use_reg, reg);
27620+ struct psb_use_reg_data *use_data = (struct psb_use_reg_data *)data;
27621+
27622+ return ((use_reg->base <= use_data->base) &&
27623+ (use_reg->base + PSB_USE_OFFSET_SIZE >
27624+ use_data->base + use_data->size) &&
27625+ use_reg->data_master == use_data->data_master);
27626+}
27627+
27628+static int psb_use_reg_set(struct psb_use_reg *use_reg,
27629+ const struct psb_use_reg_data *use_data)
27630+{
27631+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
27632+
27633+ if (use_reg->reg.fence == NULL)
27634+ use_reg->data_master = use_data->data_master;
27635+
27636+ if (use_reg->reg.fence == NULL &&
27637+ !psb_use_reg_reusable(&use_reg->reg, (const void *)use_data)) {
27638+
27639+ use_reg->base = use_data->base & ~PSB_USE_OFFSET_MASK;
27640+ use_reg->data_master = use_data->data_master;
27641+
27642+ if (!psb_use_reg_reusable(&use_reg->reg,
27643+ (const void *)use_data)) {
27644+ DRM_ERROR("USE base mechanism didn't support "
27645+ "buffer size or alignment\n");
27646+ return -EINVAL;
27647+ }
27648+
27649+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
27650+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
27651+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
27652+ }
27653+ return 0;
27654+
27655+}
27656+
27657+int psb_grab_use_base(struct drm_psb_private *dev_priv,
27658+ unsigned long base,
27659+ unsigned long size,
27660+ unsigned int data_master,
27661+ uint32_t fence_class,
27662+ uint32_t fence_type,
27663+ int no_wait,
27664+ int interruptible, int *r_reg, uint32_t *r_offset)
27665+{
27666+ struct psb_use_reg_data use_data = {
27667+ .base = base,
27668+ .size = size,
27669+ .data_master = data_master
27670+ };
27671+ int ret;
27672+
27673+ struct drm_reg *reg;
27674+ struct psb_use_reg *use_reg;
27675+
27676+ ret = drm_regs_alloc(&dev_priv->use_manager,
27677+ (const void *)&use_data,
27678+ fence_class,
27679+ fence_type, interruptible, no_wait, &reg);
27680+ if (ret)
27681+ return ret;
27682+
27683+ use_reg = container_of(reg, struct psb_use_reg, reg);
27684+ ret = psb_use_reg_set(use_reg, &use_data);
27685+
27686+ if (ret)
27687+ return ret;
27688+
27689+ *r_reg = use_reg->reg_seq;
27690+ *r_offset = base - use_reg->base;
27691+
27692+ return 0;
27693+}
27694+
27695+static void psb_use_reg_destroy(struct drm_reg *reg)
27696+{
27697+ struct psb_use_reg *use_reg =
27698+ container_of(reg, struct psb_use_reg, reg);
27699+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
27700+
27701+ PSB_WSGX32(PSB_ALPL(0, _PSB_CUC_BASE_ADDR),
27702+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
27703+
27704+ drm_free(use_reg, sizeof(*use_reg), DRM_MEM_DRIVER);
27705+}
27706+
27707+int psb_init_use_base(struct drm_psb_private *dev_priv,
27708+ unsigned int reg_start, unsigned int reg_num)
27709+{
27710+ struct psb_use_reg *use_reg;
27711+ int i;
27712+ int ret = 0;
27713+
27714+ mutex_lock(&dev_priv->cmdbuf_mutex);
27715+
27716+ drm_regs_init(&dev_priv->use_manager,
27717+ &psb_use_reg_reusable, &psb_use_reg_destroy);
27718+
27719+ for (i = reg_start; i < reg_start + reg_num; ++i) {
27720+ use_reg = drm_calloc(1, sizeof(*use_reg), DRM_MEM_DRIVER);
27721+ if (!use_reg) {
27722+ ret = -ENOMEM;
27723+ goto out;
27724+ }
27725+
27726+ use_reg->dev_priv = dev_priv;
27727+ use_reg->reg_seq = i;
27728+ use_reg->base = 0;
27729+ use_reg->data_master = _PSB_CUC_DM_PIXEL;
27730+
27731+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
27732+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
27733+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
27734+
27735+ drm_regs_add(&dev_priv->use_manager, &use_reg->reg);
27736+ }
27737+ out:
27738+ mutex_unlock(&dev_priv->cmdbuf_mutex);
27739+
27740+ return ret;
27741+
27742+}
27743+
27744+void psb_takedown_use_base(struct drm_psb_private *dev_priv)
27745+{
27746+ mutex_lock(&dev_priv->cmdbuf_mutex);
27747+ drm_regs_free(&dev_priv->use_manager);
27748+ mutex_unlock(&dev_priv->cmdbuf_mutex);
27749+}
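/*
 * A minimal usage sketch for the USE base interface above; the register
 * range, shader offset and size are placeholders, not values taken from
 * the driver.
 */
static int psb_use_base_example(struct drm_psb_private *dev_priv)
{
	int reg;
	uint32_t offset;
	int ret;

	/* Populate USE base registers 0..3 once at load time. */
	ret = psb_init_use_base(dev_priv, 0, 4);
	if (ret)
		return ret;

	/*
	 * Map a 4 KiB USSE code window; on success, reg holds the register
	 * index and offset the code's offset within that register's base.
	 */
	ret = psb_grab_use_base(dev_priv, 0x10000, 0x1000,
				_PSB_CUC_DM_PIXEL, PSB_ENGINE_TA,
				DRM_FENCE_TYPE_EXE, 0, 1, &reg, &offset);
	if (ret)
		return ret;

	/* ... emit commands referencing (reg, offset) here ... */

	psb_takedown_use_base(dev_priv);
	return 0;
}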
27750Index: linux-2.6.27/drivers/gpu/drm/psb/psb_reset.c
27751===================================================================
27752--- /dev/null 1970-01-01 00:00:00.000000000 +0000
27753+++ linux-2.6.27/drivers/gpu/drm/psb/psb_reset.c 2009-02-05 13:29:33.000000000 +0000
27754@@ -0,0 +1,374 @@
27755+/**************************************************************************
27756+ * Copyright (c) 2007, Intel Corporation.
27757+ * All Rights Reserved.
27758+ *
27759+ * This program is free software; you can redistribute it and/or modify it
27760+ * under the terms and conditions of the GNU General Public License,
27761+ * version 2, as published by the Free Software Foundation.
27762+ *
27763+ * This program is distributed in the hope it will be useful, but WITHOUT
27764+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27765+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27766+ * more details.
27767+ *
27768+ * You should have received a copy of the GNU General Public License along with
27769+ * this program; if not, write to the Free Software Foundation, Inc.,
27770+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27771+ *
27772+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27773+ * develop this driver.
27774+ *
27775+ **************************************************************************/
27776+/*
27777+ * Authors:
27778+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
27779+ */
27780+
27781+#include "drmP.h"
27782+#include "psb_drv.h"
27783+#include "psb_reg.h"
27784+#include "psb_scene.h"
27785+#include "psb_msvdx.h"
27786+
27787+#define PSB_2D_TIMEOUT_MSEC 100
27788+
27789+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
27790+{
27791+ uint32_t val;
27792+
27793+ val = _PSB_CS_RESET_BIF_RESET |
27794+ _PSB_CS_RESET_DPM_RESET |
27795+ _PSB_CS_RESET_TA_RESET |
27796+ _PSB_CS_RESET_USE_RESET |
27797+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
27798+
27799+ if (reset_2d)
27800+ val |= _PSB_CS_RESET_TWOD_RESET;
27801+
27802+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
27803+ (void)PSB_RSGX32(PSB_CR_SOFT_RESET);
27804+
27805+ msleep(1);
27806+
27807+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
27808+ wmb();
27809+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
27810+ PSB_CR_BIF_CTRL);
27811+ wmb();
27812+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
27813+
27814+ msleep(1);
27815+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
27816+ PSB_CR_BIF_CTRL);
27817+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
27818+}
27819+
27820+void psb_print_pagefault(struct drm_psb_private *dev_priv)
27821+{
27822+ uint32_t val;
27823+ uint32_t addr;
27824+
27825+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
27826+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
27827+
27828+ if (val) {
27829+ if (val & _PSB_CBI_STAT_PF_N_RW)
27830+ DRM_ERROR("Poulsbo MMU page fault:\n");
27831+ else
27832+ DRM_ERROR("Poulsbo MMU read / write "
27833+ "protection fault:\n");
27834+
27835+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
27836+ DRM_ERROR("\tCache requestor.\n");
27837+ if (val & _PSB_CBI_STAT_FAULT_TA)
27838+ DRM_ERROR("\tTA requestor.\n");
27839+ if (val & _PSB_CBI_STAT_FAULT_VDM)
27840+ DRM_ERROR("\tVDM requestor.\n");
27841+ if (val & _PSB_CBI_STAT_FAULT_2D)
27842+ DRM_ERROR("\t2D requestor.\n");
27843+ if (val & _PSB_CBI_STAT_FAULT_PBE)
27844+ DRM_ERROR("\tPBE requestor.\n");
27845+ if (val & _PSB_CBI_STAT_FAULT_TSP)
27846+ DRM_ERROR("\tTSP requestor.\n");
27847+ if (val & _PSB_CBI_STAT_FAULT_ISP)
27848+ DRM_ERROR("\tISP requestor.\n");
27849+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
27850+ DRM_ERROR("\tUSSEPDS requestor.\n");
27851+ if (val & _PSB_CBI_STAT_FAULT_HOST)
27852+ DRM_ERROR("\tHost requestor.\n");
27853+
27854+ DRM_ERROR("\tMMU failing address is 0x%08x.\n", (unsigned)addr);
27855+ }
27856+}
27857+
27858+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
27859+{
27860+ struct timer_list *wt = &dev_priv->watchdog_timer;
27861+ unsigned long irq_flags;
27862+
27863+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27864+ if (dev_priv->timer_available && !timer_pending(wt)) {
27865+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
27866+ add_timer(wt);
27867+ }
27868+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
27869+}
27870+
27871+#if 0
27872+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
27873+ unsigned int engine, int *lockup, int *idle)
27874+{
27875+ uint32_t received_seq;
27876+
27877+ received_seq = dev_priv->comm[engine << 4];
27878+ spin_lock(&dev_priv->sequence_lock);
27879+ *idle = (received_seq == dev_priv->sequence[engine]);
27880+ spin_unlock(&dev_priv->sequence_lock);
27881+
27882+ if (*idle) {
27883+ dev_priv->idle[engine] = 1;
27884+ *lockup = 0;
27885+ return;
27886+ }
27887+
27888+ if (dev_priv->idle[engine]) {
27889+ dev_priv->idle[engine] = 0;
27890+ dev_priv->last_sequence[engine] = received_seq;
27891+ *lockup = 0;
27892+ return;
27893+ }
27894+
27895+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
27896+}
27897+
27898+#endif
27899+static void psb_watchdog_func(unsigned long data)
27900+{
27901+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
27902+ int lockup;
27903+ int msvdx_lockup;
27904+ int msvdx_idle;
27905+ int lockup_2d;
27906+ int idle_2d;
27907+ int idle;
27908+ unsigned long irq_flags;
27909+
27910+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
27911+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
27912+#if 0
27913+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
27914+#else
27915+ lockup_2d = 0;
27916+ idle_2d = 1;
27917+#endif
27918+ if (lockup || msvdx_lockup || lockup_2d) {
27919+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27920+ dev_priv->timer_available = 0;
27921+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
27922+ if (lockup) {
27923+ psb_print_pagefault(dev_priv);
27924+ schedule_work(&dev_priv->watchdog_wq);
27925+ }
27926+ if (msvdx_lockup)
27927+ schedule_work(&dev_priv->msvdx_watchdog_wq);
27928+ }
27929+ if (!idle || !msvdx_idle || !idle_2d)
27930+ psb_schedule_watchdog(dev_priv);
27931+}
27932+
27933+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
27934+{
27935+ struct drm_psb_private *dev_priv = dev->dev_private;
27936+ struct psb_msvdx_cmd_queue *msvdx_cmd;
27937+ struct list_head *list, *next;
27938+ /* Flush the msvdx cmd queue and signal all fences in the queue. */
27939+ list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
27940+ msvdx_cmd = list_entry(list, struct psb_msvdx_cmd_queue, head);
27941+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
27942+ msvdx_cmd->sequence);
27943+ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
27944+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
27945+ dev_priv->msvdx_current_sequence,
27946+ DRM_FENCE_TYPE_EXE, DRM_CMD_HANG);
27947+ list_del(list);
27948+ kfree(msvdx_cmd->cmd);
27949+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
27950+ DRM_MEM_DRIVER);
27951+ }
27952+}
27953+
27954+static void psb_msvdx_reset_wq(struct work_struct *work)
27955+{
27956+ struct drm_psb_private *dev_priv =
27957+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
27958+
27959+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
27960+ unsigned long irq_flags;
27961+
27962+ mutex_lock(&dev_priv->msvdx_mutex);
27963+ dev_priv->msvdx_needs_reset = 1;
27964+ dev_priv->msvdx_current_sequence++;
27965+ PSB_DEBUG_GENERAL
27966+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
27967+ dev_priv->msvdx_current_sequence);
27968+
27969+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
27970+ dev_priv->msvdx_current_sequence, DRM_FENCE_TYPE_EXE,
27971+ DRM_CMD_HANG);
27972+
27973+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
27974+ dev_priv->timer_available = 1;
27975+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
27976+
27977+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
27978+ psb_msvdx_flush_cmd_queue(scheduler->dev);
27979+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
27980+
27981+ psb_schedule_watchdog(dev_priv);
27982+ mutex_unlock(&dev_priv->msvdx_mutex);
27983+}
27984+
27985+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
27986+{
27987+ struct psb_xhw_buf buf;
27988+ uint32_t bif_ctrl;
27989+
27990+ INIT_LIST_HEAD(&buf.head);
27991+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
27992+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
27993+ PSB_WSGX32(bif_ctrl |
27994+ _PSB_CB_CTRL_CLEAR_FAULT |
27995+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
27996+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
27997+ msleep(1);
27998+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
27999+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
28000+ return psb_xhw_reset_dpm(dev_priv, &buf);
28001+}
28002+
28003+/*
28004+ * Block command submission and reset hardware and schedulers.
28005+ */
28006+
28007+static void psb_reset_wq(struct work_struct *work)
28008+{
28009+ struct drm_psb_private *dev_priv =
28010+ container_of(work, struct drm_psb_private, watchdog_wq);
28011+ int lockup_2d;
28012+ int idle_2d;
28013+ unsigned long irq_flags;
28014+ int ret;
28015+ int reset_count = 0;
28016+ struct psb_xhw_buf buf;
28017+ uint32_t xhw_lockup;
28018+
28019+ /*
28020+ * Block command submission.
28021+ */
28022+
28023+ mutex_lock(&dev_priv->reset_mutex);
28024+
28025+ INIT_LIST_HEAD(&buf.head);
28026+ if (psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup) == 0) {
28027+ if (xhw_lockup == 0 && psb_extend_raster_timeout(dev_priv) == 0) {
28028+ /*
28029+ * no lockup, just re-schedule
28030+ */
28031+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28032+ dev_priv->timer_available = 1;
28033+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
28034+ irq_flags);
28035+ psb_schedule_watchdog(dev_priv);
28036+ mutex_unlock(&dev_priv->reset_mutex);
28037+ return;
28038+ }
28039+ }
28040+#if 0
28041+ msleep(PSB_2D_TIMEOUT_MSEC);
28042+
28043+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
28044+
28045+ if (lockup_2d) {
28046+ uint32_t seq_2d;
28047+ spin_lock(&dev_priv->sequence_lock);
28048+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
28049+ spin_unlock(&dev_priv->sequence_lock);
28050+ psb_fence_error(dev_priv->scheduler.dev,
28051+ PSB_ENGINE_2D,
28052+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
28053+ DRM_INFO("Resetting 2D engine.\n");
28054+ }
28055+
28056+ psb_reset(dev_priv, lockup_2d);
28057+#else
28058+ (void)lockup_2d;
28059+ (void)idle_2d;
28060+ psb_reset(dev_priv, 0);
28061+#endif
28062+ (void)psb_xhw_mmu_reset(dev_priv);
28063+ DRM_INFO("Resetting scheduler.\n");
28064+ psb_scheduler_pause(dev_priv);
28065+ psb_scheduler_reset(dev_priv, -EBUSY);
28066+ psb_scheduler_ta_mem_check(dev_priv);
28067+
28068+ while (dev_priv->ta_mem &&
28069+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
28070+
28071+ /*
28072+ * TA memory is currently fenced so offsets
28073+ * are valid. Reload offsets into the dpm now.
28074+ */
28075+
28076+ struct psb_xhw_buf buf;
28077+ INIT_LIST_HEAD(&buf.head);
28078+
28079+ msleep(100);
28080+ DRM_INFO("Trying to reload TA memory.\n");
28081+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
28082+ PSB_TA_MEM_FLAG_TA |
28083+ PSB_TA_MEM_FLAG_RASTER |
28084+ PSB_TA_MEM_FLAG_HOSTA |
28085+ PSB_TA_MEM_FLAG_HOSTD |
28086+ PSB_TA_MEM_FLAG_INIT,
28087+ dev_priv->ta_mem->ta_memory->offset,
28088+ dev_priv->ta_mem->hw_data->offset,
28089+ dev_priv->ta_mem->hw_cookie);
28090+ if (!ret)
28091+ break;
28092+
28093+ psb_reset(dev_priv, 0);
28094+ (void)psb_xhw_mmu_reset(dev_priv);
28095+ }
28096+
28097+ psb_scheduler_restart(dev_priv);
28098+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28099+ dev_priv->timer_available = 1;
28100+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28101+ mutex_unlock(&dev_priv->reset_mutex);
28102+}
28103+
28104+void psb_watchdog_init(struct drm_psb_private *dev_priv)
28105+{
28106+ struct timer_list *wt = &dev_priv->watchdog_timer;
28107+ unsigned long irq_flags;
28108+
28109+ spin_lock_init(&dev_priv->watchdog_lock);
28110+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28111+ init_timer(wt);
28112+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
28113+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
28114+ wt->data = (unsigned long)dev_priv;
28115+ wt->function = &psb_watchdog_func;
28116+ dev_priv->timer_available = 1;
28117+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28118+}
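/*
 * An equivalent initialization using the setup_timer() helper available in
 * 2.6.27, shown as a sketch; behaviour is unchanged, and the lock is not
 * taken since this runs before the timer can ever be scheduled.
 */
static void psb_watchdog_init_alt(struct drm_psb_private *dev_priv)
{
	spin_lock_init(&dev_priv->watchdog_lock);
	setup_timer(&dev_priv->watchdog_timer, psb_watchdog_func,
		    (unsigned long)dev_priv);
	INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
	INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
	dev_priv->timer_available = 1;
}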
28119+
28120+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
28121+{
28122+ unsigned long irq_flags;
28123+
28124+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28125+ dev_priv->timer_available = 0;
28126+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28127+ (void)del_timer_sync(&dev_priv->watchdog_timer);
28128+}
28129Index: linux-2.6.27/drivers/gpu/drm/psb/psb_scene.c
28130===================================================================
28131--- /dev/null 1970-01-01 00:00:00.000000000 +0000
28132+++ linux-2.6.27/drivers/gpu/drm/psb/psb_scene.c 2009-02-05 13:29:33.000000000 +0000
28133@@ -0,0 +1,531 @@
28134+/**************************************************************************
28135+ * Copyright (c) 2007, Intel Corporation.
28136+ * All Rights Reserved.
28137+ *
28138+ * This program is free software; you can redistribute it and/or modify it
28139+ * under the terms and conditions of the GNU General Public License,
28140+ * version 2, as published by the Free Software Foundation.
28141+ *
28142+ * This program is distributed in the hope it will be useful, but WITHOUT
28143+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28144+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28145+ * more details.
28146+ *
28147+ * You should have received a copy of the GNU General Public License along with
28148+ * this program; if not, write to the Free Software Foundation, Inc.,
28149+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28150+ *
28151+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28152+ * develop this driver.
28153+ *
28154+ **************************************************************************/
28155+/*
28156+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28157+ */
28158+
28159+#include "drmP.h"
28160+#include "psb_drv.h"
28161+#include "psb_scene.h"
28162+
28163+void psb_clear_scene_atomic(struct psb_scene *scene)
28164+{
28165+ int i;
28166+ struct page *page;
28167+ void *v;
28168+
28169+ for (i = 0; i < scene->clear_num_pages; ++i) {
28170+ page = drm_ttm_get_page(scene->hw_data->ttm,
28171+ scene->clear_p_start + i);
28172+ if (in_irq())
28173+ v = kmap_atomic(page, KM_IRQ0);
28174+ else
28175+ v = kmap_atomic(page, KM_USER0);
28176+
28177+ memset(v, 0, PAGE_SIZE);
28178+
28179+ if (in_irq())
28180+ kunmap_atomic(v, KM_IRQ0);
28181+ else
28182+ kunmap_atomic(v, KM_USER0);
28183+ }
28184+}
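/*
 * The KM_IRQ0 / KM_USER0 split above matters because atomic kmap slots are
 * per-context on this kernel generation: clearing from interrupt context
 * through the process-context slot could clobber a mapping held by the
 * interrupted task, so the slot is chosen to match in_irq().
 */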
28185+
28186+int psb_clear_scene(struct psb_scene *scene)
28187+{
28188+ struct drm_bo_kmap_obj bmo;
28189+ int is_iomem;
28190+ void *addr;
28191+
28192+ int ret = drm_bo_kmap(scene->hw_data, scene->clear_p_start,
28193+ scene->clear_num_pages, &bmo);
28194+
28195+ PSB_DEBUG_RENDER("Scene clear\n");
28196+ if (ret)
28197+ return ret;
28198+
28199+ addr = drm_bmo_virtual(&bmo, &is_iomem);
28200+ BUG_ON(is_iomem);
28201+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
28202+ drm_bo_kunmap(&bmo);
28203+
28204+ return 0;
28205+}
28206+
28207+static void psb_destroy_scene_devlocked(struct psb_scene *scene)
28208+{
28209+ if (!scene)
28210+ return;
28211+
28212+ PSB_DEBUG_RENDER("Scene destroy\n");
28213+ drm_bo_usage_deref_locked(&scene->hw_data);
28214+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
28215+}
28216+
28217+void psb_scene_unref_devlocked(struct psb_scene **scene)
28218+{
28219+ struct psb_scene *tmp_scene = *scene;
28220+
28221+ PSB_DEBUG_RENDER("Scene unref\n");
28222+ *scene = NULL;
28223+ if (atomic_dec_and_test(&tmp_scene->ref_count)) {
28224+ psb_scheduler_remove_scene_refs(tmp_scene);
28225+ psb_destroy_scene_devlocked(tmp_scene);
28226+ }
28227+}
28228+
28229+struct psb_scene *psb_scene_ref(struct psb_scene *src)
28230+{
28231+ PSB_DEBUG_RENDER("Scene ref\n");
28232+ atomic_inc(&src->ref_count);
28233+ return src;
28234+}
28235+
28236+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
28237+ uint32_t w, uint32_t h)
28238+{
28239+ struct drm_psb_private *dev_priv =
28240+ (struct drm_psb_private *)dev->dev_private;
28241+ int ret = -EINVAL;
28242+ struct psb_scene *scene;
28243+ uint32_t bo_size;
28244+ struct psb_xhw_buf buf;
28245+
28246+ PSB_DEBUG_RENDER("Alloc scene w %u h %u\n", w, h);
28247+
28248+ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
28249+
28250+ if (!scene) {
28251+ DRM_ERROR("Out of memory allocating scene object.\n");
28252+ return NULL;
28253+ }
28254+
28255+ scene->dev = dev;
28256+ scene->w = w;
28257+ scene->h = h;
28258+ scene->hw_scene = NULL;
28259+ atomic_set(&scene->ref_count, 1);
28260+
28261+ INIT_LIST_HEAD(&buf.head);
28262+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
28263+ scene->hw_cookie, &bo_size,
28264+ &scene->clear_p_start,
28265+ &scene->clear_num_pages);
28266+ if (ret)
28267+ goto out_err;
28268+
28269+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
28270+ DRM_PSB_FLAG_MEM_MMU |
28271+ DRM_BO_FLAG_READ |
28272+ DRM_BO_FLAG_CACHED |
28273+ PSB_BO_FLAG_SCENE |
28274+ DRM_BO_FLAG_WRITE,
28275+ DRM_BO_HINT_DONT_FENCE,
28276+ 0, 0, &scene->hw_data);
28277+ if (ret)
28278+ goto out_err;
28279+
28280+ return scene;
28281+ out_err:
28282+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
28283+ return NULL;
28284+}
28285+
28286+int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
28287+ uint64_t mask,
28288+ uint32_t hint,
28289+ uint32_t w,
28290+ uint32_t h,
28291+ int final_pass, struct psb_scene **scene_p)
28292+{
28293+ struct drm_device *dev = pool->dev;
28294+ struct drm_psb_private *dev_priv =
28295+ (struct drm_psb_private *)dev->dev_private;
28296+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
28297+ int ret;
28298+ unsigned long irq_flags;
28299+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
28300+ uint32_t bin_pt_offset;
28301+ uint32_t bin_param_offset;
28302+
28303+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", pool->cur_scene);
28304+
28305+ if (unlikely(!dev_priv->ta_mem)) {
28306+ dev_priv->ta_mem =
28307+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
28308+ if (!dev_priv->ta_mem)
28309+ return -ENOMEM;
28310+
28311+ bin_pt_offset = ~0;
28312+ bin_param_offset = ~0;
28313+ } else {
28314+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
28315+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
28316+ }
28317+
28318+ pool->w = w;
28319+ pool->h = h;
28320+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
28321+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28322+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
28323+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28324+ DRM_ERROR("Trying to resize a dirty scene.\n");
28325+ return -EINVAL;
28326+ }
28327+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28328+ mutex_lock(&dev->struct_mutex);
28329+ psb_scene_unref_devlocked(&pool->scenes[pool->cur_scene]);
28330+ mutex_unlock(&dev->struct_mutex);
28331+ scene = NULL;
28332+ }
28333+
28334+ if (!scene) {
28335+ pool->scenes[pool->cur_scene] = scene =
28336+ psb_alloc_scene(pool->dev, pool->w, pool->h);
28337+
28338+ if (!scene)
28339+ return -ENOMEM;
28340+
28341+ scene->flags = PSB_SCENE_FLAG_CLEARED;
28342+ }
28343+
28344+ /*
28345+ * FIXME: We need atomic bit manipulation here for the
28346+ * scheduler. For now use the spinlock.
28347+ */
28348+
28349+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28350+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
28351+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28352+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
28353+ mutex_lock(&scene->hw_data->mutex);
28354+ ret = drm_bo_wait(scene->hw_data, 0, 0, 0);
28355+ mutex_unlock(&scene->hw_data->mutex);
28356+ if (ret)
28357+ return ret;
28358+
28359+ ret = psb_clear_scene(scene);
28360+
28361+ if (ret)
28362+ return ret;
28363+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28364+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
28365+ }
28366+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28367+
28368+ ret = drm_bo_do_validate(scene->hw_data, flags, mask, hint,
28369+ PSB_ENGINE_TA, 0, NULL);
28370+ if (ret)
28371+ return ret;
28372+ ret = drm_bo_do_validate(dev_priv->ta_mem->hw_data, 0, 0, 0,
28373+ PSB_ENGINE_TA, 0, NULL);
28374+ if (ret)
28375+ return ret;
28376+ ret = drm_bo_do_validate(dev_priv->ta_mem->ta_memory, 0, 0, 0,
28377+ PSB_ENGINE_TA, 0, NULL);
28378+ if (ret)
28379+ return ret;
28380+
28381+ if (unlikely(bin_param_offset !=
28382+ dev_priv->ta_mem->ta_memory->offset ||
28383+ bin_pt_offset !=
28384+ dev_priv->ta_mem->hw_data->offset ||
28385+ dev_priv->force_ta_mem_load)) {
28386+
28387+ struct psb_xhw_buf buf;
28388+
28389+ INIT_LIST_HEAD(&buf.head);
28390+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
28391+ PSB_TA_MEM_FLAG_TA |
28392+ PSB_TA_MEM_FLAG_RASTER |
28393+ PSB_TA_MEM_FLAG_HOSTA |
28394+ PSB_TA_MEM_FLAG_HOSTD |
28395+ PSB_TA_MEM_FLAG_INIT,
28396+ dev_priv->ta_mem->ta_memory->offset,
28397+ dev_priv->ta_mem->hw_data->offset,
28398+ dev_priv->ta_mem->hw_cookie);
28399+ if (ret)
28400+ return ret;
28401+
28402+ dev_priv->force_ta_mem_load = 0;
28403+ }
28404+
28405+ if (final_pass) {
28406+
28407+ /*
28408+ * Clear the scene on next use. Advance the scene counter.
28409+ */
28410+
28411+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28412+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
28413+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28414+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
28415+ }
28416+
28417+ *scene_p = psb_scene_ref(scene);
28418+ return 0;
28419+}
28420+
28421+static void psb_scene_pool_destroy_devlocked(struct psb_scene_pool *pool)
28422+{
28423+ int i;
28424+
28425+ if (!pool)
28426+ return;
28427+
28428+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
28429+ for (i = 0; i < pool->num_scenes; ++i) {
28430+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
28431+ (unsigned long)pool->scenes[i]);
28432+ if (pool->scenes[i])
28433+ psb_scene_unref_devlocked(&pool->scenes[i]);
28434+ }
28435+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
28436+}
28437+
28438+void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool)
28439+{
28440+ struct psb_scene_pool *tmp_pool = *pool;
28441+ struct drm_device *dev = tmp_pool->dev;
28442+
28443+ PSB_DEBUG_RENDER("Scene pool unref\n");
28444+ (void)dev;
28445+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
28446+ *pool = NULL;
28447+ if (--tmp_pool->ref_count == 0)
28448+ psb_scene_pool_destroy_devlocked(tmp_pool);
28449+}
28450+
28451+struct psb_scene_pool *psb_scene_pool_ref_devlocked(struct psb_scene_pool *src)
28452+{
28453+ ++src->ref_count;
28454+ return src;
28455+}
28456+
28457+/*
28458+ * Callback for user object manager.
28459+ */
28460+
28461+static void psb_scene_pool_destroy(struct drm_file *priv,
28462+ struct drm_user_object *base)
28463+{
28464+ struct psb_scene_pool *pool =
28465+ drm_user_object_entry(base, struct psb_scene_pool, user);
28466+
28467+ psb_scene_pool_unref_devlocked(&pool);
28468+}
28469+
28470+struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file *priv,
28471+ uint32_t handle,
28472+ int check_owner)
28473+{
28474+ struct drm_user_object *uo;
28475+ struct psb_scene_pool *pool;
28476+
28477+ uo = drm_lookup_user_object(priv, handle);
28478+ if (!uo || (uo->type != PSB_USER_OBJECT_SCENE_POOL)) {
28479+ DRM_ERROR("Could not find scene pool object 0x%08x\n", handle);
28480+ return NULL;
28481+ }
28482+
28483+ if (check_owner && priv != uo->owner) {
28484+ if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
28485+ return NULL;
28486+ }
28487+
28488+ pool = drm_user_object_entry(uo, struct psb_scene_pool, user);
28489+ return psb_scene_pool_ref_devlocked(pool);
28490+}
28491+
28492+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
28493+ int shareable,
28494+ uint32_t num_scenes,
28495+ uint32_t w, uint32_t h)
28496+{
28497+ struct drm_device *dev = priv->minor->dev;
28498+ struct psb_scene_pool *pool;
28499+ int ret;
28500+
28501+ PSB_DEBUG_RENDER("Scene pool alloc\n");
28502+ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
28503+ if (!pool) {
28504+ DRM_ERROR("Out of memory allocating scene pool object.\n");
28505+ return NULL;
28506+ }
28507+ pool->w = w;
28508+ pool->h = h;
28509+ pool->dev = dev;
28510+ pool->num_scenes = num_scenes;
28511+
28512+ mutex_lock(&dev->struct_mutex);
28513+ ret = drm_add_user_object(priv, &pool->user, shareable);
28514+ if (ret)
28515+ goto out_err;
28516+
28517+ pool->user.type = PSB_USER_OBJECT_SCENE_POOL;
28518+ pool->user.remove = &psb_scene_pool_destroy;
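+ /*
+ * Two references: one owned by the user-object entry just added
+ * (dropped via psb_scene_pool_destroy()), one returned to the caller.
+ */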
28519+ pool->ref_count = 2;
28520+ mutex_unlock(&dev->struct_mutex);
28521+ return pool;
28522+ out_err:
28523+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
28524+ return NULL;
28525+}
28526+
28527+/*
28528+ * Code to support multiple ta memory buffers.
28529+ */
28530+
28531+static void psb_destroy_ta_mem_devlocked(struct psb_ta_mem *ta_mem)
28532+{
28533+ if (!ta_mem)
28534+ return;
28535+
28536+ drm_bo_usage_deref_locked(&ta_mem->hw_data);
28537+ drm_bo_usage_deref_locked(&ta_mem->ta_memory);
28538+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
28539+}
28540+
28541+void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem)
28542+{
28543+ struct psb_ta_mem *tmp_ta_mem = *ta_mem;
28544+ struct drm_device *dev = tmp_ta_mem->dev;
28545+
28546+ (void)dev;
28547+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
28548+ *ta_mem = NULL;
28549+ if (--tmp_ta_mem->ref_count == 0)
28550+ psb_destroy_ta_mem_devlocked(tmp_ta_mem);
28551+}
28552+
28553+void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst, struct psb_ta_mem *src)
28554+{
28555+ struct drm_device *dev = src->dev;
28556+
28557+ (void)dev;
28558+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
28559+ *dst = src;
28560+ ++src->ref_count;
28561+}
28562+
28563+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
28564+{
28565+ struct drm_psb_private *dev_priv =
28566+ (struct drm_psb_private *)dev->dev_private;
28567+ int ret = -EINVAL;
28568+ struct psb_ta_mem *ta_mem;
28569+ uint32_t bo_size;
28570+ struct psb_xhw_buf buf;
28571+
28572+ INIT_LIST_HEAD(&buf.head);
28573+
28574+ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
28575+
28576+ if (!ta_mem) {
28577+ DRM_ERROR("Out of memory allocating parameter memory.\n");
28578+ return NULL;
28579+ }
28580+
28581+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
28582+ ta_mem->hw_cookie, &bo_size);
28583+ if (ret == -ENOMEM) {
28584+ DRM_ERROR("Parameter memory size is too small.\n");
28585+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
28586+ (unsigned int)(pages * (PAGE_SIZE / 1024)));
28587+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
28588+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
28589+ (unsigned int)(bo_size / 1024));
28590+ DRM_INFO("\"ta_mem_size\" parameter!\n");
28591+ }
28592+ if (ret)
28593+ goto out_err0;
28594+
28595+ bo_size = pages * PAGE_SIZE;
28596+ ta_mem->dev = dev;
28597+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
28598+ DRM_PSB_FLAG_MEM_MMU | DRM_BO_FLAG_READ |
28599+ DRM_BO_FLAG_WRITE |
28600+ PSB_BO_FLAG_SCENE,
28601+ DRM_BO_HINT_DONT_FENCE, 0, 0,
28602+ &ta_mem->hw_data);
28603+ if (ret)
28604+ goto out_err0;
28605+
28606+ ret =
28607+ drm_buffer_object_create(dev, pages << PAGE_SHIFT,
28608+ drm_bo_type_kernel,
28609+ DRM_PSB_FLAG_MEM_RASTGEOM |
28610+ DRM_BO_FLAG_READ |
28611+ DRM_BO_FLAG_WRITE |
28612+ PSB_BO_FLAG_SCENE,
28613+ DRM_BO_HINT_DONT_FENCE, 0,
28614+ 1024 * 1024 >> PAGE_SHIFT,
28615+ &ta_mem->ta_memory);
28616+ if (ret)
28617+ goto out_err1;
28618+
28619+ ta_mem->ref_count = 1;
28620+ return ta_mem;
28621+ out_err1:
28622+ drm_bo_usage_deref_unlocked(&ta_mem->hw_data);
28623+ out_err0:
28624+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
28625+ return NULL;
28626+}
28627+
28628+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
28629+ void *data, struct drm_file *file_priv)
28630+{
28631+ struct drm_psb_scene *scene = (struct drm_psb_scene *)data;
28632+ struct drm_user_object *uo;
28633+ struct drm_ref_object *ro;
28634+ int ret = 0;
28635+
28636+ mutex_lock(&dev->struct_mutex);
28637+ if (!scene->handle_valid)
28638+ goto out_unlock;
28639+
28640+ uo = drm_lookup_user_object(file_priv, scene->handle);
28641+ if (!uo) {
28642+ ret = -EINVAL;
28643+ goto out_unlock;
28644+ }
28645+ if (uo->type != PSB_USER_OBJECT_SCENE_POOL) {
28646+ DRM_ERROR("Not a scene pool object.\n");
28647+ ret = -EINVAL;
28648+ goto out_unlock;
28649+ }
28650+ if (uo->owner != file_priv) {
28651+ DRM_ERROR("Not owner of scene pool object.\n");
28652+ ret = -EPERM;
28653+ goto out_unlock;
28654+ }
28655+
28656+ scene->handle_valid = 0;
28657+ ro = drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE);
28658+ BUG_ON(!ro);
28659+ drm_remove_ref_object(file_priv, ro);
28660+
28661+ out_unlock:
28662+ mutex_unlock(&dev->struct_mutex);
28663+ return ret;
28664+}
28665Index: linux-2.6.27/drivers/gpu/drm/psb/psb_scene.h
28666===================================================================
28667--- /dev/null 1970-01-01 00:00:00.000000000 +0000
28668+++ linux-2.6.27/drivers/gpu/drm/psb/psb_scene.h 2009-02-05 13:29:33.000000000 +0000
28669@@ -0,0 +1,112 @@
28670+/**************************************************************************
28671+ * Copyright (c) 2007, Intel Corporation.
28672+ * All Rights Reserved.
28673+ *
28674+ * This program is free software; you can redistribute it and/or modify it
28675+ * under the terms and conditions of the GNU General Public License,
28676+ * version 2, as published by the Free Software Foundation.
28677+ *
28678+ * This program is distributed in the hope it will be useful, but WITHOUT
28679+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28680+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28681+ * more details.
28682+ *
28683+ * You should have received a copy of the GNU General Public License along with
28684+ * this program; if not, write to the Free Software Foundation, Inc.,
28685+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28686+ *
28687+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28688+ * develop this driver.
28689+ *
28690+ **************************************************************************/
28691+/*
28692+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28693+ */
28694+
28695+#ifndef _PSB_SCENE_H_
28696+#define _PSB_SCENE_H_
28697+
28698+#define PSB_USER_OBJECT_SCENE_POOL drm_driver_type0
28699+#define PSB_USER_OBJECT_TA_MEM drm_driver_type1
28700+#define PSB_MAX_NUM_SCENES 8
28701+
28702+struct psb_hw_scene;
28703+struct psb_hw_ta_mem;
28704+
28705+struct psb_scene_pool {
28706+ struct drm_device *dev;
28707+ struct drm_user_object user;
28708+ uint32_t ref_count;
28709+ uint32_t w;
28710+ uint32_t h;
28711+ uint32_t cur_scene;
28712+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
28713+ uint32_t num_scenes;
28714+};
28715+
28716+struct psb_scene {
28717+ struct drm_device *dev;
28718+ atomic_t ref_count;
28719+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
28720+ uint32_t bo_size;
28721+ uint32_t w;
28722+ uint32_t h;
28723+ struct psb_ta_mem *ta_mem;
28724+ struct psb_hw_scene *hw_scene;
28725+ struct drm_buffer_object *hw_data;
28726+ uint32_t flags;
28727+ uint32_t clear_p_start;
28728+ uint32_t clear_num_pages;
28729+};
28730+
28731+struct psb_scene_entry {
28732+ struct list_head head;
28733+ struct psb_scene *scene;
28734+};
28735+
28736+struct psb_user_scene {
28737+ struct drm_device *dev;
28738+ struct drm_user_object user;
28739+};
28740+
28741+struct psb_ta_mem {
28742+ struct drm_device *dev;
28743+ struct drm_user_object user;
28744+ uint32_t ref_count;
28745+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
28746+ uint32_t bo_size;
28747+ struct drm_buffer_object *ta_memory;
28748+ struct drm_buffer_object *hw_data;
28749+ int is_deallocating;
28750+ int deallocating_scheduled;
28751+};
28752+
28753+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
28754+ int shareable,
28755+ uint32_t num_scenes,
28756+ uint32_t w, uint32_t h);
28757+extern void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool);
28758+extern struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file
28759+ *priv,
28760+ uint32_t handle,
28761+ int check_owner);
28762+extern int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
28763+ uint64_t mask, uint32_t hint, uint32_t w,
28764+ uint32_t h, int final_pass,
28765+ struct psb_scene **scene_p);
28766+extern void psb_scene_unref_devlocked(struct psb_scene **scene);
28767+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
28768+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
28769+ void *data, struct drm_file *file_priv);
28770+
28771+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
28772+{
28773+ return pool->user.hash.key;
28774+}
28775+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
28776+ uint32_t pages);
28777+extern void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst,
28778+ struct psb_ta_mem *src);
28779+extern void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem);
28780+
28781+#endif
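/*
 * A minimal sketch of the intended call sequence for the scene pool API
 * above; the pool dimensions and validation flags are chosen here purely
 * for illustration.
 */
static int psb_scene_pool_example(struct drm_file *priv)
{
	struct drm_device *dev = priv->minor->dev;
	struct psb_scene_pool *pool;
	struct psb_scene *scene;
	int ret;

	pool = psb_scene_pool_alloc(priv, 0, 2, 1024, 768);
	if (!pool)
		return -ENOMEM;

	ret = psb_validate_scene_pool(pool, DRM_PSB_FLAG_MEM_MMU,
				      DRM_PSB_FLAG_MEM_MMU, 0,
				      1024, 768, 0, &scene);
	if (ret)
		goto out;

	/* ... submit TA / raster work against "scene" here ... */

	mutex_lock(&dev->struct_mutex);
	psb_scene_unref_devlocked(&scene);
	mutex_unlock(&dev->struct_mutex);
out:
	mutex_lock(&dev->struct_mutex);
	psb_scene_pool_unref_devlocked(&pool);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}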
28782Index: linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.c
28783===================================================================
28784--- /dev/null 1970-01-01 00:00:00.000000000 +0000
28785+++ linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.c 2009-02-05 13:29:33.000000000 +0000
28786@@ -0,0 +1,1445 @@
28787+/**************************************************************************
28788+ * Copyright (c) 2007, Intel Corporation.
28789+ * All Rights Reserved.
28790+ *
28791+ * This program is free software; you can redistribute it and/or modify it
28792+ * under the terms and conditions of the GNU General Public License,
28793+ * version 2, as published by the Free Software Foundation.
28794+ *
28795+ * This program is distributed in the hope it will be useful, but WITHOUT
28796+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28797+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28798+ * more details.
28799+ *
28800+ * You should have received a copy of the GNU General Public License along with
28801+ * this program; if not, write to the Free Software Foundation, Inc.,
28802+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28803+ *
28804+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28805+ * develop this driver.
28806+ *
28807+ **************************************************************************/
28808+/*
28809+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28810+ */
28811+
28812+#include "drmP.h"
28813+#include "psb_drm.h"
28814+#include "psb_drv.h"
28815+#include "psb_reg.h"
28816+#include "psb_scene.h"
28817+
28818+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
28819+#define PSB_RASTER_TIMEOUT (DRM_HZ / 2)
28820+#define PSB_TA_TIMEOUT (DRM_HZ / 5)
28821+
28822+#undef PSB_SOFTWARE_WORKAHEAD
28823+
28824+#ifdef PSB_STABLE_SETTING
28825+
28826+/*
28827+ * Software blocks completely while the engines are working so there can be no
28828+ * overlap.
28829+ */
28830+
28831+#define PSB_WAIT_FOR_RASTER_COMPLETION
28832+#define PSB_WAIT_FOR_TA_COMPLETION
28833+
28834+#elif defined(PSB_PARANOID_SETTING)
28835+/*
28836+ * Software blocks almost completely while the engines are working, so there
28837+ * can be almost no overlap.
28838+ */
28839+
28840+#define PSB_WAIT_FOR_RASTER_COMPLETION
28841+#define PSB_WAIT_FOR_TA_COMPLETION
28842+#define PSB_BE_PARANOID
28843+
28844+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
28845+/*
28846+ * Software leaps ahead while the rasterizer is running and prepares
28847+ * a new ta job that can be scheduled before the rasterizer has
28848+ * finished.
28849+ */
28850+
28851+#define PSB_WAIT_FOR_TA_COMPLETION
28852+
28853+#elif defined(PSB_SOFTWARE_WORKAHEAD)
28854+/*
28855+ * Don't sync, but allow software to work ahead and queue a number of jobs,
28856+ * while still blocking overlap in the scheduler.
28857+ */
28858+
28859+#define PSB_BLOCK_OVERLAP
28860+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
28861+
28862+#endif
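/*
 * At most one of PSB_STABLE_SETTING, PSB_PARANOID_SETTING,
 * PSB_SOME_OVERLAP_BUT_LOCKUP or PSB_SOFTWARE_WORKAHEAD is expected to be
 * defined before this ladder; with none defined (the default here, given
 * the #undef above) no sync macros are set, so the scheduler neither
 * blocks nor serializes and TA and raster work may fully overlap.
 */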
28863+
28864+/*
28865+ * Avoid pixelbe pagefaults on C0.
28866+ */
28867+#if 0
28868+#define PSB_BLOCK_OVERLAP
28869+#endif
28870+
28871+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
28872+ struct psb_scheduler *scheduler,
28873+ uint32_t reply_flag);
28874+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
28875+ struct psb_scheduler *scheduler,
28876+ uint32_t reply_flag);
28877+
28878+#ifdef FIX_TG_16
28879+
28880+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
28881+static int psb_2d_trylock(struct drm_psb_private *dev_priv);
28882+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
28883+
28884+#endif
28885+
28886+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
28887+ int *lockup, int *idle)
28888+{
28889+ unsigned long irq_flags;
28890+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
28891+
28892+ *lockup = 0;
28893+ *idle = 1;
28894+
28895+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28896+
28897+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
28898+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
28899+ *lockup = 1;
28900+ }
28901+ if (!*lockup
28902+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
28903+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
28904+ *lockup = 1;
28905+ }
28906+ if (!*lockup)
28907+ *idle = scheduler->idle;
28908+
28909+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28910+}
28911+
28912+static inline void psb_set_idle(struct psb_scheduler *scheduler)
28913+{
28914+ scheduler->idle =
28915+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
28916+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
28917+ if (scheduler->idle)
28918+ wake_up(&scheduler->idle_queue);
28919+}
28920+
28921+/*
28922+ * Call with the scheduler spinlock held.
28923+ * Assigns a scene context to either the ta or the rasterizer,
28924+ * flushing out other scenes to memory if necessary.
28925+ */
28926+
28927+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
28928+ struct psb_scene *scene,
28929+ int engine, struct psb_task *task)
28930+{
28931+ uint32_t flags = 0;
28932+ struct psb_hw_scene *hw_scene;
28933+ struct drm_device *dev = scene->dev;
28934+ struct drm_psb_private *dev_priv =
28935+ (struct drm_psb_private *)dev->dev_private;
28936+
28937+ hw_scene = scene->hw_scene;
28938+ if (hw_scene && hw_scene->last_scene == scene) {
28939+
28940+ /*
28941+ * Reuse the last hw scene context and delete it from the
28942+ * free list.
28943+ */
28944+
28945+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
28946+ hw_scene->context_number);
28947+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
28948+
28949+ /*
28950+ * No hw context initialization to be done.
28951+ */
28952+
28953+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
28954+ }
28955+
28956+ list_del_init(&hw_scene->head);
28957+
28958+ } else {
28959+ struct list_head *list;
28960+ hw_scene = NULL;
28961+
28962+ /*
28963+ * Grab a new hw scene context.
28964+ */
28965+
28966+ list_for_each(list, &scheduler->hw_scenes) {
28967+ hw_scene = list_entry(list, struct psb_hw_scene, head);
28968+ break;
28969+ }
28970+ BUG_ON(!hw_scene);
28971+ PSB_DEBUG_RENDER("New hw scene %d.\n",
28972+ hw_scene->context_number);
28973+
28974+ list_del_init(list);
28975+ }
28976+ scene->hw_scene = hw_scene;
28977+ hw_scene->last_scene = scene;
28978+
28979+ flags |= PSB_SCENE_FLAG_SETUP;
28980+
28981+ /*
28982+ * Switch context and setup the engine.
28983+ */
28984+
28985+ return psb_xhw_scene_bind_fire(dev_priv,
28986+ &task->buf,
28987+ task->flags,
28988+ hw_scene->context_number,
28989+ scene->hw_cookie,
28990+ task->oom_cmds,
28991+ task->oom_cmd_size,
28992+ scene->hw_data->offset,
28993+ engine, flags | scene->flags);
28994+}
28995+
28996+static inline void psb_report_fence(struct psb_scheduler *scheduler,
28997+ uint32_t class,
28998+ uint32_t sequence,
28999+ uint32_t type, int call_handler)
29000+{
29001+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
29002+
29003+ seq->sequence = sequence;
29004+ seq->reported = 0;
29005+ if (call_handler)
29006+ psb_fence_handler(scheduler->dev, class);
29007+}
29008+
29009+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
29010+ struct psb_scheduler *scheduler);
29011+
29012+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
29013+ struct psb_scheduler *scheduler)
29014+{
29015+ struct psb_task *task = NULL;
29016+ struct list_head *list, *next;
29017+ int pushed_raster_task = 0;
29018+
29019+ PSB_DEBUG_RENDER("schedule ta\n");
29020+
29021+ if (scheduler->idle_count != 0)
29022+ return;
29023+
29024+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
29025+ return;
29026+
29027+ if (scheduler->ta_state)
29028+ return;
29029+
29030+ /*
29031+ * Skip the ta stage for rasterization-only
29032+ * tasks. They arrive here to make sure we're rasterizing
29033+ * tasks in the correct order.
29034+ */
29035+
29036+ list_for_each_safe(list, next, &scheduler->ta_queue) {
29037+ task = list_entry(list, struct psb_task, head);
29038+ if (task->task_type != psb_raster_task)
29039+ break;
29040+
29041+ list_del_init(list);
29042+ list_add_tail(list, &scheduler->raster_queue);
29043+ psb_report_fence(scheduler, task->engine, task->sequence,
29044+ _PSB_FENCE_TA_DONE_SHIFT, 1);
29045+ task = NULL;
29046+ pushed_raster_task = 1;
29047+ }
29048+
29049+ if (pushed_raster_task)
29050+ psb_schedule_raster(dev_priv, scheduler);
29051+
29052+ if (!task)
29053+ return;
29054+
29055+ /*
29056+ * Still waiting for a vistest?
29057+ */
29058+
29059+ if (scheduler->feedback_task == task)
29060+ return;
29061+
29062+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
29063+
29064+ /*
29065+ * Block ta from trying to use both hardware contexts
29066+ * without the rasterizer starting to render from one of them.
29067+ */
29068+
29069+ if (!list_empty(&scheduler->raster_queue))
29070+ return;
29072+#endif
29073+
29074+#ifdef PSB_BLOCK_OVERLAP
29075+ /*
29076+ * Make sure rasterizer isn't doing anything.
29077+ */
29078+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
29079+ return;
29080+#endif
29081+ if (list_empty(&scheduler->hw_scenes))
29082+ return;
29083+
29084+#ifdef FIX_TG_16
29085+ if (psb_check_2d_idle(dev_priv))
29086+ return;
29087+#endif
29088+
29089+ list_del_init(&task->head);
29090+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
29091+ scheduler->ta_state = 1;
29092+
29093+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
29094+ scheduler->idle = 0;
29095+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
29096+
29097+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
29098+ 0x00000000 : PSB_RF_FIRE_TA;
29099+
29100+ (void)psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
29101+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA, task);
29102+ psb_schedule_watchdog(dev_priv);
29103+}
29104+
29105+static int psb_fire_raster(struct psb_scheduler *scheduler,
29106+ struct psb_task *task)
29107+{
29108+ struct drm_device *dev = scheduler->dev;
29109+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
29110+ dev->dev_private;
29111+
29112+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
29113+
29114+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
29115+}
29116+
29117+/*
29118+ * Take the first rasterization task from the hp raster queue or from the
29119+ * raster queue and fire the rasterizer.
29120+ */
29121+
29122+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
29123+ struct psb_scheduler *scheduler)
29124+{
29125+ struct psb_task *task;
29126+ struct list_head *list;
29127+
29128+ if (scheduler->idle_count != 0)
29129+ return;
29130+
29131+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
29132+ PSB_DEBUG_RENDER("Raster busy.\n");
29133+ return;
29134+ }
29135+#ifdef PSB_BLOCK_OVERLAP
29136+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
29137+ PSB_DEBUG_RENDER("TA busy.\n");
29138+ return;
29139+ }
29140+#endif
29141+
29142+ if (!list_empty(&scheduler->hp_raster_queue))
29143+ list = scheduler->hp_raster_queue.next;
29144+ else if (!list_empty(&scheduler->raster_queue))
29145+ list = scheduler->raster_queue.next;
29146+ else {
29147+ PSB_DEBUG_RENDER("Nothing in list\n");
29148+ return;
29149+ }
29150+
29151+ task = list_entry(list, struct psb_task, head);
29152+
29153+ /*
29154+ * Sometimes changing ZLS format requires an ISP reset.
29155+ * Doesn't seem to consume too much time.
29156+ */
29157+
29158+ if (task->scene)
29159+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
29160+
29161+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
29162+
29163+ list_del_init(list);
29164+ scheduler->idle = 0;
29165+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
29166+ scheduler->total_raster_jiffies = 0;
29167+
29168+ if (task->scene)
29169+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
29170+
29171+ (void)psb_reg_submit(dev_priv, task->raster_cmds,
29172+ task->raster_cmd_size);
29173+
29174+ if (task->scene) {
29175+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
29176+ 0x00000000 : PSB_RF_FIRE_RASTER;
29177+ psb_set_scene_fire(scheduler,
29178+ task->scene, PSB_SCENE_ENGINE_RASTER, task);
29179+ } else {
29180+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
29181+ psb_fire_raster(scheduler, task);
29182+ }
29183+ psb_schedule_watchdog(dev_priv);
29184+}
29185+
29186+int psb_extend_raster_timeout(struct drm_psb_private *dev_priv)
29187+{
29188+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29189+ unsigned long irq_flags;
29190+ int ret;
29191+
29192+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29193+ scheduler->total_raster_jiffies +=
29194+ jiffies - scheduler->raster_end_jiffies + PSB_RASTER_TIMEOUT;
29195+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
29196+ ret = (scheduler->total_raster_jiffies > PSB_ALLOWED_RASTER_RUNTIME) ?
29197+ -EBUSY : 0;
29198+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29199+ return ret;
29200+}
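/*
 * Worked example: each watchdog expiry lands here roughly
 * PSB_RASTER_TIMEOUT (DRM_HZ / 2, i.e. ~500 ms) after the deadline was
 * set; the elapsed time is accumulated and the deadline pushed out by
 * another PSB_RASTER_TIMEOUT. Once the accumulated raster time exceeds
 * PSB_ALLOWED_RASTER_RUNTIME (20 s), the function returns -EBUSY and
 * psb_reset_wq() proceeds with a full reset.
 */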
29201+
29202+/*
29203+ * TA done handler.
29204+ */
29205+
29206+static void psb_ta_done(struct drm_psb_private *dev_priv,
29207+ struct psb_scheduler *scheduler)
29208+{
29209+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29210+ struct psb_scene *scene = task->scene;
29211+
29212+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
29213+
29214+ switch (task->ta_complete_action) {
29215+ case PSB_RASTER_BLOCK:
29216+ scheduler->ta_state = 1;
29217+ scene->flags |=
29218+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29219+ list_add_tail(&task->head, &scheduler->raster_queue);
29220+ break;
29221+ case PSB_RASTER:
29222+ scene->flags |=
29223+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29224+ list_add_tail(&task->head, &scheduler->raster_queue);
29225+ break;
29226+ case PSB_RETURN:
29227+ scheduler->ta_state = 0;
29228+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
29229+ list_add_tail(&scene->hw_scene->head, &scheduler->hw_scenes);
29230+
29231+ break;
29232+ }
29233+
29234+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
29235+
29236+#ifdef FIX_TG_16
29237+ psb_2d_atomic_unlock(dev_priv);
29238+#endif
29239+
29240+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
29241+ psb_report_fence(scheduler, task->engine, task->sequence,
29242+ _PSB_FENCE_TA_DONE_SHIFT, 1);
29243+
29244+ psb_schedule_raster(dev_priv, scheduler);
29245+ psb_schedule_ta(dev_priv, scheduler);
29246+ psb_set_idle(scheduler);
29247+
29248+ if (task->ta_complete_action != PSB_RETURN)
29249+ return;
29250+
29251+ list_add_tail(&task->head, &scheduler->task_done_queue);
29252+ schedule_delayed_work(&scheduler->wq, 1);
29253+}
29254+
29255+/*
29256+ * Rasterizer done handler.
29257+ */
29258+
29259+static void psb_raster_done(struct drm_psb_private *dev_priv,
29260+ struct psb_scheduler *scheduler)
29261+{
29262+ struct psb_task *task =
29263+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29264+ struct psb_scene *scene = task->scene;
29265+ uint32_t complete_action = task->raster_complete_action;
29266+
29267+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
29268+
29269+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
29270+
29271+ if (complete_action != PSB_RASTER)
29272+ psb_schedule_raster(dev_priv, scheduler);
29273+
29274+ if (scene) {
29275+ if (task->feedback.page) {
29276+ if (unlikely(scheduler->feedback_task)) {
29277+ /*
29278+ * This should never happen, since the previous
29279+ * feedback query will return before the next
29280+ * raster task is fired.
29281+ */
29282+ DRM_ERROR("Feedback task busy.\n");
29283+ }
29284+ scheduler->feedback_task = task;
29285+ psb_xhw_vistest(dev_priv, &task->buf);
29286+ }
29287+ switch (complete_action) {
29288+ case PSB_RETURN:
29289+ scene->flags &=
29290+ ~(PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29291+ list_add_tail(&scene->hw_scene->head,
29292+ &scheduler->hw_scenes);
29293+ psb_report_fence(scheduler, task->engine,
29294+ task->sequence,
29295+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
29296+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
29297+ scheduler->ta_state = 0;
29299+ break;
29300+ case PSB_RASTER:
29301+ list_add(&task->head, &scheduler->raster_queue);
29302+ task->raster_complete_action = PSB_RETURN;
29303+ psb_schedule_raster(dev_priv, scheduler);
29304+ break;
29305+ case PSB_TA:
29306+ list_add(&task->head, &scheduler->ta_queue);
29307+ scheduler->ta_state = 0;
29308+ task->raster_complete_action = PSB_RETURN;
29309+ task->ta_complete_action = PSB_RASTER;
29310+ break;
29311+
29312+ }
29313+ }
29314+ psb_schedule_ta(dev_priv, scheduler);
29315+ psb_set_idle(scheduler);
29316+
29317+ if (complete_action == PSB_RETURN) {
29318+ if (task->scene == NULL) {
29319+ psb_report_fence(scheduler, task->engine,
29320+ task->sequence,
29321+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
29322+ }
29323+ if (!task->feedback.page) {
29324+ list_add_tail(&task->head, &scheduler->task_done_queue);
29325+ schedule_delayed_work(&scheduler->wq, 1);
29326+ }
29327+ }
29328+}
29329+
29330+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
29331+{
29332+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29333+ unsigned long irq_flags;
29334+
29335+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29336+ scheduler->idle_count++;
29337+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29338+}
29339+
29340+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
29341+{
29342+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29343+ unsigned long irq_flags;
29344+
29345+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29346+ if (--scheduler->idle_count == 0) {
29347+ psb_schedule_ta(dev_priv, scheduler);
29348+ psb_schedule_raster(dev_priv, scheduler);
29349+ }
29350+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29351+}
29352+
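psb_scheduler_pause() and psb_scheduler_restart() above form a reference-counted bracket: idle_count may be raised several times, and only the restart that drops it back to zero re-runs the TA and raster schedulers. A minimal sketch of the intended pairing, assuming a caller that needs the hardware quiescent (example_quiesce() is hypothetical):

    /* Hedged sketch: nestable pause/restart bracket around hardware pokes. */
    static void example_quiesce(struct drm_psb_private *dev_priv)
    {
            psb_scheduler_pause(dev_priv);    /* idle_count++: no new fires */
            /* ... touch scheduler or hardware state safely here ... */
            psb_scheduler_restart(dev_priv);  /* last restart reschedules */
    }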
29353+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
29354+{
29355+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29356+ unsigned long irq_flags;
29357+ int ret;
29358+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29359+ ret = scheduler->idle_count != 0 && scheduler->idle;
29360+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29361+ return ret;
29362+}
29363+
29364+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
29365+{
29366+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29367+ unsigned long irq_flags;
29368+ int ret;
29369+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29370+ ret = (scheduler->idle &&
29371+ list_empty(&scheduler->raster_queue) &&
29372+ list_empty(&scheduler->ta_queue) &&
29373+ list_empty(&scheduler->hp_raster_queue));
29374+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29375+ return ret;
29376+}
29377+
29378+static void psb_ta_oom(struct drm_psb_private *dev_priv,
29379+ struct psb_scheduler *scheduler)
29380+{
29381+
29382+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29383+ if (!task)
29384+ return;
29385+
29386+ if (task->aborting)
29387+ return;
29388+ task->aborting = 1;
29389+
29390+ DRM_INFO("Info: TA out of parameter memory.\n");
29391+
29392+ (void)psb_xhw_ta_oom(dev_priv, &task->buf, task->scene->hw_cookie);
29393+}
29394+
29395+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
29396+ struct psb_scheduler *scheduler)
29397+{
29398+
29399+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29400+ uint32_t flags;
29401+ if (!task)
29402+ return;
29403+
29404+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
29405+ task->scene->hw_cookie,
29406+ &task->ta_complete_action,
29407+ &task->raster_complete_action, &flags);
29408+ task->flags |= flags;
29409+ task->aborting = 0;
29410+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
29411+}
29412+
29413+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
29414+ struct psb_scheduler *scheduler)
29415+{
29416+ DRM_ERROR("TA hw scene freed.\n");
29417+}
29418+
29419+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
29420+ struct psb_scheduler *scheduler)
29421+{
29422+ struct psb_task *task = scheduler->feedback_task;
29423+ uint8_t *feedback_map;
29424+ uint32_t add;
29425+ uint32_t cur;
29426+ struct drm_psb_vistest *vistest;
29427+ int i;
29428+
29429+ scheduler->feedback_task = NULL;
29430+ if (!task) {
29431+ DRM_ERROR("No Poulsbo feedback task.\n");
29432+ return;
29433+ }
29434+ if (!task->feedback.page) {
29435+ DRM_ERROR("No Poulsbo feedback page.\n");
29436+ goto out;
29437+ }
29438+
29439+ if (in_irq())
29440+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
29441+ else
29442+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
29443+
29444+ /*
29445+ * Loop over all requested vistest components here.
29446+ * Only one (vistest) currently.
29447+ */
29448+
29449+ vistest = (struct drm_psb_vistest *)
29450+ (feedback_map + task->feedback.offset);
29451+
29452+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
29453+ add = task->buf.arg.arg.feedback[i];
29454+ cur = vistest->vt[i];
29455+
29456+ /*
29457+ * Vistest saturates.
29458+ */
29459+
29460+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
29461+ }
29462+ if (in_irq())
29463+ kunmap_atomic(feedback_map, KM_IRQ0);
29464+ else
29465+ kunmap_atomic(feedback_map, KM_USER0);
29466+ out:
29467+ psb_report_fence(scheduler, task->engine, task->sequence,
29468+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
29469+
29470+ if (list_empty(&task->head)) {
29471+ list_add_tail(&task->head, &scheduler->task_done_queue);
29472+ schedule_delayed_work(&scheduler->wq, 1);
29473+ } else
29474+ psb_schedule_ta(dev_priv, scheduler);
29475+}
29476+
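The accumulation loop above saturates rather than wraps: since an unsigned sum that overflows is smaller than either operand, (cur + add < cur) detects wrap-around and clamps to ~0. A standalone, compilable illustration of that idiom (all names here are for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Saturating 32-bit add, as used for the vistest counters:
     * an overflowing unsigned sum is smaller than either operand. */
    static uint32_t sat_add_u32(uint32_t cur, uint32_t add)
    {
            return (cur + add < cur) ? ~0u : cur + add;
    }

    int main(void)
    {
            printf("%u\n", sat_add_u32(10u, 20u));           /* 30 */
            printf("%u\n", sat_add_u32(0xfffffff0u, 0x20u)); /* 4294967295 */
            return 0;
    }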
29477+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
29478+ struct psb_scheduler *scheduler)
29479+{
29480+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29481+
29482+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
29483+
29484+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
29485+}
29486+
29487+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
29488+ struct psb_scheduler *scheduler)
29489+{
29490+ struct psb_task *task =
29491+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29492+ uint32_t reply_flags;
29493+
29494+ if (!task) {
29495+ DRM_ERROR("Null task.\n");
29496+ return;
29497+ }
29498+
29499+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
29500+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
29501+
29502+ reply_flags = PSB_RF_FIRE_RASTER;
29503+ if (task->raster_complete_action == PSB_RASTER)
29504+ reply_flags |= PSB_RF_DEALLOC;
29505+
29506+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
29507+}
29508+
29509+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
29510+ struct psb_scheduler *scheduler)
29511+{
29512+ uint32_t type;
29513+ int ret;
29514+ unsigned long irq_flags;
29515+
29516+ /*
29517+ * Xhw cannot write directly to the comm page, so
29518+ * do it here. Firmware would have written directly.
29519+ */
29520+
29521+ ret = psb_xhw_handler(dev_priv);
29522+ if (unlikely(ret))
29523+ return ret;
29524+
29525+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
29526+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
29527+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
29528+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
29529+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
29530+ DRM_ERROR("Lost Poulsbo hardware event.\n");
29531+ }
29532+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
29533+
29534+ if (type == 0)
29535+ return 0;
29536+
29537+ switch (type) {
29538+ case PSB_UIRQ_VISTEST:
29539+ psb_vistest_reply(dev_priv, scheduler);
29540+ break;
29541+ case PSB_UIRQ_OOM_REPLY:
29542+ psb_ta_oom_reply(dev_priv, scheduler);
29543+ break;
29544+ case PSB_UIRQ_FIRE_TA_REPLY:
29545+ psb_ta_fire_reply(dev_priv, scheduler);
29546+ break;
29547+ case PSB_UIRQ_FIRE_RASTER_REPLY:
29548+ psb_raster_fire_reply(dev_priv, scheduler);
29549+ break;
29550+ default:
29551+ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
29552+ }
29553+ return 0;
29554+}
29555+
29556+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
29557+{
29558+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29559+ unsigned long irq_flags;
29560+ int ret;
29561+
29562+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29563+ ret = psb_user_interrupt(dev_priv, scheduler);
29564+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29565+ return ret;
29566+}
29567+
29568+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
29569+ struct psb_scheduler *scheduler,
29570+ uint32_t reply_flag)
29571+{
29572+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29573+ uint32_t flags;
29574+ uint32_t mask;
29575+
29576+ task->reply_flags |= reply_flag;
29577+ flags = task->reply_flags;
29578+ mask = PSB_RF_FIRE_TA;
29579+
29580+ if (!(flags & mask))
29581+ return;
29582+
29583+ mask = PSB_RF_TA_DONE;
29584+ if ((flags & mask) == mask) {
29585+ task->reply_flags &= ~mask;
29586+ psb_ta_done(dev_priv, scheduler);
29587+ }
29588+
29589+ mask = PSB_RF_OOM;
29590+ if ((flags & mask) == mask) {
29591+ task->reply_flags &= ~mask;
29592+ psb_ta_oom(dev_priv, scheduler);
29593+ }
29594+
29595+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
29596+ if ((flags & mask) == mask) {
29597+ task->reply_flags &= ~mask;
29598+ psb_ta_done(dev_priv, scheduler);
29599+ }
29600+}
29601+
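psb_dispatch_ta() above is an accumulate-and-test state machine: each interrupt source ORs its PSB_RF_* bit into task->reply_flags, nothing runs until PSB_RF_FIRE_TA has been seen, and each handler consumes its mask exactly once. A standalone sketch of the pattern with the handler reduced to a printf (the flag values mirror the defines in psb_schedule.h; everything else is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define RF_FIRE_TA (1 << 0)
    #define RF_TA_DONE (1 << 4)

    static void dispatch(uint32_t *reply_flags, uint32_t new_flag)
    {
            *reply_flags |= new_flag;
            if (!(*reply_flags & RF_FIRE_TA))
                    return;                      /* buffer events until fired */
            if ((*reply_flags & RF_TA_DONE) == RF_TA_DONE) {
                    *reply_flags &= ~RF_TA_DONE; /* consume the event */
                    printf("ta-done handler runs\n");
            }
    }

    int main(void)
    {
            uint32_t flags = 0;
            dispatch(&flags, RF_TA_DONE); /* arrives early: buffered */
            dispatch(&flags, RF_FIRE_TA); /* fire seen: handler runs now */
            return 0;
    }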
29602+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
29603+ struct psb_scheduler *scheduler,
29604+ uint32_t reply_flag)
29605+{
29606+ struct psb_task *task =
29607+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29608+ uint32_t flags;
29609+ uint32_t mask;
29610+
29611+ task->reply_flags |= reply_flag;
29612+ flags = task->reply_flags;
29613+ mask = PSB_RF_FIRE_RASTER;
29614+
29615+ if (!(flags & mask))
29616+ return;
29617+
29618+ /*
29619+ * For rasterizer-only tasks, don't report fence done here,
29620+ * as this is time consuming and the rasterizer wants a new
29621+ * task immediately. For other tasks, the hardware is probably
29622+ * still busy deallocating TA memory, so we can report
29623+ * fence done in parallel.
29624+ */
29625+
29626+ if (task->raster_complete_action == PSB_RETURN &&
29627+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
29628+ psb_report_fence(scheduler, task->engine, task->sequence,
29629+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
29630+ }
29631+
29632+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
29633+ if ((flags & mask) == mask) {
29634+ task->reply_flags &= ~mask;
29635+ psb_raster_done(dev_priv, scheduler);
29636+ }
29637+}
29638+
29639+void psb_scheduler_handler(struct drm_psb_private *dev_priv, uint32_t status)
29640+{
29641+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29642+
29643+ spin_lock(&scheduler->lock);
29644+
29645+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
29646+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_RASTER_DONE);
29647+ }
29648+ if (status & _PSB_CE_DPM_3D_MEM_FREE) {
29649+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
29650+ }
29651+ if (status & _PSB_CE_TA_FINISHED) {
29652+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
29653+ }
29654+ if (status & _PSB_CE_TA_TERMINATE) {
29655+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
29656+ }
29657+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
29658+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
29659+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
29660+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
29661+ }
29662+ if (status & _PSB_CE_DPM_TA_MEM_FREE) {
29663+ psb_ta_hw_scene_freed(dev_priv, scheduler);
29664+ }
29665+ if (status & _PSB_CE_SW_EVENT) {
29666+ psb_user_interrupt(dev_priv, scheduler);
29667+ }
29668+ spin_unlock(&scheduler->lock);
29669+}
29670+
29671+static void psb_free_task_wq(struct work_struct *work)
29672+{
29673+ struct psb_scheduler *scheduler =
29674+ container_of(work, struct psb_scheduler, wq.work);
29675+
29676+ struct drm_device *dev = scheduler->dev;
29677+ struct list_head *list, *next;
29678+ unsigned long irq_flags;
29679+ struct psb_task *task;
29680+
29681+ if (!mutex_trylock(&scheduler->task_wq_mutex))
29682+ return;
29683+
29684+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29685+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
29686+ task = list_entry(list, struct psb_task, head);
29687+ list_del_init(list);
29688+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29689+
29690+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
29691+ "Feedback bo 0x%08lx, done %d\n",
29692+ task->sequence, (unsigned long)task->scene,
29693+ (unsigned long)task->feedback.bo,
29694+ atomic_read(&task->buf.done));
29695+
29696+ if (task->scene) {
29697+ mutex_lock(&dev->struct_mutex);
29698+ PSB_DEBUG_RENDER("Unref scene %d\n", task->sequence);
29699+ psb_scene_unref_devlocked(&task->scene);
29700+ if (task->feedback.bo) {
29701+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
29702+ task->sequence);
29703+ drm_bo_usage_deref_locked(&task->feedback.bo);
29704+ }
29705+ mutex_unlock(&dev->struct_mutex);
29706+ }
29707+
29708+ if (atomic_read(&task->buf.done)) {
29709+ PSB_DEBUG_RENDER("Deleting task %d\n", task->sequence);
29710+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
29711+ task = NULL;
29712+ }
29713+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29714+ if (task != NULL)
29715+ list_add(list, &scheduler->task_done_queue);
29716+ }
29717+ if (!list_empty(&scheduler->task_done_queue)) {
29718+ PSB_DEBUG_RENDER("Rescheduling wq\n");
29719+ schedule_delayed_work(&scheduler->wq, 1);
29720+ }
29721+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29722+
29723+ mutex_unlock(&scheduler->task_wq_mutex);
29724+}
29725+
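psb_free_task_wq() shows the classic drop-relock iteration: the spinlock guards only list manipulation and is released around the unref/free work, which may sleep; list_for_each_safe() keeps the iterator valid across deletion. A compact user-space analogue with a pthread mutex standing in for the spinlock (pop/process are hypothetical callbacks):

    #include <pthread.h>

    struct item;    /* opaque work item */

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Pop one item under the lock, process it unlocked (processing
     * may sleep), then re-take the lock before touching the list. */
    static void drain(struct item *(*pop)(void),
                      void (*process)(struct item *))
    {
            struct item *it;

            pthread_mutex_lock(&list_lock);
            while ((it = pop()) != NULL) {
                    pthread_mutex_unlock(&list_lock);
                    process(it);
                    pthread_mutex_lock(&list_lock);
            }
            pthread_mutex_unlock(&list_lock);
    }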
29726+/*
29727+ * Check if any of the tasks in the queues is using a scene.
29728+ * In that case we know the TA memory buffer objects are
29729+ * fenced and will not be evicted until that fence is signaled.
29730+ */
29731+
29732+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
29733+{
29734+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29735+ unsigned long irq_flags;
29736+ struct psb_task *task;
29737+ struct psb_task *next_task;
29738+
29739+ dev_priv->force_ta_mem_load = 1;
29740+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29741+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue, head) {
29742+ if (task->scene) {
29743+ dev_priv->force_ta_mem_load = 0;
29744+ break;
29745+ }
29746+ }
29747+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
29748+ head) {
29749+ if (task->scene) {
29750+ dev_priv->force_ta_mem_load = 0;
29751+ break;
29752+ }
29753+ }
29754+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29755+}
29756+
29757+void psb_scheduler_reset(struct drm_psb_private *dev_priv, int error_condition)
29758+{
29759+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29760+ unsigned long wait_jiffies;
29761+ unsigned long cur_jiffies;
29762+ struct psb_task *task;
29763+ struct psb_task *next_task;
29764+ unsigned long irq_flags;
29765+
29766+ psb_scheduler_pause(dev_priv);
29767+ if (!psb_scheduler_idle(dev_priv)) {
29768+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29769+
29770+ cur_jiffies = jiffies;
29771+ wait_jiffies = cur_jiffies;
29772+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
29773+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
29774+ wait_jiffies = scheduler->ta_end_jiffies;
29775+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
29776+ time_after_eq(scheduler->raster_end_jiffies, wait_jiffies))
29777+ wait_jiffies = scheduler->raster_end_jiffies;
29778+
29779+ wait_jiffies -= cur_jiffies;
29780+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29781+
29782+ (void)wait_event_timeout(scheduler->idle_queue,
29783+ psb_scheduler_idle(dev_priv),
29784+ wait_jiffies);
29785+ }
29786+
29787+ if (!psb_scheduler_idle(dev_priv)) {
29788+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29789+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29790+ if (task) {
29791+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
29792+ if (task->engine == PSB_ENGINE_HPRAST) {
29793+ psb_fence_error(scheduler->dev,
29794+ PSB_ENGINE_HPRAST,
29795+ task->sequence,
29796+ _PSB_FENCE_TYPE_RASTER_DONE,
29797+ error_condition);
29798+
29799+ list_del(&task->head);
29800+ psb_xhw_clean_buf(dev_priv, &task->buf);
29801+ list_add_tail(&task->head,
29802+ &scheduler->task_done_queue);
29803+ } else {
29804+ list_add(&task->head, &scheduler->raster_queue);
29805+ }
29806+ }
29807+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
29808+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29809+ if (task) {
29810+ DRM_ERROR("Detected Poulsbo ta lockup.\n");
29811+ list_add_tail(&task->head, &scheduler->raster_queue);
29812+#ifdef FIX_TG_16
29813+ psb_2d_atomic_unlock(dev_priv);
29814+#endif
29815+ }
29816+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
29817+ scheduler->ta_state = 0;
29818+
29819+#ifdef FIX_TG_16
29820+ atomic_set(&dev_priv->ta_wait_2d, 0);
29821+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
29822+ wake_up(&dev_priv->queue_2d);
29823+#endif
29824+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29825+ }
29826+
29827+ /*
29828+ * Empty raster queue.
29829+ */
29830+
29831+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29832+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
29833+ head) {
29834+ struct psb_scene *scene = task->scene;
29835+
29836+ psb_fence_error(scheduler->dev,
29837+ task->engine,
29838+ task->sequence,
29839+ _PSB_FENCE_TYPE_TA_DONE |
29840+ _PSB_FENCE_TYPE_RASTER_DONE |
29841+ _PSB_FENCE_TYPE_SCENE_DONE |
29842+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
29843+ if (scene) {
29844+ scene->flags = 0;
29845+ if (scene->hw_scene) {
29846+ list_add_tail(&scene->hw_scene->head,
29847+ &scheduler->hw_scenes);
29848+ scene->hw_scene = NULL;
29849+ }
29850+ }
29851+
29852+ psb_xhw_clean_buf(dev_priv, &task->buf);
29853+ list_del(&task->head);
29854+ list_add_tail(&task->head, &scheduler->task_done_queue);
29855+ }
29856+
29857+ schedule_delayed_work(&scheduler->wq, 1);
29858+ scheduler->idle = 1;
29859+ wake_up(&scheduler->idle_queue);
29860+
29861+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29862+ psb_scheduler_restart(dev_priv);
29863+
29864+}
29865+
29866+int psb_scheduler_init(struct drm_device *dev, struct psb_scheduler *scheduler)
29867+{
29868+ struct psb_hw_scene *hw_scene;
29869+ int i;
29870+
29871+ memset(scheduler, 0, sizeof(*scheduler));
29872+ scheduler->dev = dev;
29873+ mutex_init(&scheduler->task_wq_mutex);
29874+ spin_lock_init(&scheduler->lock);
29875+ scheduler->idle = 1;
29876+
29877+ INIT_LIST_HEAD(&scheduler->ta_queue);
29878+ INIT_LIST_HEAD(&scheduler->raster_queue);
29879+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
29880+ INIT_LIST_HEAD(&scheduler->hw_scenes);
29881+ INIT_LIST_HEAD(&scheduler->task_done_queue);
29882+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
29883+ init_waitqueue_head(&scheduler->idle_queue);
29884+
29885+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
29886+ hw_scene = &scheduler->hs[i];
29887+ hw_scene->context_number = i;
29888+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
29889+ }
29890+
29891+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
29892+ scheduler->seq[i].reported = 0;
29893+ }
29894+
29895+ return 0;
29896+}
29897+
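psb_scheduler_init() leaves the scheduler idle with every hardware scene slot parked on hw_scenes; psb_scheduler_takedown() below only flushes the delayed free-task work. A sketch of the expected driver-load pairing (error handling elided, surrounding driver names assumed):

    /* Hedged sketch: load/unload pairing for the scheduler. */
    if (psb_scheduler_init(dev, &dev_priv->scheduler) == 0) {
            /* ... queue TA/raster tasks, service interrupts ... */
            psb_scheduler_takedown(&dev_priv->scheduler);
    }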
29898+/*
29899+ * Scene references maintained by the scheduler are not refcounted.
29900+ * Remove all references to a particular scene here.
29901+ */
29902+
29903+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
29904+{
29905+ struct drm_psb_private *dev_priv =
29906+ (struct drm_psb_private *)scene->dev->dev_private;
29907+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29908+ struct psb_hw_scene *hw_scene;
29909+ unsigned long irq_flags;
29910+ unsigned int i;
29911+
29912+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29913+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
29914+ hw_scene = &scheduler->hs[i];
29915+ if (hw_scene->last_scene == scene) {
29916+ BUG_ON(list_empty(&hw_scene->head));
29917+ hw_scene->last_scene = NULL;
29918+ }
29919+ }
29920+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29921+}
29922+
29923+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
29924+{
29925+ flush_scheduled_work();
29926+}
29927+
29928+static int psb_setup_task_devlocked(struct drm_device *dev,
29929+ struct drm_psb_cmdbuf_arg *arg,
29930+ struct drm_buffer_object *raster_cmd_buffer,
29931+ struct drm_buffer_object *ta_cmd_buffer,
29932+ struct drm_buffer_object *oom_cmd_buffer,
29933+ struct psb_scene *scene,
29934+ enum psb_task_type task_type,
29935+ uint32_t engine,
29936+ uint32_t flags, struct psb_task **task_p)
29937+{
29938+ struct psb_task *task;
29939+ int ret;
29940+
29941+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
29942+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
29943+ return -EINVAL;
29944+ }
29945+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
29946+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
29947+ return -EINVAL;
29948+ }
29949+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
29950+ DRM_ERROR("Too many raster cmds %d.\n", arg->oom_size);
29951+ return -EINVAL;
29952+ }
29953+
29954+ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
29955+ if (!task)
29956+ return -ENOMEM;
29957+
29958+ atomic_set(&task->buf.done, 1);
29959+ task->engine = engine;
29960+ INIT_LIST_HEAD(&task->head);
29961+ INIT_LIST_HEAD(&task->buf.head);
29962+ if (ta_cmd_buffer && arg->ta_size != 0) {
29963+ task->ta_cmd_size = arg->ta_size;
29964+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
29965+ arg->ta_offset,
29966+ arg->ta_size,
29967+ PSB_ENGINE_TA, task->ta_cmds);
29968+ if (ret)
29969+ goto out_err;
29970+ }
29971+ if (raster_cmd_buffer) {
29972+ task->raster_cmd_size = arg->cmdbuf_size;
29973+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
29974+ arg->cmdbuf_offset,
29975+ arg->cmdbuf_size,
29976+ PSB_ENGINE_TA, task->raster_cmds);
29977+ if (ret)
29978+ goto out_err;
29979+ }
29980+ if (oom_cmd_buffer && arg->oom_size != 0) {
29981+ task->oom_cmd_size = arg->oom_size;
29982+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
29983+ arg->oom_offset,
29984+ arg->oom_size,
29985+ PSB_ENGINE_TA, task->oom_cmds);
29986+ if (ret)
29987+ goto out_err;
29988+ }
29989+ task->task_type = task_type;
29990+ task->flags = flags;
29991+ if (scene)
29992+ task->scene = psb_scene_ref(scene);
29993+
29994+ *task_p = task;
29995+ return 0;
29996+ out_err:
29997+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
29998+ *task_p = NULL;
29999+ return ret;
30000+}
30001+
30002+int psb_cmdbuf_ta(struct drm_file *priv,
30003+ struct drm_psb_cmdbuf_arg *arg,
30004+ struct drm_buffer_object *cmd_buffer,
30005+ struct drm_buffer_object *ta_buffer,
30006+ struct drm_buffer_object *oom_buffer,
30007+ struct psb_scene *scene,
30008+ struct psb_feedback_info *feedback,
30009+ struct drm_fence_arg *fence_arg)
30010+{
30011+ struct drm_device *dev = priv->minor->dev;
30012+ struct drm_psb_private *dev_priv = dev->dev_private;
30013+ struct drm_fence_object *fence = NULL;
30014+ struct psb_task *task = NULL;
30015+ int ret;
30016+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30017+ unsigned long irq_flags;
30018+
30019+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
30020+
30021+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
30022+ if (ret)
30023+ return -EAGAIN;
30024+
30025+ mutex_lock(&dev->struct_mutex);
30026+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, ta_buffer,
30027+ oom_buffer, scene,
30028+ psb_ta_task, PSB_ENGINE_TA,
30029+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
30030+ mutex_unlock(&dev->struct_mutex);
30031+
30032+ if (ret)
30033+ goto out_err;
30034+
30035+ task->feedback = *feedback;
30036+
30037+ /*
30038+ * Hand the task over to the scheduler.
30039+ */
30040+
30041+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30042+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
30043+
30044+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
30045+
30046+ task->ta_complete_action = PSB_RASTER;
30047+ task->raster_complete_action = PSB_RETURN;
30048+
30049+ list_add_tail(&task->head, &scheduler->ta_queue);
30050+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
30051+
30052+ psb_schedule_ta(dev_priv, scheduler);
30053+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30054+
30055+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
30056+ drm_regs_fence(&dev_priv->use_manager, fence);
30057+ if (fence)
30058+ fence_arg->signaled |= 0x1;
30059+
30060+ out_err:
30061+ if (ret && ret != -EAGAIN)
30062+ DRM_ERROR("TA task queue job failed.\n");
30063+
30064+ if (fence) {
30065+#ifdef PSB_WAIT_FOR_TA_COMPLETION
30066+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
30067+ _PSB_FENCE_TYPE_TA_DONE);
30068+#ifdef PSB_BE_PARANOID
30069+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
30070+ _PSB_FENCE_TYPE_SCENE_DONE);
30071+#endif
30072+#endif
30073+ drm_fence_usage_deref_unlocked(&fence);
30074+ }
30075+ mutex_unlock(&dev_priv->reset_mutex);
30076+
30077+ return ret;
30078+}
30079+
30080+int psb_cmdbuf_raster(struct drm_file *priv,
30081+ struct drm_psb_cmdbuf_arg *arg,
30082+ struct drm_buffer_object *cmd_buffer,
30083+ struct drm_fence_arg *fence_arg)
30084+{
30085+ struct drm_device *dev = priv->minor->dev;
30086+ struct drm_psb_private *dev_priv = dev->dev_private;
30087+ struct drm_fence_object *fence = NULL;
30088+ struct psb_task *task = NULL;
30089+ int ret;
30090+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30091+ unsigned long irq_flags;
30092+
30093+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
30094+
30095+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
30096+ if (ret)
30097+ return -EAGAIN;
30098+
30099+ mutex_lock(&dev->struct_mutex);
30100+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, NULL, NULL,
30101+ NULL, psb_raster_task,
30102+ PSB_ENGINE_TA, 0, &task);
30103+ mutex_unlock(&dev->struct_mutex);
30104+
30105+ if (ret)
30106+ goto out_err;
30107+
30108+ /*
30109+ * Hand the task over to the scheduler.
30110+ */
30111+
30112+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30113+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
30114+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
30115+ task->ta_complete_action = PSB_RASTER;
30116+ task->raster_complete_action = PSB_RETURN;
30117+
30118+ list_add_tail(&task->head, &scheduler->ta_queue);
30119+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
30120+ psb_schedule_ta(dev_priv, scheduler);
30121+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30122+
30123+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
30124+ drm_regs_fence(&dev_priv->use_manager, fence);
30125+ if (fence)
30126+ fence_arg->signaled |= 0x1;
30127+ out_err:
30128+ if (ret && ret != -EAGAIN)
30129+ DRM_ERROR("Raster task queue job failed.\n");
30130+
30131+ if (fence) {
30132+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
30133+ drm_fence_object_wait(fence, 1, 1, fence->type);
30134+#endif
30135+ drm_fence_usage_deref_unlocked(&fence);
30136+ }
30137+
30138+ mutex_unlock(&dev_priv->reset_mutex);
30139+
30140+ return ret;
30141+}
30142+
30143+#ifdef FIX_TG_16
30144+
30145+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
30146+{
30147+ if (psb_2d_trylock(dev_priv)) {
30148+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
30149+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
30150+ _PSB_C2B_STATUS_BUSY))) {
30151+ return 0;
30152+ }
30153+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
30154+ psb_2D_irq_on(dev_priv);
30155+
30156+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
30157+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
30158+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
30159+
30160+ psb_2d_atomic_unlock(dev_priv);
30161+ }
30162+
30163+ atomic_set(&dev_priv->ta_wait_2d, 1);
30164+ return -EBUSY;
30165+}
30166+
30167+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
30168+{
30169+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30170+
30171+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
30172+ psb_schedule_ta(dev_priv, scheduler);
30173+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30174+ wake_up(&dev_priv->queue_2d);
30175+ }
30176+}
30177+
30178+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
30179+{
30180+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30181+ unsigned long irq_flags;
30182+
30183+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30184+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
30185+ atomic_set(&dev_priv->ta_wait_2d, 0);
30186+ psb_2D_irq_off(dev_priv);
30187+ psb_schedule_ta(dev_priv, scheduler);
30188+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30189+ wake_up(&dev_priv->queue_2d);
30190+ }
30191+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30192+}
30193+
30194+/*
30195+ * 2D locking functions. Can't use a mutex since the trylock() and
30196+ * unlock() methods need to be accessible from interrupt context.
30197+ */
30198+
30199+static int psb_2d_trylock(struct drm_psb_private *dev_priv)
30200+{
30201+ return (atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0);
30202+}
30203+
30204+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
30205+{
30206+ atomic_set(&dev_priv->lock_2d, 0);
30207+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30208+ wake_up(&dev_priv->queue_2d);
30209+}
30210+
30211+void psb_2d_unlock(struct drm_psb_private *dev_priv)
30212+{
30213+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30214+ unsigned long irq_flags;
30215+
30216+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30217+ psb_2d_atomic_unlock(dev_priv);
30218+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
30219+ psb_atomic_resume_ta_2d_idle(dev_priv);
30220+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30221+}
30222+
30223+void psb_2d_lock(struct drm_psb_private *dev_priv)
30224+{
30225+ atomic_inc(&dev_priv->waiters_2d);
30226+ wait_event(dev_priv->queue_2d, atomic_read(&dev_priv->ta_wait_2d) == 0);
30227+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
30228+ atomic_dec(&dev_priv->waiters_2d);
30229+}
30230+
30231+#endif
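Because the 2D lock is a bare atomic_t, psb_2d_trylock() is usable from interrupt context while psb_2d_lock() sleeps on queue_2d in process context, first letting any waiting TA drain. A sketch of the intended process-context pairing around a slave-port submission (example_2d_blit() is hypothetical; psb_2d_submit() is defined in psb_sgx.c below):

    /* Hedged sketch: process-context 2D submission under the atomic lock. */
    static void example_2d_blit(struct drm_psb_private *dev_priv,
                                uint32_t *cmds, unsigned size)
    {
            psb_2d_lock(dev_priv);             /* may sleep on queue_2d */
            (void)psb_2d_submit(dev_priv, cmds, size);
            psb_2d_unlock(dev_priv);           /* resumes a waiting TA */
    }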
30232Index: linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.h
30233===================================================================
30234--- /dev/null 1970-01-01 00:00:00.000000000 +0000
30235+++ linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.h 2009-02-05 13:29:33.000000000 +0000
30236@@ -0,0 +1,170 @@
30237+/**************************************************************************
30238+ * Copyright (c) 2007, Intel Corporation.
30239+ * All Rights Reserved.
30240+ *
30241+ * This program is free software; you can redistribute it and/or modify it
30242+ * under the terms and conditions of the GNU General Public License,
30243+ * version 2, as published by the Free Software Foundation.
30244+ *
30245+ * This program is distributed in the hope it will be useful, but WITHOUT
30246+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
30247+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
30248+ * more details.
30249+ *
30250+ * You should have received a copy of the GNU General Public License along with
30251+ * this program; if not, write to the Free Software Foundation, Inc.,
30252+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
30253+ *
30254+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
30255+ * develop this driver.
30256+ *
30257+ **************************************************************************/
30258+/*
30259+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
30260+ */
30261+
30262+#ifndef _PSB_SCHEDULE_H_
30263+#define _PSB_SCHEDULE_H_
30264+
30265+#include "drmP.h"
30266+
30267+enum psb_task_type {
30268+ psb_ta_midscene_task,
30269+ psb_ta_task,
30270+ psb_raster_task,
30271+ psb_freescene_task
30272+};
30273+
30274+#define PSB_MAX_TA_CMDS 60
30275+#define PSB_MAX_RASTER_CMDS 60
30276+#define PSB_MAX_OOM_CMDS 6
30277+
30278+struct psb_xhw_buf {
30279+ struct list_head head;
30280+ int copy_back;
30281+ atomic_t done;
30282+ struct drm_psb_xhw_arg arg;
30283+
30284+};
30285+
30286+struct psb_feedback_info {
30287+ struct drm_buffer_object *bo;
30288+ struct page *page;
30289+ uint32_t offset;
30290+};
30291+
30292+struct psb_task {
30293+ struct list_head head;
30294+ struct psb_scene *scene;
30295+ struct psb_feedback_info feedback;
30296+ enum psb_task_type task_type;
30297+ uint32_t engine;
30298+ uint32_t sequence;
30299+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
30300+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
30301+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
30302+ uint32_t ta_cmd_size;
30303+ uint32_t raster_cmd_size;
30304+ uint32_t oom_cmd_size;
30305+ uint32_t feedback_offset;
30306+ uint32_t ta_complete_action;
30307+ uint32_t raster_complete_action;
30308+ uint32_t hw_cookie;
30309+ uint32_t flags;
30310+ uint32_t reply_flags;
30311+ uint32_t aborting;
30312+ struct psb_xhw_buf buf;
30313+};
30314+
30315+struct psb_hw_scene {
30316+ struct list_head head;
30317+ uint32_t context_number;
30318+
30319+ /*
30320+ * This pointer does not refcount the last_scene_buffer,
30321+ * so we must make sure it is set to NULL before destroying
30322+ * the corresponding task.
30323+ */
30324+
30325+ struct psb_scene *last_scene;
30326+};
30327+
30328+struct psb_scene;
30329+struct drm_psb_private;
30330+
30331+struct psb_scheduler_seq {
30332+ uint32_t sequence;
30333+ int reported;
30334+};
30335+
30336+struct psb_scheduler {
30337+ struct drm_device *dev;
30338+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
30339+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
30340+ struct mutex task_wq_mutex;
30341+ spinlock_t lock;
30342+ struct list_head hw_scenes;
30343+ struct list_head ta_queue;
30344+ struct list_head raster_queue;
30345+ struct list_head hp_raster_queue;
30346+ struct list_head task_done_queue;
30347+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
30348+ struct psb_task *feedback_task;
30349+ int ta_state;
30350+ struct psb_hw_scene *pending_hw_scene;
30351+ uint32_t pending_hw_scene_seq;
30352+ struct delayed_work wq;
30353+ struct psb_scene_pool *pool;
30354+ uint32_t idle_count;
30355+ int idle;
30356+ wait_queue_head_t idle_queue;
30357+ unsigned long ta_end_jiffies;
30358+ unsigned long raster_end_jiffies;
30359+ unsigned long total_raster_jiffies;
30360+};
30361+
30362+#define PSB_RF_FIRE_TA (1 << 0)
30363+#define PSB_RF_OOM (1 << 1)
30364+#define PSB_RF_OOM_REPLY (1 << 2)
30365+#define PSB_RF_TERMINATE (1 << 3)
30366+#define PSB_RF_TA_DONE (1 << 4)
30367+#define PSB_RF_FIRE_RASTER (1 << 5)
30368+#define PSB_RF_RASTER_DONE (1 << 6)
30369+#define PSB_RF_DEALLOC (1 << 7)
30370+
30371+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
30372+ int shareable, uint32_t w,
30373+ uint32_t h);
30374+extern uint32_t psb_scene_handle(struct psb_scene *scene);
30375+extern int psb_scheduler_init(struct drm_device *dev,
30376+ struct psb_scheduler *scheduler);
30377+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
30378+extern int psb_cmdbuf_ta(struct drm_file *priv,
30379+ struct drm_psb_cmdbuf_arg *arg,
30380+ struct drm_buffer_object *cmd_buffer,
30381+ struct drm_buffer_object *ta_buffer,
30382+ struct drm_buffer_object *oom_buffer,
30383+ struct psb_scene *scene,
30384+ struct psb_feedback_info *feedback,
30385+ struct drm_fence_arg *fence_arg);
30386+extern int psb_cmdbuf_raster(struct drm_file *priv,
30387+ struct drm_psb_cmdbuf_arg *arg,
30388+ struct drm_buffer_object *cmd_buffer,
30389+ struct drm_fence_arg *fence_arg);
30390+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
30391+ uint32_t status);
30392+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
30393+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
30394+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
30395+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
30396+
30397+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
30398+ int *lockup, int *idle);
30399+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
30400+ int error_condition);
30401+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
30402+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
30403+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
30404+extern int psb_extend_raster_timeout(struct drm_psb_private *dev_priv);
30405+
30406+#endif
30407Index: linux-2.6.27/drivers/gpu/drm/psb/psb_setup.c
30408===================================================================
30409--- /dev/null 1970-01-01 00:00:00.000000000 +0000
30410+++ linux-2.6.27/drivers/gpu/drm/psb/psb_setup.c 2009-02-05 13:29:33.000000000 +0000
30411@@ -0,0 +1,17 @@
30412+#include "drmP.h"
30413+#include "drm.h"
30414+#include "drm_crtc.h"
30415+#include "drm_edid.h"
30416+#include "intel_drv.h"
30417+#include "psb_drv.h"
30418+#include "i915_reg.h"
30419+#include "intel_crt.c"
30420+
30421+/* Fixed name */
30422+#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
30423+#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
30424+
30425+#include "intel_lvds.c"
30426+#include "intel_sdvo.c"
30427+#include "intel_display.c"
30428+#include "intel_modes.c"
30429Index: linux-2.6.27/drivers/gpu/drm/psb/psb_sgx.c
30430===================================================================
30431--- /dev/null 1970-01-01 00:00:00.000000000 +0000
30432+++ linux-2.6.27/drivers/gpu/drm/psb/psb_sgx.c 2009-02-05 13:29:33.000000000 +0000
30433@@ -0,0 +1,1422 @@
30434+/**************************************************************************
30435+ * Copyright (c) 2007, Intel Corporation.
30436+ * All Rights Reserved.
30437+ *
30438+ * This program is free software; you can redistribute it and/or modify it
30439+ * under the terms and conditions of the GNU General Public License,
30440+ * version 2, as published by the Free Software Foundation.
30441+ *
30442+ * This program is distributed in the hope it will be useful, but WITHOUT
30443+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
30444+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
30445+ * more details.
30446+ *
30447+ * You should have received a copy of the GNU General Public License along with
30448+ * this program; if not, write to the Free Software Foundation, Inc.,
30449+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
30450+ *
30451+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
30452+ * develop this driver.
30453+ *
30454+ **************************************************************************/
30455+/*
30456+ */
30457+
30458+#include "drmP.h"
30459+#include "psb_drv.h"
30460+#include "psb_drm.h"
30461+#include "psb_reg.h"
30462+#include "psb_scene.h"
30463+
30464+#include "psb_msvdx.h"
30465+
30466+int psb_submit_video_cmdbuf(struct drm_device *dev,
30467+ struct drm_buffer_object *cmd_buffer,
30468+ unsigned long cmd_offset, unsigned long cmd_size,
30469+ struct drm_fence_object *fence);
30470+
30471+struct psb_dstbuf_cache {
30472+ unsigned int dst;
30473+ uint32_t *use_page;
30474+ unsigned int use_index;
30475+ uint32_t use_background;
30476+ struct drm_buffer_object *dst_buf;
30477+ unsigned long dst_offset;
30478+ uint32_t *dst_page;
30479+ unsigned int dst_page_offset;
30480+ struct drm_bo_kmap_obj dst_kmap;
30481+ int dst_is_iomem;
30482+};
30483+
30484+struct psb_buflist_item {
30485+ struct drm_buffer_object *bo;
30486+ void __user *data;
30487+ int ret;
30488+ int presumed_offset_correct;
30489+};
30490+
30491+
30492+#define PSB_REG_GRAN_SHIFT 2
30493+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
30494+#define PSB_MAX_REG 0x1000
30495+
30496+static const uint32_t disallowed_ranges[][2] = {
30497+ {0x0000, 0x0200},
30498+ {0x0208, 0x0214},
30499+ {0x021C, 0x0224},
30500+ {0x0230, 0x0234},
30501+ {0x0248, 0x024C},
30502+ {0x0254, 0x0358},
30503+ {0x0428, 0x0428},
30504+ {0x0430, 0x043C},
30505+ {0x0498, 0x04B4},
30506+ {0x04CC, 0x04D8},
30507+ {0x04E0, 0x07FC},
30508+ {0x0804, 0x0A58},
30509+ {0x0A68, 0x0A80},
30510+ {0x0AA0, 0x0B1C},
30511+ {0x0B2C, 0x0CAC},
30512+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
30513+};
30514+
30515+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
30516+ (PSB_REG_GRANULARITY *
30517+ (sizeof(uint32_t) << 3))];
30518+
30519+static inline int psb_disallowed(uint32_t reg)
30520+{
30521+ reg >>= PSB_REG_GRAN_SHIFT;
30522+ return ((psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0);
30523+}
30524+
30525+void psb_init_disallowed(void)
30526+{
30527+ int i;
30528+ uint32_t reg, tmp;
30529+ static int initialized = 0;
30530+
30531+ if (initialized)
30532+ return;
30533+
30534+ initialized = 1;
30535+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
30536+
30537+ for (i = 0; i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
30538+ ++i) {
30539+ for (reg = disallowed_ranges[i][0];
30540+ reg <= disallowed_ranges[i][1]; reg += 4) {
30541+ tmp = reg >> 2;
30542+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
30543+ }
30544+ }
30545+}
30546+
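The disallowed-register table is expanded into a bitmap with one bit per 4-byte register: reg >> 2 yields the bit index, >> 5 selects the 32-bit word and & 31 the bit within it, exactly as psb_disallowed() tests. A standalone illustration of that mark/test pair (array size and register values are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    /* One bit per 4-byte register across a 0x1000-byte window. */
    static uint32_t bitmap[0x1000 / (4 * 32)];

    static void mark(uint32_t reg)
    {
            reg >>= 2;
            bitmap[reg >> 5] |= 1u << (reg & 31);
    }

    static int marked(uint32_t reg)
    {
            reg >>= 2;
            return (bitmap[reg >> 5] >> (reg & 31)) & 1;
    }

    int main(void)
    {
            mark(0x0208);
            printf("%d %d\n", marked(0x0208), marked(0x020c)); /* 1 0 */
            return 0;
    }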
30547+static int psb_memcpy_check(uint32_t * dst, const uint32_t * src, uint32_t size)
30548+{
30549+ size >>= 3;
30550+ while (size--) {
30551+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
30552+ DRM_ERROR("Forbidden SGX register access: "
30553+ "0x%04x.\n", *src);
30554+ return -EPERM;
30555+ }
30556+ *dst++ = *src++;
30557+ *dst++ = *src++;
30558+ }
30559+ return 0;
30560+}
30561+
30562+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
30563+ unsigned size)
30564+{
30565+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
30566+ int ret = 0;
30567+
30568+ retry:
30569+ if (avail < size) {
30570+#if 0
30571+ /* We'd ideally
30572+ * like to have an IRQ-driven event here.
30573+ */
30574+
30575+ psb_2D_irq_on(dev_priv);
30576+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
30577+ ((avail = PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
30578+ psb_2D_irq_off(dev_priv);
30579+ if (ret == 0)
30580+ return 0;
30581+ if (ret == -EINTR) {
30582+ ret = 0;
30583+ goto retry;
30584+ }
30585+#else
30586+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
30587+ goto retry;
30588+#endif
30589+ }
30590+ return ret;
30591+}
30592+
30593+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t * cmdbuf,
30594+ unsigned size)
30595+{
30596+ int ret = 0;
30597+ int i;
30598+ unsigned submit_size;
30599+
30600+ while (size > 0) {
30601+ submit_size = (size < 0x60) ? size : 0x60;
30602+ size -= submit_size;
30603+ ret = psb_2d_wait_available(dev_priv, submit_size);
30604+ if (ret)
30605+ return ret;
30606+
30607+ submit_size <<= 2;
30608+
30609+ for (i = 0; i < submit_size; i += 4) {
30610+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
30611+ }
30612+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
30613+ }
30614+ return 0;
30615+}
30616+
30617+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
30618+{
30619+ uint32_t buffer[8];
30620+ uint32_t *bufp = buffer;
30621+ int ret;
30622+
30623+ *bufp++ = PSB_2D_FENCE_BH;
30624+
30625+ *bufp++ = PSB_2D_DST_SURF_BH |
30626+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
30627+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
30628+
30629+ *bufp++ = PSB_2D_BLIT_BH |
30630+ PSB_2D_ROT_NONE |
30631+ PSB_2D_COPYORDER_TL2BR |
30632+ PSB_2D_DSTCK_DISABLE |
30633+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
30634+
30635+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
30636+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
30637+ (0 << PSB_2D_DST_YSTART_SHIFT);
30638+ *bufp++ = (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
30639+
30640+ *bufp++ = PSB_2D_FLUSH_BH;
30641+
30642+ psb_2d_lock(dev_priv);
30643+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
30644+ psb_2d_unlock(dev_priv);
30645+
30646+ if (!ret)
30647+ psb_schedule_watchdog(dev_priv);
30648+ return ret;
30649+}
30650+
30651+int psb_emit_2d_copy_blit(struct drm_device *dev,
30652+ uint32_t src_offset,
30653+ uint32_t dst_offset, uint32_t pages, int direction)
30654+{
30655+ uint32_t cur_pages;
30656+ struct drm_psb_private *dev_priv = dev->dev_private;
30657+ uint32_t buf[10];
30658+ uint32_t *bufp;
30659+ uint32_t xstart;
30660+ uint32_t ystart;
30661+ uint32_t blit_cmd;
30662+ uint32_t pg_add;
30663+ int ret = 0;
30664+
30665+ if (!dev_priv)
30666+ return 0;
30667+
30668+ if (direction) {
30669+ pg_add = (pages - 1) << PAGE_SHIFT;
30670+ src_offset += pg_add;
30671+ dst_offset += pg_add;
30672+ }
30673+
30674+ blit_cmd = PSB_2D_BLIT_BH |
30675+ PSB_2D_ROT_NONE |
30676+ PSB_2D_DSTCK_DISABLE |
30677+ PSB_2D_SRCCK_DISABLE |
30678+ PSB_2D_USE_PAT |
30679+ PSB_2D_ROP3_SRCCOPY |
30680+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
30681+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
30682+
30683+ psb_2d_lock(dev_priv);
30684+ while (pages > 0) {
30685+ cur_pages = pages;
30686+ if (cur_pages > 2048)
30687+ cur_pages = 2048;
30688+ pages -= cur_pages;
30689+ ystart = (direction) ? cur_pages - 1 : 0;
30690+
30691+ bufp = buf;
30692+ *bufp++ = PSB_2D_FENCE_BH;
30693+
30694+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
30695+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
30696+ *bufp++ = dst_offset;
30697+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
30698+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
30699+ *bufp++ = src_offset;
30700+ *bufp++ =
30701+ PSB_2D_SRC_OFF_BH | (xstart << PSB_2D_SRCOFF_XSTART_SHIFT) |
30702+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
30703+ *bufp++ = blit_cmd;
30704+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
30705+ (ystart << PSB_2D_DST_YSTART_SHIFT);
30706+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
30707+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
30708+
30709+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
30710+ if (ret)
30711+ goto out;
30712+ pg_add = (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
30713+ src_offset += pg_add;
30714+ dst_offset += pg_add;
30715+ }
30716+ out:
30717+ psb_2d_unlock(dev_priv);
30718+ return ret;
30719+}
30720+
30721+void psb_init_2d(struct drm_psb_private *dev_priv)
30722+{
30723+ spin_lock_init(&dev_priv->sequence_lock);
30724+ psb_reset(dev_priv, 1);
30725+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
30726+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
30727+ (void)PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
30728+}
30729+
30730+int psb_idle_2d(struct drm_device *dev)
30731+{
30732+ struct drm_psb_private *dev_priv = dev->dev_private;
30733+ unsigned long _end = jiffies + DRM_HZ;
30734+ int busy = 0;
30735+
30736+ /*
30737+ * First idle the 2D engine.
30738+ */
30739+
30740+ if (dev_priv->engine_lockup_2d)
30741+ return -EBUSY;
30742+
30743+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
30744+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
30745+ goto out;
30746+
30747+ do {
30748+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
30749+ } while (busy && !time_after_eq(jiffies, _end));
30750+
30751+ if (busy)
30752+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
30753+ if (busy)
30754+ goto out;
30755+
30756+ do {
30757+ busy =
30758+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
30759+ != 0);
30760+ } while (busy && !time_after_eq(jiffies, _end));
30761+ if (busy)
30762+ busy =
30763+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
30764+ != 0);
30765+
30766+ out:
30767+ if (busy)
30768+ dev_priv->engine_lockup_2d = 1;
30769+
30770+ return (busy) ? -EBUSY : 0;
30771+}
30772+
30773+int psb_idle_3d(struct drm_device *dev)
30774+{
30775+ struct drm_psb_private *dev_priv = dev->dev_private;
30776+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30777+ int ret;
30778+
30779+ ret = wait_event_timeout(scheduler->idle_queue,
30780+ psb_scheduler_finished(dev_priv), DRM_HZ * 10);
30781+
30782+ return (ret < 1) ? -EBUSY : 0;
30783+}
30784+
30785+static void psb_dereference_buffers_locked(struct psb_buflist_item *buffers,
30786+ unsigned num_buffers)
30787+{
30788+ while (num_buffers--)
30789+ drm_bo_usage_deref_locked(&((buffers++)->bo));
30790+
30791+}
30792+
30793+static int psb_check_presumed(struct drm_bo_op_arg *arg,
30794+ struct drm_buffer_object *bo,
30795+ uint32_t __user * data, int *presumed_ok)
30796+{
30797+ struct drm_bo_op_req *req = &arg->d.req;
30798+ uint32_t hint_offset;
30799+ uint32_t hint = req->bo_req.hint;
30800+
30801+ *presumed_ok = 0;
30802+
30803+ if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
30804+ return 0;
30805+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
30806+ *presumed_ok = 1;
30807+ return 0;
30808+ }
30809+ if (bo->offset == req->bo_req.presumed_offset) {
30810+ *presumed_ok = 1;
30811+ return 0;
30812+ }
30813+
30814+ /*
30815+ * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
30816+ * the user-space IOCTL argument list, since the buffer has moved,
30817+ * we're about to apply relocations and we might subsequently
30818+ * hit an -EAGAIN. In that case the argument list will be reused by
30819+ * user-space, but the presumed offset is no longer valid.
30820+ *
30821+ * Needless to say, this is a bit ugly.
30822+ */
30823+
30824+ hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
30825+ hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
30826+ return __put_user(hint, data + hint_offset);
30827+}
30828+
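psb_check_presumed() patches DRM_BO_HINT_PRESUMED_OFFSET out of the user's own argument list, locating the hint by pointer-subtracting in uint32_t units from the start of the arg struct. A standalone illustration of that word-offset computation (struct fake_arg is a simplified stand-in for struct drm_bo_op_arg):

    #include <stdint.h>
    #include <stdio.h>

    struct fake_arg {          /* stand-in for struct drm_bo_op_arg */
            uint32_t next;
            uint32_t handle;
            uint32_t hint;     /* stand-in for req->bo_req.hint */
    };

    int main(void)
    {
            struct fake_arg arg;
            /* Word index of `hint` within the struct, computed the
             * same way the driver does with pointer subtraction. */
            uint32_t off = (uint32_t)((uint32_t *)&arg.hint -
                                      (uint32_t *)&arg);
            printf("hint word offset = %u\n", off); /* 2 */
            return 0;
    }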
30829+static int psb_validate_buffer_list(struct drm_file *file_priv,
30830+ unsigned fence_class,
30831+ unsigned long data,
30832+ struct psb_buflist_item *buffers,
30833+ unsigned *num_buffers)
30834+{
30835+ struct drm_bo_op_arg arg;
30836+ struct drm_bo_op_req *req = &arg.d.req;
30837+ int ret = 0;
30838+ unsigned buf_count = 0;
30839+ struct psb_buflist_item *item = buffers;
30840+
30841+ do {
30842+ if (buf_count >= *num_buffers) {
30843+ DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
30844+ ret = -EINVAL;
30845+ goto out_err;
30846+ }
30847+ item = buffers + buf_count;
30848+ item->bo = NULL;
30849+
30850+ if (copy_from_user(&arg, (void __user *)data, sizeof(arg))) {
30851+ ret = -EFAULT;
30852+ DRM_ERROR("Error copying validate list.\n"
30853+ "\tbuffer %d, user addr 0x%08lx %d\n",
30854+ buf_count, (unsigned long)data, sizeof(arg));
30855+ goto out_err;
30856+ }
30857+
30858+ ret = 0;
30859+ if (req->op != drm_bo_validate) {
30860+ DRM_ERROR
30861+ ("Buffer object operation wasn't \"validate\".\n");
30862+ ret = -EINVAL;
30863+ goto out_err;
30864+ }
30865+
30866+ item->ret = 0;
30867+ item->data = (void __user *)data;
30868+ ret = drm_bo_handle_validate(file_priv,
30869+ req->bo_req.handle,
30870+ fence_class,
30871+ req->bo_req.flags,
30872+ req->bo_req.mask,
30873+ req->bo_req.hint,
30874+ 0, NULL, &item->bo);
30875+ if (ret)
30876+ goto out_err;
30877+
30878+ PSB_DEBUG_GENERAL("Validated buffer at 0x%08lx\n",
30879+ buffers[buf_count].bo->offset);
30880+
30881+ buf_count++;
30882+
30883+
30884+ ret = psb_check_presumed(&arg, item->bo,
30885+ (uint32_t __user *)
30886+ (unsigned long) data,
30887+ &item->presumed_offset_correct);
30888+
30889+ if (ret)
30890+ goto out_err;
30891+
30892+ data = arg.next;
30893+ } while (data);
30894+
30895+ *num_buffers = buf_count;
30896+
30897+ return 0;
30898+ out_err:
30899+
30900+ *num_buffers = buf_count;
30901+ item->ret = (ret != -EAGAIN) ? ret : 0;
30902+ return ret;
30903+}
30904+
30905+int
30906+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
30907+ unsigned int cmds)
30908+{
30909+ int i;
30910+
30911+ /*
30912+ * cmds is 32-bit words.
30913+ */
30914+
30915+ cmds >>= 1;
30916+ for (i = 0; i < cmds; ++i) {
30917+ PSB_WSGX32(regs[1], regs[0]);
30918+ regs += 2;
30919+ }
30920+ wmb();
30921+ return 0;
30922+}
30923+
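psb_reg_submit() consumes the buffer as (register, value) pairs, which is why it halves cmds and why psb_memcpy_check() above divides the byte size by eight and validates every other word as a register offset. A sketch of how such a stream is laid out (register offsets and values are placeholders):

    /* Hedged sketch: N register writes occupy 2 * N 32-bit words. */
    uint32_t stream[4];
    unsigned words = 0;

    stream[words++] = 0x0f00;       /* placeholder register offset */
    stream[words++] = 0xdeadbeef;   /* value for that register */
    stream[words++] = 0x0f04;
    stream[words++] = 0x00000001;

    /* psb_reg_submit(dev_priv, stream, words) would then issue
     * words >> 1 == 2 register writes and execute a wmb(). */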
30924+/*
30925+ * Security: Block user-space writing to MMU mapping registers.
30926+ * This is important for security and brings Poulsbo DRM
30927+ * up to par with the other DRM drivers. Using this,
30928+ * user-space should not be able to map arbitrary memory
30929+ * pages to graphics memory, but all user-space processes
30930+ * basically have access to all buffer objects mapped to
30931+ * graphics memory.
30932+ */
30933+
30934+int
30935+psb_submit_copy_cmdbuf(struct drm_device *dev,
30936+ struct drm_buffer_object *cmd_buffer,
30937+ unsigned long cmd_offset,
30938+ unsigned long cmd_size,
30939+ int engine, uint32_t * copy_buffer)
30940+{
30941+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
30942+ struct drm_psb_private *dev_priv = dev->dev_private;
30943+ unsigned long cmd_page_offset = cmd_offset - (cmd_offset & PAGE_MASK);
30944+ unsigned long cmd_next;
30945+ struct drm_bo_kmap_obj cmd_kmap;
30946+ uint32_t *cmd_page;
30947+ unsigned cmds;
30948+ int is_iomem;
30949+ int ret = 0;
30950+
30951+ if (cmd_size == 0)
30952+ return 0;
30953+
30954+ if (engine == PSB_ENGINE_2D)
30955+ psb_2d_lock(dev_priv);
30956+
30957+ do {
30958+ cmd_next = drm_bo_offset_end(cmd_offset, cmd_end);
30959+ ret = drm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
30960+ 1, &cmd_kmap);
30961+
30962+ if (ret)
30963+ break; /* don't leak the 2D lock on early exit */
30964+ cmd_page = drm_bmo_virtual(&cmd_kmap, &is_iomem);
30965+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
30966+ cmds = (cmd_next - cmd_offset) >> 2;
30967+
30968+ switch (engine) {
30969+ case PSB_ENGINE_2D:
30970+ ret =
30971+ psb_2d_submit(dev_priv, cmd_page + cmd_page_offset,
30972+ cmds);
30973+ break;
30974+ case PSB_ENGINE_RASTERIZER:
30975+ case PSB_ENGINE_TA:
30976+ case PSB_ENGINE_HPRAST:
30977+ PSB_DEBUG_GENERAL("Reg copy.\n");
30978+ ret = psb_memcpy_check(copy_buffer,
30979+ cmd_page + cmd_page_offset,
30980+ cmds * sizeof(uint32_t));
30981+ copy_buffer += cmds;
30982+ break;
30983+ default:
30984+ ret = -EINVAL;
30985+ }
30986+ drm_bo_kunmap(&cmd_kmap);
30987+ if (ret)
30988+ break;
30989+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
30990+
30991+ if (engine == PSB_ENGINE_2D)
30992+ psb_2d_unlock(dev_priv);
30993+
30994+ return ret;
30995+}
30996+
30997+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
30998+{
30999+ if (dst_cache->dst_page) {
31000+ drm_bo_kunmap(&dst_cache->dst_kmap);
31001+ dst_cache->dst_page = NULL;
31002+ }
31003+ dst_cache->dst_buf = NULL;
31004+ dst_cache->dst = ~0;
31005+ dst_cache->use_page = NULL;
31006+}
31007+
31008+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
31009+ struct psb_buflist_item *buffers,
31010+ unsigned int dst, unsigned long dst_offset)
31011+{
31012+ int ret;
31013+
31014+ PSB_DEBUG_RELOC("Destination buffer is %d.\n", dst);
31015+
31016+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
31017+ psb_clear_dstbuf_cache(dst_cache);
31018+ dst_cache->dst = dst;
31019+ dst_cache->dst_buf = buffers[dst].bo;
31020+ }
31021+
31022+ if (unlikely(dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
31023+ DRM_ERROR("Relocation destination out of bounds.\n");
31024+ return -EINVAL;
31025+ }
31026+
31027+ if (!drm_bo_same_page(dst_cache->dst_offset, dst_offset) ||
31028+ NULL == dst_cache->dst_page) {
31029+ if (NULL != dst_cache->dst_page) {
31030+ drm_bo_kunmap(&dst_cache->dst_kmap);
31031+ dst_cache->dst_page = NULL;
31032+ }
31033+
31034+ ret = drm_bo_kmap(dst_cache->dst_buf, dst_offset >> PAGE_SHIFT,
31035+ 1, &dst_cache->dst_kmap);
31036+ if (ret) {
31037+ DRM_ERROR("Could not map destination buffer for "
31038+ "relocation.\n");
31039+ return ret;
31040+ }
31041+
31042+ dst_cache->dst_page = drm_bmo_virtual(&dst_cache->dst_kmap,
31043+ &dst_cache->dst_is_iomem);
31044+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
31045+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
31046+ }
31047+ return 0;
31048+}
31049+
31050+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
31051+ uint32_t fence_class,
31052+ const struct drm_psb_reloc *reloc,
31053+ struct psb_buflist_item *buffers,
31054+ int num_buffers,
31055+ struct psb_dstbuf_cache *dst_cache,
31056+ int no_wait, int interruptible)
31057+{
31058+ int reg;
31059+ uint32_t val;
31060+ uint32_t background;
31061+ unsigned int index;
31062+ int ret;
31063+ unsigned int shift;
31064+ unsigned int align_shift;
31065+ uint32_t fence_type;
31066+ struct drm_buffer_object *reloc_bo;
31067+
31068+ PSB_DEBUG_RELOC("Reloc type %d\n"
31069+ "\t where 0x%04x\n"
31070+ "\t buffer 0x%04x\n"
31071+ "\t mask 0x%08x\n"
31072+ "\t shift 0x%08x\n"
31073+ "\t pre_add 0x%08x\n"
31074+ "\t background 0x%08x\n"
31075+ "\t dst_buffer 0x%08x\n"
31076+ "\t arg0 0x%08x\n"
31077+ "\t arg1 0x%08x\n",
31078+ reloc->reloc_op,
31079+ reloc->where,
31080+ reloc->buffer,
31081+ reloc->mask,
31082+ reloc->shift,
31083+ reloc->pre_add,
31084+ reloc->background,
31085+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
31086+
31087+ if (unlikely(reloc->buffer >= num_buffers)) {
31088+ DRM_ERROR("Illegal relocation buffer %d.\n", reloc->buffer);
31089+ return -EINVAL;
31090+ }
31091+
31092+ if (buffers[reloc->buffer].presumed_offset_correct)
31093+ return 0;
31094+
31095+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
31096+ DRM_ERROR("Illegal destination buffer for relocation %d.\n",
31097+ reloc->dst_buffer);
31098+ return -EINVAL;
31099+ }
31100+
31101+ ret = psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
31102+ reloc->where << 2);
31103+ if (ret)
31104+ return ret;
31105+
31106+ reloc_bo = buffers[reloc->buffer].bo;
31107+
31108+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
31109+ DRM_ERROR("Illegal relocation offset add.\n");
31110+ return -EINVAL;
31111+ }
31112+
31113+ switch (reloc->reloc_op) {
31114+ case PSB_RELOC_OP_OFFSET:
31115+ val = reloc_bo->offset + reloc->pre_add;
31116+ break;
31117+ case PSB_RELOC_OP_2D_OFFSET:
31118+ val = reloc_bo->offset + reloc->pre_add -
31119+ dev_priv->mmu_2d_offset;
31120+ if (unlikely(val >= PSB_2D_SIZE)) {
31121+ DRM_ERROR("2D relocation out of bounds\n");
31122+ return -EINVAL;
31123+ }
31124+ break;
31125+ case PSB_RELOC_OP_PDS_OFFSET:
31126+ val = reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
31127+ if (unlikely(val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
31128+ DRM_ERROR("PDS relocation out of bounds\n");
31129+ return -EINVAL;
31130+ }
31131+ break;
31132+ case PSB_RELOC_OP_USE_OFFSET:
31133+ case PSB_RELOC_OP_USE_REG:
31134+
31135+ /*
31136+ * Security:
31137+ * Only allow VERTEX or PIXEL data masters, as
31138+	 * shaders running under other data masters may in theory
31139+ * alter MMU mappings.
31140+ */
31141+
31142+ if (unlikely(reloc->arg1 != _PSB_CUC_DM_PIXEL &&
31143+ reloc->arg1 != _PSB_CUC_DM_VERTEX)) {
31144+ DRM_ERROR("Invalid data master in relocation. %d\n",
31145+ reloc->arg1);
31146+ return -EPERM;
31147+ }
31148+
31149+ fence_type = reloc_bo->fence_type;
31150+ ret = psb_grab_use_base(dev_priv,
31151+ reloc_bo->offset +
31152+ reloc->pre_add, reloc->arg0,
31153+ reloc->arg1, fence_class,
31154+ fence_type, no_wait,
31155+ interruptible, &reg, &val);
31156+ if (ret)
31157+ return ret;
31158+
31159+ val = (reloc->reloc_op == PSB_RELOC_OP_USE_REG) ? reg : val;
31160+ break;
31161+ default:
31162+ DRM_ERROR("Unimplemented relocation.\n");
31163+ return -EINVAL;
31164+ }
31165+
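+	/*
+	 * Build the value to patch in: strip the alignment bits the
+	 * hardware implies (align_shift), shift the result into the
+	 * destination bit-field (shift), and merge it with the
+	 * background word under the relocation mask below.
+	 */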
31166+ shift = (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
31167+ align_shift = (reloc->shift & PSB_RELOC_ALSHIFT_MASK) >>
31168+ PSB_RELOC_ALSHIFT_SHIFT;
31169+
31170+ val = ((val >> align_shift) << shift);
31171+ index = reloc->where - dst_cache->dst_page_offset;
31172+
31173+ background = reloc->background;
31174+
31175+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET) {
31176+ if (dst_cache->use_page == dst_cache->dst_page &&
31177+ dst_cache->use_index == index)
31178+ background = dst_cache->use_background;
31179+ else
31180+ background = dst_cache->dst_page[index];
31181+ }
31182+#if 0
31183+ if (dst_cache->dst_page[index] != PSB_RELOC_MAGIC &&
31184+ reloc->reloc_op != PSB_RELOC_OP_USE_OFFSET)
31185+ DRM_ERROR("Inconsistent relocation 0x%08lx.\n",
31186+ (unsigned long)dst_cache->dst_page[index]);
31187+#endif
31188+
31189+ val = (background & ~reloc->mask) | (val & reloc->mask);
31190+ dst_cache->dst_page[index] = val;
31191+
31192+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET ||
31193+ reloc->reloc_op == PSB_RELOC_OP_USE_REG) {
31194+ dst_cache->use_page = dst_cache->dst_page;
31195+ dst_cache->use_index = index;
31196+ dst_cache->use_background = val;
31197+ }
31198+
31199+ PSB_DEBUG_RELOC("Reloc buffer %d index 0x%08x, value 0x%08x\n",
31200+ reloc->dst_buffer, index, dst_cache->dst_page[index]);
31201+
31202+ return 0;
31203+}
31204+
31205+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
31206+ unsigned int num_pages)
31207+{
31208+ int ret = 0;
31209+
31210+ spin_lock(&dev_priv->reloc_lock);
31211+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
31212+ dev_priv->rel_mapped_pages += num_pages;
31213+ ret = 1;
31214+ }
31215+ spin_unlock(&dev_priv->reloc_lock);
31216+ return ret;
31217+}
31218+
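+/*
+ * Map the user-supplied relocation list and apply each relocation to
+ * its destination buffer.  If every buffer validated at its presumed
+ * offset, the whole pass is skipped.  The number of simultaneously
+ * mapped relocation pages is throttled by PSB_MAX_RELOC_PAGES.
+ */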
31219+static int psb_fixup_relocs(struct drm_file *file_priv,
31220+ uint32_t fence_class,
31221+ unsigned int num_relocs,
31222+ unsigned int reloc_offset,
31223+ uint32_t reloc_handle,
31224+ struct psb_buflist_item *buffers,
31225+ unsigned int num_buffers,
31226+ int no_wait, int interruptible)
31227+{
31228+ struct drm_device *dev = file_priv->minor->dev;
31229+ struct drm_psb_private *dev_priv =
31230+ (struct drm_psb_private *)dev->dev_private;
31231+ struct drm_buffer_object *reloc_buffer = NULL;
31232+ unsigned int reloc_num_pages;
31233+ unsigned int reloc_first_page;
31234+ unsigned int reloc_last_page;
31235+ struct psb_dstbuf_cache dst_cache;
31236+ struct drm_psb_reloc *reloc;
31237+ struct drm_bo_kmap_obj reloc_kmap;
31238+ int reloc_is_iomem;
31239+ int count;
31240+ int ret = 0;
31241+ int registered = 0;
31242+ int short_circuit = 1;
31243+ int i;
31244+
31245+ if (num_relocs == 0)
31246+ return 0;
31247+
31248+	for (i = 0; i < num_buffers; ++i) {
31249+ if (!buffers[i].presumed_offset_correct) {
31250+ short_circuit = 0;
31251+ break;
31252+ }
31253+ }
31254+
31255+ if (short_circuit)
31256+ return 0;
31257+
31258+ memset(&dst_cache, 0, sizeof(dst_cache));
31259+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
31260+
31261+ mutex_lock(&dev->struct_mutex);
31262+ reloc_buffer = drm_lookup_buffer_object(file_priv, reloc_handle, 1);
31263+ mutex_unlock(&dev->struct_mutex);
31264+ if (!reloc_buffer)
31265+ goto out;
31266+
31267+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
31268+ reloc_last_page =
31269+ (reloc_offset +
31270+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
31271+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
31272+ reloc_offset &= ~PAGE_MASK;
31273+
31274+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
31275+ DRM_ERROR("Relocation buffer is too large\n");
31276+ ret = -EINVAL;
31277+ goto out;
31278+ }
31279+
31280+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
31281+ (registered =
31282+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
31283+
31284+ if (ret == -EINTR) {
31285+ ret = -EAGAIN;
31286+ goto out;
31287+ }
31288+ if (ret) {
31289+ DRM_ERROR("Error waiting for space to map "
31290+ "relocation buffer.\n");
31291+ goto out;
31292+ }
31293+
31294+ ret = drm_bo_kmap(reloc_buffer, reloc_first_page,
31295+ reloc_num_pages, &reloc_kmap);
31296+
31297+ if (ret) {
31298+ DRM_ERROR("Could not map relocation buffer.\n"
31299+ "\tReloc buffer id 0x%08x.\n"
31300+ "\tReloc first page %d.\n"
31301+ "\tReloc num pages %d.\n",
31302+ reloc_handle, reloc_first_page, reloc_num_pages);
31303+ goto out;
31304+ }
31305+
31306+ reloc = (struct drm_psb_reloc *)
31307+ ((unsigned long)drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem) +
31308+ reloc_offset);
31309+
31310+ for (count = 0; count < num_relocs; ++count) {
31311+ ret = psb_apply_reloc(dev_priv, fence_class,
31312+ reloc, buffers,
31313+ num_buffers, &dst_cache,
31314+ no_wait, interruptible);
31315+ if (ret)
31316+ goto out1;
31317+ reloc++;
31318+ }
31319+
31320+ out1:
31321+ drm_bo_kunmap(&reloc_kmap);
31322+ out:
31323+ if (registered) {
31324+ spin_lock(&dev_priv->reloc_lock);
31325+ dev_priv->rel_mapped_pages -= reloc_num_pages;
31326+ spin_unlock(&dev_priv->reloc_lock);
31327+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
31328+ }
31329+
31330+ psb_clear_dstbuf_cache(&dst_cache);
31331+ if (reloc_buffer)
31332+ drm_bo_usage_deref_unlocked(&reloc_buffer);
31333+ return ret;
31334+}
31335+
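+/*
+ * Submit a 2D command buffer.  reset_mutex is held across submission
+ * and fencing so that an engine reset cannot slip in between the two.
+ */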
31336+static int psb_cmdbuf_2d(struct drm_file *priv,
31337+ struct drm_psb_cmdbuf_arg *arg,
31338+ struct drm_buffer_object *cmd_buffer,
31339+ struct drm_fence_arg *fence_arg)
31340+{
31341+ struct drm_device *dev = priv->minor->dev;
31342+ struct drm_psb_private *dev_priv =
31343+ (struct drm_psb_private *)dev->dev_private;
31344+ int ret;
31345+
31346+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
31347+ if (ret)
31348+ return -EAGAIN;
31349+
31350+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
31351+ arg->cmdbuf_size, PSB_ENGINE_2D, NULL);
31352+ if (ret)
31353+ goto out_unlock;
31354+
31355+ psb_fence_or_sync(priv, PSB_ENGINE_2D, arg, fence_arg, NULL);
31356+
31357+ mutex_lock(&cmd_buffer->mutex);
31358+ if (cmd_buffer->fence != NULL)
31359+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
31360+ mutex_unlock(&cmd_buffer->mutex);
31361+ out_unlock:
31362+ mutex_unlock(&dev_priv->reset_mutex);
31363+ return ret;
31364+}
31365+
31366+#if 0
31367+static int psb_dump_page(struct drm_buffer_object *bo,
31368+ unsigned int page_offset, unsigned int num)
31369+{
31370+ struct drm_bo_kmap_obj kmobj;
31371+ int is_iomem;
31372+ uint32_t *p;
31373+ int ret;
31374+ unsigned int i;
31375+
31376+ ret = drm_bo_kmap(bo, page_offset, 1, &kmobj);
31377+ if (ret)
31378+ return ret;
31379+
31380+ p = drm_bmo_virtual(&kmobj, &is_iomem);
31381+ for (i = 0; i < num; ++i)
31382+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
31383+
31384+ drm_bo_kunmap(&kmobj);
31385+ return 0;
31386+}
31387+#endif
31388+
31389+static void psb_idle_engine(struct drm_device *dev, int engine)
31390+{
31391+ struct drm_psb_private *dev_priv =
31392+ (struct drm_psb_private *)dev->dev_private;
31393+ uint32_t dummy;
31394+
31395+ switch (engine) {
31396+ case PSB_ENGINE_2D:
31397+
31398+ /*
31399+ * Make sure we flush 2D properly using a dummy
31400+ * fence sequence emit.
31401+ */
31402+
31403+ (void)psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
31404+ &dummy, &dummy);
31405+ psb_2d_lock(dev_priv);
31406+ (void)psb_idle_2d(dev);
31407+ psb_2d_unlock(dev_priv);
31408+ break;
31409+ case PSB_ENGINE_TA:
31410+ case PSB_ENGINE_RASTERIZER:
31411+ case PSB_ENGINE_HPRAST:
31412+ (void)psb_idle_3d(dev);
31413+ break;
31414+ default:
31415+
31416+ /*
31417+ * FIXME: Insert video engine idle command here.
31418+ */
31419+
31420+ break;
31421+ }
31422+}
31423+
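+/*
+ * Fence the buffers of the current submission.  If fence creation
+ * fails, fall back to idling the engine so that returning the error
+ * to user-space is safe even without a fence to wait on.
+ */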
31424+void psb_fence_or_sync(struct drm_file *priv,
31425+ int engine,
31426+ struct drm_psb_cmdbuf_arg *arg,
31427+ struct drm_fence_arg *fence_arg,
31428+ struct drm_fence_object **fence_p)
31429+{
31430+ struct drm_device *dev = priv->minor->dev;
31431+ int ret;
31432+ struct drm_fence_object *fence;
31433+
31434+ ret = drm_fence_buffer_objects(dev, NULL, arg->fence_flags,
31435+ NULL, &fence);
31436+
31437+ if (ret) {
31438+
31439+ /*
31440+ * Fence creation failed.
31441+ * Fall back to synchronous operation and idle the engine.
31442+ */
31443+
31444+ psb_idle_engine(dev, engine);
31445+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
31446+
31447+ /*
31448+ * Communicate to user-space that
31449+ * fence creation has failed and that
31450+ * the engine is idle.
31451+ */
31452+
31453+ fence_arg->handle = ~0;
31454+ fence_arg->error = ret;
31455+ }
31456+
31457+ drm_putback_buffer_objects(dev);
31458+ if (fence_p)
31459+ *fence_p = NULL;
31460+ return;
31461+ }
31462+
31463+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
31464+
31465+ ret = drm_fence_add_user_object(priv, fence,
31466+ arg->fence_flags &
31467+ DRM_FENCE_FLAG_SHAREABLE);
31468+ if (!ret)
31469+ drm_fence_fill_arg(fence, fence_arg);
31470+ else {
31471+ /*
31472+ * Fence user object creation failed.
31473+ * We must idle the engine here as well, as user-
31474+			 * space expects a fence object to wait on.  Since we
31475+			 * do have a fence object, we wait for it to signal,
31476+			 * which indicates the engine is sufficiently idle.
31477+ */
31478+
31479+ (void)drm_fence_object_wait(fence, 0, 1, fence->type);
31480+ drm_fence_usage_deref_unlocked(&fence);
31481+ fence_arg->handle = ~0;
31482+ fence_arg->error = ret;
31483+ }
31484+ }
31485+
31486+ if (fence_p)
31487+ *fence_p = fence;
31488+ else if (fence)
31489+ drm_fence_usage_deref_unlocked(&fence);
31490+}
31491+
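+/*
+ * Copy per-buffer validation results back to user-space, mirroring
+ * the drm_bo operation reply.  On -EAGAIN the whole submission will
+ * be retried, so the buffers are only put back, not reported.
+ */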
31492+int psb_handle_copyback(struct drm_device *dev,
31493+ struct psb_buflist_item *buffers,
31494+ unsigned int num_buffers, int ret, void *data)
31495+{
31496+ struct drm_psb_private *dev_priv =
31497+ (struct drm_psb_private *)dev->dev_private;
31498+ struct drm_bo_op_arg arg;
31499+ struct psb_buflist_item *item = buffers;
31500+ struct drm_buffer_object *bo;
31501+ int err = ret;
31502+ int i;
31503+
31504+ /*
31505+ * Clear the unfenced use base register lists and buffer lists.
31506+ */
31507+
31508+ if (ret) {
31509+ drm_regs_fence(&dev_priv->use_manager, NULL);
31510+ drm_putback_buffer_objects(dev);
31511+ }
31512+
31513+ if (ret != -EAGAIN) {
31514+ for (i = 0; i < num_buffers; ++i) {
31515+ arg.handled = 1;
31516+ arg.d.rep.ret = item->ret;
31517+ bo = item->bo;
31518+ mutex_lock(&bo->mutex);
31519+ drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
31520+ mutex_unlock(&bo->mutex);
31521+ if (copy_to_user(item->data, &arg, sizeof(arg)))
31522+ err = -EFAULT;
31523+ ++item;
31524+ }
31525+ }
31526+
31527+ return err;
31528+}
31529+
31530+static int psb_cmdbuf_video(struct drm_file *priv,
31531+ struct drm_psb_cmdbuf_arg *arg,
31532+ unsigned int num_buffers,
31533+ struct drm_buffer_object *cmd_buffer,
31534+ struct drm_fence_arg *fence_arg)
31535+{
31536+ struct drm_device *dev = priv->minor->dev;
31537+ struct drm_fence_object *fence;
31538+ int ret;
31539+
31540+ /*
31541+	 * FIXME: this ordering looks wrong.  Fencing should happen AFTER
31542+	 * command submission, and drm_psb_idle must idle the MSVDX completely.
31543+ */
31544+
31545+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, arg, fence_arg, &fence);
31546+ ret = psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
31547+ arg->cmdbuf_size, fence);
31548+
31549+ if (ret)
31550+ return ret;
31551+
31552+ drm_fence_usage_deref_unlocked(&fence);
31553+ mutex_lock(&cmd_buffer->mutex);
31554+ if (cmd_buffer->fence != NULL)
31555+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
31556+ mutex_unlock(&cmd_buffer->mutex);
31557+ return 0;
31558+}
31559+
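+/*
+ * Validate the buffer that will receive hardware visibility-test
+ * feedback and look up the page and offset the scheduler should
+ * write to.  The buffer must be local, cached and placed so that
+ * PSB_HW_FEEDBACK_SIZE dwords fit without crossing a page boundary.
+ */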
31560+int psb_feedback_buf(struct drm_file *file_priv,
31561+ uint32_t feedback_ops,
31562+ uint32_t handle,
31563+ uint32_t offset,
31564+ uint32_t feedback_breakpoints,
31565+ uint32_t feedback_size, struct psb_feedback_info *feedback)
31566+{
31567+ struct drm_buffer_object *bo;
31568+ struct page *page;
31569+ uint32_t page_no;
31570+ uint32_t page_offset;
31571+ int ret;
31572+
31573+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
31574+ DRM_ERROR("Illegal feedback op.\n");
31575+ return -EINVAL;
31576+ }
31577+
31578+ if (feedback_breakpoints != 0) {
31579+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
31580+ return -EINVAL;
31581+ }
31582+
31583+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
31584+ DRM_ERROR("Feedback buffer size too small.\n");
31585+ return -EINVAL;
31586+ }
31587+
31588+ page_offset = offset & ~PAGE_MASK;
31589+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
31590+ < page_offset) {
31591+ DRM_ERROR("Illegal feedback buffer alignment.\n");
31592+ return -EINVAL;
31593+ }
31594+
31595+ ret = drm_bo_handle_validate(file_priv,
31596+ handle,
31597+ PSB_ENGINE_TA,
31598+ DRM_BO_FLAG_MEM_LOCAL |
31599+ DRM_BO_FLAG_CACHED |
31600+ DRM_BO_FLAG_WRITE |
31601+ PSB_BO_FLAG_FEEDBACK,
31602+ DRM_BO_MASK_MEM |
31603+ DRM_BO_FLAG_CACHED |
31604+ DRM_BO_FLAG_WRITE |
31605+ PSB_BO_FLAG_FEEDBACK, 0, 0, NULL, &bo);
31606+ if (ret)
31607+ return ret;
31608+
31609+ page_no = offset >> PAGE_SHIFT;
31610+ if (page_no >= bo->num_pages) {
31611+ ret = -EINVAL;
31612+ DRM_ERROR("Illegal feedback buffer offset.\n");
31613+ goto out_unref;
31614+ }
31615+
31616+ if (bo->ttm == NULL) {
31617+ ret = -EINVAL;
31618+ DRM_ERROR("Vistest buffer without TTM.\n");
31619+ goto out_unref;
31620+ }
31621+
31622+ page = drm_ttm_get_page(bo->ttm, page_no);
31623+ if (!page) {
31624+ ret = -ENOMEM;
31625+ goto out_unref;
31626+ }
31627+
31628+ feedback->page = page;
31629+ feedback->bo = bo;
31630+ feedback->offset = page_offset;
31631+ return 0;
31632+
31633+ out_unref:
31634+ drm_bo_usage_deref_unlocked(&bo);
31635+ return ret;
31636+}
31637+
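+/*
+ * Main command-submission ioctl: validate the buffer list, apply
+ * relocations, dispatch to the engine-specific submit path (2D,
+ * video, rasterizer or TA), then hand fence and buffer results
+ * back to user-space.
+ */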
31638+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
31639+ struct drm_file *file_priv)
31640+{
31641+ drm_psb_cmdbuf_arg_t *arg = data;
31642+ int ret = 0;
31643+ unsigned num_buffers;
31644+ struct drm_buffer_object *cmd_buffer = NULL;
31645+ struct drm_buffer_object *ta_buffer = NULL;
31646+ struct drm_buffer_object *oom_buffer = NULL;
31647+ struct drm_fence_arg fence_arg;
31648+ struct drm_psb_scene user_scene;
31649+ struct psb_scene_pool *pool = NULL;
31650+ struct psb_scene *scene = NULL;
31651+ struct drm_psb_private *dev_priv =
31652+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
31653+ int engine;
31654+ struct psb_feedback_info feedback;
31655+
31656+ if (!dev_priv)
31657+ return -EINVAL;
31658+
31659+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
31660+ if (ret)
31661+ return ret;
31662+
31663+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
31664+
31665+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
31666+ if (ret) {
31667+ drm_bo_read_unlock(&dev->bm.bm_lock);
31668+ return -EAGAIN;
31669+ }
31670+ if (unlikely(dev_priv->buffers == NULL)) {
31671+ dev_priv->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
31672+ sizeof(*dev_priv->buffers));
31673+		if (dev_priv->buffers == NULL) {
+			mutex_unlock(&dev_priv->cmdbuf_mutex);
31674+			drm_bo_read_unlock(&dev->bm.bm_lock);
31675+			return -ENOMEM;
31676+		}
31677+ }
31678+
31680+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
31681+ PSB_ENGINE_TA : arg->engine;
31682+
31683+ ret =
31684+ psb_validate_buffer_list(file_priv, engine,
31685+ (unsigned long)arg->buffer_list,
31686+ dev_priv->buffers, &num_buffers);
31687+ if (ret)
31688+ goto out_err0;
31689+
31690+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
31691+ arg->reloc_offset, arg->reloc_handle,
31692+ dev_priv->buffers, num_buffers, 0, 1);
31693+ if (ret)
31694+ goto out_err0;
31695+
31696+ mutex_lock(&dev->struct_mutex);
31697+ cmd_buffer = drm_lookup_buffer_object(file_priv, arg->cmdbuf_handle, 1);
31698+ mutex_unlock(&dev->struct_mutex);
31699+ if (!cmd_buffer) {
31700+ ret = -EINVAL;
31701+ goto out_err0;
31702+ }
31703+
31704+ switch (arg->engine) {
31705+ case PSB_ENGINE_2D:
31706+ ret = psb_cmdbuf_2d(file_priv, arg, cmd_buffer, &fence_arg);
31707+ if (ret)
31708+ goto out_err0;
31709+ break;
31710+ case PSB_ENGINE_VIDEO:
31711+ ret =
31712+ psb_cmdbuf_video(file_priv, arg, num_buffers, cmd_buffer,
31713+ &fence_arg);
31714+ if (ret)
31715+ goto out_err0;
31716+ break;
31717+ case PSB_ENGINE_RASTERIZER:
31718+ ret = psb_cmdbuf_raster(file_priv, arg, cmd_buffer, &fence_arg);
31719+ if (ret)
31720+ goto out_err0;
31721+ break;
31722+ case PSB_ENGINE_TA:
31723+ if (arg->ta_handle == arg->cmdbuf_handle) {
31724+ mutex_lock(&dev->struct_mutex);
31725+ atomic_inc(&cmd_buffer->usage);
31726+ ta_buffer = cmd_buffer;
31727+ mutex_unlock(&dev->struct_mutex);
31728+ } else {
31729+ mutex_lock(&dev->struct_mutex);
31730+ ta_buffer =
31731+ drm_lookup_buffer_object(file_priv,
31732+ arg->ta_handle, 1);
31733+ mutex_unlock(&dev->struct_mutex);
31734+ if (!ta_buffer) {
31735+ ret = -EINVAL;
31736+ goto out_err0;
31737+ }
31738+ }
31739+ if (arg->oom_size != 0) {
31740+ if (arg->oom_handle == arg->cmdbuf_handle) {
31741+ mutex_lock(&dev->struct_mutex);
31742+ atomic_inc(&cmd_buffer->usage);
31743+ oom_buffer = cmd_buffer;
31744+ mutex_unlock(&dev->struct_mutex);
31745+ } else {
31746+ mutex_lock(&dev->struct_mutex);
31747+ oom_buffer =
31748+ drm_lookup_buffer_object(file_priv,
31749+ arg->oom_handle,
31750+ 1);
31751+ mutex_unlock(&dev->struct_mutex);
31752+ if (!oom_buffer) {
31753+ ret = -EINVAL;
31754+ goto out_err0;
31755+ }
31756+ }
31757+ }
31758+
31759+		if (copy_from_user(&user_scene, (void __user *)
31760+				   ((unsigned long)arg->scene_arg),
31761+				   sizeof(user_scene))) {
31762+			ret = -EFAULT;
31763+			goto out_err0;
+		}
31764+
31765+ if (!user_scene.handle_valid) {
31766+ pool = psb_scene_pool_alloc(file_priv, 0,
31767+ user_scene.num_buffers,
31768+ user_scene.w, user_scene.h);
31769+ if (!pool) {
31770+ ret = -ENOMEM;
31771+ goto out_err0;
31772+ }
31773+
31774+ user_scene.handle = psb_scene_pool_handle(pool);
31775+ user_scene.handle_valid = 1;
31776+			if (copy_to_user((void __user *)
31777+					 ((unsigned long)arg->scene_arg),
31778+					 &user_scene, sizeof(user_scene))) {
31779+				ret = -EFAULT;
31780+				goto out_err0;
+			}
31782+ } else {
31783+ mutex_lock(&dev->struct_mutex);
31784+ pool = psb_scene_pool_lookup_devlocked(file_priv,
31785+ user_scene.
31786+ handle, 1);
31787+ mutex_unlock(&dev->struct_mutex);
31788+ if (!pool) {
31789+ ret = -EINVAL;
31790+ goto out_err0;
31791+ }
31792+ }
31793+
31794+ mutex_lock(&dev_priv->reset_mutex);
31795+ ret = psb_validate_scene_pool(pool, 0, 0, 0,
31796+ user_scene.w,
31797+ user_scene.h,
31798+ arg->ta_flags &
31799+ PSB_TA_FLAG_LASTPASS, &scene);
31800+ mutex_unlock(&dev_priv->reset_mutex);
31801+
31802+ if (ret)
31803+ goto out_err0;
31804+
31805+ memset(&feedback, 0, sizeof(feedback));
31806+ if (arg->feedback_ops) {
31807+ ret = psb_feedback_buf(file_priv,
31808+ arg->feedback_ops,
31809+ arg->feedback_handle,
31810+ arg->feedback_offset,
31811+ arg->feedback_breakpoints,
31812+ arg->feedback_size, &feedback);
31813+ if (ret)
31814+ goto out_err0;
31815+ }
31816+ ret = psb_cmdbuf_ta(file_priv, arg, cmd_buffer, ta_buffer,
31817+ oom_buffer, scene, &feedback, &fence_arg);
31818+ if (ret)
31819+ goto out_err0;
31820+ break;
31821+ default:
31822+ DRM_ERROR("Unimplemented command submission mechanism (%x).\n",
31823+ arg->engine);
31824+ ret = -EINVAL;
31825+ goto out_err0;
31826+ }
31827+
31828+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
31829+		if (copy_to_user((void __user *)
31830+				 ((unsigned long)arg->fence_arg),
31831+				 &fence_arg, sizeof(fence_arg)))
+			ret = -EFAULT;
31832+	}
31833+
31834+ out_err0:
31835+ ret =
31836+ psb_handle_copyback(dev, dev_priv->buffers, num_buffers, ret, data);
31837+ mutex_lock(&dev->struct_mutex);
31838+ if (scene)
31839+ psb_scene_unref_devlocked(&scene);
31840+ if (pool)
31841+ psb_scene_pool_unref_devlocked(&pool);
31842+ if (cmd_buffer)
31843+ drm_bo_usage_deref_locked(&cmd_buffer);
31844+ if (ta_buffer)
31845+ drm_bo_usage_deref_locked(&ta_buffer);
31846+ if (oom_buffer)
31847+ drm_bo_usage_deref_locked(&oom_buffer);
31848+
31849+ psb_dereference_buffers_locked(dev_priv->buffers, num_buffers);
31850+ mutex_unlock(&dev->struct_mutex);
31851+ mutex_unlock(&dev_priv->cmdbuf_mutex);
31852+
31853+ drm_bo_read_unlock(&dev->bm.bm_lock);
31854+ return ret;
31855+}
31856Index: linux-2.6.27/drivers/gpu/drm/psb/psb_xhw.c
31857===================================================================
31858--- /dev/null 1970-01-01 00:00:00.000000000 +0000
31859+++ linux-2.6.27/drivers/gpu/drm/psb/psb_xhw.c 2009-02-05 13:29:33.000000000 +0000
31860@@ -0,0 +1,614 @@
31861+/**************************************************************************
31862+ * Copyright (c) 2007, Intel Corporation.
31863+ * All Rights Reserved.
31864+ *
31865+ * This program is free software; you can redistribute it and/or modify it
31866+ * under the terms and conditions of the GNU General Public License,
31867+ * version 2, as published by the Free Software Foundation.
31868+ *
31869+ * This program is distributed in the hope it will be useful, but WITHOUT
31870+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
31871+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
31872+ * more details.
31873+ *
31874+ * You should have received a copy of the GNU General Public License along with
31875+ * this program; if not, write to the Free Software Foundation, Inc.,
31876+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
31877+ *
31878+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
31879+ * develop this driver.
31880+ *
31881+ **************************************************************************/
31882+/*
31883+ * Make calls into closed source X server code.
31884+ */
31885+
31886+#include "drmP.h"
31887+#include "psb_drv.h"
31888+
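+/*
+ * Request/reply protocol: requests are queued on dev_priv->xhw_in and
+ * fetched by the X server through psb_xhw_ioctl(), which copies the
+ * argument block into the shared communications buffer.  Replies for
+ * requests with copy_back set arrive via psb_xhw_handler(); callers
+ * sleep on xhw_caller_queue until buf->done is set.
+ */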
31889+void
31890+psb_xhw_clean_buf(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
31891+{
31892+ unsigned long irq_flags;
31893+
31894+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
31895+ list_del_init(&buf->head);
31896+ if (dev_priv->xhw_cur_buf == buf)
31897+ dev_priv->xhw_cur_buf = NULL;
31898+ atomic_set(&buf->done, 1);
31899+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
31900+}
31901+
31902+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
31903+ struct psb_xhw_buf *buf)
31904+{
31905+ unsigned long irq_flags;
31906+
31907+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
31908+ atomic_set(&buf->done, 0);
31909+ if (unlikely(!dev_priv->xhw_submit_ok)) {
31910+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
31911+ DRM_ERROR("No Xpsb 3D extension available.\n");
31912+ return -EINVAL;
31913+ }
31914+ if (!list_empty(&buf->head)) {
31915+ DRM_ERROR("Recursive list adding.\n");
31916+ goto out;
31917+ }
31918+ list_add_tail(&buf->head, &dev_priv->xhw_in);
31919+ wake_up_interruptible(&dev_priv->xhw_queue);
31920+ out:
31921+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
31922+ return 0;
31923+}
31924+
31925+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
31926+ struct psb_xhw_buf *buf,
31927+ uint32_t w,
31928+ uint32_t h,
31929+ uint32_t * hw_cookie,
31930+ uint32_t * bo_size,
31931+ uint32_t * clear_p_start, uint32_t * clear_num_pages)
31932+{
31933+ struct drm_psb_xhw_arg *xa = &buf->arg;
31934+ int ret;
31935+
31936+ buf->copy_back = 1;
31937+ xa->op = PSB_XHW_SCENE_INFO;
31938+ xa->irq_op = 0;
31939+ xa->issue_irq = 0;
31940+ xa->arg.si.w = w;
31941+ xa->arg.si.h = h;
31942+
31943+ ret = psb_xhw_add(dev_priv, buf);
31944+ if (ret)
31945+ return ret;
31946+
31947+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
31948+ atomic_read(&buf->done), DRM_HZ);
31949+
31950+ if (!atomic_read(&buf->done)) {
31951+ psb_xhw_clean_buf(dev_priv, buf);
31952+ return -EBUSY;
31953+ }
31954+
31955+ if (!xa->ret) {
31956+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
31957+ *bo_size = xa->arg.si.size;
31958+ *clear_p_start = xa->arg.si.clear_p_start;
31959+ *clear_num_pages = xa->arg.si.clear_num_pages;
31960+ }
31961+ return xa->ret;
31962+}
31963+
31964+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
31965+ struct psb_xhw_buf *buf, uint32_t fire_flags)
31966+{
31967+ struct drm_psb_xhw_arg *xa = &buf->arg;
31968+
31969+ buf->copy_back = 0;
31970+ xa->op = PSB_XHW_FIRE_RASTER;
31971+ xa->issue_irq = 0;
31972+ xa->arg.sb.fire_flags = 0;
31973+
31974+ return psb_xhw_add(dev_priv, buf);
31975+}
31976+
31977+int psb_xhw_vistest(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
31978+{
31979+ struct drm_psb_xhw_arg *xa = &buf->arg;
31980+
31981+ buf->copy_back = 1;
31982+ xa->op = PSB_XHW_VISTEST;
31983+ /*
31984+ * Could perhaps decrease latency somewhat by
31985+ * issuing an irq in this case.
31986+ */
31987+ xa->issue_irq = 0;
31988+ xa->irq_op = PSB_UIRQ_VISTEST;
31989+ return psb_xhw_add(dev_priv, buf);
31990+}
31991+
31992+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
31993+ struct psb_xhw_buf *buf,
31994+ uint32_t fire_flags,
31995+ uint32_t hw_context,
31996+ uint32_t * cookie,
31997+ uint32_t * oom_cmds,
31998+ uint32_t num_oom_cmds,
31999+ uint32_t offset, uint32_t engine, uint32_t flags)
32000+{
32001+ struct drm_psb_xhw_arg *xa = &buf->arg;
32002+
32003+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
32004+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
32005+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
32006+ if (unlikely(buf->copy_back))
32007+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
32008+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
32009+ else
32010+ xa->irq_op = 0;
32011+ xa->arg.sb.fire_flags = fire_flags;
32012+ xa->arg.sb.hw_context = hw_context;
32013+ xa->arg.sb.offset = offset;
32014+ xa->arg.sb.engine = engine;
32015+ xa->arg.sb.flags = flags;
32016+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
32017+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
32018+ if (num_oom_cmds)
32019+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
32020+ sizeof(uint32_t) * num_oom_cmds);
32021+ return psb_xhw_add(dev_priv, buf);
32022+}
32023+
32024+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
32025+{
32026+ struct drm_psb_xhw_arg *xa = &buf->arg;
32027+ int ret;
32028+
32029+ buf->copy_back = 1;
32030+ xa->op = PSB_XHW_RESET_DPM;
32031+ xa->issue_irq = 0;
32032+ xa->irq_op = 0;
32033+
32034+ ret = psb_xhw_add(dev_priv, buf);
32035+ if (ret)
32036+ return ret;
32037+
32038+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32039+ atomic_read(&buf->done), 3 * DRM_HZ);
32040+
32041+ if (!atomic_read(&buf->done)) {
32042+ psb_xhw_clean_buf(dev_priv, buf);
32043+ return -EBUSY;
32044+ }
32045+
32046+ return xa->ret;
32047+}
32048+
32049+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
32050+ struct psb_xhw_buf *buf, uint32_t * value)
32051+{
32052+ struct drm_psb_xhw_arg *xa = &buf->arg;
32053+ int ret;
32054+
32055+ *value = 0;
32056+
32057+ buf->copy_back = 1;
32058+ xa->op = PSB_XHW_CHECK_LOCKUP;
32059+ xa->issue_irq = 0;
32060+ xa->irq_op = 0;
32061+
32062+ ret = psb_xhw_add(dev_priv, buf);
32063+ if (ret)
32064+ return ret;
32065+
32066+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32067+ atomic_read(&buf->done), DRM_HZ * 3);
32068+
32069+ if (!atomic_read(&buf->done)) {
32070+ psb_xhw_clean_buf(dev_priv, buf);
32071+ return -EBUSY;
32072+ }
32073+
32074+ if (!xa->ret)
32075+ *value = xa->arg.cl.value;
32076+
32077+ return xa->ret;
32078+}
32079+
32080+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
32081+ struct psb_xhw_buf *buf)
32082+{
32083+ struct drm_psb_xhw_arg *xa = &buf->arg;
32084+ unsigned long irq_flags;
32085+
32086+ buf->copy_back = 0;
32087+ xa->op = PSB_XHW_TERMINATE;
32088+ xa->issue_irq = 0;
32089+
32090+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32091+ dev_priv->xhw_submit_ok = 0;
32092+ atomic_set(&buf->done, 0);
32093+ if (!list_empty(&buf->head)) {
32094+ DRM_ERROR("Recursive list adding.\n");
32095+ goto out;
32096+ }
32097+ list_add_tail(&buf->head, &dev_priv->xhw_in);
32098+ out:
32099+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32100+ wake_up_interruptible(&dev_priv->xhw_queue);
32101+
32102+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32103+ atomic_read(&buf->done), DRM_HZ / 10);
32104+
32105+ if (!atomic_read(&buf->done)) {
32106+ DRM_ERROR("Xpsb terminate timeout.\n");
32107+ psb_xhw_clean_buf(dev_priv, buf);
32108+ return -EBUSY;
32109+ }
32110+
32111+ return 0;
32112+}
32113+
32114+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
32115+ struct psb_xhw_buf *buf,
32116+ uint32_t pages, uint32_t * hw_cookie, uint32_t * size)
32117+{
32118+ struct drm_psb_xhw_arg *xa = &buf->arg;
32119+ int ret;
32120+
32121+ buf->copy_back = 1;
32122+ xa->op = PSB_XHW_TA_MEM_INFO;
32123+ xa->issue_irq = 0;
32124+ xa->irq_op = 0;
32125+ xa->arg.bi.pages = pages;
32126+
32127+ ret = psb_xhw_add(dev_priv, buf);
32128+ if (ret)
32129+ return ret;
32130+
32131+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32132+ atomic_read(&buf->done), DRM_HZ);
32133+
32134+ if (!atomic_read(&buf->done)) {
32135+ psb_xhw_clean_buf(dev_priv, buf);
32136+ return -EBUSY;
32137+ }
32138+
32139+ if (!xa->ret)
32140+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
32141+
32142+ *size = xa->arg.bi.size;
32143+ return xa->ret;
32144+}
32145+
32146+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
32147+ struct psb_xhw_buf *buf,
32148+ uint32_t flags,
32149+ uint32_t param_offset,
32150+ uint32_t pt_offset, uint32_t * hw_cookie)
32151+{
32152+ struct drm_psb_xhw_arg *xa = &buf->arg;
32153+ int ret;
32154+
32155+ buf->copy_back = 1;
32156+ xa->op = PSB_XHW_TA_MEM_LOAD;
32157+ xa->issue_irq = 0;
32158+ xa->irq_op = 0;
32159+ xa->arg.bl.flags = flags;
32160+ xa->arg.bl.param_offset = param_offset;
32161+ xa->arg.bl.pt_offset = pt_offset;
32162+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
32163+
32164+ ret = psb_xhw_add(dev_priv, buf);
32165+ if (ret)
32166+ return ret;
32167+
32168+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32169+ atomic_read(&buf->done), 3 * DRM_HZ);
32170+
32171+ if (!atomic_read(&buf->done)) {
32172+ psb_xhw_clean_buf(dev_priv, buf);
32173+ return -EBUSY;
32174+ }
32175+
32176+ if (!xa->ret)
32177+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
32178+
32179+ return xa->ret;
32180+}
32181+
32182+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
32183+ struct psb_xhw_buf *buf, uint32_t * cookie)
32184+{
32185+ struct drm_psb_xhw_arg *xa = &buf->arg;
32186+
32187+ /*
32188+ * This calls the extensive closed source
32189+ * OOM handler, which resolves the condition and
32190+ * sends a reply telling the scheduler what to do
32191+ * with the task.
32192+ */
32193+
32194+ buf->copy_back = 1;
32195+ xa->op = PSB_XHW_OOM;
32196+ xa->issue_irq = 1;
32197+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
32198+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
32199+
32200+ return psb_xhw_add(dev_priv, buf);
32201+}
32202+
32203+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
32204+ struct psb_xhw_buf *buf,
32205+ uint32_t * cookie,
32206+ uint32_t * bca, uint32_t * rca, uint32_t * flags)
32207+{
32208+ struct drm_psb_xhw_arg *xa = &buf->arg;
32209+
32210+ /*
32211+ * Get info about how to schedule an OOM task.
32212+ */
32213+
32214+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
32215+ *bca = xa->arg.oom.bca;
32216+ *rca = xa->arg.oom.rca;
32217+ *flags = xa->arg.oom.flags;
32218+}
32219+
32220+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
32221+ struct psb_xhw_buf *buf, uint32_t * cookie)
32222+{
32223+ struct drm_psb_xhw_arg *xa = &buf->arg;
32224+
32225+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
32226+}
32227+
32228+int psb_xhw_resume(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
32229+{
32230+ struct drm_psb_xhw_arg *xa = &buf->arg;
32231+
32232+ buf->copy_back = 0;
32233+ xa->op = PSB_XHW_RESUME;
32234+ xa->issue_irq = 0;
32235+ xa->irq_op = 0;
32236+ return psb_xhw_add(dev_priv, buf);
32237+}
32238+
32239+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
32240+{
32241+}
32242+
32243+int psb_xhw_init(struct drm_device *dev)
32244+{
32245+ struct drm_psb_private *dev_priv =
32246+ (struct drm_psb_private *)dev->dev_private;
32247+ unsigned long irq_flags;
32248+
32249+ INIT_LIST_HEAD(&dev_priv->xhw_in);
32250+	spin_lock_init(&dev_priv->xhw_lock);
32251+ atomic_set(&dev_priv->xhw_client, 0);
32252+ init_waitqueue_head(&dev_priv->xhw_queue);
32253+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
32254+ mutex_init(&dev_priv->xhw_mutex);
32255+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32256+ dev_priv->xhw_on = 0;
32257+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32258+
32259+ return 0;
32260+}
32261+
32262+static int psb_xhw_init_init(struct drm_device *dev,
32263+ struct drm_file *file_priv,
32264+ struct drm_psb_xhw_init_arg *arg)
32265+{
32266+ struct drm_psb_private *dev_priv =
32267+ (struct drm_psb_private *)dev->dev_private;
32268+ int ret;
32269+ int is_iomem;
32270+
32271+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
32272+ unsigned long irq_flags;
32273+
32274+ mutex_lock(&dev->struct_mutex);
32275+ dev_priv->xhw_bo =
32276+ drm_lookup_buffer_object(file_priv, arg->buffer_handle, 1);
32277+ mutex_unlock(&dev->struct_mutex);
32278+ if (!dev_priv->xhw_bo) {
32279+ ret = -EINVAL;
32280+ goto out_err;
32281+ }
32282+ ret = drm_bo_kmap(dev_priv->xhw_bo, 0,
32283+ dev_priv->xhw_bo->num_pages,
32284+ &dev_priv->xhw_kmap);
32285+ if (ret) {
32286+ DRM_ERROR("Failed mapping X server "
32287+ "communications buffer.\n");
32288+ goto out_err0;
32289+ }
32290+ dev_priv->xhw = drm_bmo_virtual(&dev_priv->xhw_kmap, &is_iomem);
32291+ if (is_iomem) {
32292+			DRM_ERROR("X server communications buffer "
32293+				  "is in device memory.\n");
32294+ ret = -EINVAL;
32295+ goto out_err1;
32296+ }
32297+ dev_priv->xhw_file = file_priv;
32298+
32299+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32300+ dev_priv->xhw_on = 1;
32301+ dev_priv->xhw_submit_ok = 1;
32302+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32303+ return 0;
32304+ } else {
32305+ DRM_ERROR("Xhw is already initialized.\n");
32306+ return -EBUSY;
32307+ }
32308+ out_err1:
32309+ dev_priv->xhw = NULL;
32310+ drm_bo_kunmap(&dev_priv->xhw_kmap);
32311+ out_err0:
32312+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
32313+ out_err:
32314+ atomic_dec(&dev_priv->xhw_client);
32315+ return ret;
32316+}
32317+
32318+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
32319+{
32320+ struct psb_xhw_buf *cur_buf, *next;
32321+ unsigned long irq_flags;
32322+
32323+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32324+ dev_priv->xhw_submit_ok = 0;
32325+
32326+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
32327+ list_del_init(&cur_buf->head);
32328+ if (cur_buf->copy_back) {
32329+ cur_buf->arg.ret = -EINVAL;
32330+ }
32331+ atomic_set(&cur_buf->done, 1);
32332+ }
32333+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32334+ wake_up(&dev_priv->xhw_caller_queue);
32335+}
32336+
32337+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
32338+ struct drm_file *file_priv, int closing)
32339+{
32340+
32341+ if (dev_priv->xhw_file == file_priv &&
32342+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
32343+
32344+ if (closing)
32345+ psb_xhw_queue_empty(dev_priv);
32346+ else {
32347+ struct psb_xhw_buf buf;
32348+ INIT_LIST_HEAD(&buf.head);
32349+
32350+ psb_xhw_terminate(dev_priv, &buf);
32351+ psb_xhw_queue_empty(dev_priv);
32352+ }
32353+
32354+ dev_priv->xhw = NULL;
32355+ drm_bo_kunmap(&dev_priv->xhw_kmap);
32356+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
32357+ dev_priv->xhw_file = NULL;
32358+ }
32359+}
32360+
32361+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
32362+ struct drm_file *file_priv)
32363+{
32364+ struct drm_psb_xhw_init_arg *arg = (struct drm_psb_xhw_init_arg *)data;
32365+ struct drm_psb_private *dev_priv =
32366+ (struct drm_psb_private *)dev->dev_private;
32367+
32368+ switch (arg->operation) {
32369+ case PSB_XHW_INIT:
32370+ return psb_xhw_init_init(dev, file_priv, arg);
32371+ case PSB_XHW_TAKEDOWN:
32372+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
32373+ }
32374+ return 0;
32375+}
32376+
32377+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
32378+{
32379+ int empty;
32380+ unsigned long irq_flags;
32381+
32382+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32383+ empty = list_empty(&dev_priv->xhw_in);
32384+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32385+ return empty;
32386+}
32387+
32388+int psb_xhw_handler(struct drm_psb_private *dev_priv)
32389+{
32390+ unsigned long irq_flags;
32391+ struct drm_psb_xhw_arg *xa;
32392+ struct psb_xhw_buf *buf;
32393+
32394+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32395+
32396+ if (!dev_priv->xhw_on) {
32397+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32398+ return -EINVAL;
32399+ }
32400+
32401+ buf = dev_priv->xhw_cur_buf;
32402+ if (buf && buf->copy_back) {
32403+ xa = &buf->arg;
32404+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
32405+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
32406+ atomic_set(&buf->done, 1);
32407+ wake_up(&dev_priv->xhw_caller_queue);
32408+	} else {
32409+		dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
+	}
32410+
32411+	dev_priv->xhw_cur_buf = NULL;
32412+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32413+ return 0;
32414+}
32415+
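+/*
+ * Called by the X server to fetch the next request.  Blocks until one
+ * is queued; a request with copy_back set stays current until its
+ * reply comes back through psb_xhw_handler().
+ */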
32416+int psb_xhw_ioctl(struct drm_device *dev, void *data,
32417+ struct drm_file *file_priv)
32418+{
32419+ struct drm_psb_private *dev_priv =
32420+ (struct drm_psb_private *)dev->dev_private;
32421+ unsigned long irq_flags;
32422+ struct drm_psb_xhw_arg *xa;
32423+ int ret;
32424+ struct list_head *list;
32425+ struct psb_xhw_buf *buf;
32426+
32427+ if (!dev_priv)
32428+ return -EINVAL;
32429+
32430+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
32431+ return -EAGAIN;
32432+
32433+ if (psb_forced_user_interrupt(dev_priv)) {
32434+ mutex_unlock(&dev_priv->xhw_mutex);
32435+ return -EINVAL;
32436+ }
32437+
32438+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32439+ while (list_empty(&dev_priv->xhw_in)) {
32440+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32441+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
32442+ !psb_xhw_in_empty
32443+ (dev_priv), DRM_HZ);
32444+ if (ret == -ERESTARTSYS || ret == 0) {
32445+ mutex_unlock(&dev_priv->xhw_mutex);
32446+ return -EAGAIN;
32447+ }
32448+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32449+ }
32450+
32451+ list = dev_priv->xhw_in.next;
32452+ list_del_init(list);
32453+
32454+ buf = list_entry(list, struct psb_xhw_buf, head);
32455+ xa = &buf->arg;
32456+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
32457+
32458+ if (unlikely(buf->copy_back))
32459+ dev_priv->xhw_cur_buf = buf;
32460+ else {
32461+ atomic_set(&buf->done, 1);
32462+ dev_priv->xhw_cur_buf = NULL;
32463+ }
32464+
32465+ if (xa->op == PSB_XHW_TERMINATE) {
32466+ dev_priv->xhw_on = 0;
32467+ wake_up(&dev_priv->xhw_caller_queue);
32468+ }
32469+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32470+
32471+ mutex_unlock(&dev_priv->xhw_mutex);
32472+
32473+ return 0;
32474+}
32475Index: linux-2.6.27/drivers/gpu/drm/Kconfig
32476===================================================================
32477--- linux-2.6.27.orig/drivers/gpu/drm/Kconfig 2008-10-09 23:13:53.000000000 +0100
32478+++ linux-2.6.27/drivers/gpu/drm/Kconfig 2009-02-05 13:29:33.000000000 +0000
32479@@ -105,3 +105,9 @@
32480 help
32481 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
32482 chipset. If M is selected the module will be called savage.
32483+
32484+config DRM_PSB
32485+ tristate "Intel Poulsbo"
32486+ depends on DRM && PCI && I2C_ALGOBIT
32487+ help
32488+ Choose this option if you have an Intel Poulsbo chipset.
32489Index: linux-2.6.27/include/drm/drm_crtc.h
32490===================================================================
32491--- /dev/null 1970-01-01 00:00:00.000000000 +0000
32492+++ linux-2.6.27/include/drm/drm_crtc.h 2009-02-05 13:29:33.000000000 +0000
32493@@ -0,0 +1,592 @@
32494+/*
32495+ * Copyright © 2006 Keith Packard
32496+ * Copyright © 2007 Intel Corporation
32497+ * Jesse Barnes <jesse.barnes@intel.com>
32498+ */
32499+#ifndef __DRM_CRTC_H__
32500+#define __DRM_CRTC_H__
32501+
32502+#include <linux/i2c.h>
32503+#include <linux/spinlock.h>
32504+#include <linux/types.h>
32505+#include <linux/idr.h>
32506+
32507+#include <linux/fb.h>
32508+
32509+struct drm_device;
32510+
32511+/*
32512+ * Note on terminology: here, for brevity and convenience, we refer to output
32513+ * control chips as 'CRTCs'. They can control any type of output, VGA, LVDS,
32514+ * DVI, etc. And 'screen' refers to the whole of the visible display, which
32515+ * may span multiple monitors (and therefore multiple CRTC and output
32516+ * structures).
32517+ */
32518+
32519+enum drm_mode_status {
32520+ MODE_OK = 0, /* Mode OK */
32521+ MODE_HSYNC, /* hsync out of range */
32522+ MODE_VSYNC, /* vsync out of range */
32523+ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
32524+	MODE_V_ILLEGAL,		/* mode has illegal vertical timings */
32525+ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
32526+	MODE_NOMODE,		/* no mode with a matching name */
32527+ MODE_NO_INTERLACE, /* interlaced mode not supported */
32528+ MODE_NO_DBLESCAN, /* doublescan mode not supported */
32529+ MODE_NO_VSCAN, /* multiscan mode not supported */
32530+ MODE_MEM, /* insufficient video memory */
32531+ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
32532+ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
32533+ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
32534+ MODE_NOCLOCK, /* no fixed clock available */
32535+ MODE_CLOCK_HIGH, /* clock required is too high */
32536+ MODE_CLOCK_LOW, /* clock required is too low */
32537+ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
32538+ MODE_BAD_HVALUE, /* horizontal timing was out of range */
32539+ MODE_BAD_VVALUE, /* vertical timing was out of range */
32540+ MODE_BAD_VSCAN, /* VScan value out of range */
32541+ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
32542+ MODE_HSYNC_WIDE, /* horizontal sync too wide */
32543+ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
32544+ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
32545+ MODE_VSYNC_NARROW, /* vertical sync too narrow */
32546+ MODE_VSYNC_WIDE, /* vertical sync too wide */
32547+ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
32548+ MODE_VBLANK_WIDE, /* vertical blanking too wide */
32549+ MODE_PANEL, /* exceeds panel dimensions */
32550+ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
32551+ MODE_ONE_WIDTH, /* only one width is supported */
32552+ MODE_ONE_HEIGHT, /* only one height is supported */
32553+ MODE_ONE_SIZE, /* only one resolution is supported */
32554+ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
32555+	MODE_UNVERIFIED = -3,	/* mode needs to be reverified */
32556+ MODE_BAD = -2, /* unspecified reason */
32557+ MODE_ERROR = -1 /* error condition */
32558+};
32559+
32560+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
32561+ DRM_MODE_TYPE_CRTC_C)
32562+
32563+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
32564+ .name = nm, .status = 0, .type = (t), .clock = (c), \
32565+ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
32566+ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
32567+ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
32568+ .vscan = (vs), .flags = (f), .vrefresh = 0
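+/* Convenience initializer for a struct drm_display_mode; vrefresh is
+ * left zero so it can be derived from the timings later. */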
32569+
32570+struct drm_display_mode {
32571+ /* Header */
32572+ struct list_head head;
32573+ char name[DRM_DISPLAY_MODE_LEN];
32574+ int mode_id;
32575+ int output_count;
32576+ enum drm_mode_status status;
32577+ int type;
32578+
32579+ /* Proposed mode values */
32580+ int clock;
32581+ int hdisplay;
32582+ int hsync_start;
32583+ int hsync_end;
32584+ int htotal;
32585+ int hskew;
32586+ int vdisplay;
32587+ int vsync_start;
32588+ int vsync_end;
32589+ int vtotal;
32590+ int vscan;
32591+ unsigned int flags;
32592+
32593+ /* Actual mode we give to hw */
32594+ int clock_index;
32595+ int synth_clock;
32596+ int crtc_hdisplay;
32597+ int crtc_hblank_start;
32598+ int crtc_hblank_end;
32599+ int crtc_hsync_start;
32600+ int crtc_hsync_end;
32601+ int crtc_htotal;
32602+ int crtc_hskew;
32603+ int crtc_vdisplay;
32604+ int crtc_vblank_start;
32605+ int crtc_vblank_end;
32606+ int crtc_vsync_start;
32607+ int crtc_vsync_end;
32608+ int crtc_vtotal;
32609+ int crtc_hadjusted;
32610+ int crtc_vadjusted;
32611+
32612+ /* Driver private mode info */
32613+ int private_size;
32614+ int *private;
32615+ int private_flags;
32616+
32617+ int vrefresh;
32618+ float hsync;
32619+};
32620+
32621+/* Video mode flags */
32622+#define V_PHSYNC (1<<0)
32623+#define V_NHSYNC (1<<1)
32624+#define V_PVSYNC (1<<2)
32625+#define V_NVSYNC (1<<3)
32626+#define V_INTERLACE (1<<4)
32627+#define V_DBLSCAN (1<<5)
32628+#define V_CSYNC (1<<6)
32629+#define V_PCSYNC (1<<7)
32630+#define V_NCSYNC (1<<8)
32631+#define V_HSKEW (1<<9) /* hskew provided */
32632+#define V_BCAST (1<<10)
32633+#define V_PIXMUX (1<<11)
32634+#define V_DBLCLK (1<<12)
32635+#define V_CLKDIV2 (1<<13)
32636+
32637+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
32638+#define DPMSModeOn 0
32639+#define DPMSModeStandby 1
32640+#define DPMSModeSuspend 2
32641+#define DPMSModeOff 3
32642+
32643+enum drm_output_status {
32644+ output_status_connected = 1,
32645+ output_status_disconnected = 2,
32646+ output_status_unknown = 3,
32647+};
32648+
32649+enum subpixel_order {
32650+ SubPixelUnknown = 0,
32651+ SubPixelHorizontalRGB,
32652+ SubPixelHorizontalBGR,
32653+ SubPixelVerticalRGB,
32654+ SubPixelVerticalBGR,
32655+ SubPixelNone,
32656+};
32657+
32658+/*
32659+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
32660+ */
32661+struct drm_display_info {
32662+ char name[DRM_DISPLAY_INFO_LEN];
32663+ /* Input info */
32664+ bool serration_vsync;
32665+ bool sync_on_green;
32666+ bool composite_sync;
32667+ bool separate_syncs;
32668+ bool blank_to_black;
32669+ unsigned char video_level;
32670+ bool digital;
32671+ /* Physical size */
32672+ unsigned int width_mm;
32673+ unsigned int height_mm;
32674+
32675+ /* Display parameters */
32676+ unsigned char gamma; /* FIXME: storage format */
32677+ bool gtf_supported;
32678+ bool standard_color;
32679+ enum {
32680+ monochrome,
32681+ rgb,
32682+ other,
32683+ unknown,
32684+ } display_type;
32685+ bool active_off_supported;
32686+ bool suspend_supported;
32687+ bool standby_supported;
32688+
32689+ /* Color info FIXME: storage format */
32690+ unsigned short redx, redy;
32691+ unsigned short greenx, greeny;
32692+ unsigned short bluex, bluey;
32693+ unsigned short whitex, whitey;
32694+
32695+ /* Clock limits FIXME: storage format */
32696+ unsigned int min_vfreq, max_vfreq;
32697+ unsigned int min_hfreq, max_hfreq;
32698+ unsigned int pixel_clock;
32699+
32700+ /* White point indices FIXME: storage format */
32701+ unsigned int wpx1, wpy1;
32702+ unsigned int wpgamma1;
32703+ unsigned int wpx2, wpy2;
32704+ unsigned int wpgamma2;
32705+
32706+ /* Preferred mode (if any) */
32707+ struct drm_display_mode *preferred_mode;
32708+ char *raw_edid; /* if any */
32709+};
32710+
32711+struct drm_framebuffer {
32712+ struct drm_device *dev;
32713+ struct list_head head;
32714+ int id; /* idr assigned */
32715+ unsigned int pitch;
32716+ unsigned long offset;
32717+ unsigned int width;
32718+ unsigned int height;
32719+ /* depth can be 15 or 16 */
32720+ unsigned int depth;
32721+ int bits_per_pixel;
32722+ int flags;
32723+ struct drm_buffer_object *bo;
32724+ void *fbdev;
32725+ u32 pseudo_palette[16];
32726+ struct drm_bo_kmap_obj kmap;
32727+ struct list_head filp_head;
32728+};
32729+
32730+struct drm_property_enum {
32731+ struct list_head head;
32732+ uint32_t value;
32733+ unsigned char name[DRM_PROP_NAME_LEN];
32734+};
32735+
32736+struct drm_property {
32737+ struct list_head head;
32738+ int id; /* idr assigned */
32739+ uint32_t flags;
32740+ char name[DRM_PROP_NAME_LEN];
32741+ uint32_t num_values;
32742+ uint32_t *values;
32743+
32744+ struct list_head enum_list;
32745+};
32746+
32747+struct drm_crtc;
32748+struct drm_output;
32749+
32750+/**
32751+ * drm_crtc_funcs - control CRTCs for a given device
32752+ * @dpms: control display power levels
32753+ * @save: save CRTC state
32754+ * @restore: restore CRTC state
32755+ * @lock: lock the CRTC
32756+ * @unlock: unlock the CRTC
32757+ * @shadow_allocate: allocate shadow pixmap
32758+ * @shadow_create: create shadow pixmap for rotation support
32759+ * @shadow_destroy: free shadow pixmap
32760+ * @mode_fixup: fixup proposed mode
32761+ * @mode_set: set the desired mode on the CRTC
32762+ * @gamma_set: specify color ramp for CRTC
32763+ * @cleanup: cleanup driver private state prior to close
32764+ *
32765+ * The drm_crtc_funcs structure is the central CRTC management structure
32766+ * in the DRM. Each CRTC controls one or more outputs (note that the name
32767+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
32768+ * outputs, not just CRTs).
32769+ *
32770+ * Each driver is responsible for filling out this structure at startup time,
32771+ * in addition to providing other modesetting features, like i2c and DDC
32772+ * bus accessors.
32773+ */
32774+struct drm_crtc_funcs {
32775+ /*
32776+ * Control power levels on the CRTC. If the mode passed in is
32777+ * unsupported, the provider must use the next lowest power level.
32778+ */
32779+ void (*dpms)(struct drm_crtc *crtc, int mode);
32780+
32781+ /* JJJ: Are these needed? */
32782+ /* Save CRTC state */
32783+ void (*save)(struct drm_crtc *crtc); /* suspend? */
32784+ /* Restore CRTC state */
32785+ void (*restore)(struct drm_crtc *crtc); /* resume? */
32786+ bool (*lock)(struct drm_crtc *crtc);
32787+ void (*unlock)(struct drm_crtc *crtc);
32788+
32789+ void (*prepare)(struct drm_crtc *crtc);
32790+ void (*commit)(struct drm_crtc *crtc);
32791+
32792+ /* Provider can fixup or change mode timings before modeset occurs */
32793+ bool (*mode_fixup)(struct drm_crtc *crtc,
32794+ struct drm_display_mode *mode,
32795+ struct drm_display_mode *adjusted_mode);
32796+ /* Actually set the mode */
32797+ void (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
32798+ struct drm_display_mode *adjusted_mode, int x, int y);
32799+ /* Set gamma on the CRTC */
32800+ void (*gamma_set)(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
32801+ int regno);
32802+ /* Driver cleanup routine */
32803+ void (*cleanup)(struct drm_crtc *crtc);
32804+};
32805+
32806+/**
32807+ * drm_crtc - central CRTC control structure
32808+ * @enabled: is this CRTC enabled?
32809+ * @x: x position on screen
32810+ * @y: y position on screen
32811+ * @desired_mode: new desired mode
32812+ * @desired_x: desired x for desired_mode
32813+ * @desired_y: desired y for desired_mode
32814+ * @funcs: CRTC control functions
32815+ * @driver_private: arbitrary driver data
32816+ *
32817+ * Each CRTC may have one or more outputs associated with it. This structure
32818+ * allows the CRTC to be controlled.
32819+ */
32820+struct drm_crtc {
32821+ struct drm_device *dev;
32822+ struct list_head head;
32823+
32824+ int id; /* idr assigned */
32825+
32826+ /* framebuffer the output is currently bound to */
32827+ struct drm_framebuffer *fb;
32828+
32829+ bool enabled;
32830+
32831+ /* JJJ: are these needed? */
32832+ bool cursor_in_range;
32833+ bool cursor_shown;
32834+
32835+ struct drm_display_mode mode;
32836+
32837+ int x, y;
32838+ struct drm_display_mode *desired_mode;
32839+ int desired_x, desired_y;
32840+ const struct drm_crtc_funcs *funcs;
32841+ void *driver_private;
32842+
32843+ /* RRCrtcPtr randr_crtc? */
32844+};
32845+
32846+extern struct drm_crtc *drm_crtc_create(struct drm_device *dev,
32847+ const struct drm_crtc_funcs *funcs);
32848+
32849+/**
32850+ * drm_output_funcs - control outputs on a given device
32851+ * @init: setup this output
32852+ * @dpms: set power state (see drm_crtc_funcs above)
32853+ * @save: save output state
32854+ * @restore: restore output state
32855+ * @mode_valid: is this mode valid on the given output?
32856+ * @mode_fixup: try to fixup proposed mode for this output
32857+ * @mode_set: set this mode
32858+ * @detect: is this output active?
32859+ * @get_modes: get mode list for this output
32860+ * @set_property: property for this output may need update
32861+ * @cleanup: output is going away, cleanup
32862+ *
32863+ * Each CRTC may have one or more outputs attached to it. The functions
32864+ * below allow the core DRM code to control outputs, enumerate available modes,
32865+ * etc.
32866+ */
32867+struct drm_output_funcs {
32868+ void (*init)(struct drm_output *output);
32869+ void (*dpms)(struct drm_output *output, int mode);
32870+ void (*save)(struct drm_output *output);
32871+ void (*restore)(struct drm_output *output);
32872+ int (*mode_valid)(struct drm_output *output,
32873+ struct drm_display_mode *mode);
32874+ bool (*mode_fixup)(struct drm_output *output,
32875+ struct drm_display_mode *mode,
32876+ struct drm_display_mode *adjusted_mode);
32877+ void (*prepare)(struct drm_output *output);
32878+ void (*commit)(struct drm_output *output);
32879+ void (*mode_set)(struct drm_output *output,
32880+ struct drm_display_mode *mode,
32881+ struct drm_display_mode *adjusted_mode);
32882+ enum drm_output_status (*detect)(struct drm_output *output);
32883+ int (*get_modes)(struct drm_output *output);
32884+ /* JJJ: type checking for properties via property value type */
32885+ bool (*set_property)(struct drm_output *output, int prop, void *val);
32886+ void (*cleanup)(struct drm_output *output);
32887+};
32888+
32889+#define DRM_OUTPUT_MAX_UMODES 16
32890+#define DRM_OUTPUT_MAX_PROPERTY 16
32891+#define DRM_OUTPUT_LEN 32
32892+/**
32893+ * drm_output - central DRM output control structure
32894+ * @crtc: CRTC this output is currently connected to, NULL if none
32895+ * @possible_crtcs: bitmap of CRTCs this output could be attached to
32896+ * @possible_clones: bitmap of possible outputs this output could clone
32897+ * @interlace_allowed: can this output handle interlaced modes?
32898+ * @doublescan_allowed: can this output handle doublescan?
32899+ * @available_modes: modes available on this output (from get_modes() + user)
32900+ * @initial_x: initial x position for this output
32901+ * @initial_y: initial y position for this output
32902+ * @status: output connected?
32903+ * @subpixel_order: for this output
32904+ * @mm_width: displayable width of output in mm
32905+ * @mm_height: displayable height of output in mm
32906+ * @name: name of output (should be one of a few standard names)
32907+ * @funcs: output control functions
32908+ * @driver_private: private driver data
32909+ *
32910+ * Each output may be connected to one or more CRTCs, or may be clonable by
32911+ * another output if they can share a CRTC. Each output also has a specific
32912+ * position in the broader display (referred to as a 'screen' though it could
32913+ * span multiple monitors).
32914+ */
32915+struct drm_output {
32916+ struct drm_device *dev;
32917+ struct list_head head;
32918+ struct drm_crtc *crtc;
32919+ int id; /* idr assigned */
32920+ unsigned long possible_crtcs;
32921+ unsigned long possible_clones;
32922+ bool interlace_allowed;
32923+ bool doublescan_allowed;
32924+ struct list_head modes; /* list of modes on this output */
32925+
32926+ /*
32927+ OptionInfoPtr options;
32928+ XF86ConfMonitorPtr conf_monitor;
32929+ */
32930+ int initial_x, initial_y;
32931+ enum drm_output_status status;
32932+
32933+ /* these are modes added by probing with DDC or the BIOS */
32934+ struct list_head probed_modes;
32935+
32936+ /* xf86MonPtr MonInfo; */
32937+ enum subpixel_order subpixel_order;
32938+ int mm_width, mm_height;
32939+ struct drm_display_info *monitor_info; /* if any */
32940+ char name[DRM_OUTPUT_LEN];
32941+ const struct drm_output_funcs *funcs;
32942+ void *driver_private;
32943+
32944+ u32 user_mode_ids[DRM_OUTPUT_MAX_UMODES];
32945+
32946+ u32 property_ids[DRM_OUTPUT_MAX_PROPERTY];
32947+ u32 property_values[DRM_OUTPUT_MAX_PROPERTY];
32948+};
32949+
32950+/**
32951+ * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
32952+ * @resize: adjust CRTCs as necessary for the proposed layout
32953+ *
32954+ * Currently only a resize hook is available. DRM will call back into the
32955+ * driver with a new screen width and height. If the driver can't support
32956+ * the proposed size, it can return false. Otherwise it should adjust
32957+ * the CRTC<->output mappings as needed and update its view of the screen.
32958+ */
32959+struct drm_mode_config_funcs {
32960+ bool (*resize)(struct drm_device *dev, int width, int height);
32961+};
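
A sketch of what a driver's resize hook might look like; the size limits are made up, and only the callback signature comes from the structure above.

/* Hypothetical resize hook: reject layouts the hardware cannot scan
 * out, otherwise accept and (in a real driver) remap CRTC<->output. */
static bool foo_resize(struct drm_device *dev, int width, int height)
{
	if (width > 2048 || height > 2048)	/* illustrative limits */
		return false;
	/* Adjust CRTC<->output mappings for the new screen size here. */
	return true;
}

static struct drm_mode_config_funcs foo_config_funcs = {
	.resize = foo_resize,
};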
32962+
32963+/**
32964+ * drm_mode_config - Mode configuration control structure
32965+ *
32966+ */
32967+struct drm_mode_config {
32968+ struct mutex mutex; /* protects configuration and IDR */
32969+ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, output, modes - just makes life easier */
32970+ /* this is limited to one for now */
32971+ int num_fb;
32972+ struct list_head fb_list;
32973+ int num_output;
32974+ struct list_head output_list;
32975+
32976+ /* int compat_output? */
32977+ int num_crtc;
32978+ struct list_head crtc_list;
32979+
32980+ struct list_head usermode_list;
32981+
32982+ struct list_head property_list;
32983+
32984+ int min_width, min_height;
32985+ int max_width, max_height;
32986+ /* DamagePtr rotationDamage? */
32987+ /* DGA stuff? */
32988+ struct drm_mode_config_funcs *funcs;
32989+ unsigned long fb_base;
32990+};
32991+
32992+struct drm_output *drm_output_create(struct drm_device *dev,
32993+ const struct drm_output_funcs *funcs,
32994+ const char *name);
32995+extern void drm_output_destroy(struct drm_output *output);
32996+extern bool drm_output_rename(struct drm_output *output, const char *name);
32997+extern void drm_fb_release(struct file *filp);
32998+
32999+extern struct edid *drm_get_edid(struct drm_output *output,
33000+ struct i2c_adapter *adapter);
33001+extern int drm_add_edid_modes(struct drm_output *output, struct edid *edid);
33002+extern void drm_mode_probed_add(struct drm_output *output, struct drm_display_mode *mode);
33003+extern void drm_mode_remove(struct drm_output *output, struct drm_display_mode *mode);
33004+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
33005+ struct drm_display_mode *mode);
33006+extern void drm_mode_debug_printmodeline(struct drm_device *dev,
33007+ struct drm_display_mode *mode);
33008+extern void drm_mode_config_init(struct drm_device *dev);
33009+extern void drm_mode_config_cleanup(struct drm_device *dev);
33010+extern void drm_mode_set_name(struct drm_display_mode *mode);
33011+extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
33012+extern void drm_disable_unused_functions(struct drm_device *dev);
33013+
33014+extern void drm_mode_addmode(struct drm_device *dev, struct drm_display_mode *user_mode);
33015+extern int drm_mode_rmmode(struct drm_device *dev, struct drm_display_mode *mode);
33016+
33017+/* for use by the fb module */
33018+extern int drm_mode_attachmode_crtc(struct drm_device *dev,
33019+ struct drm_crtc *crtc,
33020+ struct drm_display_mode *mode);
33021+extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
33022+
33023+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
33024+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
33025+extern void drm_mode_list_concat(struct list_head *head,
33026+ struct list_head *new);
33027+extern void drm_mode_validate_size(struct drm_device *dev,
33028+ struct list_head *mode_list,
33029+ int maxX, int maxY, int maxPitch);
33030+extern void drm_mode_prune_invalid(struct drm_device *dev,
33031+ struct list_head *mode_list, bool verbose);
33032+extern void drm_mode_sort(struct list_head *mode_list);
33033+extern int drm_mode_vrefresh(struct drm_display_mode *mode);
33034+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
33035+ int adjust_flags);
33036+extern void drm_mode_output_list_update(struct drm_output *output);
33037+
33038+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
33039+extern bool drm_initial_config(struct drm_device *dev, bool cangrow);
33040+extern void drm_framebuffer_set_object(struct drm_device *dev,
33041+ unsigned long handle);
33042+extern struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev);
33043+extern void drm_framebuffer_destroy(struct drm_framebuffer *fb);
33044+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
33045+extern int drmfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
33046+extern bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
33047+ int x, int y);
33048+
33049+extern int drm_output_attach_property(struct drm_output *output,
33050+ struct drm_property *property, int init_val);
33051+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
33052+ const char *name, int num_values);
33053+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
33054+extern int drm_property_add_enum(struct drm_property *property, int index,
33055+ uint32_t value, const char *name);
33056+
33057+/* IOCTLs */
33058+extern int drm_mode_getresources(struct drm_device *dev,
33059+ void *data, struct drm_file *file_priv);
33060+
33061+extern int drm_mode_getcrtc(struct drm_device *dev,
33062+ void *data, struct drm_file *file_priv);
33063+extern int drm_mode_getoutput(struct drm_device *dev,
33064+ void *data, struct drm_file *file_priv);
33065+extern int drm_mode_setcrtc(struct drm_device *dev,
33066+ void *data, struct drm_file *file_priv);
33067+extern int drm_mode_addfb(struct drm_device *dev,
33068+ void *data, struct drm_file *file_priv);
33069+extern int drm_mode_rmfb(struct drm_device *dev,
33070+ void *data, struct drm_file *file_priv);
33071+extern int drm_mode_getfb(struct drm_device *dev,
33072+ void *data, struct drm_file *file_priv);
33073+extern int drm_mode_addmode_ioctl(struct drm_device *dev,
33074+ void *data, struct drm_file *file_priv);
33075+extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
33076+ void *data, struct drm_file *file_priv);
33077+extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
33078+ void *data, struct drm_file *file_priv);
33079+extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
33080+ void *data, struct drm_file *file_priv);
33081+
33082+extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
33083+ void *data, struct drm_file *file_priv);
33084+#endif /* __DRM_CRTC_H__ */
33085+
33086Index: linux-2.6.27/include/drm/drm_edid.h
33087===================================================================
33088--- /dev/null 1970-01-01 00:00:00.000000000 +0000
33089+++ linux-2.6.27/include/drm/drm_edid.h 2009-02-05 13:29:33.000000000 +0000
33090@@ -0,0 +1,179 @@
33091+#ifndef __DRM_EDID_H__
33092+#define __DRM_EDID_H__
33093+
33094+#include <linux/types.h>
33095+
33096+#define EDID_LENGTH 128
33097+#define DDC_ADDR 0x50
33098+
33099+#ifdef BIG_ENDIAN
33100+#error "EDID structure is little endian, need big endian versions"
33101+#endif
33102+
33103+struct est_timings {
33104+ u8 t1;
33105+ u8 t2;
33106+ u8 mfg_rsvd;
33107+} __attribute__((packed));
33108+
33109+struct std_timing {
33110+ u8 hsize; /* need to multiply by 8 then add 248 */
33111+ u8 vfreq:6; /* need to add 60 */
33112+ u8 aspect_ratio:2; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
33113+} __attribute__((packed));
33114+
33115+/* If detailed data is pixel timing */
33116+struct detailed_pixel_timing {
33117+ u8 hactive_lo;
33118+ u8 hblank_lo;
33119+ u8 hblank_hi:4;
33120+ u8 hactive_hi:4;
33121+ u8 vactive_lo;
33122+ u8 vblank_lo;
33123+ u8 vblank_hi:4;
33124+ u8 vactive_hi:4;
33125+ u8 hsync_offset_lo;
33126+ u8 hsync_pulse_width_lo;
33127+ u8 vsync_pulse_width_lo:4;
33128+ u8 vsync_offset_lo:4;
33129+ u8 hsync_pulse_width_hi:2;
33130+ u8 hsync_offset_hi:2;
33131+ u8 vsync_pulse_width_hi:2;
33132+ u8 vsync_offset_hi:2;
33133+ u8 width_mm_lo;
33134+ u8 height_mm_lo;
33135+ u8 height_mm_hi:4;
33136+ u8 width_mm_hi:4;
33137+ u8 hborder;
33138+ u8 vborder;
33139+ u8 unknown0:1;
33140+ u8 vsync_positive:1;
33141+ u8 hsync_positive:1;
33142+ u8 separate_sync:2;
33143+ u8 stereo:1;
33144+ u8 unknown6:1;
33145+ u8 interlaced:1;
33146+} __attribute__((packed));
33147+
33148+/* If it's not pixel timing, it'll be one of the below */
33149+struct detailed_data_string {
33150+ u8 str[13];
33151+} __attribute__((packed));
33152+
33153+struct detailed_data_monitor_range {
33154+ u8 min_vfreq;
33155+ u8 max_vfreq;
33156+ u8 min_hfreq_khz;
33157+ u8 max_hfreq_khz;
33158+ u8 pixel_clock_mhz; /* need to multiply by 10 */
33159+ u16 sec_gtf_toggle; /* A000=use above, 20=use below */ /* FIXME: byte order */
33160+ u8 hfreq_start_khz; /* need to multiply by 2 */
33161+ u8 c; /* need to divide by 2 */
33162+ u16 m; /* FIXME: byte order */
33163+ u8 k;
33164+ u8 j; /* need to divide by 2 */
33165+} __attribute__((packed));
33166+
33167+struct detailed_data_wpindex {
33168+ u8 white_y_lo:2;
33169+ u8 white_x_lo:2;
33170+ u8 pad:4;
33171+ u8 white_x_hi;
33172+ u8 white_y_hi;
33173+ u8 gamma; /* need to divide by 100 then add 1 */
33174+} __attribute__((packed));
33175+
33176+struct detailed_data_color_point {
33177+ u8 windex1;
33178+ u8 wpindex1[3];
33179+ u8 windex2;
33180+ u8 wpindex2[3];
33181+} __attribute__((packed));
33182+
33183+struct detailed_non_pixel {
33184+ u8 pad1;
33185+ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
33186+ fb=color point data, fa=standard timing data,
33187+ f9=undefined, f8=mfg. reserved */
33188+ u8 pad2;
33189+ union {
33190+ struct detailed_data_string str;
33191+ struct detailed_data_monitor_range range;
33192+ struct detailed_data_wpindex color;
33193+ struct std_timing timings[5];
33194+ } data;
33195+} __attribute__((packed));
33196+
33197+#define EDID_DETAIL_STD_MODES 0xfa
33198+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
33199+#define EDID_DETAIL_MONITOR_NAME 0xfc
33200+#define EDID_DETAIL_MONITOR_RANGE 0xfd
33201+#define EDID_DETAIL_MONITOR_STRING 0xfe
33202+#define EDID_DETAIL_MONITOR_SERIAL 0xff
33203+
33204+struct detailed_timing {
33205+	u16 pixel_clock; /* in units of 10 kHz */ /* FIXME: byte order */
33206+ union {
33207+ struct detailed_pixel_timing pixel_data;
33208+ struct detailed_non_pixel other_data;
33209+ } data;
33210+} __attribute__((packed));
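
Reassembling the split lo/hi fields above is mechanical; a small illustrative decoder follows (the helper itself is not part of this patch):

/* Recover a few whole values from a detailed timing descriptor. */
static void decode_detailed_timing(const struct detailed_timing *dt,
				   int *hactive, int *vactive,
				   int *clock_khz)
{
	const struct detailed_pixel_timing *p = &dt->data.pixel_data;

	*hactive   = (p->hactive_hi << 8) | p->hactive_lo;
	*vactive   = (p->vactive_hi << 8) | p->vactive_lo;
	*clock_khz = dt->pixel_clock * 10;	/* field is in 10 kHz units */
}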
33211+
33212+struct edid {
33213+ u8 header[8];
33214+ /* Vendor & product info */
33215+ u16 mfg_id; /* FIXME: byte order */
33216+ u16 prod_code; /* FIXME: byte order */
33217+ u32 serial; /* FIXME: byte order */
33218+ u8 mfg_week;
33219+ u8 mfg_year;
33220+ /* EDID version */
33221+ u8 version;
33222+ u8 revision;
33223+ /* Display info: */
33224+ /* input definition */
33225+ u8 serration_vsync:1;
33226+ u8 sync_on_green:1;
33227+ u8 composite_sync:1;
33228+ u8 separate_syncs:1;
33229+ u8 blank_to_black:1;
33230+ u8 video_level:2;
33231+ u8 digital:1; /* bits below must be zero if set */
33232+ u8 width_cm;
33233+ u8 height_cm;
33234+ u8 gamma;
33235+ /* feature support */
33236+ u8 default_gtf:1;
33237+ u8 preferred_timing:1;
33238+ u8 standard_color:1;
33239+ u8 display_type:2; /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
33240+ u8 pm_active_off:1;
33241+ u8 pm_suspend:1;
33242+ u8 pm_standby:1;
33243+ /* Color characteristics */
33244+ u8 red_green_lo;
33245+ u8 black_white_lo;
33246+ u8 red_x;
33247+ u8 red_y;
33248+ u8 green_x;
33249+ u8 green_y;
33250+ u8 blue_x;
33251+ u8 blue_y;
33252+ u8 white_x;
33253+ u8 white_y;
33254+	/* Est. timings and mfg rsvd timings */
33255+	struct est_timings established_timings;
33256+	/* Standard timings 1-8 */
33257+	struct std_timing standard_timings[8];
33258+	/* Detailed timings 1-4 */
33259+	struct detailed_timing detailed_timings[4];
33260+ /* Number of 128 byte ext. blocks */
33261+ u8 extensions;
33262+ /* Checksum */
33263+ u8 checksum;
33264+} __attribute__((packed));
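
The checksum byte is defined so that all EDID_LENGTH bytes of a block sum to zero modulo 256, which gives a simple sanity check; an illustrative helper, not part of this patch:

/* Returns non-zero if the 128-byte EDID block checksums correctly. */
static int edid_block_valid(const struct edid *e)
{
	const u8 *raw = (const u8 *)e;
	u8 sum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH; i++)
		sum += raw[i];
	return sum == 0;
}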
33265+
33266+extern unsigned char *drm_ddc_read(struct i2c_adapter *adapter);
33267+extern int drm_get_acpi_edid(char *method, char *edid, ssize_t length);
33268+
33269+#endif /* __DRM_EDID_H__ */
33270Index: linux-2.6.27/include/drm/drm_objects.h
33271===================================================================
33272--- /dev/null 1970-01-01 00:00:00.000000000 +0000
33273+++ linux-2.6.27/include/drm/drm_objects.h 2009-02-05 13:29:33.000000000 +0000
33274@@ -0,0 +1,717 @@
33275+/**************************************************************************
33276+ *
33277+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
33278+ * All Rights Reserved.
33279+ *
33280+ * Permission is hereby granted, free of charge, to any person obtaining a
33281+ * copy of this software and associated documentation files (the
33282+ * "Software"), to deal in the Software without restriction, including
33283+ * without limitation the rights to use, copy, modify, merge, publish,
33284+ * distribute, sub license, and/or sell copies of the Software, and to
33285+ * permit persons to whom the Software is furnished to do so, subject to
33286+ * the following conditions:
33287+ *
33288+ * The above copyright notice and this permission notice (including the
33289+ * next paragraph) shall be included in all copies or substantial portions
33290+ * of the Software.
33291+ *
33292+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33293+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33294+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33295+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33296+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33297+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33298+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33299+ *
33300+ **************************************************************************/
33301+/*
33302+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33303+ */
33304+
33305+#ifndef _DRM_OBJECTS_H
33306+#define _DRM_OBJECTS_H
33307+
33308+struct drm_device;
33309+struct drm_bo_mem_reg;
33310+
33311+/***************************************************
33312+ * User space objects. (drm_object.c)
33313+ */
33314+
33315+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
33316+
33317+enum drm_object_type {
33318+ drm_fence_type,
33319+ drm_buffer_type,
33320+ drm_lock_type,
33321+ /*
33322+ * Add other user space object types here.
33323+ */
33324+ drm_driver_type0 = 256,
33325+ drm_driver_type1,
33326+ drm_driver_type2,
33327+ drm_driver_type3,
33328+ drm_driver_type4
33329+};
33330+
33331+/*
33332+ * A user object is a structure that helps the drm give out user handles
33333+ * to kernel internal objects and to keep track of these objects so that
33334+ * they can be destroyed, for example when the user space process exits.
33335+ * Designed to be accessible using a user space 32-bit handle.
33336+ */
33337+
33338+struct drm_user_object {
33339+ struct drm_hash_item hash;
33340+ struct list_head list;
33341+ enum drm_object_type type;
33342+ atomic_t refcount;
33343+ int shareable;
33344+ struct drm_file *owner;
33345+ void (*ref_struct_locked) (struct drm_file *priv,
33346+ struct drm_user_object *obj,
33347+ enum drm_ref_type ref_action);
33348+ void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
33349+ enum drm_ref_type unref_action);
33350+ void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
33351+};
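
A driver typically embeds drm_user_object in its own structure and registers it to hand out a 32-bit handle; a hedged sketch, in which foo_thing, foo_thing_export, and the choice of drm_driver_type0 are all hypothetical:

/* Hypothetical wrapper object exported to user space via a handle. */
struct foo_thing {
	struct drm_user_object base;
	/* driver payload ... */
};

static int foo_thing_export(struct drm_file *priv, struct foo_thing *t,
			    int shareable)
{
	/* struct_mutex must be held, per the comments that follow. */
	t->base.type = drm_driver_type0;
	return drm_add_user_object(priv, &t->base, shareable);
}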
33352+
33353+/*
33354+ * A ref object is a structure used to
33355+ * keep track of references to user objects so that these
33356+ * references can be destroyed, for example when the user space
33357+ * process exits. Designed to be accessible using a pointer to the _user_ object.
33358+ */
33359+
33360+struct drm_ref_object {
33361+ struct drm_hash_item hash;
33362+ struct list_head list;
33363+ atomic_t refcount;
33364+ enum drm_ref_type unref_action;
33365+};
33366+
33367+/**
33368+ * Must be called with the struct_mutex held.
33369+ */
33370+
33371+extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
33372+ int shareable);
33373+/**
33374+ * Must be called with the struct_mutex held.
33375+ */
33376+
33377+extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
33378+ uint32_t key);
33379+
33380+/*
33381+ * Must be called with the struct_mutex held. May temporarily release it.
33382+ */
33383+
33384+extern int drm_add_ref_object(struct drm_file *priv,
33385+ struct drm_user_object *referenced_object,
33386+ enum drm_ref_type ref_action);
33387+
33388+/*
33389+ * Must be called with the struct_mutex held.
33390+ */
33391+
33392+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
33393+ struct drm_user_object *referenced_object,
33394+ enum drm_ref_type ref_action);
33395+/*
33396+ * Must be called with the struct_mutex held.
33397+ * If "item" has been obtained by a call to drm_lookup_ref_object, you may not
33398+ * release the struct_mutex before calling drm_remove_ref_object.
33399+ * This function may temporarily release the struct_mutex.
33400+ */
33401+
33402+extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
33403+extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
33404+ enum drm_object_type type,
33405+ struct drm_user_object **object);
33406+extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
33407+ enum drm_object_type type);
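
Condensing the locking rules above into one sequence, a typical unreference path looks like this sketch; foo_drop_ref is hypothetical, while the lookup and removal calls are the ones declared above:

/* Look up and drop one reference under dev->struct_mutex, as the
 * comments above require; drm_remove_ref_object() may temporarily
 * release the mutex internally. */
static void foo_drop_ref(struct drm_device *dev, struct drm_file *priv,
			 struct drm_user_object *uo,
			 enum drm_ref_type action)
{
	struct drm_ref_object *ref;

	mutex_lock(&dev->struct_mutex);
	ref = drm_lookup_ref_object(priv, uo, action);
	if (ref)
		drm_remove_ref_object(priv, ref);
	mutex_unlock(&dev->struct_mutex);
}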
33408+
33409+/***************************************************
33410+ * Fence objects. (drm_fence.c)
33411+ */
33412+
33413+struct drm_fence_object {
33414+ struct drm_user_object base;
33415+ struct drm_device *dev;
33416+ atomic_t usage;
33417+
33418+ /*
33419+ * The below three fields are protected by the fence manager spinlock.
33420+ */
33421+
33422+ struct list_head ring;
33423+ int fence_class;
33424+ uint32_t native_types;
33425+ uint32_t type;
33426+ uint32_t signaled_types;
33427+ uint32_t sequence;
33428+ uint32_t waiting_types;
33429+ uint32_t error;
33430+};
33431+
33432+#define _DRM_FENCE_CLASSES 8
33433+
33434+struct drm_fence_class_manager {
33435+ struct list_head ring;
33436+ uint32_t pending_flush;
33437+ uint32_t waiting_types;
33438+ wait_queue_head_t fence_queue;
33439+ uint32_t highest_waiting_sequence;
33440+ uint32_t latest_queued_sequence;
33441+};
33442+
33443+struct drm_fence_manager {
33444+ int initialized;
33445+ rwlock_t lock;
33446+ struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
33447+ uint32_t num_classes;
33448+ atomic_t count;
33449+};
33450+
33451+struct drm_fence_driver {
33452+ unsigned long *waiting_jiffies;
33453+ uint32_t num_classes;
33454+ uint32_t wrap_diff;
33455+ uint32_t flush_diff;
33456+ uint32_t sequence_mask;
33457+
33458+ /*
33459+ * Driver implemented functions:
33460+ * has_irq() : 1 if the hardware can update the indicated type_flags using an
33461+ * irq handler. 0 if polling is required.
33462+ *
33463+ * emit() : Emit a sequence number to the command stream.
33464+ * Return the sequence number.
33465+ *
33466+ * flush() : Make sure the flags indicated in fc->pending_flush will eventually
33467+ * signal for fc->highest_received_sequence and all preceding sequences.
33468+ * Acknowledge by clearing the flags fc->pending_flush.
33469+ *
33470+ * poll() : Call drm_fence_handler with any new information.
33471+ *
33472+ * needed_flush() : Given the current state of the fence->type flags and previously
33473+ * executed or queued flushes, return the type_flags that need flushing.
33474+ *
33475+ * wait(): Wait for the "mask" flags to signal on a given fence, performing
33476+ * whatever's necessary to make this happen.
33477+ */
33478+
33479+ int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
33480+ uint32_t flags);
33481+ int (*emit) (struct drm_device *dev, uint32_t fence_class,
33482+ uint32_t flags, uint32_t *breadcrumb,
33483+ uint32_t *native_type);
33484+ void (*flush) (struct drm_device *dev, uint32_t fence_class);
33485+ void (*poll) (struct drm_device *dev, uint32_t fence_class,
33486+ uint32_t types);
33487+ uint32_t (*needed_flush) (struct drm_fence_object *fence);
33488+ int (*wait) (struct drm_fence_object *fence, int lazy,
33489+ int interruptible, uint32_t mask);
33490+};
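
To ground the callback contract described in the comment block, here is a skeletal driver table; all foo_* symbols are hypothetical and elide real hardware access:

/* Skeletal polling-only fence driver; stubs mark where a real driver
 * would read hardware breadcrumbs. */
static int foo_fence_has_irq(struct drm_device *dev,
			     uint32_t fence_class, uint32_t flags)
{
	return 0;	/* no IRQ support: the core must poll */
}

static void foo_fence_poll(struct drm_device *dev, uint32_t fence_class,
			   uint32_t types)
{
	/* Read the latest signaled sequence from hardware and report
	 * it, e.g. drm_fence_handler(dev, fence_class, seq, types, 0); */
}

static struct drm_fence_driver foo_fence_driver = {
	.num_classes = 1,
	.has_irq     = foo_fence_has_irq,
	.poll        = foo_fence_poll,
};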
33491+
33492+extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
33493+ int interruptible, uint32_t mask,
33494+ unsigned long end_jiffies);
33495+extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
33496+ uint32_t sequence, uint32_t type,
33497+ uint32_t error);
33498+extern void drm_fence_manager_init(struct drm_device *dev);
33499+extern void drm_fence_manager_takedown(struct drm_device *dev);
33500+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
33501+ uint32_t sequence);
33502+extern int drm_fence_object_flush(struct drm_fence_object *fence,
33503+ uint32_t type);
33504+extern int drm_fence_object_signaled(struct drm_fence_object *fence,
33505+ uint32_t type);
33506+extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
33507+extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
33508+extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
33509+extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
33510+ struct drm_fence_object *src);
33511+extern int drm_fence_object_wait(struct drm_fence_object *fence,
33512+ int lazy, int ignore_signals, uint32_t mask);
33513+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
33514+ uint32_t fence_flags, uint32_t fence_class,
33515+ struct drm_fence_object **c_fence);
33516+extern int drm_fence_object_emit(struct drm_fence_object *fence,
33517+ uint32_t fence_flags, uint32_t class,
33518+ uint32_t type);
33519+extern void drm_fence_fill_arg(struct drm_fence_object *fence,
33520+ struct drm_fence_arg *arg);
33521+
33522+extern int drm_fence_add_user_object(struct drm_file *priv,
33523+ struct drm_fence_object *fence,
33524+ int shareable);
33525+
33526+extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
33527+ struct drm_file *file_priv);
33528+extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
33529+ struct drm_file *file_priv);
33530+extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
33531+ struct drm_file *file_priv);
33532+extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
33533+ struct drm_file *file_priv);
33534+extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
33535+ struct drm_file *file_priv);
33536+extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
33537+ struct drm_file *file_priv);
33538+extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
33539+ struct drm_file *file_priv);
33540+extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
33541+ struct drm_file *file_priv);
33542+extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
33543+ struct drm_file *file_priv);
33544+/**************************************************
33545+ *TTMs
33546+ */
33547+
33548+/*
33549+ * The ttm backend GTT interface. (In our case AGP).
33550+ * Any similar type of device (PCIE?)
33551+ * needs only to implement these functions to be usable with the TTM interface.
33552+ * The AGP backend implementation lives in drm_agpsupport.c and
33553+ * basically maps these calls to available functions in agpgart.
33554+ * Each drm device driver gets an
33555+ * additional function pointer that creates these types,
33556+ * so that the device can choose the correct aperture.
33557+ * (Multiple AGP apertures, etc.)
33558+ * Most device drivers will let this point to the standard AGP implementation.
33559+ */
33560+
33561+#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
33562+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
33563+
33564+struct drm_ttm_backend;
33565+struct drm_ttm_backend_func {
33566+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
33567+ int (*populate) (struct drm_ttm_backend *backend,
33568+ unsigned long num_pages, struct page **pages);
33569+ void (*clear) (struct drm_ttm_backend *backend);
33570+ int (*bind) (struct drm_ttm_backend *backend,
33571+ struct drm_bo_mem_reg *bo_mem);
33572+ int (*unbind) (struct drm_ttm_backend *backend);
33573+ void (*destroy) (struct drm_ttm_backend *backend);
33574+};
33575+
33576+
33577+struct drm_ttm_backend {
33578+ struct drm_device *dev;
33579+ uint32_t flags;
33580+ struct drm_ttm_backend_func *func;
33581+};
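
A non-AGP device would supply its own function table as described above; a stub sketch, with all foo_* names hypothetical:

/* Stub backend: a real implementation would program the device GART
 * in bind()/unbind() instead of returning success unconditionally. */
static int foo_be_bind(struct drm_ttm_backend *backend,
		       struct drm_bo_mem_reg *bo_mem)
{
	return 0;	/* insert populated pages into the aperture */
}

static int foo_be_unbind(struct drm_ttm_backend *backend)
{
	return 0;	/* remove pages from the aperture */
}

static struct drm_ttm_backend_func foo_be_func = {
	.bind   = foo_be_bind,
	.unbind = foo_be_unbind,
	/* .populate, .clear, .destroy and .needs_ub_cache_adjust
	 * must also be filled in by a real backend. */
};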
33582+
33583+struct drm_ttm {
33584+ struct page *dummy_read_page;
33585+ struct page **pages;
33586+ uint32_t page_flags;
33587+ unsigned long num_pages;
33588+ atomic_t vma_count;
33589+ struct drm_device *dev;
33590+ int destroy;
33591+ uint32_t mapping_offset;
33592+ struct drm_ttm_backend *be;
33593+ enum {
33594+ ttm_bound,
33595+ ttm_evicted,
33596+ ttm_unbound,
33597+ ttm_unpopulated,
33598+ } state;
33599+
33600+};
33601+
33602+extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
33603+extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
33604+extern void drm_ttm_unbind(struct drm_ttm *ttm);
33605+extern void drm_ttm_evict(struct drm_ttm *ttm);
33606+extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
33607+extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
33608+extern void drm_ttm_cache_flush(void);
33609+extern int drm_ttm_populate(struct drm_ttm *ttm);
33610+extern int drm_ttm_set_user(struct drm_ttm *ttm,
33611+ struct task_struct *tsk,
33612+ int write,
33613+ unsigned long start,
33614+ unsigned long num_pages,
33615+ struct page *dummy_read_page);
33616+unsigned long drm_ttm_size(struct drm_device *dev,
33617+ unsigned long num_pages,
33618+ int user_bo);
33619+
33620+
33621+/*
33622+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
33623+ * this, which calls this function iff there are no vmas referencing it anymore.
33624+ * Otherwise it is called when the last vma exits.
33625+ */
33626+
33627+extern int drm_destroy_ttm(struct drm_ttm *ttm);
33628+
33629+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
33630+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
33631+}
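
DRM_FLAG_MASKED replaces only the bits of _old selected by _mask with the corresponding bits of _new; a quick worked example:

/* With old = 0xF0, new = 0x0F, mask = 0x3C:
 * (old ^ new) & mask = 0xFF & 0x3C = 0x3C, and 0xF0 ^ 0x3C = 0xCC,
 * so bits inside 0x3C now come from new and the rest are kept. */
uint32_t old_flags = 0xF0, new_flags = 0x0F;
DRM_FLAG_MASKED(old_flags, new_flags, 0x3C);
/* old_flags == 0xCC */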
33632+
33633+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
33634+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
33635+
33636+/*
33637+ * Page flags.
33638+ */
33639+
33640+#define DRM_TTM_PAGE_UNCACHED (1 << 0)
33641+#define DRM_TTM_PAGE_USED (1 << 1)
33642+#define DRM_TTM_PAGE_BOUND (1 << 2)
33643+#define DRM_TTM_PAGE_PRESENT (1 << 3)
33644+#define DRM_TTM_PAGE_VMALLOC (1 << 4)
33645+#define DRM_TTM_PAGE_USER (1 << 5)
33646+#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
33647+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
33648+#define DRM_TTM_PAGE_USER_DMA (1 << 8)
33649+
33650+/***************************************************
33651+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
33652+ */
33653+
33654+struct drm_bo_mem_reg {
33655+ struct drm_mm_node *mm_node;
33656+ unsigned long size;
33657+ unsigned long num_pages;
33658+ uint32_t page_alignment;
33659+ uint32_t mem_type;
33660+ uint64_t flags;
33661+ uint64_t mask;
33662+ uint32_t desired_tile_stride;
33663+ uint32_t hw_tile_stride;
33664+};
33665+
33666+enum drm_bo_type {
33667+ drm_bo_type_dc,
33668+ drm_bo_type_user,
33669+ drm_bo_type_kernel, /* for initial kernel allocations */
33670+};
33671+
33672+struct drm_buffer_object {
33673+ struct drm_device *dev;
33674+ struct drm_user_object base;
33675+
33676+ /*
33677+ * If there is a possibility that the usage variable is zero,
33678+	 * then dev->struct_mutex should be locked before incrementing it.
33679+ */
33680+
33681+ atomic_t usage;
33682+ unsigned long buffer_start;
33683+ enum drm_bo_type type;
33684+ unsigned long offset;
33685+ atomic_t mapped;
33686+ struct drm_bo_mem_reg mem;
33687+
33688+ struct list_head lru;
33689+ struct list_head ddestroy;
33690+
33691+ uint32_t fence_type;
33692+ uint32_t fence_class;
33693+ uint32_t new_fence_type;
33694+ uint32_t new_fence_class;
33695+ struct drm_fence_object *fence;
33696+ uint32_t priv_flags;
33697+ wait_queue_head_t event_queue;
33698+ struct mutex mutex;
33699+ unsigned long num_pages;
33700+ unsigned long reserved_size;
33701+
33702+ /* For pinned buffers */
33703+ struct drm_mm_node *pinned_node;
33704+ uint32_t pinned_mem_type;
33705+ struct list_head pinned_lru;
33706+
33707+ /* For vm */
33708+ struct drm_ttm *ttm;
33709+ struct drm_map_list map_list;
33710+ uint32_t memory_type;
33711+ unsigned long bus_offset;
33712+ uint32_t vm_flags;
33713+ void *iomap;
33714+
33715+#ifdef DRM_ODD_MM_COMPAT
33716+ /* dev->struct_mutex only protected. */
33717+ struct list_head vma_list;
33718+ struct list_head p_mm_list;
33719+#endif
33720+
33721+};
33722+
33723+#define _DRM_BO_FLAG_UNFENCED 0x00000001
33724+#define _DRM_BO_FLAG_EVICTED 0x00000002
33725+
33726+struct drm_mem_type_manager {
33727+ int has_type;
33728+ int use_type;
33729+ struct drm_mm manager;
33730+ struct list_head lru;
33731+ struct list_head pinned;
33732+ uint32_t flags;
33733+ uint32_t drm_bus_maptype;
33734+ unsigned long gpu_offset;
33735+ unsigned long io_offset;
33736+ unsigned long io_size;
33737+ void *io_addr;
33738+};
33739+
33740+struct drm_bo_lock {
33741+ struct drm_user_object base;
33742+ wait_queue_head_t queue;
33743+ atomic_t write_lock_pending;
33744+ atomic_t readers;
33745+};
33746+
33747+#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
33748+#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
33749+#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
33750+#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
33751+ before kernel access. */
33752+#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
33753+#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
33754+
33755+struct drm_buffer_manager {
33756+ struct drm_bo_lock bm_lock;
33757+ struct mutex evict_mutex;
33758+ int nice_mode;
33759+ int initialized;
33760+ struct drm_file *last_to_validate;
33761+ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
33762+ struct list_head unfenced;
33763+ struct list_head ddestroy;
33764+ struct delayed_work wq;
33765+ uint32_t fence_type;
33766+ unsigned long cur_pages;
33767+ atomic_t count;
33768+ struct page *dummy_read_page;
33769+};
33770+
33771+struct drm_bo_driver {
33772+ const uint32_t *mem_type_prio;
33773+ const uint32_t *mem_busy_prio;
33774+ uint32_t num_mem_type_prio;
33775+ uint32_t num_mem_busy_prio;
33776+ struct drm_ttm_backend *(*create_ttm_backend_entry)
33777+ (struct drm_device *dev);
33778+ int (*backend_size) (struct drm_device *dev,
33779+ unsigned long num_pages);
33780+ int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
33781+ uint32_t *type);
33782+ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
33783+ int (*init_mem_type) (struct drm_device *dev, uint32_t type,
33784+ struct drm_mem_type_manager *man);
33785+ uint32_t(*evict_mask) (struct drm_buffer_object *bo);
33786+ int (*move) (struct drm_buffer_object *bo,
33787+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
33788+ void (*ttm_cache_flush)(struct drm_ttm *ttm);
33789+
33790+ /*
33791+ * command_stream_barrier
33792+ *
33793+ * @dev: The drm device.
33794+ *
33795+ * @bo: The buffer object to validate.
33796+ *
33797+ * @new_fence_class: The new fence class for the buffer object.
33798+ *
33799+ * @new_fence_type: The new fence type for the buffer object.
33800+ *
33801+ * @no_wait: whether this should give up and return -EBUSY
33802+ * if this operation would require sleeping
33803+ *
33804+ * Insert a command stream barrier that makes sure that the
33805+ * buffer is idle once the commands associated with the
33806+ * current validation are starting to execute. If an error
33807+ * condition is returned, or the function pointer is NULL,
33808+ * the drm core will force buffer idle
33809+ * during validation.
33810+ */
33811+
33812+ int (*command_stream_barrier) (struct drm_buffer_object *bo,
33813+ uint32_t new_fence_class,
33814+ uint32_t new_fence_type,
33815+ int no_wait);
33816+};
33817+
33818+/*
33819+ * buffer objects (drm_bo.c)
33820+ */
33821+extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33822+extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33823+extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33824+extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33825+extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33826+extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
33827+extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33828+extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33829+extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33830+extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33831+extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33832+extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33833+extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33834+extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33835+extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
33836+extern int drm_bo_driver_finish(struct drm_device *dev);
33837+extern int drm_bo_driver_init(struct drm_device *dev);
33838+extern int drm_bo_pci_offset(struct drm_device *dev,
33839+ struct drm_bo_mem_reg *mem,
33840+ unsigned long *bus_base,
33841+ unsigned long *bus_offset,
33842+ unsigned long *bus_size);
33843+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
33844+
33845+extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
33846+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
33847+extern void drm_putback_buffer_objects(struct drm_device *dev);
33848+extern int drm_fence_buffer_objects(struct drm_device *dev,
33849+ struct list_head *list,
33850+ uint32_t fence_flags,
33851+ struct drm_fence_object *fence,
33852+ struct drm_fence_object **used_fence);
33853+extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
33854+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
33855+ enum drm_bo_type type, uint64_t mask,
33856+ uint32_t hint, uint32_t page_alignment,
33857+ unsigned long buffer_start,
33858+ struct drm_buffer_object **bo);
33859+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
33860+ int no_wait);
33861+extern int drm_bo_mem_space(struct drm_buffer_object *bo,
33862+ struct drm_bo_mem_reg *mem, int no_wait);
33863+extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
33864+ uint64_t new_mem_flags,
33865+ int no_wait, int move_unfenced);
33866+extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
33867+extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
33868+ unsigned long p_offset, unsigned long p_size);
33869+extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
33870+ uint32_t fence_class, uint64_t flags,
33871+ uint64_t mask, uint32_t hint,
33872+ int use_old_fence_class,
33873+ struct drm_bo_info_rep *rep,
33874+ struct drm_buffer_object **bo_rep);
33875+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
33876+ uint32_t handle,
33877+ int check_owner);
33878+extern int drm_bo_do_validate(struct drm_buffer_object *bo,
33879+ uint64_t flags, uint64_t mask, uint32_t hint,
33880+ uint32_t fence_class,
33881+ int no_wait,
33882+ struct drm_bo_info_rep *rep);
33883+extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
33884+ struct drm_bo_info_rep *rep);
33885+/*
33886+ * Buffer object memory move- and map helpers.
33887+ * drm_bo_move.c
33888+ */
33889+
33890+extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
33891+ int evict, int no_wait,
33892+ struct drm_bo_mem_reg *new_mem);
33893+extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
33894+ int evict,
33895+ int no_wait, struct drm_bo_mem_reg *new_mem);
33896+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
33897+ int evict, int no_wait,
33898+ uint32_t fence_class, uint32_t fence_type,
33899+ uint32_t fence_flags,
33900+ struct drm_bo_mem_reg *new_mem);
33901+extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
33902+extern unsigned long drm_bo_offset_end(unsigned long offset,
33903+ unsigned long end);
33904+
33905+struct drm_bo_kmap_obj {
33906+ void *virtual;
33907+ struct page *page;
33908+ enum {
33909+ bo_map_iomap,
33910+ bo_map_vmap,
33911+ bo_map_kmap,
33912+ bo_map_premapped,
33913+ } bo_kmap_type;
33914+};
33915+
33916+static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
33917+{
33918+ *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
33919+ map->bo_kmap_type == bo_map_premapped);
33920+ return map->virtual;
33921+}
33922+extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
33923+extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
33924+ unsigned long num_pages, struct drm_bo_kmap_obj *map);
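
Putting the kmap helpers together, a sketch of touching the first page of a buffer object while honouring the is_iomem flag; foo_poke_first_page is hypothetical and error paths are trimmed:

/* Map one page, write a byte through the right accessor, unmap. */
static int foo_poke_first_page(struct drm_buffer_object *bo)
{
	struct drm_bo_kmap_obj map;
	int is_iomem, ret;
	void *virtual;

	ret = drm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	virtual = drm_bmo_virtual(&map, &is_iomem);
	if (is_iomem)
		writeb(0, (void __iomem *)virtual);
	else
		*(u8 *)virtual = 0;

	drm_bo_kunmap(&map);
	return 0;
}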
33925+
33926+
33927+/*
33928+ * drm_regman.c
33929+ */
33930+
33931+struct drm_reg {
33932+ struct list_head head;
33933+ struct drm_fence_object *fence;
33934+ uint32_t fence_type;
33935+ uint32_t new_fence_type;
33936+};
33937+
33938+struct drm_reg_manager {
33939+ struct list_head free;
33940+ struct list_head lru;
33941+ struct list_head unfenced;
33942+
33943+ int (*reg_reusable)(const struct drm_reg *reg, const void *data);
33944+ void (*reg_destroy)(struct drm_reg *reg);
33945+};
33946+
33947+extern int drm_regs_alloc(struct drm_reg_manager *manager,
33948+ const void *data,
33949+ uint32_t fence_class,
33950+ uint32_t fence_type,
33951+ int interruptible,
33952+ int no_wait,
33953+ struct drm_reg **reg);
33954+
33955+extern void drm_regs_fence(struct drm_reg_manager *regs,
33956+ struct drm_fence_object *fence);
33957+
33958+extern void drm_regs_free(struct drm_reg_manager *manager);
33959+extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
33960+extern void drm_regs_init(struct drm_reg_manager *manager,
33961+ int (*reg_reusable)(const struct drm_reg *,
33962+ const void *),
33963+ void (*reg_destroy)(struct drm_reg *));
33964+
33965+extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
33966+ void **virtual);
33967+extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
33968+ void *virtual);
33969+/*
33970+ * drm_bo_lock.c
33971+ * Simple replacement for the hardware lock on buffer manager init and clean.
33972+ */
33973+
33974+
33975+extern void drm_bo_init_lock(struct drm_bo_lock *lock);
33976+extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
33977+extern int drm_bo_read_lock(struct drm_bo_lock *lock);
33978+extern int drm_bo_write_lock(struct drm_bo_lock *lock,
33979+ struct drm_file *file_priv);
33980+
33981+extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
33982+ struct drm_file *file_priv);
33983+
33984+#ifdef CONFIG_DEBUG_MUTEXES
33985+#define DRM_ASSERT_LOCKED(_mutex) \
33986+ BUG_ON(!mutex_is_locked(_mutex) || \
33987+ ((_mutex)->owner != current_thread_info()))
33988+#else
33989+#define DRM_ASSERT_LOCKED(_mutex)
33990+#endif
33991+#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/i915_split.patch b/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/i915_split.patch
deleted file mode 100644
index 1841a681d2..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/i915_split.patch
+++ /dev/null
@@ -1,1627 +0,0 @@
1Index: linux-2.6.28/drivers/gpu/drm/i915/intel_tv.c
2===================================================================
3--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_tv.c 2009-02-19 12:59:22.000000000 +0000
4+++ linux-2.6.28/drivers/gpu/drm/i915/intel_tv.c 2009-02-19 12:59:28.000000000 +0000
5@@ -902,7 +902,7 @@
6 intel_tv_dpms(struct drm_encoder *encoder, int mode)
7 {
8 struct drm_device *dev = encoder->dev;
9- struct drm_i915_private *dev_priv = dev->dev_private;
10+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
11
12 switch(mode) {
13 case DRM_MODE_DPMS_ON:
14@@ -920,7 +920,7 @@
15 intel_tv_save(struct drm_connector *connector)
16 {
17 struct drm_device *dev = connector->dev;
18- struct drm_i915_private *dev_priv = dev->dev_private;
19+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
20 struct intel_output *intel_output = to_intel_output(connector);
21 struct intel_tv_priv *tv_priv = intel_output->dev_priv;
22 int i;
23@@ -970,7 +970,7 @@
24 intel_tv_restore(struct drm_connector *connector)
25 {
26 struct drm_device *dev = connector->dev;
27- struct drm_i915_private *dev_priv = dev->dev_private;
28+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
29 struct intel_output *intel_output = to_intel_output(connector);
30 struct intel_tv_priv *tv_priv = intel_output->dev_priv;
31 struct drm_crtc *crtc = connector->encoder->crtc;
32@@ -1117,7 +1117,7 @@
33 struct drm_display_mode *adjusted_mode)
34 {
35 struct drm_device *dev = encoder->dev;
36- struct drm_i915_private *dev_priv = dev->dev_private;
37+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
38 struct drm_crtc *crtc = encoder->crtc;
39 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
40 struct intel_output *intel_output = enc_to_intel_output(encoder);
41@@ -1362,6 +1362,7 @@
42 struct drm_encoder *encoder = &intel_output->enc;
43 struct drm_device *dev = encoder->dev;
44 struct drm_i915_private *dev_priv = dev->dev_private;
45+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
46 unsigned long irqflags;
47 u32 tv_ctl, save_tv_ctl;
48 u32 tv_dac, save_tv_dac;
49@@ -1626,6 +1627,7 @@
50 intel_tv_init(struct drm_device *dev)
51 {
52 struct drm_i915_private *dev_priv = dev->dev_private;
53+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
54 struct drm_connector *connector;
55 struct intel_output *intel_output;
56 struct intel_tv_priv *tv_priv;
57Index: linux-2.6.28/drivers/gpu/drm/i915/intel_modes.c
58===================================================================
59--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_modes.c 2009-02-19 12:59:22.000000000 +0000
60+++ linux-2.6.28/drivers/gpu/drm/i915/intel_modes.c 2009-02-19 12:59:28.000000000 +0000
61@@ -81,3 +81,6 @@
62
63 return ret;
64 }
65+EXPORT_SYMBOL(intel_ddc_get_modes);
66+
67+MODULE_LICENSE("GPL and additional rights");
68Index: linux-2.6.28/drivers/gpu/drm/i915/intel_i2c.c
69===================================================================
70--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_i2c.c 2009-02-19 12:59:22.000000000 +0000
71+++ linux-2.6.28/drivers/gpu/drm/i915/intel_i2c.c 2009-02-20 14:50:20.000000000 +0000
72@@ -43,7 +43,7 @@
73 static int get_clock(void *data)
74 {
75 struct intel_i2c_chan *chan = data;
76- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
77+ struct drm_i915_common_private *dev_priv_common = chan->drm_dev->dev_private;
78 u32 val;
79
80 val = I915_READ(chan->reg);
81@@ -53,7 +53,7 @@
82 static int get_data(void *data)
83 {
84 struct intel_i2c_chan *chan = data;
85- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
86+ struct drm_i915_common_private *dev_priv_common = chan->drm_dev->dev_private;
87 u32 val;
88
89 val = I915_READ(chan->reg);
90@@ -64,7 +64,7 @@
91 {
92 struct intel_i2c_chan *chan = data;
93 struct drm_device *dev = chan->drm_dev;
94- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
95+ struct drm_i915_common_private *dev_priv_common = chan->drm_dev->dev_private;
96 u32 reserved = 0, clock_bits;
97
98 /* On most chips, these bits must be preserved in software. */
99@@ -85,7 +85,7 @@
100 {
101 struct intel_i2c_chan *chan = data;
102 struct drm_device *dev = chan->drm_dev;
103- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
104+ struct drm_i915_common_private *dev_priv_common = chan->drm_dev->dev_private;
105 u32 reserved = 0, data_bits;
106
107 /* On most chips, these bits must be preserved in software. */
108@@ -167,6 +167,7 @@
109 kfree(chan);
110 return NULL;
111 }
112+EXPORT_SYMBOL(intel_i2c_create);
113
114 /**
115 * intel_i2c_destroy - unregister and free i2c bus resources
116@@ -182,3 +183,4 @@
117 i2c_del_adapter(&chan->adapter);
118 kfree(chan);
119 }
120+EXPORT_SYMBOL(intel_i2c_destroy);
121Index: linux-2.6.28/drivers/gpu/drm/i915/intel_dvo.c
122===================================================================
123--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_dvo.c 2009-02-19 12:59:22.000000000 +0000
124+++ linux-2.6.28/drivers/gpu/drm/i915/intel_dvo.c 2009-02-19 15:14:20.000000000 +0000
125@@ -78,7 +78,7 @@
126
127 static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
128 {
129- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
130+ struct drm_i915_common_private *dev_priv_common = encoder->dev->dev_private;
131 struct intel_output *intel_output = enc_to_intel_output(encoder);
132 struct intel_dvo_device *dvo = intel_output->dev_priv;
133 u32 dvo_reg = dvo->dvo_reg;
134@@ -98,15 +98,16 @@
135 static void intel_dvo_save(struct drm_connector *connector)
136 {
137 struct drm_i915_private *dev_priv = connector->dev->dev_private;
138+ struct drm_i915_common_private *dev_priv_common = connector->dev->dev_private;
139 struct intel_output *intel_output = to_intel_output(connector);
140 struct intel_dvo_device *dvo = intel_output->dev_priv;
141
142 /* Each output should probably just save the registers it touches,
143 * but for now, use more overkill.
144 */
145- dev_priv->saveDVOA = I915_READ(DVOA);
146- dev_priv->saveDVOB = I915_READ(DVOB);
147- dev_priv->saveDVOC = I915_READ(DVOC);
148+ dev_priv->common.saveDVOA = I915_READ(DVOA);
149+ dev_priv->common.saveDVOB = I915_READ(DVOB);
150+ dev_priv->common.saveDVOC = I915_READ(DVOC);
151
152 dvo->dev_ops->save(dvo);
153 }
154@@ -114,14 +115,15 @@
155 static void intel_dvo_restore(struct drm_connector *connector)
156 {
157 struct drm_i915_private *dev_priv = connector->dev->dev_private;
158+ struct drm_i915_common_private *dev_priv_common = connector->dev->dev_private;
159 struct intel_output *intel_output = to_intel_output(connector);
160 struct intel_dvo_device *dvo = intel_output->dev_priv;
161
162 dvo->dev_ops->restore(dvo);
163
164- I915_WRITE(DVOA, dev_priv->saveDVOA);
165- I915_WRITE(DVOB, dev_priv->saveDVOB);
166- I915_WRITE(DVOC, dev_priv->saveDVOC);
167+ I915_WRITE(DVOA, dev_priv->common.saveDVOA);
168+ I915_WRITE(DVOB, dev_priv->common.saveDVOB);
169+ I915_WRITE(DVOC, dev_priv->common.saveDVOC);
170 }
171
172 static int intel_dvo_mode_valid(struct drm_connector *connector,
173@@ -183,7 +185,7 @@
174 struct drm_display_mode *adjusted_mode)
175 {
176 struct drm_device *dev = encoder->dev;
177- struct drm_i915_private *dev_priv = dev->dev_private;
178+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
179 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
180 struct intel_output *intel_output = enc_to_intel_output(encoder);
181 struct intel_dvo_device *dvo = intel_output->dev_priv;
182@@ -349,7 +351,7 @@
183 intel_dvo_get_current_mode (struct drm_connector *connector)
184 {
185 struct drm_device *dev = connector->dev;
186- struct drm_i915_private *dev_priv = dev->dev_private;
187+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
188 struct intel_output *intel_output = to_intel_output(connector);
189 struct intel_dvo_device *dvo = intel_output->dev_priv;
190 uint32_t dvo_reg = dvo->dvo_reg;
191Index: linux-2.6.28/drivers/gpu/drm/i915/intel_hdmi.c
192===================================================================
193--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_hdmi.c 2009-02-19 12:59:22.000000000 +0000
194+++ linux-2.6.28/drivers/gpu/drm/i915/intel_hdmi.c 2009-02-19 12:59:28.000000000 +0000
195@@ -46,7 +46,7 @@
196 struct drm_display_mode *adjusted_mode)
197 {
198 struct drm_device *dev = encoder->dev;
199- struct drm_i915_private *dev_priv = dev->dev_private;
200+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
201 struct drm_crtc *crtc = encoder->crtc;
202 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
203 struct intel_output *intel_output = enc_to_intel_output(encoder);
204@@ -71,7 +71,7 @@
205 static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
206 {
207 struct drm_device *dev = encoder->dev;
208- struct drm_i915_private *dev_priv = dev->dev_private;
209+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
210 struct intel_output *intel_output = enc_to_intel_output(encoder);
211 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
212 u32 temp;
213@@ -89,7 +89,7 @@
214 static void intel_hdmi_save(struct drm_connector *connector)
215 {
216 struct drm_device *dev = connector->dev;
217- struct drm_i915_private *dev_priv = dev->dev_private;
218+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
219 struct intel_output *intel_output = to_intel_output(connector);
220 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
221
222@@ -99,7 +99,7 @@
223 static void intel_hdmi_restore(struct drm_connector *connector)
224 {
225 struct drm_device *dev = connector->dev;
226- struct drm_i915_private *dev_priv = dev->dev_private;
227+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
228 struct intel_output *intel_output = to_intel_output(connector);
229 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
230
231@@ -132,7 +132,7 @@
232 intel_hdmi_detect(struct drm_connector *connector)
233 {
234 struct drm_device *dev = connector->dev;
235- struct drm_i915_private *dev_priv = dev->dev_private;
236+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
237 struct intel_output *intel_output = to_intel_output(connector);
238 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
239 u32 temp, bit;
240@@ -220,7 +220,7 @@
241
242 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
243 {
244- struct drm_i915_private *dev_priv = dev->dev_private;
245+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
246 struct drm_connector *connector;
247 struct intel_output *intel_output;
248 struct intel_hdmi_priv *hdmi_priv;
249Index: linux-2.6.28/drivers/gpu/drm/i915/i915_suspend.c
250===================================================================
251--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_suspend.c 2009-02-19 12:59:22.000000000 +0000
252+++ linux-2.6.28/drivers/gpu/drm/i915/i915_suspend.c 2009-02-19 12:59:28.000000000 +0000
253@@ -31,7 +31,7 @@
254
255 static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
256 {
257- struct drm_i915_private *dev_priv = dev->dev_private;
258+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
259
260 if (pipe == PIPE_A)
261 return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
262@@ -41,7 +41,7 @@
263
264 static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
265 {
266- struct drm_i915_private *dev_priv = dev->dev_private;
267+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
268 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
269 u32 *array;
270 int i;
271@@ -50,9 +50,9 @@
272 return;
273
274 if (pipe == PIPE_A)
275- array = dev_priv->save_palette_a;
276+ array = dev_priv_common->save_palette_a;
277 else
278- array = dev_priv->save_palette_b;
279+ array = dev_priv_common->save_palette_b;
280
281 for(i = 0; i < 256; i++)
282 array[i] = I915_READ(reg + (i << 2));
283@@ -60,7 +60,7 @@
284
285 static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
286 {
287- struct drm_i915_private *dev_priv = dev->dev_private;
288+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
289 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
290 u32 *array;
291 int i;
292@@ -69,9 +69,9 @@
293 return;
294
295 if (pipe == PIPE_A)
296- array = dev_priv->save_palette_a;
297+ array = dev_priv_common->save_palette_a;
298 else
299- array = dev_priv->save_palette_b;
300+ array = dev_priv_common->save_palette_b;
301
302 for(i = 0; i < 256; i++)
303 I915_WRITE(reg + (i << 2), array[i]);
304@@ -79,7 +79,7 @@
305
306 static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
307 {
308- struct drm_i915_private *dev_priv = dev->dev_private;
309+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
310
311 I915_WRITE8(index_port, reg);
312 return I915_READ8(data_port);
313@@ -87,7 +87,7 @@
314
315 static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
316 {
317- struct drm_i915_private *dev_priv = dev->dev_private;
318+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
319
320 I915_READ8(st01);
321 I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
322@@ -96,7 +96,7 @@
323
324 static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
325 {
326- struct drm_i915_private *dev_priv = dev->dev_private;
327+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
328
329 I915_READ8(st01);
330 I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
331@@ -105,7 +105,7 @@
332
333 static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
334 {
335- struct drm_i915_private *dev_priv = dev->dev_private;
336+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
337
338 I915_WRITE8(index_port, reg);
339 I915_WRITE8(data_port, val);
340@@ -113,7 +113,8 @@
341
342 static void i915_save_vga(struct drm_device *dev)
343 {
344- struct drm_i915_private *dev_priv = dev->dev_private;
345+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
346+ struct drm_i915_common_private *dev_priv = dev->dev_private;
347 int i;
348 u16 cr_index, cr_data, st01;
349
350@@ -176,7 +177,8 @@
351
352 static void i915_restore_vga(struct drm_device *dev)
353 {
354- struct drm_i915_private *dev_priv = dev->dev_private;
355+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
356+ struct drm_i915_common_private *dev_priv = dev->dev_private;
357 int i;
358 u16 cr_index, cr_data, st01;
359
360@@ -235,7 +237,8 @@
361
362 int i915_save_state(struct drm_device *dev)
363 {
364- struct drm_i915_private *dev_priv = dev->dev_private;
365+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
366+ struct drm_i915_common_private *dev_priv = dev->dev_private;
367 int i;
368
369 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
370@@ -367,7 +370,8 @@
371
372 int i915_restore_state(struct drm_device *dev)
373 {
374- struct drm_i915_private *dev_priv = dev->dev_private;
375+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
376+ struct drm_i915_common_private *dev_priv = dev->dev_private;
377 int i;
378
379 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
380Index: linux-2.6.28/drivers/gpu/drm/i915/i915_opregion.c
381===================================================================
382--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_opregion.c 2009-02-19 12:59:22.000000000 +0000
383+++ linux-2.6.28/drivers/gpu/drm/i915/i915_opregion.c 2009-02-19 12:59:28.000000000 +0000
384@@ -139,6 +139,7 @@
385 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
386 {
387 struct drm_i915_private *dev_priv = dev->dev_private;
388+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
389 struct opregion_asle *asle = dev_priv->opregion.asle;
390 u32 blc_pwm_ctl, blc_pwm_ctl2;
391
392@@ -172,7 +173,8 @@
393
394 static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
395 {
396- struct drm_i915_private *dev_priv = dev->dev_private;
397+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
398+
399 if (pfmb & ASLE_PFMB_PWM_VALID) {
400 u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
401 u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
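
Note on the conversion pattern above: asle_set_backlight keeps dev_priv (for the opregion state) and gains dev_priv_common (for register access), while asle_set_pwm_freq needs only the common view. Both locals name the same allocation, since the common structure is placed at the start of the chip-specific one later in this patch. A minimal sketch of that aliasing, assuming the layouts this patch introduces (the function name is illustrative only):

	static void i915_private_views_sketch(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		struct drm_i915_common_private *dev_priv_common = dev->dev_private;

		/* Identical addresses: "common" is the first member of
		 * drm_i915_private, so the two views coincide. */
		WARN_ON((void *)dev_priv_common != (void *)&dev_priv->common);
	}
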
402Index: linux-2.6.28/drivers/gpu/drm/i915/i915_gem.c
403===================================================================
404--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_gem.c 2009-02-19 12:59:22.000000000 +0000
405+++ linux-2.6.28/drivers/gpu/drm/i915/i915_gem.c 2009-02-19 12:59:28.000000000 +0000
406@@ -877,6 +877,7 @@
407 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
408 {
409 drm_i915_private_t *dev_priv = dev->dev_private;
410+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
411 struct drm_i915_gem_request *request;
412 uint32_t seqno;
413 int was_empty;
414@@ -942,6 +943,7 @@
415 static uint32_t
416 i915_retire_commands(struct drm_device *dev)
417 {
418+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
419 drm_i915_private_t *dev_priv = dev->dev_private;
420 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
421 uint32_t flush_domains = 0;
422@@ -1049,12 +1051,14 @@
423 void
424 i915_gem_retire_work_handler(struct work_struct *work)
425 {
426+ struct drm_i915_common_private *dev_priv_common;
427 drm_i915_private_t *dev_priv;
428 struct drm_device *dev;
429
430 dev_priv = container_of(work, drm_i915_private_t,
431 mm.retire_work.work);
432 dev = dev_priv->dev;
433+ dev_priv_common = dev->dev_private;
434
435 mutex_lock(&dev->struct_mutex);
436 i915_gem_retire_requests(dev);
437@@ -1109,6 +1113,7 @@
438 uint32_t invalidate_domains,
439 uint32_t flush_domains)
440 {
441+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
442 drm_i915_private_t *dev_priv = dev->dev_private;
443 uint32_t cmd;
444 RING_LOCALS;
445@@ -1422,7 +1427,7 @@
446 {
447 struct drm_gem_object *obj = reg->obj;
448 struct drm_device *dev = obj->dev;
449- drm_i915_private_t *dev_priv = dev->dev_private;
450+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
451 struct drm_i915_gem_object *obj_priv = obj->driver_private;
452 int regnum = obj_priv->fence_reg;
453 uint64_t val;
454@@ -1442,8 +1447,8 @@
455 {
456 struct drm_gem_object *obj = reg->obj;
457 struct drm_device *dev = obj->dev;
458- drm_i915_private_t *dev_priv = dev->dev_private;
459 struct drm_i915_gem_object *obj_priv = obj->driver_private;
460+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
461 int regnum = obj_priv->fence_reg;
462 uint32_t val;
463 uint32_t pitch_val;
464@@ -1475,7 +1480,7 @@
465 {
466 struct drm_gem_object *obj = reg->obj;
467 struct drm_device *dev = obj->dev;
468- drm_i915_private_t *dev_priv = dev->dev_private;
469+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
470 struct drm_i915_gem_object *obj_priv = obj->driver_private;
471 int regnum = obj_priv->fence_reg;
472 uint32_t val;
473@@ -1605,6 +1610,7 @@
474 {
475 struct drm_device *dev = obj->dev;
476 drm_i915_private_t *dev_priv = dev->dev_private;
477+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
478 struct drm_i915_gem_object *obj_priv = obj->driver_private;
479
480 if (IS_I965G(dev))
481@@ -2327,6 +2333,7 @@
482 uint64_t exec_offset)
483 {
484 drm_i915_private_t *dev_priv = dev->dev_private;
485+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
486 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
487 (uintptr_t) exec->cliprects_ptr;
488 int nbox = exec->num_cliprects;
489@@ -3035,6 +3042,7 @@
490 i915_gem_init_hws(struct drm_device *dev)
491 {
492 drm_i915_private_t *dev_priv = dev->dev_private;
493+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
494 struct drm_gem_object *obj;
495 struct drm_i915_gem_object *obj_priv;
496 int ret;
497@@ -3081,6 +3089,7 @@
498 i915_gem_init_ringbuffer(struct drm_device *dev)
499 {
500 drm_i915_private_t *dev_priv = dev->dev_private;
501+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
502 struct drm_gem_object *obj;
503 struct drm_i915_gem_object *obj_priv;
504 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
505@@ -3186,6 +3195,7 @@
506 void
507 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
508 {
509+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
510 drm_i915_private_t *dev_priv = dev->dev_private;
511
512 if (dev_priv->ring.ring_obj == NULL)
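
One hunk above differs from the rest: i915_gem_retire_work_handler has no struct drm_device argument, so it recovers the private structure from the embedded work item first and only then derives the common view. Reduced to its control flow (a sketch of the patched function; locking and the retire logic are elided):

	void i915_gem_retire_work_handler(struct work_struct *work)
	{
		struct drm_i915_common_private *dev_priv_common;
		drm_i915_private_t *dev_priv;
		struct drm_device *dev;

		/* work is embedded in dev_priv->mm.retire_work, so step
		 * back to the containing structure before anything else. */
		dev_priv = container_of(work, drm_i915_private_t,
					mm.retire_work.work);
		dev = dev_priv->dev;
		dev_priv_common = dev->dev_private;

		/* ... i915_gem_retire_requests(dev) under dev->struct_mutex ... */
	}
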
513Index: linux-2.6.28/drivers/gpu/drm/i915/i915_gem_proc.c
514===================================================================
515--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_gem_proc.c 2009-02-19 12:59:22.000000000 +0000
516+++ linux-2.6.28/drivers/gpu/drm/i915/i915_gem_proc.c 2009-02-19 12:59:28.000000000 +0000
517@@ -213,6 +213,7 @@
518 struct drm_minor *minor = (struct drm_minor *) data;
519 struct drm_device *dev = minor->dev;
520 drm_i915_private_t *dev_priv = dev->dev_private;
521+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
522 int len = 0;
523
524 if (offset > DRM_PROC_LIMIT) {
525Index: linux-2.6.28/drivers/gpu/drm/i915/i915_gem_tiling.c
526===================================================================
527--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_gem_tiling.c 2009-02-19 12:59:22.000000000 +0000
528+++ linux-2.6.28/drivers/gpu/drm/i915/i915_gem_tiling.c 2009-02-19 12:59:28.000000000 +0000
529@@ -87,6 +87,7 @@
530 i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
531 {
532 drm_i915_private_t *dev_priv = dev->dev_private;
533+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
534 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
535 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
536
537Index: linux-2.6.28/drivers/gpu/drm/i915/i915_irq.c
538===================================================================
539--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_irq.c 2009-02-19 12:59:22.000000000 +0000
540+++ linux-2.6.28/drivers/gpu/drm/i915/i915_irq.c 2009-02-20 14:53:08.000000000 +0000
541@@ -64,6 +64,8 @@
542 void
543 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
544 {
545+ struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
546+
547 if ((dev_priv->irq_mask_reg & mask) != 0) {
548 dev_priv->irq_mask_reg &= ~mask;
549 I915_WRITE(IMR, dev_priv->irq_mask_reg);
550@@ -74,6 +76,8 @@
551 static inline void
552 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
553 {
554+ struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
555+
556 if ((dev_priv->irq_mask_reg & mask) != mask) {
557 dev_priv->irq_mask_reg |= mask;
558 I915_WRITE(IMR, dev_priv->irq_mask_reg);
559@@ -94,6 +98,8 @@
560 void
561 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
562 {
563+ struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
564+
565 if ((dev_priv->pipestat[pipe] & mask) != mask) {
566 u32 reg = i915_pipestat(pipe);
567
568@@ -107,6 +113,8 @@
569 void
570 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
571 {
572+ struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
573+
574 if ((dev_priv->pipestat[pipe] & mask) != 0) {
575 u32 reg = i915_pipestat(pipe);
576
577@@ -128,7 +136,7 @@
578 static int
579 i915_pipe_enabled(struct drm_device *dev, int pipe)
580 {
581- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
582+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
583 unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
584
585 if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
586@@ -142,7 +150,7 @@
587 */
588 u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
589 {
590- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
591+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
592 unsigned long high_frame;
593 unsigned long low_frame;
594 u32 high1, high2, low, count;
595@@ -178,6 +186,7 @@
596 {
597 struct drm_device *dev = (struct drm_device *) arg;
598 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
599+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
600 struct drm_i915_master_private *master_priv;
601 u32 iir, new_iir;
602 u32 pipea_stats, pipeb_stats;
603@@ -284,6 +293,7 @@
604 static int i915_emit_irq(struct drm_device * dev)
605 {
606 drm_i915_private_t *dev_priv = dev->dev_private;
607+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
608 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
609 RING_LOCALS;
610
611@@ -409,6 +419,7 @@
612 */
613 int i915_enable_vblank(struct drm_device *dev, int pipe)
614 {
615+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
616 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
617 unsigned long irqflags;
618 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
619@@ -510,6 +521,7 @@
620 */
621 void i915_driver_irq_preinstall(struct drm_device * dev)
622 {
623+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
625
626 atomic_set(&dev_priv->irq_received, 0);
627@@ -554,6 +566,7 @@
628
629 void i915_driver_irq_uninstall(struct drm_device * dev)
630 {
631+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
632 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
633
634 if (!dev_priv)
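
The irq helpers above take drm_i915_private_t directly, so they reach the common view with a plain cast rather than through dev->dev_private. That cast is well defined only while common remains the first member (a pointer to a structure also points to its initial member in C). A compile-time guard in that spirit, as a sketch; the helper and the BUILD_BUG_ON are assumptions, not part of this patch:

	static inline struct drm_i915_common_private *
	to_common(drm_i915_private_t *dev_priv)
	{
		/* Breaks the build if "common" ever stops being first. */
		BUILD_BUG_ON(offsetof(struct drm_i915_private, common) != 0);
		return (struct drm_i915_common_private *)dev_priv;
	}
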
635Index: linux-2.6.28/drivers/gpu/drm/i915/Makefile
636===================================================================
637--- linux-2.6.28.orig/drivers/gpu/drm/i915/Makefile 2009-02-19 12:59:23.000000000 +0000
638+++ linux-2.6.28/drivers/gpu/drm/i915/Makefile 2009-02-19 12:59:28.000000000 +0000
639@@ -9,24 +9,29 @@
640 i915_gem_debug.o \
641 i915_gem_proc.o \
642 i915_gem_tiling.o \
643- intel_display.o \
644- intel_crt.o \
645- intel_lvds.o \
646 intel_bios.o \
647- intel_hdmi.o \
648- intel_sdvo.o \
649- intel_modes.o \
650- intel_i2c.o \
651 intel_fb.o \
652 intel_tv.o \
653+
654+intel_gfx_common-y := \
655+ intel_display.o \
656+ intel_modes.o \
657+ intel_i2c.o \
658+ intel_crt.o \
659 intel_dvo.o \
660+ intel_hdmi.o \
661+ intel_lvds.o \
662+ intel_sdvo.o \
663 dvo_ch7xxx.o \
664 dvo_ch7017.o \
665 dvo_ivch.o \
666 dvo_tfp410.o \
667 dvo_sil164.o
668
669+
670 i915-$(CONFIG_ACPI) += i915_opregion.o
671 i915-$(CONFIG_COMPAT) += i915_ioc32.o
672
673 obj-$(CONFIG_DRM_I915) += i915.o
674+
675+obj-$(CONFIG_DRM_INTEL_COMMON) += intel_gfx_common.o
676Index: linux-2.6.28/drivers/gpu/drm/i915/i915_common.h
677===================================================================
678--- /dev/null 1970-01-01 00:00:00.000000000 +0000
679+++ linux-2.6.28/drivers/gpu/drm/i915/i915_common.h 2009-02-20 14:49:42.000000000 +0000
680@@ -0,0 +1,184 @@
681+/*
682+ *
683+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
684+ * All Rights Reserved.
685+ *
686+ * Permission is hereby granted, free of charge, to any person obtaining a
687+ * copy of this software and associated documentation files (the
688+ * "Software"), to deal in the Software without restriction, including
689+ * without limitation the rights to use, copy, modify, merge, publish,
690+ * distribute, sub license, and/or sell copies of the Software, and to
691+ * permit persons to whom the Software is furnished to do so, subject to
692+ * the following conditions:
693+ *
694+ * The above copyright notice and this permission notice (including the
695+ * next paragraph) shall be included in all copies or substantial portions
696+ * of the Software.
697+ *
698+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
699+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
700+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
701+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
702+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
703+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
704+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
705+ *
706+ */
707+
708+#ifndef _I915_COMMON_H_
709+#define _I915_COMMON_H_
710+
711+typedef struct drm_i915_common_private {
712+ //struct drm_device *dev;
713+
714+ void __iomem *regs;
715+
716+ //drm_dma_handle_t *status_page_dmah;
717+ //void *hw_status_page;
718+ //dma_addr_t dma_status_page;
719+ //uint32_t counter;
720+ //unsigned int status_gfx_addr;
721+ //drm_local_map_t hws_map;
722+ //struct drm_gem_object *hws_obj;
723+
724+ //unsigned int cpp;
725+ //int back_offset;
726+ //int front_offset;
727+ //int current_page;
728+ //int page_flipping;
729+
730+ //wait_queue_head_t irq_queue;
731+ //atomic_t irq_received;
732+ /** Protects user_irq_refcount and irq_mask_reg */
733+ //spinlock_t user_irq_lock;
734+ /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
735+ //int user_irq_refcount;
736+ /** Cached value of IMR to avoid reads in updating the bitfield */
737+ //u32 irq_mask_reg;
738+ //u32 pipestat[2];
739+
740+ //int tex_lru_log_granularity;
741+ //int allow_batchbuffer;
742+ //struct mem_block *agp_heap;
743+ //unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
744+ //int vblank_pipe;
745+
746+ //bool cursor_needs_physical;
747+
748+ //struct drm_mm vram;
749+
750+ //int irq_enabled;
751+
752+ /* LVDS info */
753+ int backlight_duty_cycle; /* restore backlight to this value */
754+ bool panel_wants_dither;
755+ struct drm_display_mode *panel_fixed_mode;
756+ //struct drm_display_mode *vbt_mode; /* if any */
757+
758+ /* Feature bits from the VBIOS */
759+ //unsigned int int_tv_support:1;
760+ //unsigned int lvds_dither:1;
761+ //unsigned int lvds_vbt:1;
762+ //unsigned int int_crt_support:1;
763+
764+ //int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
765+ //int num_fence_regs; /* 8 on pre-965, 16 otherwise */
766+
767+ /* Register state */
768+ u8 saveLBB;
769+ u32 saveDSPACNTR;
770+ u32 saveDSPBCNTR;
771+ u32 saveDSPARB;
772+ u32 saveRENDERSTANDBY;
773+ u32 saveHWS;
774+ u32 savePIPEACONF;
775+ u32 savePIPEBCONF;
776+ u32 savePIPEASRC;
777+ u32 savePIPEBSRC;
778+ u32 saveFPA0;
779+ u32 saveFPA1;
780+ u32 saveDPLL_A;
781+ u32 saveDPLL_A_MD;
782+ u32 saveHTOTAL_A;
783+ u32 saveHBLANK_A;
784+ u32 saveHSYNC_A;
785+ u32 saveVTOTAL_A;
786+ u32 saveVBLANK_A;
787+ u32 saveVSYNC_A;
788+ u32 saveBCLRPAT_A;
789+ u32 savePIPEASTAT;
790+ u32 saveDSPASTRIDE;
791+ u32 saveDSPASIZE;
792+ u32 saveDSPAPOS;
793+ u32 saveDSPAADDR;
794+ u32 saveDSPASURF;
795+ u32 saveDSPATILEOFF;
796+ u32 savePFIT_PGM_RATIOS;
797+ u32 saveBLC_PWM_CTL;
798+ u32 saveBLC_PWM_CTL2;
799+ u32 saveFPB0;
800+ u32 saveFPB1;
801+ u32 saveDPLL_B;
802+ u32 saveDPLL_B_MD;
803+ u32 saveHTOTAL_B;
804+ u32 saveHBLANK_B;
805+ u32 saveHSYNC_B;
806+ u32 saveVTOTAL_B;
807+ u32 saveVBLANK_B;
808+ u32 saveVSYNC_B;
809+ u32 saveBCLRPAT_B;
810+ u32 savePIPEBSTAT;
811+ u32 saveDSPBSTRIDE;
812+ u32 saveDSPBSIZE;
813+ u32 saveDSPBPOS;
814+ u32 saveDSPBADDR;
815+ u32 saveDSPBSURF;
816+ u32 saveDSPBTILEOFF;
817+ u32 saveVGA0;
818+ u32 saveVGA1;
819+ u32 saveVGA_PD;
820+ u32 saveVGACNTRL;
821+ u32 saveADPA;
822+ u32 saveLVDS;
823+ u32 savePP_ON_DELAYS;
824+ u32 savePP_OFF_DELAYS;
825+ u32 saveDVOA;
826+ u32 saveDVOB;
827+ u32 saveDVOC;
828+ u32 savePP_ON;
829+ u32 savePP_OFF;
830+ u32 savePP_CONTROL;
831+ u32 savePP_DIVISOR;
832+ u32 savePFIT_CONTROL;
833+ u32 save_palette_a[256];
834+ u32 save_palette_b[256];
835+ u32 saveFBC_CFB_BASE;
836+ u32 saveFBC_LL_BASE;
837+ u32 saveFBC_CONTROL;
838+ u32 saveFBC_CONTROL2;
839+ u32 saveIER;
840+ u32 saveIIR;
841+ u32 saveIMR;
842+ u32 saveCACHE_MODE_0;
843+ u32 saveD_STATE;
844+ u32 saveCG_2D_DIS;
845+ u32 saveMI_ARB_STATE;
846+ u32 saveSWF0[16];
847+ u32 saveSWF1[16];
848+ u32 saveSWF2[3];
849+ u8 saveMSR;
850+ u8 saveSR[8];
851+ u8 saveGR[25];
852+ u8 saveAR_INDEX;
853+ u8 saveAR[21];
854+ u8 saveDACMASK;
855+ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
856+ u8 saveCR[37];
857+} drm_i915_common_private_t;
858+
859+struct drm_i915_master_private {
860+ drm_local_map_t *sarea;
861+ struct _drm_i915_sarea *sarea_priv;
862+};
863+
864+#endif
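
This header exists so that drm_i915_common_private can head more than one chip-specific private structure: the i915 side is converted in the i915_drv.h hunks below, and a Poulsbo/PSB driver is the other intended user. A hypothetical second embedder, to show the shape the layout comment assumes (drm_psb_private is illustrative and defined nowhere in this patch):

	#include "i915_common.h"

	struct drm_psb_private {
		/* Must stay first so shared code may treat
		 * dev->dev_private as a struct drm_i915_common_private. */
		struct drm_i915_common_private common;

		/* ... PSB-specific state would follow ... */
	};
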
865Index: linux-2.6.28/drivers/gpu/drm/i915/i915_drv.h
866===================================================================
867--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_drv.h 2009-02-19 12:59:23.000000000 +0000
868+++ linux-2.6.28/drivers/gpu/drm/i915/i915_drv.h 2009-02-19 16:30:19.000000000 +0000
869@@ -32,6 +32,7 @@
870
871 #include "i915_reg.h"
872 #include "intel_bios.h"
873+#include "i915_common.h"
874 #include <linux/io-mapping.h>
875
876 /* General customization:
877@@ -116,10 +117,6 @@
878 int enabled;
879 };
880
881-struct drm_i915_master_private {
882- drm_local_map_t *sarea;
883- struct _drm_i915_sarea *sarea_priv;
884-};
885 #define I915_FENCE_REG_NONE -1
886
887 struct drm_i915_fence_reg {
888@@ -127,12 +124,15 @@
889 };
890
891 typedef struct drm_i915_private {
892- struct drm_device *dev;
893+ /* common is assumed to be the first item in this structure */
894+ struct drm_i915_common_private common;
895
896- void __iomem *regs;
897-
898- drm_i915_ring_buffer_t ring;
899+ struct drm_device *dev;
900
901+ //void __iomem *regs;
902+
903+ drm_i915_ring_buffer_t ring;
904+
905 drm_dma_handle_t *status_page_dmah;
906 void *hw_status_page;
907 dma_addr_t dma_status_page;
908@@ -169,12 +169,12 @@
909
910 int irq_enabled;
911
912- struct intel_opregion opregion;
913-
914+ struct intel_opregion opregion;
915+
916 /* LVDS info */
917- int backlight_duty_cycle; /* restore backlight to this value */
918- bool panel_wants_dither;
919- struct drm_display_mode *panel_fixed_mode;
920+ //int backlight_duty_cycle; /* restore backlight to this value */
921+ //bool panel_wants_dither;
922+ //struct drm_display_mode *panel_fixed_mode;
923 struct drm_display_mode *vbt_mode; /* if any */
924
925 /* Feature bits from the VBIOS */
926@@ -183,101 +183,10 @@
927 unsigned int lvds_vbt:1;
928 unsigned int int_crt_support:1;
929
930- struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
931+ struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
932 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
933 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
934
935- /* Register state */
936- u8 saveLBB;
937- u32 saveDSPACNTR;
938- u32 saveDSPBCNTR;
939- u32 saveDSPARB;
940- u32 saveRENDERSTANDBY;
941- u32 saveHWS;
942- u32 savePIPEACONF;
943- u32 savePIPEBCONF;
944- u32 savePIPEASRC;
945- u32 savePIPEBSRC;
946- u32 saveFPA0;
947- u32 saveFPA1;
948- u32 saveDPLL_A;
949- u32 saveDPLL_A_MD;
950- u32 saveHTOTAL_A;
951- u32 saveHBLANK_A;
952- u32 saveHSYNC_A;
953- u32 saveVTOTAL_A;
954- u32 saveVBLANK_A;
955- u32 saveVSYNC_A;
956- u32 saveBCLRPAT_A;
957- u32 savePIPEASTAT;
958- u32 saveDSPASTRIDE;
959- u32 saveDSPASIZE;
960- u32 saveDSPAPOS;
961- u32 saveDSPAADDR;
962- u32 saveDSPASURF;
963- u32 saveDSPATILEOFF;
964- u32 savePFIT_PGM_RATIOS;
965- u32 saveBLC_PWM_CTL;
966- u32 saveBLC_PWM_CTL2;
967- u32 saveFPB0;
968- u32 saveFPB1;
969- u32 saveDPLL_B;
970- u32 saveDPLL_B_MD;
971- u32 saveHTOTAL_B;
972- u32 saveHBLANK_B;
973- u32 saveHSYNC_B;
974- u32 saveVTOTAL_B;
975- u32 saveVBLANK_B;
976- u32 saveVSYNC_B;
977- u32 saveBCLRPAT_B;
978- u32 savePIPEBSTAT;
979- u32 saveDSPBSTRIDE;
980- u32 saveDSPBSIZE;
981- u32 saveDSPBPOS;
982- u32 saveDSPBADDR;
983- u32 saveDSPBSURF;
984- u32 saveDSPBTILEOFF;
985- u32 saveVGA0;
986- u32 saveVGA1;
987- u32 saveVGA_PD;
988- u32 saveVGACNTRL;
989- u32 saveADPA;
990- u32 saveLVDS;
991- u32 savePP_ON_DELAYS;
992- u32 savePP_OFF_DELAYS;
993- u32 saveDVOA;
994- u32 saveDVOB;
995- u32 saveDVOC;
996- u32 savePP_ON;
997- u32 savePP_OFF;
998- u32 savePP_CONTROL;
999- u32 savePP_DIVISOR;
1000- u32 savePFIT_CONTROL;
1001- u32 save_palette_a[256];
1002- u32 save_palette_b[256];
1003- u32 saveFBC_CFB_BASE;
1004- u32 saveFBC_LL_BASE;
1005- u32 saveFBC_CONTROL;
1006- u32 saveFBC_CONTROL2;
1007- u32 saveIER;
1008- u32 saveIIR;
1009- u32 saveIMR;
1010- u32 saveCACHE_MODE_0;
1011- u32 saveD_STATE;
1012- u32 saveCG_2D_DIS;
1013- u32 saveMI_ARB_STATE;
1014- u32 saveSWF0[16];
1015- u32 saveSWF1[16];
1016- u32 saveSWF2[3];
1017- u8 saveMSR;
1018- u8 saveSR[8];
1019- u8 saveGR[25];
1020- u8 saveAR_INDEX;
1021- u8 saveAR[21];
1022- u8 saveDACMASK;
1023- u8 saveDACDATA[256*3]; /* 256 3-byte colors */
1024- u8 saveCR[37];
1025-
1026 struct {
1027 struct drm_mm gtt_space;
1028
1029@@ -672,17 +581,18 @@
1030 LOCK_TEST_WITH_RETURN(dev, file_priv); \
1031 } while (0)
1032
1033-#define I915_READ(reg) readl(dev_priv->regs + (reg))
1034-#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg))
1035-#define I915_READ16(reg) readw(dev_priv->regs + (reg))
1036-#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
1037-#define I915_READ8(reg) readb(dev_priv->regs + (reg))
1038-#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
1039+
1040+#define I915_READ(reg) readl(dev_priv_common->regs + (reg))
1041+#define I915_WRITE(reg, val) writel(val, dev_priv_common->regs + (reg))
1042+#define I915_READ16(reg) readw(dev_priv_common->regs + (reg))
1043+#define I915_WRITE16(reg, val) writel(val, dev_priv_common->regs + (reg))
1044+#define I915_READ8(reg) readb(dev_priv_common->regs + (reg))
1045+#define I915_WRITE8(reg, val) writeb(val, dev_priv_common->regs + (reg))
1046 #ifdef writeq
1047-#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
1048+#define I915_WRITE64(reg, val) writeq(val, dev_priv_common->regs + (reg))
1049 #else
1050-#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \
1051- writel(upper_32_bits(val), dev_priv->regs + \
1052+#define I915_WRITE64(reg, val) (writel(val, dev_priv_common->regs + (reg)), \
1053+ writel(upper_32_bits(val), dev_priv_common->regs + \
1054 (reg) + 4))
1055 #endif
1056 #define POSTING_READ(reg) (void)I915_READ(reg)
1057@@ -776,10 +686,15 @@
1058 (dev)->pci_device == 0x29D2)
1059
1060 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
1061- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
1062+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
1063+ IS_POULSBO(dev))
1064+
1065+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
1066+ ((dev)->pci_device == 0x8109))
1067
1068 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
1069- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
1070+ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
1071+ IS_POULSBO(dev))
1072
1073 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
1074 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev))
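
The rewritten register macros are deliberately unhygienic: they expand to the fixed identifier dev_priv_common, which is why this patch adds otherwise unused-looking locals of that name throughout. A minimal sketch of an access under the new macros (the function is illustrative; IIR comes from i915_reg.h):

	static u32 example_read_iir(struct drm_device *dev)
	{
		/* Required: the I915_READ expansion references this name. */
		struct drm_i915_common_private *dev_priv_common = dev->dev_private;

		return I915_READ(IIR);
	}
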
1075Index: linux-2.6.28/drivers/gpu/drm/i915/intel_display.c
1076===================================================================
1077--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_display.c 2009-02-19 12:59:23.000000000 +0000
1078+++ linux-2.6.28/drivers/gpu/drm/i915/intel_display.c 2009-02-20 14:53:08.000000000 +0000
1079@@ -282,7 +282,7 @@
1080 int refclk, intel_clock_t *best_clock)
1081 {
1082 struct drm_device *dev = crtc->dev;
1083- struct drm_i915_private *dev_priv = dev->dev_private;
1084+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1085 intel_clock_t clock;
1086 const intel_limit_t *limit = intel_limit(crtc);
1087 int err = target;
1088@@ -475,7 +475,7 @@
1089 {
1090 struct drm_device *dev = crtc->dev;
1091 struct drm_i915_master_private *master_priv;
1092- struct drm_i915_private *dev_priv = dev->dev_private;
1093+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1094 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1095 int pipe = intel_crtc->pipe;
1096 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
1097@@ -613,6 +613,7 @@
1098 /* lvds has its own version of prepare see intel_lvds_prepare */
1099 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1100 }
1101+EXPORT_SYMBOL(intel_encoder_prepare);
1102
1103 void intel_encoder_commit (struct drm_encoder *encoder)
1104 {
1105@@ -620,6 +621,7 @@
1106 /* lvds has its own version of commit see intel_lvds_commit */
1107 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
1108 }
1109+EXPORT_SYMBOL(intel_encoder_commit);
1110
1111 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
1112 struct drm_display_mode *mode,
1113@@ -687,7 +689,7 @@
1114 */
1115 static int intel_panel_fitter_pipe (struct drm_device *dev)
1116 {
1117- struct drm_i915_private *dev_priv = dev->dev_private;
1118+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1119 u32 pfit_control;
1120
1121 /* i830 doesn't have a panel fitter */
1122@@ -715,7 +717,7 @@
1123 struct drm_framebuffer *old_fb)
1124 {
1125 struct drm_device *dev = crtc->dev;
1126- struct drm_i915_private *dev_priv = dev->dev_private;
1127+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1128 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1129 int pipe = intel_crtc->pipe;
1130 int fp_reg = (pipe == 0) ? FPA0 : FPB0;
1131@@ -980,7 +982,7 @@
1132 uint32_t width, uint32_t height)
1133 {
1134 struct drm_device *dev = crtc->dev;
1135- struct drm_i915_private *dev_priv = dev->dev_private;
1136+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1137 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1138 struct drm_gem_object *bo;
1139 struct drm_i915_gem_object *obj_priv;
1140@@ -1071,7 +1073,7 @@
1141 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1142 {
1143 struct drm_device *dev = crtc->dev;
1144- struct drm_i915_private *dev_priv = dev->dev_private;
1145+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1146 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1147 int pipe = intel_crtc->pipe;
1148 uint32_t temp = 0;
1149@@ -1106,6 +1108,7 @@
1150 intel_crtc->lut_g[regno] = green >> 8;
1151 intel_crtc->lut_b[regno] = blue >> 8;
1152 }
1153+EXPORT_SYMBOL(intel_crtc_fb_gamma_set);
1154
1155 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
1156 u16 *blue, uint32_t size)
1157@@ -1228,6 +1231,7 @@
1158
1159 return crtc;
1160 }
1161+EXPORT_SYMBOL(intel_get_load_detect_pipe);
1162
1163 void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
1164 {
1165@@ -1251,11 +1255,12 @@
1166 crtc_funcs->dpms(crtc, dpms_mode);
1167 }
1168 }
1169+EXPORT_SYMBOL(intel_release_load_detect_pipe);
1170
1171 /* Returns the clock of the currently programmed mode of the given pipe. */
1172 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1173 {
1174- struct drm_i915_private *dev_priv = dev->dev_private;
1175+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1176 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1177 int pipe = intel_crtc->pipe;
1178 u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
1179@@ -1333,7 +1338,7 @@
1180 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
1181 struct drm_crtc *crtc)
1182 {
1183- struct drm_i915_private *dev_priv = dev->dev_private;
1184+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1185 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1186 int pipe = intel_crtc->pipe;
1187 struct drm_display_mode *mode;
1188@@ -1361,6 +1366,7 @@
1189
1190 return mode;
1191 }
1192+EXPORT_SYMBOL(intel_crtc_mode_get);
1193
1194 static void intel_crtc_destroy(struct drm_crtc *crtc)
1195 {
1196@@ -1415,11 +1421,6 @@
1197 intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1);
1198 intel_crtc->mode_set.num_connectors = 0;
1199
1200- if (i915_fbpercrtc) {
1201-
1202-
1203-
1204- }
1205 }
1206
1207 struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
1208@@ -1433,6 +1434,7 @@
1209 }
1210 return crtc;
1211 }
1212+EXPORT_SYMBOL(intel_get_crtc_from_pipe);
1213
1214 static int intel_connector_clones(struct drm_device *dev, int type_mask)
1215 {
1216@@ -1575,7 +1577,7 @@
1217
1218 return 0;
1219 }
1220-
1221+EXPORT_SYMBOL(intel_framebuffer_create);
1222
1223 static struct drm_framebuffer *
1224 intel_user_framebuffer_create(struct drm_device *dev,
1225@@ -1643,12 +1645,13 @@
1226
1227 intel_setup_outputs(dev);
1228 }
1229+EXPORT_SYMBOL(intel_modeset_init);
1230
1231 void intel_modeset_cleanup(struct drm_device *dev)
1232 {
1233 drm_mode_config_cleanup(dev);
1234 }
1235-
1236+EXPORT_SYMBOL(intel_modeset_cleanup);
1237
1238 /* current intel driver doesn't take advantage of encoders
1239 always give back the encoder for the connector
1240@@ -1659,3 +1662,5 @@
1241
1242 return &intel_output->enc;
1243 }
1244+EXPORT_SYMBOL(intel_best_encoder);
1245+
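
The EXPORT_SYMBOL additions above are what let intel_display.o move into the separate intel_gfx_common module (see the Makefile hunk earlier) while i915.o, and eventually a PSB module, keep calling in. A hypothetical out-of-file caller needs only the declaration plus the export:

	/* In another module; the wrapper name is illustrative only. */
	static struct drm_crtc *example_lookup_pipe_a(struct drm_device *dev)
	{
		/* Resolved against the EXPORT_SYMBOL at module link time. */
		return intel_get_crtc_from_pipe(dev, 0);
	}
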
1246Index: linux-2.6.28/drivers/gpu/drm/i915/intel_crt.c
1247===================================================================
1248--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_crt.c 2009-02-19 12:59:23.000000000 +0000
1249+++ linux-2.6.28/drivers/gpu/drm/i915/intel_crt.c 2009-02-20 14:53:08.000000000 +0000
1250@@ -36,7 +36,7 @@
1251 static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
1252 {
1253 struct drm_device *dev = encoder->dev;
1254- struct drm_i915_private *dev_priv = dev->dev_private;
1255+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1256 u32 temp;
1257
1258 temp = I915_READ(ADPA);
1259@@ -88,7 +88,7 @@
1260 struct drm_device *dev = encoder->dev;
1261 struct drm_crtc *crtc = encoder->crtc;
1262 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1263- struct drm_i915_private *dev_priv = dev->dev_private;
1264+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1265 int dpll_md_reg;
1266 u32 adpa, dpll_md;
1267
1268@@ -132,7 +132,7 @@
1269 static bool intel_crt_detect_hotplug(struct drm_connector *connector)
1270 {
1271 struct drm_device *dev = connector->dev;
1272- struct drm_i915_private *dev_priv = dev->dev_private;
1273+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1274 u32 temp;
1275
1276 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
1277Index: linux-2.6.28/drivers/gpu/drm/i915/i915_dma.c
1278===================================================================
1279--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_dma.c 2009-02-19 12:59:23.000000000 +0000
1280+++ linux-2.6.28/drivers/gpu/drm/i915/i915_dma.c 2009-02-20 12:12:41.000000000 +0000
1281@@ -41,6 +41,7 @@
1282 int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
1283 {
1284 drm_i915_private_t *dev_priv = dev->dev_private;
1285+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1286 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1287 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
1288 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
1289@@ -82,6 +83,7 @@
1290 static int i915_init_phys_hws(struct drm_device *dev)
1291 {
1292 drm_i915_private_t *dev_priv = dev->dev_private;
1293+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1294 /* Program Hardware Status Page */
1295 dev_priv->status_page_dmah =
1296 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
1297@@ -107,6 +109,8 @@
1298 static void i915_free_hws(struct drm_device *dev)
1299 {
1300 drm_i915_private_t *dev_priv = dev->dev_private;
1301+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1302+
1303 if (dev_priv->status_page_dmah) {
1304 drm_pci_free(dev, dev_priv->status_page_dmah);
1305 dev_priv->status_page_dmah = NULL;
1306@@ -124,6 +128,7 @@
1307 void i915_kernel_lost_context(struct drm_device * dev)
1308 {
1309 drm_i915_private_t *dev_priv = dev->dev_private;
1310+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1311 struct drm_i915_master_private *master_priv;
1312 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
1313
1314@@ -231,6 +236,7 @@
1315 static int i915_dma_resume(struct drm_device * dev)
1316 {
1317 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1318+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1319
1320 DRM_DEBUG("%s\n", __func__);
1321
1322@@ -358,6 +364,7 @@
1323
1324 static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
1325 {
1326+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1327 drm_i915_private_t *dev_priv = dev->dev_private;
1328 int i;
1329 RING_LOCALS;
1330@@ -401,6 +408,7 @@
1331 int i, int DR1, int DR4)
1332 {
1333 drm_i915_private_t *dev_priv = dev->dev_private;
1334+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1335 struct drm_clip_rect box;
1336 RING_LOCALS;
1337
1338@@ -442,6 +450,7 @@
1339 static void i915_emit_breadcrumb(struct drm_device *dev)
1340 {
1341 drm_i915_private_t *dev_priv = dev->dev_private;
1342+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1343 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1344 RING_LOCALS;
1345
1346@@ -495,6 +504,7 @@
1347 drm_i915_batchbuffer_t * batch)
1348 {
1349 drm_i915_private_t *dev_priv = dev->dev_private;
1350+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1351 struct drm_clip_rect __user *boxes = batch->cliprects;
1352 int nbox = batch->num_cliprects;
1353 int i = 0, count;
1354@@ -544,6 +554,7 @@
1355
1356 static int i915_dispatch_flip(struct drm_device * dev)
1357 {
1358+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1359 drm_i915_private_t *dev_priv = dev->dev_private;
1360 struct drm_i915_master_private *master_priv =
1361 dev->primary->master->driver_priv;
1362@@ -775,6 +786,7 @@
1363 static int i915_set_status_page(struct drm_device *dev, void *data,
1364 struct drm_file *file_priv)
1365 {
1366+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1367 drm_i915_private_t *dev_priv = dev->dev_private;
1368 drm_i915_hws_addr_t *hws = data;
1369
1370@@ -930,6 +942,7 @@
1371
1372 static int i915_load_modeset_init(struct drm_device *dev)
1373 {
1374+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1375 struct drm_i915_private *dev_priv = dev->dev_private;
1376 unsigned long agp_size, prealloc_size;
1377 int fb_bar = IS_I9XX(dev) ? 2 : 0;
1378@@ -1073,8 +1086,8 @@
1379 base = drm_get_resource_start(dev, mmio_bar);
1380 size = drm_get_resource_len(dev, mmio_bar);
1381
1382- dev_priv->regs = ioremap(base, size);
1383- if (!dev_priv->regs) {
1384+ dev_priv->common.regs = ioremap(base, size);
1385+ if (!dev_priv->common.regs) {
1386 DRM_ERROR("failed to map registers\n");
1387 ret = -EIO;
1388 goto free_priv;
1389@@ -1126,7 +1139,7 @@
1390 return 0;
1391
1392 out_rmmap:
1393- iounmap(dev_priv->regs);
1394+ iounmap(dev_priv->common.regs);
1395 free_priv:
1396 drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
1397 return ret;
1398@@ -1144,8 +1157,8 @@
1399 if (dev->pdev->msi_enabled)
1400 pci_disable_msi(dev->pdev);
1401
1402- if (dev_priv->regs != NULL)
1403- iounmap(dev_priv->regs);
1404+ if (dev_priv->common.regs != NULL)
1405+ iounmap(dev_priv->common.regs);
1406
1407 intel_opregion_free(dev);
1408
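
Driver load and unload are the only paths above that reach the register mapping as dev_priv->common.regs, through the embedding structure itself, because they own the mapping's lifetime; everywhere else the alias local is used. The implied lifecycle, condensed from the hunks above (error handling trimmed; base and size come from the MMIO PCI BAR as in the patch):

	/* load */
	dev_priv->common.regs = ioremap(base, size);
	if (!dev_priv->common.regs)
		return -EIO;	/* the "failed to map registers" path */

	/* unload */
	if (dev_priv->common.regs != NULL)
		iounmap(dev_priv->common.regs);
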
1409Index: linux-2.6.28/drivers/gpu/drm/i915/intel_sdvo.c
1410===================================================================
1411--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_sdvo.c 2009-02-19 12:59:23.000000000 +0000
1412+++ linux-2.6.28/drivers/gpu/drm/i915/intel_sdvo.c 2009-02-20 14:53:08.000000000 +0000
1413@@ -62,7 +62,7 @@
1414 static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
1415 {
1416 struct drm_device *dev = intel_output->base.dev;
1417- struct drm_i915_private *dev_priv = dev->dev_private;
1418+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1419 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1420 u32 bval = val, cval = val;
1421 int i;
1422@@ -552,7 +552,7 @@
1423 struct drm_display_mode *adjusted_mode)
1424 {
1425 struct drm_device *dev = encoder->dev;
1426- struct drm_i915_private *dev_priv = dev->dev_private;
1427+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1428 struct drm_crtc *crtc = encoder->crtc;
1429 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1430 struct intel_output *intel_output = enc_to_intel_output(encoder);
1431@@ -659,7 +659,7 @@
1432 if (IS_I965G(dev)) {
1433 /* done in crtc_mode_set as the dpll_md reg must be written
1434 early */
1435- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
1436+ } else if (IS_POULSBO(dev) || IS_I945G(dev) || IS_I945GM(dev)) {
1437 /* done in crtc_mode_set as it lives inside the
1438 dpll register */
1439 } else {
1440@@ -672,7 +672,7 @@
1441 static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1442 {
1443 struct drm_device *dev = encoder->dev;
1444- struct drm_i915_private *dev_priv = dev->dev_private;
1445+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1446 struct intel_output *intel_output = enc_to_intel_output(encoder);
1447 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1448 u32 temp;
1449@@ -722,7 +722,7 @@
1450 static void intel_sdvo_save(struct drm_connector *connector)
1451 {
1452 struct drm_device *dev = connector->dev;
1453- struct drm_i915_private *dev_priv = dev->dev_private;
1454+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1455 struct intel_output *intel_output = to_intel_output(connector);
1456 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1457 int o;
1458@@ -759,7 +759,7 @@
1459 static void intel_sdvo_restore(struct drm_connector *connector)
1460 {
1461 struct drm_device *dev = connector->dev;
1462- struct drm_i915_private *dev_priv = dev->dev_private;
1463+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1464 struct intel_output *intel_output = to_intel_output(connector);
1465 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1466 int o;
1467Index: linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c
1468===================================================================
1469--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_lvds.c 2009-02-19 12:59:23.000000000 +0000
1470+++ linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c 2009-02-20 14:53:08.000000000 +0000
1471@@ -67,7 +67,7 @@
1472 */
1473 static void intel_lvds_set_power(struct drm_device *dev, bool on)
1474 {
1475- struct drm_i915_private *dev_priv = dev->dev_private;
1476+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1477 u32 pp_status;
1478
1479 if (on) {
1480@@ -104,35 +104,35 @@
1481 static void intel_lvds_save(struct drm_connector *connector)
1482 {
1483 struct drm_device *dev = connector->dev;
1484- struct drm_i915_private *dev_priv = dev->dev_private;
1485+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1486
1487- dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS);
1488- dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS);
1489- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
1490- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
1491- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
1492- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
1493+ dev_priv_common->savePP_ON = I915_READ(PP_ON_DELAYS);
1494+ dev_priv_common->savePP_OFF = I915_READ(PP_OFF_DELAYS);
1495+ dev_priv_common->savePP_CONTROL = I915_READ(PP_CONTROL);
1496+ dev_priv_common->savePP_DIVISOR = I915_READ(PP_DIVISOR);
1497+ dev_priv_common->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
1498+ dev_priv_common->backlight_duty_cycle = (dev_priv_common->saveBLC_PWM_CTL &
1499 BACKLIGHT_DUTY_CYCLE_MASK);
1500
1501 /*
1502 * If the light is off at server startup, just make it full brightness
1503 */
1504- if (dev_priv->backlight_duty_cycle == 0)
1505- dev_priv->backlight_duty_cycle =
1506+ if (dev_priv_common->backlight_duty_cycle == 0)
1507+ lvds_backlight=
1508 intel_lvds_get_max_backlight(dev);
1509 }
1510
1511 static void intel_lvds_restore(struct drm_connector *connector)
1512 {
1513 struct drm_device *dev = connector->dev;
1514- struct drm_i915_private *dev_priv = dev->dev_private;
1515+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1516
1517- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
1518- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
1519- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
1520- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
1521- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
1522- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
1523+ I915_WRITE(BLC_PWM_CTL, dev_priv_common->saveBLC_PWM_CTL);
1524+ I915_WRITE(PP_ON_DELAYS, dev_priv_common->savePP_ON);
1525+ I915_WRITE(PP_OFF_DELAYS, dev_priv_common->savePP_OFF);
1526+ I915_WRITE(PP_DIVISOR, dev_priv_common->savePP_DIVISOR);
1527+ I915_WRITE(PP_CONTROL, dev_priv_common->savePP_CONTROL);
1528+ if (dev_priv_common->savePP_CONTROL & POWER_TARGET_ON)
1529 intel_lvds_set_power(dev, true);
1530 else
1531 intel_lvds_set_power(dev, false);
1532@@ -142,8 +142,8 @@
1533 struct drm_display_mode *mode)
1534 {
1535 struct drm_device *dev = connector->dev;
1536- struct drm_i915_private *dev_priv = dev->dev_private;
1537- struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
1538+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1539+ struct drm_display_mode *fixed_mode = dev_priv_common->panel_fixed_mode;
1540
1541 if (fixed_mode) {
1542 if (mode->hdisplay > fixed_mode->hdisplay)
1543@@ -160,7 +160,7 @@
1544 struct drm_display_mode *adjusted_mode)
1545 {
1546 struct drm_device *dev = encoder->dev;
1547- struct drm_i915_private *dev_priv = dev->dev_private;
1548+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1549 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1550 struct drm_encoder *tmp_encoder;
1551
1552@@ -240,7 +240,7 @@
1553 struct drm_display_mode *adjusted_mode)
1554 {
1555 struct drm_device *dev = encoder->dev;
1556- struct drm_i915_private *dev_priv = dev->dev_private;
1557+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
1558 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1559 u32 pfit_control;
1560
1561@@ -264,7 +264,7 @@
1562 pfit_control = 0;
1563
1564 if (!IS_I965G(dev)) {
1565- if (dev_priv->panel_wants_dither)
1566+ if (dev_priv_common->panel_wants_dither)
1567 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
1568 }
1569 else
1570@@ -475,16 +475,16 @@
1571 crtc = intel_get_crtc_from_pipe(dev, pipe);
1572
1573 if (crtc && (lvds & LVDS_PORT_EN)) {
1574- dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
1575- if (dev_priv->panel_fixed_mode) {
1576- dev_priv->panel_fixed_mode->type |=
1577+ dev_priv_common->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
1578+ if (dev_priv_common->panel_fixed_mode) {
1579+ dev_priv_common->panel_fixed_mode->type |=
1580 DRM_MODE_TYPE_PREFERRED;
1581 goto out; /* FIXME: check for quirks */
1582 }
1583 }
1584
1585 /* If we still don't have a mode after all that, give up. */
1586- if (!dev_priv->panel_fixed_mode)
1587+ if (!dev_priv_common->panel_fixed_mode)
1588 goto failed;
1589
1590 /* FIXME: detect aopen & mac mini type stuff automatically? */
1591@@ -509,9 +509,9 @@
1592 * 800x600 display.
1593 */
1594
1595- if (dev_priv->panel_fixed_mode != NULL &&
1596- dev_priv->panel_fixed_mode->hdisplay == 800 &&
1597- dev_priv->panel_fixed_mode->vdisplay == 600) {
1598+ if (dev_priv_common->panel_fixed_mode != NULL &&
1599+ dev_priv_common->panel_fixed_mode->hdisplay == 800 &&
1600+ dev_priv_common->panel_fixed_mode->vdisplay == 600) {
1601 DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
1602 goto failed;
1603 }
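
For reference, the backlight save path above is meant to keep one invariant: the cached duty cycle must never remain zero, or a later restore would leave the panel dark. Condensed, with the fields as renamed by this patch (a sketch, not a verbatim hunk):

	dev_priv_common->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
	dev_priv_common->backlight_duty_cycle =
		dev_priv_common->saveBLC_PWM_CTL & BACKLIGHT_DUTY_CYCLE_MASK;
	if (dev_priv_common->backlight_duty_cycle == 0)	/* off at startup */
		dev_priv_common->backlight_duty_cycle =
			intel_lvds_get_max_backlight(dev);
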
1604Index: linux-2.6.28/drivers/gpu/drm/Kconfig
1605===================================================================
1606--- linux-2.6.28.orig/drivers/gpu/drm/Kconfig 2009-02-19 12:59:22.000000000 +0000
1607+++ linux-2.6.28/drivers/gpu/drm/Kconfig 2009-02-20 14:53:08.000000000 +0000
1608@@ -43,6 +43,11 @@
1609
1610 If M is selected, the module will be called radeon.
1611
1612+config DRM_INTEL_COMMON
1613+ tristate
1614+ help
1615+ Code common to several Intel drivers (autoselected)
1616+
1617 config DRM_I810
1618 tristate "Intel I810"
1619 depends on DRM && AGP && AGP_INTEL
1620@@ -70,6 +75,7 @@
1621 select FB_CFB_FILLRECT
1622 select FB_CFB_COPYAREA
1623 select FB_CFB_IMAGEBLIT
1624+ select DRM_INTEL_COMMON
1625 depends on FB
1626 tristate "i915 driver"
1627 help
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/psb-driver.patch
deleted file mode 100644
index 8154523b5d..0000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/psb-driver.patch
+++ /dev/null
@@ -1,21566 +0,0 @@
1Index: linux-2.6.28/include/drm/drm.h
2===================================================================
3--- linux-2.6.28.orig/include/drm/drm.h 2009-02-25 15:37:00.000000000 +0000
4+++ linux-2.6.28/include/drm/drm.h 2009-02-25 15:37:02.000000000 +0000
5@@ -174,6 +174,7 @@
6 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
7 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
8 _DRM_GEM = 6, /**< GEM object */
9+ _DRM_TTM = 7,
10 };
11
12 /**
13@@ -601,6 +602,271 @@
14
15 #include "drm_mode.h"
16
17+#define DRM_FENCE_FLAG_EMIT 0x00000001
18+#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
19+#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
20+#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
21+#define DRM_FENCE_FLAG_NO_USER 0x00000010
22+
23+/* Reserved for driver use */
24+#define DRM_FENCE_MASK_DRIVER 0xFF000000
25+
26+#define DRM_FENCE_TYPE_EXE 0x00000001
27+
28+struct drm_fence_arg {
29+ unsigned int handle;
30+ unsigned int fence_class;
31+ unsigned int type;
32+ unsigned int flags;
33+ unsigned int signaled;
34+ unsigned int error;
35+ unsigned int sequence;
36+ unsigned int pad64;
37+ uint64_t expand_pad[2]; /*Future expansion */
38+};
39+
40+/* Buffer permissions, referring to how the GPU uses the buffers.
41+ * these translate to fence types used for the buffers.
42+ * Typically a texture buffer is read, A destination buffer is write and
43+ * a command (batch-) buffer is exe. Can be or-ed together.
44+ */
45+
46+#define DRM_BO_FLAG_READ (1ULL << 0)
47+#define DRM_BO_FLAG_WRITE (1ULL << 1)
48+#define DRM_BO_FLAG_EXE (1ULL << 2)
49+
50+/*
51+ * Status flags. Can be read to determine the actual state of a buffer.
52+ * Can also be set in the buffer mask before validation.
53+ */
54+
55+/*
56+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
57+ * available to root and must be manually removed before buffer manager shutdown
58+ * or lock.
59+ * Flags: Acknowledge
60+ */
61+#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
62+
63+/*
64+ * Mask: Require that the buffer is placed in mappable memory when validated.
65+ * If not set the buffer may or may not be in mappable memory when validated.
66+ * Flags: If set, the buffer is in mappable memory.
67+ */
68+#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
69+
70+/* Mask: The buffer should be shareable with other processes.
71+ * Flags: The buffer is shareable with other processes.
72+ */
73+#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
74+
75+/* Mask: If set, place the buffer in cache-coherent memory if available.
76+ * If clear, never place the buffer in cache coherent memory if validated.
77+ * Flags: The buffer is currently in cache-coherent memory.
78+ */
79+#define DRM_BO_FLAG_CACHED (1ULL << 7)
80+
81+/* Mask: Make sure that every time this buffer is validated,
82+ * it ends up on the same location provided that the memory mask is the same.
83+ * The buffer will also not be evicted when claiming space for
84+ * other buffers. Basically a pinned buffer but it may be thrown out as
85+ * part of buffer manager shutdown or locking.
86+ * Flags: Acknowledge.
87+ */
88+#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
89+
90+/* Mask: Make sure the buffer is in cached memory when mapped
91+ * Flags: Acknowledge.
92+ * Buffers allocated with this flag should not be used for suballocators
93+ * This type may have issues on CPUs with over-aggressive caching
94+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
95+ */
96+#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
97+
98+
99+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
100+ * Flags: Acknowledge.
101+ */
102+#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
103+
104+/*
105+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
106+ * Flags: Acknowledge.
107+ */
108+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
109+#define DRM_BO_FLAG_TILE (1ULL << 15)
110+
111+/*
112+ * Memory type flags that can be or'ed together in the mask, but only
113+ * one appears in flags.
114+ */
115+
116+/* System memory */
117+#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
118+/* Translation table memory */
119+#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
120+/* Vram memory */
121+#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
122+/* Up to the driver to define. */
123+#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
124+#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
125+#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
126+#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
127+#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
128+/* We can add more of these now with a 64-bit flag type */
129+
130+/* Memory flag mask */
131+#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
132+#define DRM_BO_MASK_MEMTYPE 0x00000000FF0800A0ULL
133+
134+/* Driver-private flags */
135+#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
136+
137+/* Don't block on validate and map */
138+#define DRM_BO_HINT_DONT_BLOCK 0x00000002
139+/* Don't place this buffer on the unfenced list.*/
140+#define DRM_BO_HINT_DONT_FENCE 0x00000004
141+#define DRM_BO_HINT_WAIT_LAZY 0x00000008
142+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
143+
144+#define DRM_BO_INIT_MAGIC 0xfe769812
145+#define DRM_BO_INIT_MAJOR 1
146+#define DRM_BO_INIT_MINOR 0
147+#define DRM_BO_INIT_PATCH 0
148+
149+
150+struct drm_bo_info_req {
151+ uint64_t mask;
152+ uint64_t flags;
153+ unsigned int handle;
154+ unsigned int hint;
155+ unsigned int fence_class;
156+ unsigned int desired_tile_stride;
157+ unsigned int tile_info;
158+ unsigned int pad64;
159+ uint64_t presumed_offset;
160+};
161+
162+struct drm_bo_create_req {
163+ uint64_t mask;
164+ uint64_t size;
165+ uint64_t buffer_start;
166+ unsigned int hint;
167+ unsigned int page_alignment;
168+};
169+
170+
171+/*
172+ * Reply flags
173+ */
174+
175+#define DRM_BO_REP_BUSY 0x00000001
176+
177+struct drm_bo_info_rep {
178+ uint64_t flags;
179+ uint64_t mask;
180+ uint64_t size;
181+ uint64_t offset;
182+ uint64_t arg_handle;
183+ uint64_t buffer_start;
184+ unsigned int handle;
185+ unsigned int fence_flags;
186+ unsigned int rep_flags;
187+ unsigned int page_alignment;
188+ unsigned int desired_tile_stride;
189+ unsigned int hw_tile_stride;
190+ unsigned int tile_info;
191+ unsigned int pad64;
192+ uint64_t expand_pad[4]; /*Future expansion */
193+};
194+
195+struct drm_bo_arg_rep {
196+ struct drm_bo_info_rep bo_info;
197+ int ret;
198+ unsigned int pad64;
199+};
200+
201+struct drm_bo_create_arg {
202+ union {
203+ struct drm_bo_create_req req;
204+ struct drm_bo_info_rep rep;
205+ } d;
206+};
207+
208+struct drm_bo_handle_arg {
209+ unsigned int handle;
210+};
211+
212+struct drm_bo_reference_info_arg {
213+ union {
214+ struct drm_bo_handle_arg req;
215+ struct drm_bo_info_rep rep;
216+ } d;
217+};
218+
219+struct drm_bo_map_wait_idle_arg {
220+ union {
221+ struct drm_bo_info_req req;
222+ struct drm_bo_info_rep rep;
223+ } d;
224+};
225+
226+struct drm_bo_op_req {
227+ enum {
228+ drm_bo_validate,
229+ drm_bo_fence,
230+ drm_bo_ref_fence,
231+ } op;
232+ unsigned int arg_handle;
233+ struct drm_bo_info_req bo_req;
234+};
235+
236+
237+struct drm_bo_op_arg {
238+ uint64_t next;
239+ union {
240+ struct drm_bo_op_req req;
241+ struct drm_bo_arg_rep rep;
242+ } d;
243+ int handled;
244+ unsigned int pad64;
245+};
246+
247+
248+#define DRM_BO_MEM_LOCAL 0
249+#define DRM_BO_MEM_TT 1
250+#define DRM_BO_MEM_VRAM 2
251+#define DRM_BO_MEM_PRIV0 3
252+#define DRM_BO_MEM_PRIV1 4
253+#define DRM_BO_MEM_PRIV2 5
254+#define DRM_BO_MEM_PRIV3 6
255+#define DRM_BO_MEM_PRIV4 7
256+
257+#define DRM_BO_MEM_TYPES 8 /* For now. */
258+
259+#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
260+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
261+
262+struct drm_bo_version_arg {
263+ uint32_t major;
264+ uint32_t minor;
265+ uint32_t patchlevel;
266+};
267+
268+struct drm_mm_type_arg {
269+ unsigned int mem_type;
270+ unsigned int lock_flags;
271+};
272+
273+struct drm_mm_init_arg {
274+ unsigned int magic;
275+ unsigned int major;
276+ unsigned int minor;
277+ unsigned int mem_type;
278+ uint64_t p_offset;
279+ uint64_t p_size;
280+};
281+
282 #define DRM_IOCTL_BASE 'd'
283 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
284 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
285@@ -688,6 +954,39 @@
286 #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
287 #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
288
289+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
290+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
291+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
292+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
293+
294+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
295+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
296+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
297+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
298+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
299+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
300+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
301+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
302+
303+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
304+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
305+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
306+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
307+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
308+#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
309+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
310+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
311+#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
312+
313+
314+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
315+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
316+#define DRM_IOCTL_MODE_GETOUTPUT DRM_IOWR(0xA2, struct drm_mode_get_output)
317+
318+#define DRM_IOCTL_MODE_ADDMODE DRM_IOWR(0xA7, struct drm_mode_modeinfo)
319+#define DRM_IOCTL_MODE_RMMODE DRM_IOWR(0xA8, unsigned int)
320+/*@}*/
321+
322 /**
323 * Device specific ioctls should only be in their respective headers
324 * The device specific ioctl range is from 0x40 to 0x99.
325@@ -742,6 +1041,11 @@
326 typedef struct drm_agp_info drm_agp_info_t;
327 typedef struct drm_scatter_gather drm_scatter_gather_t;
328 typedef struct drm_set_version drm_set_version_t;
329+
330+typedef struct drm_fence_arg drm_fence_arg_t;
331+typedef struct drm_mm_type_arg drm_mm_type_arg_t;
332+typedef struct drm_mm_init_arg drm_mm_init_arg_t;
333+typedef enum drm_bo_type drm_bo_type_t;
334 #endif
335
336 #endif
337Index: linux-2.6.28/include/drm/drmP.h
338===================================================================
339--- linux-2.6.28.orig/include/drm/drmP.h 2009-02-25 15:37:00.000000000 +0000
340+++ linux-2.6.28/include/drm/drmP.h 2009-02-25 15:37:02.000000000 +0000
341@@ -57,6 +57,7 @@
342 #include <linux/dma-mapping.h>
343 #include <linux/mm.h>
344 #include <linux/cdev.h>
345+#include <linux/i2c.h>
346 #include <linux/mutex.h>
347 #if defined(__alpha__) || defined(__powerpc__)
348 #include <asm/pgtable.h> /* For pte_wrprotect */
349@@ -147,9 +148,24 @@
350 #define DRM_MEM_CTXLIST 21
351 #define DRM_MEM_MM 22
352 #define DRM_MEM_HASHTAB 23
353+#define DRM_MEM_OBJECTS 24
354+#define DRM_MEM_FENCE 25
355+#define DRM_MEM_TTM 26
356+#define DRM_MEM_BUFOBJ 27
357
358 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
359 #define DRM_MAP_HASH_OFFSET 0x10000000
360+#define DRM_MAP_HASH_ORDER 12
361+#define DRM_OBJECT_HASH_ORDER 12
362+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
363+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
364+/*
365+ * This should be small enough to allow the use of kmalloc for hash tables
366+ * instead of vmalloc.
367+ */
368+
369+#define DRM_FILE_HASH_ORDER 8
370+#define DRM_MM_INIT_MAX_PAGES 256
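For scale: DRM_FILE_HASH_ORDER 8 gives each file's ref-object hash 1 << 8 = 256 buckets, roughly 2 KB of pointer-sized heads on 64-bit, comfortably within kmalloc range as the comment above intends (the bucket-size arithmetic is an illustrative assumption).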
371
372 /*@}*/
373
374@@ -378,6 +394,14 @@
375 struct drm_freelist freelist;
376 };
377
378+
379+enum drm_ref_type {
380+ _DRM_REF_USE = 0,
381+ _DRM_REF_TYPE1,
382+ _DRM_NO_REF_TYPES
383+};
384+
385+
386 /** File private data */
387 struct drm_file {
388 int authenticated;
389@@ -387,6 +411,7 @@
390 unsigned long ioctl_count;
391 struct list_head lhead;
392 struct drm_minor *minor;
393+ int remove_auth_on_close;
394 unsigned long lock_count;
395
396 /** Mapping of mm object handles to object pointers. */
397@@ -394,6 +419,16 @@
398 /** Lock for synchronization of access to object_idr. */
399 spinlock_t table_lock;
400
401+ /*
402+ * The user object hash table is global and resides in the
403+ * drm_device structure. We protect the lists and hash tables with the
404+ * device struct_mutex. A bit coarse-grained but probably the best
405+ * option.
406+ */
407+
408+ struct list_head refd_objects;
409+
410+ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
411 struct file *filp;
412 void *driver_priv;
413
414@@ -659,6 +694,10 @@
415 void *driver_priv; /**< Private structure for driver to use */
416 };
417
418+#include "drm_objects.h"
419+#include "drm_edid.h"
420+#include "drm_crtc.h"
421+
422 /**
423 * DRM driver structure. This structure represent the common code for
424 * a family of cards. There will one drm_device for each card present
425@@ -766,6 +805,13 @@
426 int (*proc_init)(struct drm_minor *minor);
427 void (*proc_cleanup)(struct drm_minor *minor);
428
429+ /* FB routines, if present */
430+ int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
431+ int (*fb_remove)(struct drm_device *dev, struct drm_crtc *crtc);
432+
433+ struct drm_fence_driver *fence_driver;
434+ struct drm_bo_driver *bo_driver;
435+
436 /**
437 * Driver-specific constructor for drm_gem_objects, to set up
438 * obj->driver_private.
439@@ -821,8 +867,11 @@
440 */
441 struct drm_device {
442 struct list_head driver_item; /**< list of devices per driver */
443+ //char *unique; /**< Unique identifier: e.g., busid */
444+ //int unique_len; /**< Length of unique field */
445 char *devname; /**< For /proc/interrupts */
446 int if_version; /**< Highest interface version set */
447+ //int blocked; /**< Blocked due to VC switch? */
448
449 /** \name Locks */
450 /*@{ */
451@@ -847,12 +896,18 @@
452 /*@} */
453
454 struct list_head filelist;
455+ struct drm_open_hash magiclist; /**< magic hash table */
456+ struct list_head magicfree;
457
458 /** \name Memory management */
459 /*@{ */
460 struct list_head maplist; /**< Linked list of regions */
461 int map_count; /**< Number of mappable regions */
462 struct drm_open_hash map_hash; /**< User token hash table for maps */
463+ struct drm_mm offset_manager; /**< User token manager */
464+ struct drm_open_hash object_hash; /**< User token hash table for objects */
465+ struct address_space *dev_mapping; /**< For unmap_mapping_range() */
466+ struct page *ttm_dummy_page;
467
468 /** \name Context handle management */
469 /*@{ */
470@@ -864,6 +919,7 @@
471
472 struct list_head vmalist; /**< List of vmas (for debugging) */
473
474+ struct drm_lock_data lock; /**< Information on hardware lock */
475 /*@} */
476
477 /** \name DMA queues (contexts) */
478@@ -936,7 +992,6 @@
479 int num_crtcs; /**< Number of CRTCs on this device */
480 void *dev_private; /**< device private data */
481 void *mm_private;
482- struct address_space *dev_mapping;
483 struct drm_sigdata sigdata; /**< For block_all_signals */
484 sigset_t sigmask;
485
486@@ -945,6 +1000,8 @@
487 unsigned int agp_buffer_token;
488 struct drm_minor *control; /**< Control node for card */
489 struct drm_minor *primary; /**< render type primary screen head */
490+ struct drm_fence_manager fm;
491+ struct drm_buffer_manager bm;
492
493 /** \name Drawable information */
494 /*@{ */
495@@ -976,6 +1033,27 @@
496 return dev->pdev->irq;
497 }
498
499+#if __OS_HAS_AGP
500+struct drm_agp_ttm_backend {
501+ struct drm_ttm_backend backend;
502+ DRM_AGP_MEM *mem;
503+ struct agp_bridge_data *bridge;
504+ int populated;
505+};
506+#endif
507+
508+typedef struct ati_pcigart_ttm_backend {
509+ struct drm_ttm_backend backend;
510+ int populated;
511+ void (*gart_flush_fn)(struct drm_device *dev);
512+ struct drm_ati_pcigart_info *gart_info;
513+ unsigned long offset;
514+ struct page **pages;
515+ int num_pages;
516+ int bound;
517+ struct drm_device *dev;
518+} ati_pcigart_ttm_backend_t;
519+
520 static __inline__ int drm_core_check_feature(struct drm_device *dev,
521 int feature)
522 {
523@@ -1042,6 +1120,9 @@
524 /* Driver support (drm_drv.h) */
525 extern int drm_init(struct drm_driver *driver);
526 extern void drm_exit(struct drm_driver *driver);
527+extern void drm_cleanup_pci(struct pci_dev *pdev);
528+extern void drm_vbl_send_signals(struct drm_device *dev, int crtc);
529+extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
530 extern int drm_ioctl(struct inode *inode, struct file *filp,
531 unsigned int cmd, unsigned long arg);
532 extern long drm_compat_ioctl(struct file *filp,
533Index: linux-2.6.28/include/drm/drm_pciids.h
534===================================================================
535--- linux-2.6.28.orig/include/drm/drm_pciids.h 2008-12-24 23:26:37.000000000 +0000
536+++ linux-2.6.28/include/drm/drm_pciids.h 2009-02-25 15:37:02.000000000 +0000
537@@ -419,3 +419,9 @@
538 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
539 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
540 {0, 0, 0}
541+
542+#define psb_PCI_IDS \
543+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
544+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
545+ {0, 0, 0}
546+
547Index: linux-2.6.28/drivers/gpu/drm/Makefile
548===================================================================
549--- linux-2.6.28.orig/drivers/gpu/drm/Makefile 2009-02-25 15:36:50.000000000 +0000
550+++ linux-2.6.28/drivers/gpu/drm/Makefile 2009-02-25 15:55:10.000000000 +0000
551@@ -10,6 +10,8 @@
552 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
553 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
554 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
555+ drm_fence.o drm_object.o drm_ttm.o drm_bo.o \
556+ drm_bo_lock.o drm_bo_move.o drm_regman.o \
557 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
558
559 drm-$(CONFIG_COMPAT) += drm_ioc32.o
560@@ -21,7 +23,8 @@
561 obj-$(CONFIG_DRM_MGA) += mga/
562 obj-$(CONFIG_DRM_I810) += i810/
563 obj-$(CONFIG_DRM_I830) += i830/
564-obj-$(CONFIG_DRM_I915) += i915/
565+obj-$(CONFIG_DRM_INTEL_COMMON) += i915/
566+obj-$(CONFIG_DRM_PSB) += psb/
567 obj-$(CONFIG_DRM_SIS) += sis/
568 obj-$(CONFIG_DRM_SAVAGE)+= savage/
569 obj-$(CONFIG_DRM_VIA) +=via/
570Index: linux-2.6.28/drivers/gpu/drm/drm_agpsupport.c
571===================================================================
572--- linux-2.6.28.orig/drivers/gpu/drm/drm_agpsupport.c 2008-12-24 23:26:37.000000000 +0000
573+++ linux-2.6.28/drivers/gpu/drm/drm_agpsupport.c 2009-02-25 15:37:02.000000000 +0000
574@@ -502,4 +502,156 @@
575 }
576 EXPORT_SYMBOL(drm_agp_chipset_flush);
577
578+/*
579+ * AGP ttm backend interface.
580+ */
581+
582+#ifndef AGP_USER_TYPES
583+#define AGP_USER_TYPES (1 << 16)
584+#define AGP_USER_MEMORY (AGP_USER_TYPES)
585+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
586+#endif
587+#define AGP_REQUIRED_MAJOR 0
588+#define AGP_REQUIRED_MINOR 102
589+
590+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
591+{
592+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
593+}
594+
595+
596+static int drm_agp_populate(struct drm_ttm_backend *backend,
597+ unsigned long num_pages, struct page **pages)
598+{
599+ struct drm_agp_ttm_backend *agp_be =
600+ container_of(backend, struct drm_agp_ttm_backend, backend);
601+ struct page **cur_page, **last_page = pages + num_pages;
602+ DRM_AGP_MEM *mem;
603+
604+ DRM_DEBUG("drm_agp_populate_ttm\n");
605+ mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
606+ if (!mem)
607+ return -ENOMEM;
608+
609+ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
610+ mem->page_count = 0;
611+ for (cur_page = pages; cur_page < last_page; ++cur_page)
612+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
613+ agp_be->mem = mem;
614+ return 0;
615+}
616+
617+static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
618+ struct drm_bo_mem_reg *bo_mem)
619+{
620+ struct drm_agp_ttm_backend *agp_be =
621+ container_of(backend, struct drm_agp_ttm_backend, backend);
622+ DRM_AGP_MEM *mem = agp_be->mem;
623+ int ret;
624+ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
625+
626+ DRM_DEBUG("drm_agp_bind_ttm\n");
627+ mem->is_flushed = 1;
628+ mem->type = AGP_USER_MEMORY;
629+ /* CACHED MAPPED implies not snooped memory */
630+ if (snooped)
631+ mem->type = AGP_USER_CACHED_MEMORY;
632+
633+ ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
634+ if (ret)
635+ DRM_ERROR("AGP Bind memory failed\n");
636+
637+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
638+ DRM_BE_FLAG_BOUND_CACHED : 0,
639+ DRM_BE_FLAG_BOUND_CACHED);
640+ return ret;
641+}
642+
643+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
644+{
645+ struct drm_agp_ttm_backend *agp_be =
646+ container_of(backend, struct drm_agp_ttm_backend, backend);
647+
648+ DRM_DEBUG("drm_agp_unbind_ttm\n");
649+ if (agp_be->mem->is_bound)
650+ return drm_agp_unbind_memory(agp_be->mem);
651+ else
652+ return 0;
653+}
654+
655+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
656+{
657+ struct drm_agp_ttm_backend *agp_be =
658+ container_of(backend, struct drm_agp_ttm_backend, backend);
659+ DRM_AGP_MEM *mem = agp_be->mem;
660+
661+ DRM_DEBUG("drm_agp_clear_ttm\n");
662+ if (mem) {
663+ backend->func->unbind(backend);
664+ agp_free_memory(mem);
665+ }
666+ agp_be->mem = NULL;
667+}
668+
669+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
670+{
671+ struct drm_agp_ttm_backend *agp_be;
672+
673+ if (backend) {
674+ DRM_DEBUG("drm_agp_destroy_ttm\n");
675+ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
676+ if (agp_be && agp_be->mem)
677+ backend->func->clear(backend);
678+ }
679+}
680+
681+static struct drm_ttm_backend_func agp_ttm_backend = {
682+ .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
683+ .populate = drm_agp_populate,
684+ .clear = drm_agp_clear_ttm,
685+ .bind = drm_agp_bind_ttm,
686+ .unbind = drm_agp_unbind_ttm,
687+ .destroy = drm_agp_destroy_ttm,
688+};
689+
690+struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
691+{
693+ struct drm_agp_ttm_backend *agp_be;
694+ struct agp_kern_info *info;
695+
696+ if (!dev->agp) {
697+ DRM_ERROR("AGP is not initialized.\n");
698+ return NULL;
699+ }
700+ info = &dev->agp->agp_info;
701+
702+ if (info->version.major != AGP_REQUIRED_MAJOR ||
703+ info->version.minor < AGP_REQUIRED_MINOR) {
704+ DRM_ERROR("Wrong agpgart version %d.%d\n"
705+ "\tYou need at least version %d.%d.\n",
706+ info->version.major,
707+ info->version.minor,
708+ AGP_REQUIRED_MAJOR,
709+ AGP_REQUIRED_MINOR);
710+ return NULL;
711+ }
712+
714+ agp_be = drm_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
715+ if (!agp_be)
716+ return NULL;
717+
718+ agp_be->mem = NULL;
719+
720+ agp_be->bridge = dev->agp->bridge;
721+ agp_be->populated = 0;
722+ agp_be->backend.func = &agp_ttm_backend;
723+ agp_be->backend.dev = dev;
724+
725+ return &agp_be->backend;
726+}
727+EXPORT_SYMBOL(drm_agp_init_ttm);
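For context, a hedged sketch of how a driver would plug this backend into the buffer-object core; the create_ttm_backend_entry hook name is assumed from the drm_objects.h interface, which is not part of this hunk.

    /* Driver hook: hand the AGP TTM backend to the bo core. */
    static struct drm_ttm_backend *my_create_ttm_backend(struct drm_device *dev)
    {
            /* drm_agp_init_ttm() returns NULL when AGP is absent
             * or agpgart is too old. */
            return drm_agp_init_ttm(dev);
    }
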
728+
729+
730 #endif /* __OS_HAS_AGP */
731Index: linux-2.6.28/drivers/gpu/drm/drm_bo.c
732===================================================================
733--- /dev/null 1970-01-01 00:00:00.000000000 +0000
734+++ linux-2.6.28/drivers/gpu/drm/drm_bo.c 2009-02-25 15:37:02.000000000 +0000
735@@ -0,0 +1,2660 @@
736+/**************************************************************************
737+ *
738+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
739+ * All Rights Reserved.
740+ *
741+ * Permission is hereby granted, free of charge, to any person obtaining a
742+ * copy of this software and associated documentation files (the
743+ * "Software"), to deal in the Software without restriction, including
744+ * without limitation the rights to use, copy, modify, merge, publish,
745+ * distribute, sub license, and/or sell copies of the Software, and to
746+ * permit persons to whom the Software is furnished to do so, subject to
747+ * the following conditions:
748+ *
749+ * The above copyright notice and this permission notice (including the
750+ * next paragraph) shall be included in all copies or substantial portions
751+ * of the Software.
752+ *
753+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
754+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
755+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
756+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
757+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
758+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
759+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
760+ *
761+ **************************************************************************/
762+/*
763+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
764+ */
765+
766+#include "drmP.h"
767+
768+/*
769+ * Locking may look a bit complicated but isn't really:
770+ *
771+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
772+ * when there is a chance that it can be zero before or after the operation.
773+ *
774+ * dev->struct_mutex also protects all lists and list heads,
775+ * hash tables and hash heads.
776+ *
777+ * bo->mutex protects the buffer object itself excluding the usage field.
778+ * bo->mutex also protects the buffer list heads, so to manipulate those,
779+ * we need both the bo->mutex and the dev->struct_mutex.
780+ *
781+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
782+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
783+ * the list traversal will, in general, need to be restarted.
784+ *
785+ */
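To make the documented order concrete, a minimal illustrative sketch (not part of the patch) of moving a buffer between lru lists:

    mutex_lock(&bo->mutex);              /* buffer lock first ...      */
    mutex_lock(&dev->struct_mutex);      /* ... then the device lock   */
    list_del_init(&bo->lru);
    drm_bo_add_to_lru(bo);               /* requires struct_mutex held */
    mutex_unlock(&dev->struct_mutex);
    mutex_unlock(&bo->mutex);
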
786+
787+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
788+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
789+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
790+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
791+
792+static inline uint64_t drm_bo_type_flags(unsigned type)
793+{
794+ return (1ULL << (24 + type));
795+}
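Worked example: drm_bo_type_flags(DRM_BO_MEM_TT) is 1ULL << 25 and drm_bo_type_flags(DRM_BO_MEM_VRAM) is 1ULL << 26, i.e. the per-type DRM_BO_FLAG_MEM_* bits (flag names assumed from the companion drm.h hunk, not shown here).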
796+
797+/*
798+ * bo locked. dev->struct_mutex locked.
799+ */
800+
801+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
802+{
803+ struct drm_mem_type_manager *man;
804+
805+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
806+ DRM_ASSERT_LOCKED(&bo->mutex);
807+
808+ man = &bo->dev->bm.man[bo->pinned_mem_type];
809+ list_add_tail(&bo->pinned_lru, &man->pinned);
810+}
811+
812+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
813+{
814+ struct drm_mem_type_manager *man;
815+
816+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
817+
818+ if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
819+ || bo->mem.mem_type != bo->pinned_mem_type) {
820+ man = &bo->dev->bm.man[bo->mem.mem_type];
821+ list_add_tail(&bo->lru, &man->lru);
822+ } else {
823+ INIT_LIST_HEAD(&bo->lru);
824+ }
825+}
826+
827+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
828+{
829+#ifdef DRM_ODD_MM_COMPAT
830+ int ret;
831+
832+ if (!bo->map_list.map)
833+ return 0;
834+
835+ ret = drm_bo_lock_kmm(bo);
836+ if (ret)
837+ return ret;
838+ drm_bo_unmap_virtual(bo);
839+ if (old_is_pci)
840+ drm_bo_finish_unmap(bo);
841+#else
842+ if (!bo->map_list.map)
843+ return 0;
844+
845+ drm_bo_unmap_virtual(bo);
846+#endif
847+ return 0;
848+}
849+
850+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
851+{
852+#ifdef DRM_ODD_MM_COMPAT
853+ int ret;
854+
855+ if (!bo->map_list.map)
856+ return;
857+
858+ ret = drm_bo_remap_bound(bo);
859+ if (ret) {
860+ DRM_ERROR("Failed to remap a bound buffer object.\n"
861+ "\tThis might cause a sigbus later.\n");
862+ }
863+ drm_bo_unlock_kmm(bo);
864+#endif
865+}
866+
867+/*
868+ * Call bo->mutex locked.
869+ */
870+
871+static int drm_bo_add_ttm(struct drm_buffer_object *bo)
872+{
873+ struct drm_device *dev = bo->dev;
874+ int ret = 0;
875+
876+ DRM_ASSERT_LOCKED(&bo->mutex);
877+ bo->ttm = NULL;
878+
879+ switch (bo->type) {
880+ case drm_bo_type_dc:
881+ case drm_bo_type_kernel:
882+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
883+ if (!bo->ttm)
884+ ret = -ENOMEM;
885+ break;
886+ case drm_bo_type_user:
887+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
888+ if (!bo->ttm)
889+ return -ENOMEM;
890+
891+ ret = drm_ttm_set_user(bo->ttm, current,
892+ bo->mem.mask & DRM_BO_FLAG_WRITE,
893+ bo->buffer_start,
894+ bo->num_pages,
895+ dev->bm.dummy_read_page);
896+ if (ret)
897+ return ret;
898+
899+ break;
900+ default:
901+ DRM_ERROR("Illegal buffer object type\n");
902+ ret = -EINVAL;
903+ break;
904+ }
905+
906+ return ret;
907+}
908+
909+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
910+ struct drm_bo_mem_reg *mem,
911+ int evict, int no_wait)
912+{
913+ struct drm_device *dev = bo->dev;
914+ struct drm_buffer_manager *bm = &dev->bm;
915+ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
916+ int new_is_pci = drm_mem_reg_is_pci(dev, mem);
917+ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
918+ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
919+ int ret = 0;
920+
921+ if (old_is_pci || new_is_pci ||
922+ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
923+ ret = drm_bo_vm_pre_move(bo, old_is_pci);
924+ if (ret)
925+ return ret;
926+
927+ /*
928+ * Create and bind a ttm if required.
929+ */
930+
931+ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
932+ ret = drm_bo_add_ttm(bo);
933+ if (ret)
934+ goto out_err;
935+
936+ if (mem->mem_type != DRM_BO_MEM_LOCAL) {
937+ ret = drm_bind_ttm(bo->ttm, mem);
938+ if (ret)
939+ goto out_err;
940+ }
941+
942+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
943+
944+ struct drm_bo_mem_reg *old_mem = &bo->mem;
945+ uint64_t save_flags = old_mem->flags;
946+ uint64_t save_mask = old_mem->mask;
947+
948+ *old_mem = *mem;
949+ mem->mm_node = NULL;
950+ old_mem->mask = save_mask;
951+ DRM_FLAG_MASKED(save_flags, mem->flags,
952+ DRM_BO_MASK_MEMTYPE);
953+ goto moved;
954+ }
955+
956+ }
957+
958+ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
959+ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
960+
961+ ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
962+
963+ } else if (dev->driver->bo_driver->move) {
964+ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
965+
966+ } else {
967+
968+ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
969+
970+ }
971+
972+ if (ret)
973+ goto out_err;
974+
975+moved:
976+ if (old_is_pci || new_is_pci)
977+ drm_bo_vm_post_move(bo);
978+
979+ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
980+ ret =
981+ dev->driver->bo_driver->invalidate_caches(dev,
982+ bo->mem.flags);
983+ if (ret)
984+ DRM_ERROR("Can not flush read caches\n");
985+ }
986+
987+ DRM_FLAG_MASKED(bo->priv_flags,
988+ (evict) ? _DRM_BO_FLAG_EVICTED : 0,
989+ _DRM_BO_FLAG_EVICTED);
990+
991+ if (bo->mem.mm_node)
992+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
993+ bm->man[bo->mem.mem_type].gpu_offset;
994+
995+
996+ return 0;
997+
998+out_err:
999+ if (old_is_pci || new_is_pci)
1000+ drm_bo_vm_post_move(bo);
1001+
1002+ new_man = &bm->man[bo->mem.mem_type];
1003+ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
1004+ drm_ttm_unbind(bo->ttm);
1005+ drm_destroy_ttm(bo->ttm);
1006+ bo->ttm = NULL;
1007+ }
1008+
1009+ return ret;
1010+}
1011+
1012+/*
1013+ * Call bo->mutex locked.
1014+ * Wait until the buffer is idle.
1015+ */
1016+
1017+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
1018+ int no_wait)
1019+{
1020+ int ret;
1021+
1022+ DRM_ASSERT_LOCKED(&bo->mutex);
1023+
1024+ if (bo->fence) {
1025+ if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
1026+ drm_fence_usage_deref_unlocked(&bo->fence);
1027+ return 0;
1028+ }
1029+ if (no_wait)
1030+ return -EBUSY;
1031+
1032+ ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
1033+ bo->fence_type);
1034+ if (ret)
1035+ return ret;
1036+
1037+ drm_fence_usage_deref_unlocked(&bo->fence);
1038+ }
1039+ return 0;
1040+}
1041+EXPORT_SYMBOL(drm_bo_wait);
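An illustrative caller, following the "Call bo->mutex locked" rule above:

    mutex_lock(&bo->mutex);
    ret = drm_bo_wait(bo, 0 /* lazy */, 0 /* ignore_signals */, 0 /* no_wait */);
    mutex_unlock(&bo->mutex);
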
1042+
1043+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
1044+{
1045+ struct drm_device *dev = bo->dev;
1046+ struct drm_buffer_manager *bm = &dev->bm;
1047+
1048+ if (bo->fence) {
1049+ if (bm->nice_mode) {
1050+ unsigned long _end = jiffies + 3 * DRM_HZ;
1051+ int ret;
1052+ do {
1053+ ret = drm_bo_wait(bo, 0, 1, 0);
1054+ if (ret && allow_errors)
1055+ return ret;
1056+
1057+ } while (ret && !time_after_eq(jiffies, _end));
1058+
1059+ if (bo->fence) {
1060+ bm->nice_mode = 0;
1061+ DRM_ERROR("Detected GPU lockup or "
1062+ "fence driver was taken down. "
1063+ "Evicting buffer.\n");
1064+ }
1065+ }
1066+ if (bo->fence)
1067+ drm_fence_usage_deref_unlocked(&bo->fence);
1068+ }
1069+ return 0;
1070+}
1071+
1072+/*
1073+ * Call dev->struct_mutex locked.
1074+ * Attempts to remove all private references to a buffer by expiring its
1075+ * fence object and removing it from lru lists and memory managers.
1076+ */
1077+
1078+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
1079+{
1080+ struct drm_device *dev = bo->dev;
1081+ struct drm_buffer_manager *bm = &dev->bm;
1082+
1083+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
1084+
1085+ atomic_inc(&bo->usage);
1086+ mutex_unlock(&dev->struct_mutex);
1087+ mutex_lock(&bo->mutex);
1088+
1089+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1090+
1091+ if (bo->fence && drm_fence_object_signaled(bo->fence,
1092+ bo->fence_type))
1093+ drm_fence_usage_deref_unlocked(&bo->fence);
1094+
1095+ if (bo->fence && remove_all)
1096+ (void)drm_bo_expire_fence(bo, 0);
1097+
1098+ mutex_lock(&dev->struct_mutex);
1099+
1100+ if (!atomic_dec_and_test(&bo->usage))
1101+ goto out;
1102+
1103+ if (!bo->fence) {
1104+ list_del_init(&bo->lru);
1105+ if (bo->mem.mm_node) {
1106+ drm_mm_put_block(bo->mem.mm_node);
1107+ if (bo->pinned_node == bo->mem.mm_node)
1108+ bo->pinned_node = NULL;
1109+ bo->mem.mm_node = NULL;
1110+ }
1111+ list_del_init(&bo->pinned_lru);
1112+ if (bo->pinned_node) {
1113+ drm_mm_put_block(bo->pinned_node);
1114+ bo->pinned_node = NULL;
1115+ }
1116+ list_del_init(&bo->ddestroy);
1117+ mutex_unlock(&bo->mutex);
1118+ drm_bo_destroy_locked(bo);
1119+ return;
1120+ }
1121+
1122+ if (list_empty(&bo->ddestroy)) {
1123+ drm_fence_object_flush(bo->fence, bo->fence_type);
1124+ list_add_tail(&bo->ddestroy, &bm->ddestroy);
1125+ schedule_delayed_work(&bm->wq,
1126+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
1127+ }
1128+
1129+out:
1130+ mutex_unlock(&bo->mutex);
1131+ return;
1132+}
1133+
1134+static void drm_bo_unreserve_size(unsigned long size)
1135+{
1136+ /* drm_free_memctl(size); */
1137+}
1138+
1139+/*
1140+ * Verify that refcount is 0 and that there are no internal references
1141+ * to the buffer object. Then destroy it.
1142+ */
1143+
1144+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
1145+{
1146+ struct drm_device *dev = bo->dev;
1147+ struct drm_buffer_manager *bm = &dev->bm;
1148+ unsigned long reserved_size;
1149+
1150+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
1151+
1152+ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
1153+ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
1154+ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
1155+ if (bo->fence != NULL) {
1156+ DRM_ERROR("Fence was non-zero.\n");
1157+ drm_bo_cleanup_refs(bo, 0);
1158+ return;
1159+ }
1160+
1161+#ifdef DRM_ODD_MM_COMPAT
1162+ BUG_ON(!list_empty(&bo->vma_list));
1163+ BUG_ON(!list_empty(&bo->p_mm_list));
1164+#endif
1165+
1166+ if (bo->ttm) {
1167+ drm_ttm_unbind(bo->ttm);
1168+ drm_destroy_ttm(bo->ttm);
1169+ bo->ttm = NULL;
1170+ }
1171+
1172+ atomic_dec(&bm->count);
1173+
1174+ reserved_size = bo->reserved_size;
1175+
1176+ drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
1177+ drm_bo_unreserve_size(reserved_size);
1178+
1179+ return;
1180+ }
1181+
1182+ /*
1183+ * Some stuff is still trying to reference the buffer object.
1184+ * Get rid of those references.
1185+ */
1186+
1187+ drm_bo_cleanup_refs(bo, 0);
1188+
1189+ return;
1190+}
1191+
1192+/*
1193+ * Call dev->struct_mutex locked.
1194+ */
1195+
1196+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
1197+{
1198+ struct drm_buffer_manager *bm = &dev->bm;
1199+
1200+ struct drm_buffer_object *entry, *nentry;
1201+ struct list_head *list, *next;
1202+
1203+ list_for_each_safe(list, next, &bm->ddestroy) {
1204+ entry = list_entry(list, struct drm_buffer_object, ddestroy);
1205+
1206+ nentry = NULL;
1207+ if (next != &bm->ddestroy) {
1208+ nentry = list_entry(next, struct drm_buffer_object,
1209+ ddestroy);
1210+ atomic_inc(&nentry->usage);
1211+ }
1212+
1213+ drm_bo_cleanup_refs(entry, remove_all);
1214+
1215+ if (nentry)
1216+ atomic_dec(&nentry->usage);
1217+ }
1218+}
1219+
1220+static void drm_bo_delayed_workqueue(struct work_struct *work)
1221+{
1222+ struct drm_buffer_manager *bm =
1223+ container_of(work, struct drm_buffer_manager, wq.work);
1224+ struct drm_device *dev = container_of(bm, struct drm_device, bm);
1225+
1226+ DRM_DEBUG("Delayed delete Worker\n");
1227+
1228+ mutex_lock(&dev->struct_mutex);
1229+ if (!bm->initialized) {
1230+ mutex_unlock(&dev->struct_mutex);
1231+ return;
1232+ }
1233+ drm_bo_delayed_delete(dev, 0);
1234+ if (bm->initialized && !list_empty(&bm->ddestroy)) {
1235+ schedule_delayed_work(&bm->wq,
1236+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
1237+ }
1238+ mutex_unlock(&dev->struct_mutex);
1239+}
1240+
1241+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
1242+{
1243+ struct drm_buffer_object *tmp_bo = *bo;
1244+ *bo = NULL;
1245+
1246+ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
1247+
1248+ if (atomic_dec_and_test(&tmp_bo->usage))
1249+ drm_bo_destroy_locked(tmp_bo);
1250+}
1251+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
1252+
1253+static void drm_bo_base_deref_locked(struct drm_file *file_priv,
1254+ struct drm_user_object *uo)
1255+{
1256+ struct drm_buffer_object *bo =
1257+ drm_user_object_entry(uo, struct drm_buffer_object, base);
1258+
1259+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
1260+
1261+ drm_bo_takedown_vm_locked(bo);
1262+ drm_bo_usage_deref_locked(&bo);
1263+}
1264+
1265+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
1266+{
1267+ struct drm_buffer_object *tmp_bo = *bo;
1268+ struct drm_device *dev = tmp_bo->dev;
1269+
1270+ *bo = NULL;
1271+ if (atomic_dec_and_test(&tmp_bo->usage)) {
1272+ mutex_lock(&dev->struct_mutex);
1273+ if (atomic_read(&tmp_bo->usage) == 0)
1274+ drm_bo_destroy_locked(tmp_bo);
1275+ mutex_unlock(&dev->struct_mutex);
1276+ }
1277+}
1278+EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
1279+
1280+void drm_putback_buffer_objects(struct drm_device *dev)
1281+{
1282+ struct drm_buffer_manager *bm = &dev->bm;
1283+ struct list_head *list = &bm->unfenced;
1284+ struct drm_buffer_object *entry, *next;
1285+
1286+ mutex_lock(&dev->struct_mutex);
1287+ list_for_each_entry_safe(entry, next, list, lru) {
1288+ atomic_inc(&entry->usage);
1289+ mutex_unlock(&dev->struct_mutex);
1290+
1291+ mutex_lock(&entry->mutex);
1292+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
1293+ mutex_lock(&dev->struct_mutex);
1294+
1295+ list_del_init(&entry->lru);
1296+ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1297+ wake_up_all(&entry->event_queue);
1298+
1299+ /*
1300+ * FIXME: Might want to put back on head of list
1301+ * instead of tail here.
1302+ */
1303+
1304+ drm_bo_add_to_lru(entry);
1305+ mutex_unlock(&entry->mutex);
1306+ drm_bo_usage_deref_locked(&entry);
1307+ }
1308+ mutex_unlock(&dev->struct_mutex);
1309+}
1310+EXPORT_SYMBOL(drm_putback_buffer_objects);
1311+
1312+
1313+/*
1314+ * Note: the caller has to register (if applicable)
1315+ * and deregister fence object usage.
1316+ */
1317+
1318+int drm_fence_buffer_objects(struct drm_device *dev,
1319+ struct list_head *list,
1320+ uint32_t fence_flags,
1321+ struct drm_fence_object *fence,
1322+ struct drm_fence_object **used_fence)
1323+{
1324+ struct drm_buffer_manager *bm = &dev->bm;
1325+ struct drm_buffer_object *entry;
1326+ uint32_t fence_type = 0;
1327+ uint32_t fence_class = ~0;
1328+ int count = 0;
1329+ int ret = 0;
1330+ struct list_head *l;
1331+
1332+ mutex_lock(&dev->struct_mutex);
1333+
1334+ if (!list)
1335+ list = &bm->unfenced;
1336+
1337+ if (fence)
1338+ fence_class = fence->fence_class;
1339+
1340+ list_for_each_entry(entry, list, lru) {
1341+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
1342+ fence_type |= entry->new_fence_type;
1343+ if (fence_class == ~0)
1344+ fence_class = entry->new_fence_class;
1345+ else if (entry->new_fence_class != fence_class) {
1346+ DRM_ERROR("Unmatching fence classes on unfenced list: "
1347+ "%d and %d.\n",
1348+ fence_class,
1349+ entry->new_fence_class);
1350+ ret = -EINVAL;
1351+ goto out;
1352+ }
1353+ count++;
1354+ }
1355+
1356+ if (!count) {
1357+ ret = -EINVAL;
1358+ goto out;
1359+ }
1360+
1361+ if (fence) {
1362+ if ((fence_type & fence->type) != fence_type ||
1363+ (fence->fence_class != fence_class)) {
1364+ DRM_ERROR("Given fence doesn't match buffers "
1365+ "on unfenced list.\n");
1366+ ret = -EINVAL;
1367+ goto out;
1368+ }
1369+ } else {
1370+ mutex_unlock(&dev->struct_mutex);
1371+ ret = drm_fence_object_create(dev, fence_class, fence_type,
1372+ fence_flags | DRM_FENCE_FLAG_EMIT,
1373+ &fence);
1374+ mutex_lock(&dev->struct_mutex);
1375+ if (ret)
1376+ goto out;
1377+ }
1378+
1379+ count = 0;
1380+ l = list->next;
1381+ while (l != list) {
1382+ prefetch(l->next);
1383+ entry = list_entry(l, struct drm_buffer_object, lru);
1384+ atomic_inc(&entry->usage);
1385+ mutex_unlock(&dev->struct_mutex);
1386+ mutex_lock(&entry->mutex);
1387+ mutex_lock(&dev->struct_mutex);
1388+ list_del_init(l);
1389+ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1390+ count++;
1391+ if (entry->fence)
1392+ drm_fence_usage_deref_locked(&entry->fence);
1393+ entry->fence = drm_fence_reference_locked(fence);
1394+ entry->fence_class = entry->new_fence_class;
1395+ entry->fence_type = entry->new_fence_type;
1396+ DRM_FLAG_MASKED(entry->priv_flags, 0,
1397+ _DRM_BO_FLAG_UNFENCED);
1398+ wake_up_all(&entry->event_queue);
1399+ drm_bo_add_to_lru(entry);
1400+ }
1401+ mutex_unlock(&entry->mutex);
1402+ drm_bo_usage_deref_locked(&entry);
1403+ l = list->next;
1404+ }
1405+ DRM_DEBUG("Fenced %d buffers\n", count);
1406+out:
1407+ mutex_unlock(&dev->struct_mutex);
1408+ *used_fence = fence;
1409+ return ret;
1410+}
1411+EXPORT_SYMBOL(drm_fence_buffer_objects);
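A hedged driver-side sketch: fence the default unfenced list after command submission, then drop the usage reference that the note above says the caller owns.

    struct drm_fence_object *fence = NULL;
    int ret;

    /* list == NULL selects bm->unfenced; fence == NULL creates one. */
    ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
    if (ret == 0)
            drm_fence_usage_deref_unlocked(&fence);
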
1412+
1413+/*
1414+ * bo->mutex locked
1415+ */
1416+
1417+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
1418+ int no_wait)
1419+{
1420+ int ret = 0;
1421+ struct drm_device *dev = bo->dev;
1422+ struct drm_bo_mem_reg evict_mem;
1423+
1424+ /*
1425+ * Someone might have modified the buffer before we took the
1426+ * buffer mutex.
1427+ */
1428+
1429+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
1430+ goto out;
1431+ if (bo->mem.mem_type != mem_type)
1432+ goto out;
1433+
1434+ ret = drm_bo_wait(bo, 0, 0, no_wait);
1435+
1436+ if (ret && ret != -EAGAIN) {
1437+ DRM_ERROR("Failed to expire fence before "
1438+ "buffer eviction.\n");
1439+ goto out;
1440+ }
1441+
1442+ evict_mem = bo->mem;
1443+ evict_mem.mm_node = NULL;
1444+
1446+ evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
1447+ ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
1448+
1449+ if (ret) {
1450+ if (ret != -EAGAIN)
1451+ DRM_ERROR("Failed to find memory space for "
1452+ "buffer 0x%p eviction.\n", bo);
1453+ goto out;
1454+ }
1455+
1456+ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
1457+
1458+ if (ret) {
1459+ if (ret != -EAGAIN)
1460+ DRM_ERROR("Buffer eviction failed\n");
1461+ goto out;
1462+ }
1463+
1464+ mutex_lock(&dev->struct_mutex);
1465+ if (evict_mem.mm_node) {
1466+ if (evict_mem.mm_node != bo->pinned_node)
1467+ drm_mm_put_block(evict_mem.mm_node);
1468+ evict_mem.mm_node = NULL;
1469+ }
1470+ list_del(&bo->lru);
1471+ drm_bo_add_to_lru(bo);
1472+ mutex_unlock(&dev->struct_mutex);
1473+
1474+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
1475+ _DRM_BO_FLAG_EVICTED);
1476+
1477+out:
1478+ return ret;
1479+}
1480+
1481+/**
1482+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
1483+ * space, or we've evicted everything and there isn't enough space.
1484+ */
1485+static int drm_bo_mem_force_space(struct drm_device *dev,
1486+ struct drm_bo_mem_reg *mem,
1487+ uint32_t mem_type, int no_wait)
1488+{
1489+ struct drm_mm_node *node;
1490+ struct drm_buffer_manager *bm = &dev->bm;
1491+ struct drm_buffer_object *entry;
1492+ struct drm_mem_type_manager *man = &bm->man[mem_type];
1493+ struct list_head *lru;
1494+ unsigned long num_pages = mem->num_pages;
1495+ int ret;
1496+
1497+ mutex_lock(&dev->struct_mutex);
1498+ do {
1499+ node = drm_mm_search_free(&man->manager, num_pages,
1500+ mem->page_alignment, 1);
1501+ if (node)
1502+ break;
1503+
1504+ lru = &man->lru;
1505+ if (lru->next == lru)
1506+ break;
1507+
1508+ entry = list_entry(lru->next, struct drm_buffer_object, lru);
1509+ atomic_inc(&entry->usage);
1510+ mutex_unlock(&dev->struct_mutex);
1511+ mutex_lock(&entry->mutex);
1512+ BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
1513+
1514+ ret = drm_bo_evict(entry, mem_type, no_wait);
1515+ mutex_unlock(&entry->mutex);
1516+ drm_bo_usage_deref_unlocked(&entry);
1517+ if (ret)
1518+ return ret;
1519+ mutex_lock(&dev->struct_mutex);
1520+ } while (1);
1521+
1522+ if (!node) {
1523+ mutex_unlock(&dev->struct_mutex);
1524+ return -ENOMEM;
1525+ }
1526+
1527+ node = drm_mm_get_block(node, num_pages, mem->page_alignment);
1528+ if (!node) {
1529+ mutex_unlock(&dev->struct_mutex);
1530+ return -ENOMEM;
1531+ }
1532+
1533+ mutex_unlock(&dev->struct_mutex);
1534+ mem->mm_node = node;
1535+ mem->mem_type = mem_type;
1536+ return 0;
1537+}
1538+
1539+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
1540+ int disallow_fixed,
1541+ uint32_t mem_type,
1542+ uint64_t mask, uint32_t *res_mask)
1543+{
1544+ uint64_t cur_flags = drm_bo_type_flags(mem_type);
1545+ uint64_t flag_diff;
1546+
1547+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
1548+ return 0;
1549+ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
1550+ cur_flags |= DRM_BO_FLAG_CACHED;
1551+ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
1552+ cur_flags |= DRM_BO_FLAG_MAPPABLE;
1553+ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
1554+ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
1555+
1556+ if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
1557+ return 0;
1558+
1559+ if (mem_type == DRM_BO_MEM_LOCAL) {
1560+ *res_mask = cur_flags;
1561+ return 1;
1562+ }
1563+
1564+ flag_diff = (mask ^ cur_flags);
1565+ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
1566+ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
1567+
1568+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1569+ (!(mask & DRM_BO_FLAG_CACHED) ||
1570+ (mask & DRM_BO_FLAG_FORCE_CACHING)))
1571+ return 0;
1572+
1573+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1574+ ((mask & DRM_BO_FLAG_MAPPABLE) ||
1575+ (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1576+ return 0;
1577+
1578+ *res_mask = cur_flags;
1579+ return 1;
1580+}
1581+
1582+/**
1583+ * Creates space for memory region @mem according to its type.
1584+ *
1585+ * This function first searches for free space in compatible memory types in
1586+ * the priority order defined by the driver. If free space isn't found, then
1587+ * drm_bo_mem_force_space is attempted in priority order to evict and find
1588+ * space.
1589+ */
1590+int drm_bo_mem_space(struct drm_buffer_object *bo,
1591+ struct drm_bo_mem_reg *mem, int no_wait)
1592+{
1593+ struct drm_device *dev = bo->dev;
1594+ struct drm_buffer_manager *bm = &dev->bm;
1595+ struct drm_mem_type_manager *man;
1596+
1597+ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1598+ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1599+ uint32_t i;
1600+ uint32_t mem_type = DRM_BO_MEM_LOCAL;
1601+ uint32_t cur_flags;
1602+ int type_found = 0;
1603+ int type_ok = 0;
1604+ int has_eagain = 0;
1605+ struct drm_mm_node *node = NULL;
1606+ int ret;
1607+
1608+ mem->mm_node = NULL;
1609+ for (i = 0; i < num_prios; ++i) {
1610+ mem_type = prios[i];
1611+ man = &bm->man[mem_type];
1612+
1613+ type_ok = drm_bo_mt_compatible(man,
1614+ bo->type == drm_bo_type_user,
1615+ mem_type, mem->mask,
1616+ &cur_flags);
1617+
1618+ if (!type_ok)
1619+ continue;
1620+
1621+ if (mem_type == DRM_BO_MEM_LOCAL)
1622+ break;
1623+
1624+ if ((mem_type == bo->pinned_mem_type) &&
1625+ (bo->pinned_node != NULL)) {
1626+ node = bo->pinned_node;
1627+ break;
1628+ }
1629+
1630+ mutex_lock(&dev->struct_mutex);
1631+ if (man->has_type && man->use_type) {
1632+ type_found = 1;
1633+ node = drm_mm_search_free(&man->manager, mem->num_pages,
1634+ mem->page_alignment, 1);
1635+ if (node)
1636+ node = drm_mm_get_block(node, mem->num_pages,
1637+ mem->page_alignment);
1638+ }
1639+ mutex_unlock(&dev->struct_mutex);
1640+ if (node)
1641+ break;
1642+ }
1643+
1644+ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
1645+ mem->mm_node = node;
1646+ mem->mem_type = mem_type;
1647+ mem->flags = cur_flags;
1648+ return 0;
1649+ }
1650+
1651+ if (!type_found)
1652+ return -EINVAL;
1653+
1654+ num_prios = dev->driver->bo_driver->num_mem_busy_prio;
1655+ prios = dev->driver->bo_driver->mem_busy_prio;
1656+
1657+ for (i = 0; i < num_prios; ++i) {
1658+ mem_type = prios[i];
1659+ man = &bm->man[mem_type];
1660+
1661+ if (!man->has_type)
1662+ continue;
1663+
1664+ if (!drm_bo_mt_compatible(man,
1665+ bo->type == drm_bo_type_user,
1666+ mem_type,
1667+ mem->mask,
1668+ &cur_flags))
1669+ continue;
1670+
1671+ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
1672+
1673+ if (ret == 0 && mem->mm_node) {
1674+ mem->flags = cur_flags;
1675+ return 0;
1676+ }
1677+
1678+ if (ret == -EAGAIN)
1679+ has_eagain = 1;
1680+ }
1681+
1682+ ret = (has_eagain) ? -EAGAIN : -ENOMEM;
1683+ return ret;
1684+}
1685+EXPORT_SYMBOL(drm_bo_mem_space);
1686+
1687+static int drm_bo_new_mask(struct drm_buffer_object *bo,
1688+ uint64_t new_flags, uint64_t used_mask)
1689+{
1690+ uint32_t new_props;
1691+
1692+ if (bo->type == drm_bo_type_user &&
1693+ ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
1694+ (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
1695+ DRM_ERROR("User buffers require cache-coherent memory.\n");
1696+ return -EINVAL;
1697+ }
1698+
1699+ if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1700+ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
1701+ return -EPERM;
1702+ }
1703+
1704+ if (likely(used_mask & DRM_BO_MASK_MEM) &&
1705+ (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
1706+ !DRM_SUSER(DRM_CURPROC)) {
1707+ if (likely(bo->mem.flags & new_flags & used_mask &
1708+ DRM_BO_MASK_MEM))
1709+ new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
1710+ (bo->mem.flags & DRM_BO_MASK_MEM);
1711+ else {
1712+ DRM_ERROR("Incompatible memory type specification "
1713+ "for NO_EVICT buffer.\n");
1714+ return -EPERM;
1715+ }
1716+ }
1717+
1718+ if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1719+ DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1720+ return -EPERM;
1721+ }
1722+
1723+ new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1724+ DRM_BO_FLAG_READ);
1725+
1726+ if (!new_props) {
1727+ DRM_ERROR("Invalid buffer object rwx properties\n");
1728+ return -EINVAL;
1729+ }
1730+
1731+ bo->mem.mask = new_flags;
1732+ return 0;
1733+}
1734+
1735+/*
1736+ * Call dev->struct_mutex locked.
1737+ */
1738+
1739+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1740+ uint32_t handle, int check_owner)
1741+{
1742+ struct drm_user_object *uo;
1743+ struct drm_buffer_object *bo;
1744+
1745+ uo = drm_lookup_user_object(file_priv, handle);
1746+
1747+ if (!uo || (uo->type != drm_buffer_type)) {
1748+ DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1749+ return NULL;
1750+ }
1751+
1752+ if (check_owner && file_priv != uo->owner) {
1753+ if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1754+ return NULL;
1755+ }
1756+
1757+ bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1758+ atomic_inc(&bo->usage);
1759+ return bo;
1760+}
1761+EXPORT_SYMBOL(drm_lookup_buffer_object);
1762+
1763+/*
1764+ * Call bo->mutex locked.
1765+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
1766+ * Unlike drm_bo_busy(), this does not flush fences.
1767+ */
1768+
1769+static int drm_bo_quick_busy(struct drm_buffer_object *bo)
1770+{
1771+ struct drm_fence_object *fence = bo->fence;
1772+
1773+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1774+ if (fence) {
1775+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
1776+ drm_fence_usage_deref_unlocked(&bo->fence);
1777+ return 0;
1778+ }
1779+ return 1;
1780+ }
1781+ return 0;
1782+}
1783+
1784+/*
1785+ * Call bo->mutex locked.
1786+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
1787+ */
1788+
1789+static int drm_bo_busy(struct drm_buffer_object *bo)
1790+{
1791+ struct drm_fence_object *fence = bo->fence;
1792+
1793+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1794+ if (fence) {
1795+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
1796+ drm_fence_usage_deref_unlocked(&bo->fence);
1797+ return 0;
1798+ }
1799+ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1800+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
1801+ drm_fence_usage_deref_unlocked(&bo->fence);
1802+ return 0;
1803+ }
1804+ return 1;
1805+ }
1806+ return 0;
1807+}
1808+
1809+static int drm_bo_evict_cached(struct drm_buffer_object *bo)
1810+{
1811+ int ret = 0;
1812+
1813+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1814+ if (bo->mem.mm_node)
1815+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1816+ return ret;
1817+}
1818+
1819+/*
1820+ * Wait until a buffer is unmapped.
1821+ */
1822+
1823+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1824+{
1825+ int ret = 0;
1826+
1827+ if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1828+ return -EBUSY;
1829+
1830+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1831+ atomic_read(&bo->mapped) == -1);
1832+
1833+ if (ret == -EINTR)
1834+ ret = -EAGAIN;
1835+
1836+ return ret;
1837+}
1838+
1839+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
1840+{
1841+ int ret;
1842+
1843+ mutex_lock(&bo->mutex);
1844+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1845+ mutex_unlock(&bo->mutex);
1846+ return ret;
1847+}
1848+
1849+/*
1850+ * Wait until a buffer scheduled to be fenced moves off the unfenced list.
1851+ * Until then, we cannot really do anything with it except delete it.
1852+ */
1853+
1854+static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
1855+ int eagain_if_wait)
1856+{
1857+ int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1858+
1859+ if (ret && no_wait)
1860+ return -EBUSY;
1861+ else if (!ret)
1862+ return 0;
1863+
1864+ ret = 0;
1865+ mutex_unlock(&bo->mutex);
1866+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1867+ !drm_bo_check_unfenced(bo));
1868+ mutex_lock(&bo->mutex);
1869+ if (ret == -EINTR)
1870+ return -EAGAIN;
1871+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1872+ if (ret) {
1873+ DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1874+ return -EBUSY;
1875+ }
1876+ if (eagain_if_wait)
1877+ return -EAGAIN;
1878+
1879+ return 0;
1880+}
1881+
1882+/*
1883+ * Fill in the ioctl reply argument with buffer info.
1884+ * Bo locked.
1885+ */
1886+
1887+void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1888+ struct drm_bo_info_rep *rep)
1889+{
1890+ if (!rep)
1891+ return;
1892+
1893+ rep->handle = bo->base.hash.key;
1894+ rep->flags = bo->mem.flags;
1895+ rep->size = bo->num_pages * PAGE_SIZE;
1896+ rep->offset = bo->offset;
1897+
1898+ if (bo->type == drm_bo_type_dc)
1899+ rep->arg_handle = bo->map_list.user_token;
1900+ else
1901+ rep->arg_handle = 0;
1902+
1903+ rep->mask = bo->mem.mask;
1904+ rep->buffer_start = bo->buffer_start;
1905+ rep->fence_flags = bo->fence_type;
1906+ rep->rep_flags = 0;
1907+ rep->page_alignment = bo->mem.page_alignment;
1908+
1909+ if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1910+ DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1911+ DRM_BO_REP_BUSY);
1912+ }
1913+}
1914+EXPORT_SYMBOL(drm_bo_fill_rep_arg);
1915+
1916+/*
1917+ * Wait for buffer idle and register that we've mapped the buffer.
1918+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1919+ * so that if the client dies, the mapping is automatically
1920+ * unregistered.
1921+ */
1922+
1923+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1924+ uint32_t map_flags, unsigned hint,
1925+ struct drm_bo_info_rep *rep)
1926+{
1927+ struct drm_buffer_object *bo;
1928+ struct drm_device *dev = file_priv->minor->dev;
1929+ int ret = 0;
1930+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1931+
1932+ mutex_lock(&dev->struct_mutex);
1933+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
1934+ mutex_unlock(&dev->struct_mutex);
1935+
1936+ if (!bo)
1937+ return -EINVAL;
1938+
1939+ mutex_lock(&bo->mutex);
1940+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1941+ if (ret)
1942+ goto out;
1943+
1944+ /*
1945+ * If this returns true, we are currently unmapped.
1946+ * We need to do this test, because unmapping can
1947+ * be done without the bo->mutex held.
1948+ */
1949+
1950+ while (1) {
1951+ if (atomic_inc_and_test(&bo->mapped)) {
1952+ if (no_wait && drm_bo_busy(bo)) {
1953+ atomic_dec(&bo->mapped);
1954+ ret = -EBUSY;
1955+ goto out;
1956+ }
1957+ ret = drm_bo_wait(bo, 0, 0, no_wait);
1958+ if (ret) {
1959+ atomic_dec(&bo->mapped);
1960+ goto out;
1961+ }
1962+
1963+ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1964+ drm_bo_evict_cached(bo);
1965+
1966+ break;
1967+ } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
1968+
1969+ /*
1970+ * We are already mapped with different flags.
1971+ * need to wait for unmap.
1972+ */
1973+
1974+ ret = drm_bo_wait_unmapped(bo, no_wait);
1975+ if (ret)
1976+ goto out;
1977+
1978+ continue;
1979+ }
1980+ break;
1981+ }
1982+
1983+ mutex_lock(&dev->struct_mutex);
1984+ ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1985+ mutex_unlock(&dev->struct_mutex);
1986+ if (ret) {
1987+ if (atomic_add_negative(-1, &bo->mapped))
1988+ wake_up_all(&bo->event_queue);
1989+
1990+ } else
1991+ drm_bo_fill_rep_arg(bo, rep);
1992+out:
1993+ mutex_unlock(&bo->mutex);
1994+ drm_bo_usage_deref_unlocked(&bo);
1995+ return ret;
1996+}
1997+
1998+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1999+{
2000+ struct drm_device *dev = file_priv->minor->dev;
2001+ struct drm_buffer_object *bo;
2002+ struct drm_ref_object *ro;
2003+ int ret = 0;
2004+
2005+ mutex_lock(&dev->struct_mutex);
2006+
2007+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2008+ if (!bo) {
2009+ ret = -EINVAL;
2010+ goto out;
2011+ }
2012+
2013+ ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
2014+ if (!ro) {
2015+ ret = -EINVAL;
2016+ goto out;
2017+ }
2018+
2019+ drm_remove_ref_object(file_priv, ro);
2020+ drm_bo_usage_deref_locked(&bo);
2021+out:
2022+ mutex_unlock(&dev->struct_mutex);
2023+ return ret;
2024+}
2025+
2026+/*
2027+ * Call with dev->struct_mutex held.
2028+ */
2029+
2030+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
2031+ struct drm_user_object *uo,
2032+ enum drm_ref_type action)
2033+{
2034+ struct drm_buffer_object *bo =
2035+ drm_user_object_entry(uo, struct drm_buffer_object, base);
2036+
2037+ /*
2038+ * We DON'T want to take bo->mutex here, because the waiter in
2039+ * drm_bo_wait_unmapped() holds it while waiting for the unmap.
2040+ */
2041+
2042+ BUG_ON(action != _DRM_REF_TYPE1);
2043+
2044+ if (atomic_add_negative(-1, &bo->mapped))
2045+ wake_up_all(&bo->event_queue);
2046+}
2047+
2048+/*
2049+ * bo->mutex locked.
2050+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
2051+ */
2052+
2053+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
2054+ int no_wait, int move_unfenced)
2055+{
2056+ struct drm_device *dev = bo->dev;
2057+ struct drm_buffer_manager *bm = &dev->bm;
2058+ int ret = 0;
2059+ struct drm_bo_mem_reg mem;
2060+ /*
2061+ * Flush outstanding fences.
2062+ */
2063+
2064+ drm_bo_busy(bo);
2065+
2066+ /*
2067+ * Wait for outstanding fences.
2068+ */
2069+
2070+ ret = drm_bo_wait(bo, 0, 0, no_wait);
2071+ if (ret)
2072+ return ret;
2073+
2074+ mem.num_pages = bo->num_pages;
2075+ mem.size = mem.num_pages << PAGE_SHIFT;
2076+ mem.mask = new_mem_flags;
2077+ mem.page_alignment = bo->mem.page_alignment;
2078+
2079+ mutex_lock(&bm->evict_mutex);
2080+ mutex_lock(&dev->struct_mutex);
2081+ list_del_init(&bo->lru);
2082+ mutex_unlock(&dev->struct_mutex);
2083+
2084+ /*
2085+ * Determine where to move the buffer.
2086+ */
2087+ ret = drm_bo_mem_space(bo, &mem, no_wait);
2088+ if (ret)
2089+ goto out_unlock;
2090+
2091+ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
2092+
2093+out_unlock:
2094+ mutex_lock(&dev->struct_mutex);
2095+ if (ret || !move_unfenced) {
2096+ if (mem.mm_node) {
2097+ if (mem.mm_node != bo->pinned_node)
2098+ drm_mm_put_block(mem.mm_node);
2099+ mem.mm_node = NULL;
2100+ }
2101+ drm_bo_add_to_lru(bo);
2102+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
2103+ wake_up_all(&bo->event_queue);
2104+ DRM_FLAG_MASKED(bo->priv_flags, 0,
2105+ _DRM_BO_FLAG_UNFENCED);
2106+ }
2107+ } else {
2108+ list_add_tail(&bo->lru, &bm->unfenced);
2109+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
2110+ _DRM_BO_FLAG_UNFENCED);
2111+ }
2112+ mutex_unlock(&dev->struct_mutex);
2113+ mutex_unlock(&bm->evict_mutex);
2114+ return ret;
2115+}
2116+
2117+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
2118+{
2119+ uint32_t flag_diff = (mem->mask ^ mem->flags);
2120+
2121+ if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
2122+ return 0;
2123+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
2124+ (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
2125+ (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
2126+ return 0;
2127+
2128+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
2129+ ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
2130+ (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
2131+ return 0;
2132+ return 1;
2133+}
2134+
2135+/*
2136+ * bo locked.
2137+ */
2138+
2139+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
2140+ uint32_t fence_class,
2141+ int move_unfenced, int no_wait)
2142+{
2143+ struct drm_device *dev = bo->dev;
2144+ struct drm_buffer_manager *bm = &dev->bm;
2145+ struct drm_bo_driver *driver = dev->driver->bo_driver;
2146+ uint32_t ftype;
2147+ int ret;
2148+
2149+ DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
2150+ (unsigned long long) bo->mem.mask,
2151+ (unsigned long long) bo->mem.flags);
2152+
2153+ ret = driver->fence_type(bo, &fence_class, &ftype);
2154+
2155+ if (ret) {
2156+ DRM_ERROR("Driver did not support given buffer permissions\n");
2157+ return ret;
2158+ }
2159+
2160+ /*
2161+ * We're switching command submission mechanisms,
2162+ * or cannot simply rely on the hardware serializing for us.
2163+ *
2164+ * Insert a driver-dependent barrier or wait for buffer idle.
2165+ */
2166+
2167+ if ((fence_class != bo->fence_class) ||
2168+ ((ftype ^ bo->fence_type) & bo->fence_type)) {
2169+
2170+ ret = -EINVAL;
2171+ if (driver->command_stream_barrier) {
2172+ ret = driver->command_stream_barrier(bo,
2173+ fence_class,
2174+ ftype,
2175+ no_wait);
2176+ }
2177+ if (ret)
2178+ ret = drm_bo_wait(bo, 0, 0, no_wait);
2179+
2180+ if (ret)
2181+ return ret;
2182+
2183+ }
2184+
2185+ bo->new_fence_class = fence_class;
2186+ bo->new_fence_type = ftype;
2187+
2188+ ret = drm_bo_wait_unmapped(bo, no_wait);
2189+ if (ret) {
2190+ DRM_ERROR("Timed out waiting for buffer unmap.\n");
2191+ return ret;
2192+ }
2193+
2194+ /*
2195+ * Check whether we need to move buffer.
2196+ */
2197+
2198+ if (!drm_bo_mem_compat(&bo->mem)) {
2199+ ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
2200+ move_unfenced);
2201+ if (ret) {
2202+ if (ret != -EAGAIN)
2203+ DRM_ERROR("Failed moving buffer.\n");
2204+ if (ret == -ENOMEM)
2205+ DRM_ERROR("Out of aperture space.\n");
2206+ return ret;
2207+ }
2208+ }
2209+
2210+ /*
2211+ * Pinned buffers.
2212+ */
2213+
2214+ if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
2215+ bo->pinned_mem_type = bo->mem.mem_type;
2216+ mutex_lock(&dev->struct_mutex);
2217+ list_del_init(&bo->pinned_lru);
2218+ drm_bo_add_to_pinned_lru(bo);
2219+
2220+ if (bo->pinned_node != bo->mem.mm_node) {
2221+ if (bo->pinned_node != NULL)
2222+ drm_mm_put_block(bo->pinned_node);
2223+ bo->pinned_node = bo->mem.mm_node;
2224+ }
2225+
2226+ mutex_unlock(&dev->struct_mutex);
2227+
2228+ } else if (bo->pinned_node != NULL) {
2229+
2230+ mutex_lock(&dev->struct_mutex);
2231+
2232+ if (bo->pinned_node != bo->mem.mm_node)
2233+ drm_mm_put_block(bo->pinned_node);
2234+
2235+ list_del_init(&bo->pinned_lru);
2236+ bo->pinned_node = NULL;
2237+ mutex_unlock(&dev->struct_mutex);
2238+
2239+ }
2240+
2241+ /*
2242+ * We might need to add a TTM.
2243+ */
2244+
2245+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
2246+ ret = drm_bo_add_ttm(bo);
2247+ if (ret)
2248+ return ret;
2249+ }
2250+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
2251+
2252+ /*
2253+ * Finally, adjust lru to be sure.
2254+ */
2255+
2256+ mutex_lock(&dev->struct_mutex);
2257+ list_del(&bo->lru);
2258+ if (move_unfenced) {
2259+ list_add_tail(&bo->lru, &bm->unfenced);
2260+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
2261+ _DRM_BO_FLAG_UNFENCED);
2262+ } else {
2263+ drm_bo_add_to_lru(bo);
2264+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
2265+ wake_up_all(&bo->event_queue);
2266+ DRM_FLAG_MASKED(bo->priv_flags, 0,
2267+ _DRM_BO_FLAG_UNFENCED);
2268+ }
2269+ }
2270+ mutex_unlock(&dev->struct_mutex);
2271+
2272+ return 0;
2273+}
2274+
2275+int drm_bo_do_validate(struct drm_buffer_object *bo,
2276+ uint64_t flags, uint64_t mask, uint32_t hint,
2277+ uint32_t fence_class,
2278+ int no_wait,
2279+ struct drm_bo_info_rep *rep)
2280+{
2281+ int ret;
2282+
2283+ mutex_lock(&bo->mutex);
2284+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
2285+
2286+ if (ret)
2287+ goto out;
2288+
2289+ DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
2290+ ret = drm_bo_new_mask(bo, flags, mask);
2291+ if (ret)
2292+ goto out;
2293+
2294+ ret = drm_buffer_object_validate(bo,
2295+ fence_class,
2296+ !(hint & DRM_BO_HINT_DONT_FENCE),
2297+ no_wait);
2298+out:
2299+ if (rep)
2300+ drm_bo_fill_rep_arg(bo, rep);
2301+
2302+ mutex_unlock(&bo->mutex);
2303+ return ret;
2304+}
2305+EXPORT_SYMBOL(drm_bo_do_validate);
2306+
2307+
2308+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
2309+ uint32_t fence_class,
2310+ uint64_t flags, uint64_t mask,
2311+ uint32_t hint,
2312+ int use_old_fence_class,
2313+ struct drm_bo_info_rep *rep,
2314+ struct drm_buffer_object **bo_rep)
2315+{
2316+ struct drm_device *dev = file_priv->minor->dev;
2317+ struct drm_buffer_object *bo;
2318+ int ret;
2319+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
2320+
2321+ mutex_lock(&dev->struct_mutex);
2322+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2323+ mutex_unlock(&dev->struct_mutex);
2324+
2325+ if (!bo)
2326+ return -EINVAL;
2327+
2328+ if (use_old_fence_class)
2329+ fence_class = bo->fence_class;
2330+
2331+ /*
2332+ * Only allow creator to change shared buffer mask.
2333+ */
2334+
2335+ if (bo->base.owner != file_priv)
2336+ mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
2337+
2338+
2339+ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
2340+ no_wait, rep);
2341+
2342+ if (!ret && bo_rep)
2343+ *bo_rep = bo;
2344+ else
2345+ drm_bo_usage_deref_unlocked(&bo);
2346+
2347+ return ret;
2348+}
2349+EXPORT_SYMBOL(drm_bo_handle_validate);
2350+
2351+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
2352+ struct drm_bo_info_rep *rep)
2353+{
2354+ struct drm_device *dev = file_priv->minor->dev;
2355+ struct drm_buffer_object *bo;
2356+
2357+ mutex_lock(&dev->struct_mutex);
2358+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2359+ mutex_unlock(&dev->struct_mutex);
2360+
2361+ if (!bo)
2362+ return -EINVAL;
2363+
2364+ mutex_lock(&bo->mutex);
2365+ if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
2366+ (void)drm_bo_busy(bo);
2367+ drm_bo_fill_rep_arg(bo, rep);
2368+ mutex_unlock(&bo->mutex);
2369+ drm_bo_usage_deref_unlocked(&bo);
2370+ return 0;
2371+}
2372+
2373+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
2374+ uint32_t hint,
2375+ struct drm_bo_info_rep *rep)
2376+{
2377+ struct drm_device *dev = file_priv->minor->dev;
2378+ struct drm_buffer_object *bo;
2379+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
2380+ int ret;
2381+
2382+ mutex_lock(&dev->struct_mutex);
2383+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2384+ mutex_unlock(&dev->struct_mutex);
2385+
2386+ if (!bo)
2387+ return -EINVAL;
2388+
2389+ mutex_lock(&bo->mutex);
2390+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
2391+ if (ret)
2392+ goto out;
2393+ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
2394+ if (ret)
2395+ goto out;
2396+
2397+ drm_bo_fill_rep_arg(bo, rep);
2398+
2399+out:
2400+ mutex_unlock(&bo->mutex);
2401+ drm_bo_usage_deref_unlocked(&bo);
2402+ return ret;
2403+}
2404+
2405+static inline size_t drm_size_align(size_t size)
2406+{
2407+ size_t tmpSize = 4;
2408+ if (size > PAGE_SIZE)
2409+ return PAGE_ALIGN(size);
2410+ while (tmpSize < size)
2411+ tmpSize <<= 1;
2412+
2413+ return (size_t) tmpSize;
2414+}
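
drm_size_align() rounds sub-page sizes up to the next power of two (with a floor of 4 bytes) and page-aligns anything larger, so the per-object accounting below can only overestimate. For example, assuming PAGE_SIZE == 4096:

    /* drm_size_align(3)    == 4     (minimum granularity)
     * drm_size_align(100)  == 128   (next power of two)
     * drm_size_align(4096) == 4096  (already a power of two)
     * drm_size_align(5000) == 8192  (> PAGE_SIZE: whole pages) */
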
2415+
2416+static int drm_bo_reserve_size(struct drm_device *dev,
2417+ int user_bo,
2418+ unsigned long num_pages,
2419+ unsigned long *size)
2420+{
2421+ struct drm_bo_driver *driver = dev->driver->bo_driver;
2422+
2423+ *size = drm_size_align(sizeof(struct drm_buffer_object)) +
2424+ /* Always account for a TTM, even for fixed memory types */
2425+ drm_ttm_size(dev, num_pages, user_bo) +
2426+ /* user space mapping structure */
2427+ drm_size_align(sizeof(drm_local_map_t)) +
2428+ /* file offset space, aperture space, pinned space */
2429+ 3*drm_size_align(sizeof(struct drm_mm_node *)) +
2430+ /* ttm backend */
2431+ driver->backend_size(dev, num_pages);
2432+
2433+	/* FIXME: should this be able to fail with -ENOMEM? */
2434+ return 0;
2435+}
2436+
2437+int drm_buffer_object_create(struct drm_device *dev,
2438+ unsigned long size,
2439+ enum drm_bo_type type,
2440+ uint64_t mask,
2441+ uint32_t hint,
2442+ uint32_t page_alignment,
2443+ unsigned long buffer_start,
2444+ struct drm_buffer_object **buf_obj)
2445+{
2446+ struct drm_buffer_manager *bm = &dev->bm;
2447+ struct drm_buffer_object *bo;
2448+ int ret = 0;
2449+ unsigned long num_pages;
2450+ unsigned long reserved_size;
2451+
2452+ size += buffer_start & ~PAGE_MASK;
2453+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2454+ if (num_pages == 0) {
2455+ DRM_ERROR("Illegal buffer object size.\n");
2456+ return -EINVAL;
2457+ }
2458+
2459+ ret = drm_bo_reserve_size(dev, type == drm_bo_type_user,
2460+ num_pages, &reserved_size);
2461+
2462+ if (ret) {
2463+ DRM_DEBUG("Failed reserving space for buffer object.\n");
2464+ return ret;
2465+ }
2466+
2467+ bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
2468+
2469+ if (!bo) {
2470+ drm_bo_unreserve_size(num_pages);
2471+ return -ENOMEM;
2472+ }
2473+
2474+ mutex_init(&bo->mutex);
2475+ mutex_lock(&bo->mutex);
2476+
2477+ bo->reserved_size = reserved_size;
2478+ atomic_set(&bo->usage, 1);
2479+ atomic_set(&bo->mapped, -1);
2480+ DRM_INIT_WAITQUEUE(&bo->event_queue);
2481+ INIT_LIST_HEAD(&bo->lru);
2482+ INIT_LIST_HEAD(&bo->pinned_lru);
2483+ INIT_LIST_HEAD(&bo->ddestroy);
2484+#ifdef DRM_ODD_MM_COMPAT
2485+ INIT_LIST_HEAD(&bo->p_mm_list);
2486+ INIT_LIST_HEAD(&bo->vma_list);
2487+#endif
2488+ bo->dev = dev;
2489+ bo->type = type;
2490+ bo->num_pages = num_pages;
2491+ bo->mem.mem_type = DRM_BO_MEM_LOCAL;
2492+ bo->mem.num_pages = bo->num_pages;
2493+ bo->mem.mm_node = NULL;
2494+ bo->mem.page_alignment = page_alignment;
2495+ bo->buffer_start = buffer_start & PAGE_MASK;
2496+ bo->priv_flags = 0;
2497+ bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
2498+ DRM_BO_FLAG_MAPPABLE;
2499+ bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
2500+ DRM_BO_FLAG_MAPPABLE;
2501+ atomic_inc(&bm->count);
2502+ ret = drm_bo_new_mask(bo, mask, mask);
2503+ if (ret)
2504+ goto out_err;
2505+
2506+ if (bo->type == drm_bo_type_dc) {
2507+ mutex_lock(&dev->struct_mutex);
2508+ ret = drm_bo_setup_vm_locked(bo);
2509+ mutex_unlock(&dev->struct_mutex);
2510+ if (ret)
2511+ goto out_err;
2512+ }
2513+
2514+ ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
2515+ if (ret)
2516+ goto out_err;
2517+
2518+ mutex_unlock(&bo->mutex);
2519+ *buf_obj = bo;
2520+ return 0;
2521+
2522+out_err:
2523+ mutex_unlock(&bo->mutex);
2524+
2525+ drm_bo_usage_deref_unlocked(&bo);
2526+ return ret;
2527+}
2528+EXPORT_SYMBOL(drm_buffer_object_create);
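
drm_buffer_object_create() always starts the object in DRM_BO_MEM_LOCAL and then lets drm_buffer_object_validate() move it into the placement requested by mask. A hedged sketch of a driver-side allocation (the helper is hypothetical; the DRM_BO_FLAG_* names are assumed from the matching drm.h changes):

    static int example_alloc_tt(struct drm_device *dev, unsigned long size,
                                struct drm_buffer_object **bo)
    {
            /* Readable/writable, CPU-mappable buffer placed in TT space. */
            uint64_t mask = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ |
                            DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MAPPABLE;

            /* drm_bo_type_dc also sets up a user-space mappable offset
             * via drm_bo_setup_vm_locked(). */
            return drm_buffer_object_create(dev, size, drm_bo_type_dc,
                                            mask, 0 /* hint */,
                                            0 /* page_alignment */,
                                            0 /* buffer_start */, bo);
    }
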
2529+
2530+
2531+static int drm_bo_add_user_object(struct drm_file *file_priv,
2532+ struct drm_buffer_object *bo, int shareable)
2533+{
2534+ struct drm_device *dev = file_priv->minor->dev;
2535+ int ret;
2536+
2537+ mutex_lock(&dev->struct_mutex);
2538+ ret = drm_add_user_object(file_priv, &bo->base, shareable);
2539+ if (ret)
2540+ goto out;
2541+
2542+ bo->base.remove = drm_bo_base_deref_locked;
2543+ bo->base.type = drm_buffer_type;
2544+ bo->base.ref_struct_locked = NULL;
2545+ bo->base.unref = drm_buffer_user_object_unmap;
2546+
2547+out:
2548+ mutex_unlock(&dev->struct_mutex);
2549+ return ret;
2550+}
2551+
2552+int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2553+{
2554+ struct drm_bo_create_arg *arg = data;
2555+ struct drm_bo_create_req *req = &arg->d.req;
2556+ struct drm_bo_info_rep *rep = &arg->d.rep;
2557+ struct drm_buffer_object *entry;
2558+ enum drm_bo_type bo_type;
2559+ int ret = 0;
2560+
2561+ DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
2562+ (int)(req->size / 1024), req->page_alignment * 4);
2563+
2564+ if (!dev->bm.initialized) {
2565+ DRM_ERROR("Buffer object manager is not initialized.\n");
2566+ return -EINVAL;
2567+ }
2568+
2569+ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
2570+
2571+ if (bo_type == drm_bo_type_user)
2572+ req->mask &= ~DRM_BO_FLAG_SHAREABLE;
2573+
2574+ ret = drm_buffer_object_create(file_priv->minor->dev,
2575+ req->size, bo_type, req->mask,
2576+ req->hint, req->page_alignment,
2577+ req->buffer_start, &entry);
2578+ if (ret)
2579+ goto out;
2580+
2581+ ret = drm_bo_add_user_object(file_priv, entry,
2582+ req->mask & DRM_BO_FLAG_SHAREABLE);
2583+ if (ret) {
2584+ drm_bo_usage_deref_unlocked(&entry);
2585+ goto out;
2586+ }
2587+
2588+ mutex_lock(&entry->mutex);
2589+ drm_bo_fill_rep_arg(entry, rep);
2590+ mutex_unlock(&entry->mutex);
2591+
2592+out:
2593+ return ret;
2594+}
2595+
2596+int drm_bo_setstatus_ioctl(struct drm_device *dev,
2597+ void *data, struct drm_file *file_priv)
2598+{
2599+ struct drm_bo_map_wait_idle_arg *arg = data;
2600+ struct drm_bo_info_req *req = &arg->d.req;
2601+ struct drm_bo_info_rep *rep = &arg->d.rep;
2602+ int ret;
2603+
2604+ if (!dev->bm.initialized) {
2605+ DRM_ERROR("Buffer object manager is not initialized.\n");
2606+ return -EINVAL;
2607+ }
2608+
2609+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
2610+ if (ret)
2611+ return ret;
2612+
2613+ ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
2614+ req->flags,
2615+ req->mask,
2616+ req->hint | DRM_BO_HINT_DONT_FENCE,
2617+ 1,
2618+ rep, NULL);
2619+
2620+ (void) drm_bo_read_unlock(&dev->bm.bm_lock);
2621+ if (ret)
2622+ return ret;
2623+
2624+ return 0;
2625+}
2626+
2627+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2628+{
2629+ struct drm_bo_map_wait_idle_arg *arg = data;
2630+ struct drm_bo_info_req *req = &arg->d.req;
2631+ struct drm_bo_info_rep *rep = &arg->d.rep;
2632+ int ret;
2633+ if (!dev->bm.initialized) {
2634+ DRM_ERROR("Buffer object manager is not initialized.\n");
2635+ return -EINVAL;
2636+ }
2637+
2638+ ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
2639+ req->hint, rep);
2640+ if (ret)
2641+ return ret;
2642+
2643+ return 0;
2644+}
2645+
2646+int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2647+{
2648+ struct drm_bo_handle_arg *arg = data;
2649+ int ret;
2650+ if (!dev->bm.initialized) {
2651+ DRM_ERROR("Buffer object manager is not initialized.\n");
2652+ return -EINVAL;
2653+ }
2654+
2655+ ret = drm_buffer_object_unmap(file_priv, arg->handle);
2656+ return ret;
2657+}
2658+
2659+
2660+int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2661+{
2662+ struct drm_bo_reference_info_arg *arg = data;
2663+ struct drm_bo_handle_arg *req = &arg->d.req;
2664+ struct drm_bo_info_rep *rep = &arg->d.rep;
2665+ struct drm_user_object *uo;
2666+ int ret;
2667+
2668+ if (!dev->bm.initialized) {
2669+ DRM_ERROR("Buffer object manager is not initialized.\n");
2670+ return -EINVAL;
2671+ }
2672+
2673+ ret = drm_user_object_ref(file_priv, req->handle,
2674+ drm_buffer_type, &uo);
2675+ if (ret)
2676+ return ret;
2677+
2678+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
2679+ if (ret)
2680+ return ret;
2681+
2682+ return 0;
2683+}
2684+
2685+int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2686+{
2687+ struct drm_bo_handle_arg *arg = data;
2688+ int ret = 0;
2689+
2690+ if (!dev->bm.initialized) {
2691+ DRM_ERROR("Buffer object manager is not initialized.\n");
2692+ return -EINVAL;
2693+ }
2694+
2695+ ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2696+ return ret;
2697+}
2698+
2699+int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2700+{
2701+ struct drm_bo_reference_info_arg *arg = data;
2702+ struct drm_bo_handle_arg *req = &arg->d.req;
2703+ struct drm_bo_info_rep *rep = &arg->d.rep;
2704+ int ret;
2705+
2706+ if (!dev->bm.initialized) {
2707+ DRM_ERROR("Buffer object manager is not initialized.\n");
2708+ return -EINVAL;
2709+ }
2710+
2711+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
2712+ if (ret)
2713+ return ret;
2714+
2715+ return 0;
2716+}
2717+
2718+int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2719+{
2720+ struct drm_bo_map_wait_idle_arg *arg = data;
2721+ struct drm_bo_info_req *req = &arg->d.req;
2722+ struct drm_bo_info_rep *rep = &arg->d.rep;
2723+ int ret;
2724+ if (!dev->bm.initialized) {
2725+ DRM_ERROR("Buffer object manager is not initialized.\n");
2726+ return -EINVAL;
2727+ }
2728+
2729+ ret = drm_bo_handle_wait(file_priv, req->handle,
2730+ req->hint, rep);
2731+ if (ret)
2732+ return ret;
2733+
2734+ return 0;
2735+}
2736+
2737+static int drm_bo_leave_list(struct drm_buffer_object *bo,
2738+ uint32_t mem_type,
2739+ int free_pinned,
2740+ int allow_errors)
2741+{
2742+ struct drm_device *dev = bo->dev;
2743+ int ret = 0;
2744+
2745+ mutex_lock(&bo->mutex);
2746+
2747+ ret = drm_bo_expire_fence(bo, allow_errors);
2748+ if (ret)
2749+ goto out;
2750+
2751+ if (free_pinned) {
2752+ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2753+ mutex_lock(&dev->struct_mutex);
2754+ list_del_init(&bo->pinned_lru);
2755+ if (bo->pinned_node == bo->mem.mm_node)
2756+ bo->pinned_node = NULL;
2757+ if (bo->pinned_node != NULL) {
2758+ drm_mm_put_block(bo->pinned_node);
2759+ bo->pinned_node = NULL;
2760+ }
2761+ mutex_unlock(&dev->struct_mutex);
2762+ }
2763+
2764+ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2765+ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
2766+ "cleanup. Removing flag and evicting.\n");
2767+ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2768+ bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
2769+ }
2770+
2771+ if (bo->mem.mem_type == mem_type)
2772+ ret = drm_bo_evict(bo, mem_type, 0);
2773+
2774+ if (ret) {
2775+ if (allow_errors) {
2776+ goto out;
2777+ } else {
2778+ ret = 0;
2779+ DRM_ERROR("Cleanup eviction failed\n");
2780+ }
2781+ }
2782+
2783+out:
2784+ mutex_unlock(&bo->mutex);
2785+ return ret;
2786+}
2787+
2788+
2789+static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2790+ int pinned_list)
2791+{
2792+ if (pinned_list)
2793+ return list_entry(list, struct drm_buffer_object, pinned_lru);
2794+ else
2795+ return list_entry(list, struct drm_buffer_object, lru);
2796+}
2797+
2798+/*
2799+ * dev->struct_mutex locked.
2800+ */
2801+
2802+static int drm_bo_force_list_clean(struct drm_device *dev,
2803+ struct list_head *head,
2804+ unsigned mem_type,
2805+ int free_pinned,
2806+ int allow_errors,
2807+ int pinned_list)
2808+{
2809+ struct list_head *list, *next, *prev;
2810+ struct drm_buffer_object *entry, *nentry;
2811+ int ret;
2812+ int do_restart;
2813+
2814+ /*
2815+ * The list traversal is a bit odd here, because an item may
2816+ * disappear from the list when we release the struct_mutex or
2817+ * when we decrease the usage count. Also we're not guaranteed
2818+ * to drain pinned lists, so we can't always restart.
2819+ */
2820+
2821+restart:
2822+ nentry = NULL;
2823+ list_for_each_safe(list, next, head) {
2824+ prev = list->prev;
2825+
2826+ entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2827+ atomic_inc(&entry->usage);
2828+ if (nentry) {
2829+ atomic_dec(&nentry->usage);
2830+ nentry = NULL;
2831+ }
2832+
2833+ /*
2834+ * Protect the next item from destruction, so we can check
2835+ * its list pointers later on.
2836+ */
2837+
2838+ if (next != head) {
2839+ nentry = drm_bo_entry(next, pinned_list);
2840+ atomic_inc(&nentry->usage);
2841+ }
2842+ mutex_unlock(&dev->struct_mutex);
2843+
2844+ ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2845+ allow_errors);
2846+ mutex_lock(&dev->struct_mutex);
2847+
2848+ drm_bo_usage_deref_locked(&entry);
2849+ if (ret)
2850+ return ret;
2851+
2852+ /*
2853+ * Has the next item disappeared from the list?
2854+ */
2855+
2856+ do_restart = ((next->prev != list) && (next->prev != prev));
2857+
2858+ if (nentry != NULL && do_restart)
2859+ drm_bo_usage_deref_locked(&nentry);
2860+
2861+ if (do_restart)
2862+ goto restart;
2863+ }
2864+ return 0;
2865+}
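
The invariant behind this traversal deserves spelling out. Both the current entry and its successor are kept alive with usage references while struct_mutex is dropped; after re-acquiring the mutex, the back-pointer of next tells us what happened in the meantime. Distilled (a restatement of the check above, not replacement code):

    /* After drm_bo_leave_list() ran with struct_mutex dropped:
     *
     *   next->prev == list   nothing was unlinked; keep walking.
     *   next->prev == prev   only "entry" was unlinked (list_del points
     *                        next->prev at entry's old predecessor);
     *                        "next" is still valid, keep walking.
     *   anything else        "next" itself was moved or unlinked, so its
     *                        reference is dropped and the scan restarts.
     */
    do_restart = ((next->prev != list) && (next->prev != prev));
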
2866+
2867+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
2868+{
2869+ struct drm_buffer_manager *bm = &dev->bm;
2870+ struct drm_mem_type_manager *man = &bm->man[mem_type];
2871+ int ret = -EINVAL;
2872+
2873+ if (mem_type >= DRM_BO_MEM_TYPES) {
2874+ DRM_ERROR("Illegal memory type %d\n", mem_type);
2875+ return ret;
2876+ }
2877+
2878+ if (!man->has_type) {
2879+ DRM_ERROR("Trying to take down uninitialized "
2880+ "memory manager type %u\n", mem_type);
2881+ return ret;
2882+ }
2883+ man->use_type = 0;
2884+ man->has_type = 0;
2885+
2886+ ret = 0;
2887+ if (mem_type > 0) {
2888+ BUG_ON(!list_empty(&bm->unfenced));
2889+ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2890+ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2891+
2892+ if (drm_mm_clean(&man->manager)) {
2893+ drm_mm_takedown(&man->manager);
2894+ } else {
2895+ ret = -EBUSY;
2896+ }
2897+ }
2898+
2899+ return ret;
2900+}
2901+EXPORT_SYMBOL(drm_bo_clean_mm);
2902+
2903+/**
2904+ * Evict all buffers of a particular mem_type, but leave memory manager
2905+ * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2906+ * point since we have the hardware lock.
2907+ */
2908+
2909+static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2910+{
2911+ int ret;
2912+ struct drm_buffer_manager *bm = &dev->bm;
2913+ struct drm_mem_type_manager *man = &bm->man[mem_type];
2914+
2915+ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2916+ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2917+ return -EINVAL;
2918+ }
2919+
2920+ if (!man->has_type) {
2921+ DRM_ERROR("Memory type %u has not been initialized.\n",
2922+ mem_type);
2923+ return 0;
2924+ }
2925+
2926+ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2927+ if (ret)
2928+ return ret;
2929+ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2930+
2931+ return ret;
2932+}
2933+
2934+int drm_bo_init_mm(struct drm_device *dev,
2935+ unsigned type,
2936+ unsigned long p_offset, unsigned long p_size)
2937+{
2938+ struct drm_buffer_manager *bm = &dev->bm;
2939+ int ret = -EINVAL;
2940+ struct drm_mem_type_manager *man;
2941+
2942+ if (type >= DRM_BO_MEM_TYPES) {
2943+ DRM_ERROR("Illegal memory type %d\n", type);
2944+ return ret;
2945+ }
2946+
2947+ man = &bm->man[type];
2948+ if (man->has_type) {
2949+ DRM_ERROR("Memory manager already initialized for type %d\n",
2950+ type);
2951+ return ret;
2952+ }
2953+
2954+ ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2955+ if (ret)
2956+ return ret;
2957+
2958+ ret = 0;
2959+ if (type != DRM_BO_MEM_LOCAL) {
2960+ if (!p_size) {
2961+ DRM_ERROR("Zero size memory manager type %d\n", type);
2962+ return ret;
2963+ }
2964+ ret = drm_mm_init(&man->manager, p_offset, p_size);
2965+ if (ret)
2966+ return ret;
2967+ }
2968+ man->has_type = 1;
2969+ man->use_type = 1;
2970+
2971+ INIT_LIST_HEAD(&man->lru);
2972+ INIT_LIST_HEAD(&man->pinned);
2973+
2974+ return 0;
2975+}
2976+EXPORT_SYMBOL(drm_bo_init_mm);
2977+
2978+/*
2979+ * This function is intended to be called on drm driver unload.
2980+ * If you decide to call it from lastclose, you must protect the call
2981+ * from a potentially racing drm_bo_driver_init in firstopen.
2982+ * (This may happen on X server restart).
2983+ */
2984+
2985+int drm_bo_driver_finish(struct drm_device *dev)
2986+{
2987+ struct drm_buffer_manager *bm = &dev->bm;
2988+ int ret = 0;
2989+ unsigned i = DRM_BO_MEM_TYPES;
2990+ struct drm_mem_type_manager *man;
2991+
2992+ mutex_lock(&dev->struct_mutex);
2993+
2994+ if (!bm->initialized)
2995+ goto out;
2996+ bm->initialized = 0;
2997+
2998+ while (i--) {
2999+ man = &bm->man[i];
3000+ if (man->has_type) {
3001+ man->use_type = 0;
3002+ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
3003+ ret = -EBUSY;
3004+ DRM_ERROR("DRM memory manager type %d "
3005+ "is not clean.\n", i);
3006+ }
3007+ man->has_type = 0;
3008+ }
3009+ }
3010+ mutex_unlock(&dev->struct_mutex);
3011+
3012+ if (!cancel_delayed_work(&bm->wq))
3013+ flush_scheduled_work();
3014+
3015+ mutex_lock(&dev->struct_mutex);
3016+ drm_bo_delayed_delete(dev, 1);
3017+ if (list_empty(&bm->ddestroy))
3018+ DRM_DEBUG("Delayed destroy list was clean\n");
3019+
3020+ if (list_empty(&bm->man[0].lru))
3021+ DRM_DEBUG("Swap list was clean\n");
3022+
3023+ if (list_empty(&bm->man[0].pinned))
3024+ DRM_DEBUG("NO_MOVE list was clean\n");
3025+
3026+ if (list_empty(&bm->unfenced))
3027+ DRM_DEBUG("Unfenced list was clean\n");
3028+
3029+ __free_page(bm->dummy_read_page);
3030+
3031+out:
3032+ mutex_unlock(&dev->struct_mutex);
3033+ return ret;
3034+}
3035+EXPORT_SYMBOL(drm_bo_driver_finish);
3036+
3037+/*
3038+ * This function is intended to be called on drm driver load.
3039+ * If you decide to call it from firstopen, you must protect the call
3040+ * from a potentially racing drm_bo_driver_finish in lastclose.
3041+ * (This may happen on X server restart).
3042+ */
3043+
3044+int drm_bo_driver_init(struct drm_device *dev)
3045+{
3046+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3047+ struct drm_buffer_manager *bm = &dev->bm;
3048+ int ret = -EINVAL;
3049+
3050+ bm->dummy_read_page = NULL;
3051+ drm_bo_init_lock(&bm->bm_lock);
3052+ mutex_lock(&dev->struct_mutex);
3053+ if (!driver)
3054+ goto out_unlock;
3055+
3056+ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
3057+ if (!bm->dummy_read_page) {
3058+ ret = -ENOMEM;
3059+ goto out_unlock;
3060+ }
3061+
3062+
3063+ /*
3064+ * Initialize the system memory buffer type.
3065+ * Other types need to be driver / IOCTL initialized.
3066+ */
3067+ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
3068+ if (ret)
3069+ goto out_unlock;
3070+
3071+ INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
3072+
3073+ bm->initialized = 1;
3074+ bm->nice_mode = 1;
3075+ atomic_set(&bm->count, 0);
3076+ bm->cur_pages = 0;
3077+ INIT_LIST_HEAD(&bm->unfenced);
3078+ INIT_LIST_HEAD(&bm->ddestroy);
3079+out_unlock:
3080+ mutex_unlock(&dev->struct_mutex);
3081+ return ret;
3082+}
3083+EXPORT_SYMBOL(drm_bo_driver_init);
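
A driver brackets its use of the buffer manager with these two calls, initializing any non-local memory types either itself or through the DRM_IOCTL_MM_INIT path below. A sketch of the pairing, with a made-up aperture size (sizes passed to drm_bo_init_mm() are in pages, matching the mm_node->start << PAGE_SHIFT arithmetic used elsewhere in this patch):

    static int example_driver_load(struct drm_device *dev)
    {
            int ret = drm_bo_driver_init(dev);
            if (ret)
                    return ret;

            /* Hypothetical: manage 256 MB of TT space from page 0. */
            return drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
                                  (256 * 1024 * 1024) >> PAGE_SHIFT);
    }

    static void example_driver_unload(struct drm_device *dev)
    {
            /* Takes down every initialized memory type and the
             * delayed-destroy machinery. */
            (void)drm_bo_driver_finish(dev);
    }
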
3084+
3085+int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3086+{
3087+ struct drm_mm_init_arg *arg = data;
3088+ struct drm_buffer_manager *bm = &dev->bm;
3089+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3090+ int ret;
3091+
3092+ if (!driver) {
3093+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3094+ return -EINVAL;
3095+ }
3096+
3097+	if (arg->magic != DRM_BO_INIT_MAGIC) {
3098+		DRM_ERROR("You are using an old libdrm that is not compatible with\n"
3099+			  "\tthe kernel DRM module. Please upgrade your libdrm.\n");
3100+		return -EINVAL;
3101+	}
3102+	if (arg->major != DRM_BO_INIT_MAJOR) {
3103+		DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
3104+			  "\tversion don't match. Got %d, expected %d.\n",
3105+			  arg->major, DRM_BO_INIT_MAJOR);
3106+		return -EINVAL;
3107+	}
3108+
3109+	ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
3110+	if (ret)
3111+		return ret;
3112+	ret = -EINVAL;
3113+
3114+ mutex_lock(&dev->struct_mutex);
3115+ if (!bm->initialized) {
3116+ DRM_ERROR("DRM memory manager was not initialized.\n");
3117+ goto out;
3118+ }
3119+ if (arg->mem_type == 0) {
3120+ DRM_ERROR("System memory buffers already initialized.\n");
3121+ goto out;
3122+ }
3123+ ret = drm_bo_init_mm(dev, arg->mem_type,
3124+ arg->p_offset, arg->p_size);
3125+
3126+out:
3127+ mutex_unlock(&dev->struct_mutex);
3128+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
3129+
3130+ if (ret)
3131+ return ret;
3132+
3133+ return 0;
3134+}
3135+
3136+int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3137+{
3138+ struct drm_mm_type_arg *arg = data;
3139+ struct drm_buffer_manager *bm = &dev->bm;
3140+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3141+ int ret;
3142+
3143+ if (!driver) {
3144+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3145+ return -EINVAL;
3146+ }
3147+
3148+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
3149+ if (ret)
3150+ return ret;
3151+
3152+ mutex_lock(&dev->struct_mutex);
3153+ ret = -EINVAL;
3154+ if (!bm->initialized) {
3155+ DRM_ERROR("DRM memory manager was not initialized\n");
3156+ goto out;
3157+ }
3158+ if (arg->mem_type == 0) {
3159+ DRM_ERROR("No takedown for System memory buffers.\n");
3160+ goto out;
3161+ }
3162+ ret = 0;
3163+ if (drm_bo_clean_mm(dev, arg->mem_type)) {
3164+ DRM_ERROR("Memory manager type %d not clean. "
3165+ "Delaying takedown\n", arg->mem_type);
3166+ }
3167+out:
3168+ mutex_unlock(&dev->struct_mutex);
3169+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
3170+
3171+ if (ret)
3172+ return ret;
3173+
3174+ return 0;
3175+}
3176+
3177+int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3178+{
3179+ struct drm_mm_type_arg *arg = data;
3180+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3181+ int ret;
3182+
3183+ if (!driver) {
3184+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3185+ return -EINVAL;
3186+ }
3187+
3188+ if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
3189+ DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
3190+ return -EINVAL;
3191+ }
3192+
3193+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
3194+ ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
3195+ if (ret)
3196+ return ret;
3197+ }
3198+
3199+ mutex_lock(&dev->struct_mutex);
3200+ ret = drm_bo_lock_mm(dev, arg->mem_type);
3201+ mutex_unlock(&dev->struct_mutex);
3202+ if (ret) {
3203+ (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
3204+ return ret;
3205+ }
3206+
3207+ return 0;
3208+}
3209+
3210+int drm_mm_unlock_ioctl(struct drm_device *dev,
3211+ void *data,
3212+ struct drm_file *file_priv)
3213+{
3214+ struct drm_mm_type_arg *arg = data;
3215+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3216+ int ret;
3217+
3218+ if (!driver) {
3219+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3220+ return -EINVAL;
3221+ }
3222+
3223+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
3224+ ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
3225+ if (ret)
3226+ return ret;
3227+ }
3228+
3229+ return 0;
3230+}
3231+
3232+/*
3233+ * buffer object vm functions.
3234+ */
3235+
3236+int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
3237+{
3238+ struct drm_buffer_manager *bm = &dev->bm;
3239+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3240+
3241+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
3242+ if (mem->mem_type == DRM_BO_MEM_LOCAL)
3243+ return 0;
3244+
3245+ if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
3246+ return 0;
3247+
3248+ if (mem->flags & DRM_BO_FLAG_CACHED)
3249+ return 0;
3250+ }
3251+ return 1;
3252+}
3253+EXPORT_SYMBOL(drm_mem_reg_is_pci);
3254+
3255+/**
3256+ * \c Get the PCI offset for the buffer object memory.
3257+ *
3258+ * \param dev The drm device. \param mem The buffer object's memory region.
3259+ * \param bus_base On return the base of the PCI region
3260+ * \param bus_offset On return the byte offset into the PCI region
3261+ * \param bus_size On return the byte size of the buffer object or zero if
3262+ * the buffer object memory is not accessible through a PCI region.
3263+ * \return Failure indication.
3264+ *
3265+ * Returns -EINVAL if the buffer object is currently not mappable.
3266+ * Otherwise returns zero.
3267+ */
3268+
3269+int drm_bo_pci_offset(struct drm_device *dev,
3270+ struct drm_bo_mem_reg *mem,
3271+ unsigned long *bus_base,
3272+ unsigned long *bus_offset, unsigned long *bus_size)
3273+{
3274+ struct drm_buffer_manager *bm = &dev->bm;
3275+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3276+
3277+ *bus_size = 0;
3278+ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
3279+ return -EINVAL;
3280+
3281+ if (drm_mem_reg_is_pci(dev, mem)) {
3282+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
3283+ *bus_size = mem->num_pages << PAGE_SHIFT;
3284+ *bus_base = man->io_offset;
3285+ }
3286+
3287+ return 0;
3288+}
3289+
3290+/**
3291+ * \c Kill all user-space virtual mappings of this buffer object.
3292+ *
3293+ * \param bo The buffer object.
3294+ *
3295+ * Call bo->mutex locked.
3296+ */
3297+
3298+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
3299+{
3300+ struct drm_device *dev = bo->dev;
3301+ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
3302+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
3303+
3304+ if (!dev->dev_mapping)
3305+ return;
3306+
3307+ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
3308+}
3309+
3310+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
3311+{
3312+ struct drm_map_list *list;
3313+ drm_local_map_t *map;
3314+ struct drm_device *dev = bo->dev;
3315+
3316+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
3317+ if (bo->type != drm_bo_type_dc)
3318+ return;
3319+
3320+ list = &bo->map_list;
3321+ if (list->user_token) {
3322+ drm_ht_remove_item(&dev->map_hash, &list->hash);
3323+ list->user_token = 0;
3324+ }
3325+ if (list->file_offset_node) {
3326+ drm_mm_put_block(list->file_offset_node);
3327+ list->file_offset_node = NULL;
3328+ }
3329+
3330+ map = list->map;
3331+ if (!map)
3332+ return;
3333+
3334+ drm_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
3335+ list->map = NULL;
3336+ list->user_token = 0ULL;
3337+ drm_bo_usage_deref_locked(&bo);
3338+}
3339+
3340+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
3341+{
3342+ struct drm_map_list *list = &bo->map_list;
3343+ drm_local_map_t *map;
3344+ struct drm_device *dev = bo->dev;
3345+
3346+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
3347+ list->map = drm_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
3348+ if (!list->map)
3349+ return -ENOMEM;
3350+
3351+ map = list->map;
3352+ map->offset = 0;
3353+ map->type = _DRM_TTM;
3354+ map->flags = _DRM_REMOVABLE;
3355+ map->size = bo->mem.num_pages * PAGE_SIZE;
3356+ atomic_inc(&bo->usage);
3357+ map->handle = (void *)bo;
3358+
3359+ list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
3360+ bo->mem.num_pages, 0, 0);
3361+
3362+ if (!list->file_offset_node) {
3363+ drm_bo_takedown_vm_locked(bo);
3364+ return -ENOMEM;
3365+ }
3366+
3367+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
3368+ bo->mem.num_pages, 0);
3369+ if (!list->file_offset_node) {
3370+ drm_bo_takedown_vm_locked(bo);
3371+ return -ENOMEM;
3372+ }
3373+
3374+ list->hash.key = list->file_offset_node->start;
3375+ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
3376+ drm_bo_takedown_vm_locked(bo);
3377+ return -ENOMEM;
3378+ }
3379+
3380+ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
3381+
3382+ return 0;
3383+}
3384+
3385+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
3386+ struct drm_file *file_priv)
3387+{
3388+ struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
3389+
3390+ arg->major = DRM_BO_INIT_MAJOR;
3391+ arg->minor = DRM_BO_INIT_MINOR;
3392+ arg->patchlevel = DRM_BO_INIT_PATCH;
3393+
3394+ return 0;
3395+}
3396Index: linux-2.6.28/drivers/gpu/drm/drm_bo_lock.c
3397===================================================================
3398--- /dev/null 1970-01-01 00:00:00.000000000 +0000
3399+++ linux-2.6.28/drivers/gpu/drm/drm_bo_lock.c 2009-02-25 15:37:02.000000000 +0000
3400@@ -0,0 +1,175 @@
3401+/**************************************************************************
3402+ *
3403+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
3404+ * All Rights Reserved.
3405+ *
3406+ * Permission is hereby granted, free of charge, to any person obtaining a
3407+ * copy of this software and associated documentation files (the
3408+ * "Software"), to deal in the Software without restriction, including
3409+ * without limitation the rights to use, copy, modify, merge, publish,
3410+ * distribute, sub license, and/or sell copies of the Software, and to
3411+ * permit persons to whom the Software is furnished to do so, subject to
3412+ * the following conditions:
3413+ *
3414+ * The above copyright notice and this permission notice (including the
3415+ * next paragraph) shall be included in all copies or substantial portions
3416+ * of the Software.
3417+ *
3418+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3419+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3420+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
3421+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
3422+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
3423+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
3424+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
3425+ *
3426+ **************************************************************************/
3427+/*
3428+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
3429+ */
3430+
3431+/*
3432+ * This file implements a simple replacement for the buffer manager use
3433+ * of the heavyweight hardware lock.
3434+ * The lock is a read-write lock. Taking it in read mode is fast, and
3435+ * intended for in-kernel use only.
3436+ * Taking it in write mode is slow.
3437+ *
3438+ * The write mode is used only when there is a need to block all
3439+ * user-space processes from allocating a
3440+ * new memory area.
3441+ * Typical use in write mode is X server VT switching, and it's allowed
3442+ * to leave kernel space with the write lock held. If a user-space process
3443+ * dies while having the write-lock, it will be released during the file
3444+ * descriptor release.
3445+ *
3446+ * The read lock is typically placed at the start of an IOCTL- or
3447+ * user-space callable function that may end up allocating a memory area.
3448+ * This includes setstatus, super-ioctls and no_pfn; the latter may move
3449+ * unmappable regions to mappable. It's a bug to leave kernel space with the
3450+ * read lock held.
3451+ *
3452+ * Taking the lock in either mode is interruptible, for low signal-delivery
3453+ * latency. The locking functions will return -EAGAIN if interrupted by a
3454+ * signal.
3455+ *
3456+ * Locking order: The lock should be taken BEFORE any kernel mutexes
3457+ * or spinlocks.
3458+ */
3459+
3460+#include "drmP.h"
3461+
3462+void drm_bo_init_lock(struct drm_bo_lock *lock)
3463+{
3464+ DRM_INIT_WAITQUEUE(&lock->queue);
3465+ atomic_set(&lock->write_lock_pending, 0);
3466+ atomic_set(&lock->readers, 0);
3467+}
3468+
3469+void drm_bo_read_unlock(struct drm_bo_lock *lock)
3470+{
3471+ if (unlikely(atomic_add_negative(-1, &lock->readers)))
3472+ BUG();
3473+ if (atomic_read(&lock->readers) == 0)
3474+ wake_up_interruptible(&lock->queue);
3475+}
3476+EXPORT_SYMBOL(drm_bo_read_unlock);
3477+
3478+int drm_bo_read_lock(struct drm_bo_lock *lock)
3479+{
3480+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
3481+ int ret;
3482+ ret = wait_event_interruptible
3483+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
3484+ if (ret)
3485+ return -EAGAIN;
3486+ }
3487+
3488+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
3489+ int ret;
3490+ ret = wait_event_interruptible
3491+ (lock->queue, atomic_add_unless(&lock->readers, 1, -1));
3492+ if (ret)
3493+ return -EAGAIN;
3494+ }
3495+ return 0;
3496+}
3497+EXPORT_SYMBOL(drm_bo_read_lock);
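
The intended read-side pattern mirrors drm_bo_setstatus_ioctl() earlier in this patch: take the lock around any path that may allocate a memory area, and drop it before returning to user space. A minimal sketch with a placeholder body:

    int example_alloc_path(struct drm_device *dev)
    {
            int ret = drm_bo_read_lock(&dev->bm.bm_lock);
            if (ret)
                    return ret;     /* -EAGAIN: interrupted by a signal */

            /* ... validate or move buffers, possibly allocating ... */

            drm_bo_read_unlock(&dev->bm.bm_lock);
            return 0;
    }
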
3498+
3499+static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
3500+{
3501+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
3502+ return -EINVAL;
3503+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1))
3504+ return -EINVAL;
3505+ wake_up_interruptible(&lock->queue);
3506+ return 0;
3507+}
3508+
3509+static void drm_bo_write_lock_remove(struct drm_file *file_priv,
3510+ struct drm_user_object *item)
3511+{
3512+ struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
3513+ int ret;
3514+
3515+ ret = __drm_bo_write_unlock(lock);
3516+ BUG_ON(ret);
3517+}
3518+
3519+int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
3520+{
3521+ int ret = 0;
3522+ struct drm_device *dev;
3523+
3524+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
3525+ return -EINVAL;
3526+
3527+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
3528+ ret = wait_event_interruptible
3529+ (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0);
3530+
3531+ if (ret) {
3532+ atomic_set(&lock->write_lock_pending, 0);
3533+ wake_up_interruptible(&lock->queue);
3534+ return -EAGAIN;
3535+ }
3536+ }
3537+
3538+ /*
3539+ * Add a dummy user-object, the destructor of which will
3540+ * make sure the lock is released if the client dies
3541+ * while holding it.
3542+ */
3543+
3544+ dev = file_priv->minor->dev;
3545+ mutex_lock(&dev->struct_mutex);
3546+ ret = drm_add_user_object(file_priv, &lock->base, 0);
3547+ lock->base.remove = &drm_bo_write_lock_remove;
3548+ lock->base.type = drm_lock_type;
3549+ if (ret)
3550+ (void)__drm_bo_write_unlock(lock);
3551+
3552+ mutex_unlock(&dev->struct_mutex);
3553+
3554+ return ret;
3555+}
3556+
3557+int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
3558+{
3559+ struct drm_device *dev = file_priv->minor->dev;
3560+ struct drm_ref_object *ro;
3561+
3562+ mutex_lock(&dev->struct_mutex);
3563+
3564+ if (lock->base.owner != file_priv) {
3565+ mutex_unlock(&dev->struct_mutex);
3566+ return -EINVAL;
3567+ }
3568+ ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
3569+ BUG_ON(!ro);
3570+ drm_remove_ref_object(file_priv, ro);
3571+ lock->base.owner = NULL;
3572+
3573+ mutex_unlock(&dev->struct_mutex);
3574+ return 0;
3575+}
3576Index: linux-2.6.28/drivers/gpu/drm/drm_bo_move.c
3577===================================================================
3578--- /dev/null 1970-01-01 00:00:00.000000000 +0000
3579+++ linux-2.6.28/drivers/gpu/drm/drm_bo_move.c 2009-02-25 15:37:02.000000000 +0000
3580@@ -0,0 +1,590 @@
3581+/**************************************************************************
3582+ *
3583+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
3584+ * All Rights Reserved.
3585+ *
3586+ * Permission is hereby granted, free of charge, to any person obtaining a
3587+ * copy of this software and associated documentation files (the
3588+ * "Software"), to deal in the Software without restriction, including
3589+ * without limitation the rights to use, copy, modify, merge, publish,
3590+ * distribute, sub license, and/or sell copies of the Software, and to
3591+ * permit persons to whom the Software is furnished to do so, subject to
3592+ * the following conditions:
3593+ *
3594+ * The above copyright notice and this permission notice (including the
3595+ * next paragraph) shall be included in all copies or substantial portions
3596+ * of the Software.
3597+ *
3598+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3599+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3600+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
3601+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
3602+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
3603+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
3604+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
3605+ *
3606+ **************************************************************************/
3607+/*
3608+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
3609+ */
3610+
3611+#include "drmP.h"
3612+
3613+/**
3614+ * Free the old memory node unless it's a pinned region and we
3615+ * have not been requested to free also pinned regions.
3616+ */
3617+
3618+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
3619+{
3620+ struct drm_bo_mem_reg *old_mem = &bo->mem;
3621+
3622+ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
3623+ mutex_lock(&bo->dev->struct_mutex);
3624+ drm_mm_put_block(old_mem->mm_node);
3625+ mutex_unlock(&bo->dev->struct_mutex);
3626+ }
3627+ old_mem->mm_node = NULL;
3628+}
3629+
3630+int drm_bo_move_ttm(struct drm_buffer_object *bo,
3631+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
3632+{
3633+ struct drm_ttm *ttm = bo->ttm;
3634+ struct drm_bo_mem_reg *old_mem = &bo->mem;
3635+ uint64_t save_flags = old_mem->flags;
3636+ uint64_t save_mask = old_mem->mask;
3637+ int ret;
3638+
3639+ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
3640+ if (evict)
3641+ drm_ttm_evict(ttm);
3642+ else
3643+ drm_ttm_unbind(ttm);
3644+
3645+ drm_bo_free_old_node(bo);
3646+ DRM_FLAG_MASKED(old_mem->flags,
3647+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
3648+ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
3649+ old_mem->mem_type = DRM_BO_MEM_LOCAL;
3650+ save_flags = old_mem->flags;
3651+ }
3652+ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
3653+ ret = drm_bind_ttm(ttm, new_mem);
3654+ if (ret)
3655+ return ret;
3656+ }
3657+
3658+ *old_mem = *new_mem;
3659+ new_mem->mm_node = NULL;
3660+ old_mem->mask = save_mask;
3661+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
3662+ return 0;
3663+}
3664+EXPORT_SYMBOL(drm_bo_move_ttm);
3665+
3666+/**
3667+ * \c Return a kernel virtual address to the buffer object PCI memory.
3668+ *
3669+ * \param dev The drm device. \param mem The memory region to map. \param virtual Returns the mapped address.
3670+ * \return Failure indication.
3671+ *
3672+ * Returns -EINVAL if the buffer object is currently not mappable.
3673+ * Returns -ENOMEM if the ioremap operation failed.
3674+ * Otherwise returns zero.
3675+ *
3676+ * After a successful call, *virtual contains the virtual address, or NULL
3677+ * if the buffer object content is not accessible through PCI space.
3678+ * Call bo->mutex locked.
3679+ */
3680+
3681+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
3682+ void **virtual)
3683+{
3684+ struct drm_buffer_manager *bm = &dev->bm;
3685+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3686+ unsigned long bus_offset;
3687+ unsigned long bus_size;
3688+ unsigned long bus_base;
3689+ int ret;
3690+ void *addr;
3691+
3692+ *virtual = NULL;
3693+ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
3694+ if (ret || bus_size == 0)
3695+ return ret;
3696+
3697+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
3698+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
3699+ else {
3700+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
3701+ if (!addr)
3702+ return -ENOMEM;
3703+ }
3704+ *virtual = addr;
3705+ return 0;
3706+}
3707+EXPORT_SYMBOL(drm_mem_reg_ioremap);
3708+
3709+/**
3710+ * \c Unmap a mapping obtained using drm_mem_reg_ioremap.
3711+ *
3712+ * \param virtual The virtual address returned by drm_mem_reg_ioremap.
3713+ *
3714+ * Call bo->mutex locked.
3715+ */
3716+
3717+void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
3718+ void *virtual)
3719+{
3720+ struct drm_buffer_manager *bm;
3721+ struct drm_mem_type_manager *man;
3722+
3723+ bm = &dev->bm;
3724+ man = &bm->man[mem->mem_type];
3725+
3726+ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
3727+ iounmap(virtual);
3728+}
3729+EXPORT_SYMBOL(drm_mem_reg_iounmap);
3730+
3731+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
3732+{
3733+ uint32_t *dstP =
3734+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
3735+ uint32_t *srcP =
3736+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
3737+
3738+ int i;
3739+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
3740+ iowrite32(ioread32(srcP++), dstP++);
3741+ return 0;
3742+}
3743+
3744+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
3745+ unsigned long page)
3746+{
3747+ struct page *d = drm_ttm_get_page(ttm, page);
3748+ void *dst;
3749+
3750+ if (!d)
3751+ return -ENOMEM;
3752+
3753+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
3754+ dst = kmap(d);
3755+ if (!dst)
3756+ return -ENOMEM;
3757+
3758+ memcpy_fromio(dst, src, PAGE_SIZE);
3759+ kunmap(d);
3760+ return 0;
3761+}
3762+
3763+static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
3764+{
3765+ struct page *s = drm_ttm_get_page(ttm, page);
3766+ void *src;
3767+
3768+ if (!s)
3769+ return -ENOMEM;
3770+
3771+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
3772+ src = kmap(s);
3773+ if (!src)
3774+ return -ENOMEM;
3775+
3776+ memcpy_toio(dst, src, PAGE_SIZE);
3777+ kunmap(s);
3778+ return 0;
3779+}
3780+
3781+int drm_bo_move_memcpy(struct drm_buffer_object *bo,
3782+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
3783+{
3784+ struct drm_device *dev = bo->dev;
3785+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
3786+ struct drm_ttm *ttm = bo->ttm;
3787+ struct drm_bo_mem_reg *old_mem = &bo->mem;
3788+ struct drm_bo_mem_reg old_copy = *old_mem;
3789+ void *old_iomap;
3790+ void *new_iomap;
3791+ int ret;
3792+ uint64_t save_flags = old_mem->flags;
3793+ uint64_t save_mask = old_mem->mask;
3794+ unsigned long i;
3795+ unsigned long page;
3796+ unsigned long add = 0;
3797+ int dir;
3798+
3799+ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
3800+ if (ret)
3801+ return ret;
3802+ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
3803+ if (ret)
3804+ goto out;
3805+
3806+ if (old_iomap == NULL && new_iomap == NULL)
3807+ goto out2;
3808+ if (old_iomap == NULL && ttm == NULL)
3809+ goto out2;
3810+
3811+ add = 0;
3812+ dir = 1;
3813+
3814+ if ((old_mem->mem_type == new_mem->mem_type) &&
3815+ (new_mem->mm_node->start <
3816+ old_mem->mm_node->start + old_mem->mm_node->size)) {
3817+ dir = -1;
3818+ add = new_mem->num_pages - 1;
3819+ }
3820+
3821+ for (i = 0; i < new_mem->num_pages; ++i) {
3822+ page = i * dir + add;
3823+ if (old_iomap == NULL)
3824+ ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
3825+ else if (new_iomap == NULL)
3826+ ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
3827+ else
3828+ ret = drm_copy_io_page(new_iomap, old_iomap, page);
3829+ if (ret)
3830+ goto out1;
3831+ }
3832+ mb();
3833+out2:
3834+ drm_bo_free_old_node(bo);
3835+
3836+ *old_mem = *new_mem;
3837+ new_mem->mm_node = NULL;
3838+ old_mem->mask = save_mask;
3839+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
3840+
3841+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
3842+ drm_ttm_unbind(ttm);
3843+ drm_destroy_ttm(ttm);
3844+ bo->ttm = NULL;
3845+ }
3846+
3847+out1:
3848+ drm_mem_reg_iounmap(dev, new_mem, new_iomap);
3849+out:
3850+ drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
3851+ return ret;
3852+}
3853+EXPORT_SYMBOL(drm_bo_move_memcpy);
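
The dir/add computation gives the copy memmove semantics: when both regions live in the same memory type and the destination overlaps the source, pages are copied back to front. A worked example with hypothetical node positions, in pages:

    /* old_mem occupies pages 0..9, new_mem pages 4..13, same mem_type.
     * new start (4) < old start (0) + old size (10), so the ranges
     * overlap and the code picks:
     *
     *     dir = -1;  add = new_mem->num_pages - 1 = 9;
     *
     * The loop then visits page = i * dir + add = 9, 8, ..., 0, so no
     * source page is overwritten before it has been read. */
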
3854+
3855+/*
3856+ * Transfer a buffer object's memory and LRU status to a newly
3857+ * created object. User-space references remain with the old
3858+ * object. Call bo->mutex locked.
3859+ */
3860+
3861+int drm_buffer_object_transfer(struct drm_buffer_object *bo,
3862+ struct drm_buffer_object **new_obj)
3863+{
3864+ struct drm_buffer_object *fbo;
3865+ struct drm_device *dev = bo->dev;
3866+ struct drm_buffer_manager *bm = &dev->bm;
3867+
3868+ fbo = drm_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
3869+ if (!fbo)
3870+ return -ENOMEM;
3871+
3872+ *fbo = *bo;
3873+ mutex_init(&fbo->mutex);
3874+ mutex_lock(&fbo->mutex);
3875+ mutex_lock(&dev->struct_mutex);
3876+
3877+ DRM_INIT_WAITQUEUE(&bo->event_queue);
3878+ INIT_LIST_HEAD(&fbo->ddestroy);
3879+ INIT_LIST_HEAD(&fbo->lru);
3880+ INIT_LIST_HEAD(&fbo->pinned_lru);
3881+#ifdef DRM_ODD_MM_COMPAT
3882+ INIT_LIST_HEAD(&fbo->vma_list);
3883+ INIT_LIST_HEAD(&fbo->p_mm_list);
3884+#endif
3885+
3886+ fbo->fence = drm_fence_reference_locked(bo->fence);
3887+ fbo->pinned_node = NULL;
3888+ fbo->mem.mm_node->private = (void *)fbo;
3889+ atomic_set(&fbo->usage, 1);
3890+ atomic_inc(&bm->count);
3891+ mutex_unlock(&dev->struct_mutex);
3892+ mutex_unlock(&fbo->mutex);
3893+ bo->reserved_size = 0;
3894+ *new_obj = fbo;
3895+ return 0;
3896+}
3897+
3898+/*
3899+ * Since move is underway, we need to block signals in this function.
3900+ * We cannot restart until it has finished.
3901+ */
3902+
3903+int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
3904+ int evict, int no_wait, uint32_t fence_class,
3905+ uint32_t fence_type, uint32_t fence_flags,
3906+ struct drm_bo_mem_reg *new_mem)
3907+{
3908+ struct drm_device *dev = bo->dev;
3909+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
3910+ struct drm_bo_mem_reg *old_mem = &bo->mem;
3911+ int ret;
3912+ uint64_t save_flags = old_mem->flags;
3913+ uint64_t save_mask = old_mem->mask;
3914+ struct drm_buffer_object *old_obj;
3915+
3916+ if (bo->fence)
3917+ drm_fence_usage_deref_unlocked(&bo->fence);
3918+ ret = drm_fence_object_create(dev, fence_class, fence_type,
3919+ fence_flags | DRM_FENCE_FLAG_EMIT,
3920+ &bo->fence);
3921+ bo->fence_type = fence_type;
3922+ if (ret)
3923+ return ret;
3924+
3925+#ifdef DRM_ODD_MM_COMPAT
3926+ /*
3927+ * In this mode, we don't allow pipelining a copy blit,
3928+ * since the buffer will be accessible from user space
3929+ * the moment we return and rebuild the page tables.
3930+ *
3931+ * With normal vm operation, page tables are rebuilt
3932+ * on demand using fault(), which waits for buffer idle.
3933+ */
3934+ if (1)
3935+#else
3936+ if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
3937+ bo->mem.mm_node != NULL))
3938+#endif
3939+ {
3940+ ret = drm_bo_wait(bo, 0, 1, 0);
3941+ if (ret)
3942+ return ret;
3943+
3944+ drm_bo_free_old_node(bo);
3945+
3946+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
3947+ drm_ttm_unbind(bo->ttm);
3948+ drm_destroy_ttm(bo->ttm);
3949+ bo->ttm = NULL;
3950+ }
3951+ } else {
3952+
3953+ /* This should help pipeline ordinary buffer moves.
3954+ *
3955+ * Hang old buffer memory on a new buffer object,
3956+ * and leave it to be released when the GPU
3957+ * operation has completed.
3958+ */
3959+
3960+ ret = drm_buffer_object_transfer(bo, &old_obj);
3961+
3962+ if (ret)
3963+ return ret;
3964+
3965+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
3966+ old_obj->ttm = NULL;
3967+ else
3968+ bo->ttm = NULL;
3969+
3970+ mutex_lock(&dev->struct_mutex);
3971+ list_del_init(&old_obj->lru);
3972+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
3973+ drm_bo_add_to_lru(old_obj);
3974+
3975+ drm_bo_usage_deref_locked(&old_obj);
3976+ mutex_unlock(&dev->struct_mutex);
3977+
3978+ }
3979+
3980+ *old_mem = *new_mem;
3981+ new_mem->mm_node = NULL;
3982+ old_mem->mask = save_mask;
3983+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
3984+ return 0;
3985+}
3986+EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
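
A driver's accelerated move hook emits its blit and then hands the bookkeeping to this helper, which fences the buffer and either waits for idle (the eviction and pinned cases) or parks the old memory on a ghost object until the fence signals. A hypothetical sketch; driver_emit_blit() is made up, and DRM_FENCE_TYPE_EXE is assumed from the fence interface added alongside this code:

    static int example_driver_move(struct drm_buffer_object *bo,
                                   int evict, int no_wait,
                                   struct drm_bo_mem_reg *new_mem)
    {
            int ret;

            /* Hypothetical hardware blit from bo->mem to new_mem. */
            ret = driver_emit_blit(bo->dev, &bo->mem, new_mem);
            if (ret)        /* fall back to a CPU copy */
                    return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);

            return drm_bo_move_accel_cleanup(bo, evict, no_wait,
                                             0 /* fence_class */,
                                             DRM_FENCE_TYPE_EXE,
                                             0 /* fence_flags */, new_mem);
    }
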
3987+
3988+int drm_bo_same_page(unsigned long offset,
3989+ unsigned long offset2)
3990+{
3991+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
3992+}
3993+EXPORT_SYMBOL(drm_bo_same_page);
3994+
3995+unsigned long drm_bo_offset_end(unsigned long offset,
3996+ unsigned long end)
3997+{
3998+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
3999+ return (end < offset) ? end : offset;
4000+}
4001+EXPORT_SYMBOL(drm_bo_offset_end);
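
These two helpers let callers walk a byte range one page at a time, clamped to the range end. Roughly (a sketch, assuming offset < end on entry):

    unsigned long cur = offset;

    while (cur < end) {
            unsigned long stop = drm_bo_offset_end(cur, end);

            /* Operate on [cur, stop), which by construction never
             * crosses a page boundary. */

            cur = stop;
    }
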
4002+
4003+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
4004+{
4005+ pgprot_t tmp = PAGE_KERNEL;
4006+
4007+#if defined(__i386__) || defined(__x86_64__)
4008+ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
4009+ pgprot_val(tmp) |= _PAGE_PCD;
4010+ pgprot_val(tmp) &= ~_PAGE_PWT;
4011+ }
4012+#elif defined(__powerpc__)
4013+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
4014+ if (map_type == _DRM_REGISTERS)
4015+ pgprot_val(tmp) |= _PAGE_GUARDED;
4016+#endif
4017+#if defined(__ia64__)
4018+ if (map_type == _DRM_TTM)
4019+ tmp = pgprot_writecombine(tmp);
4020+ else
4021+ tmp = pgprot_noncached(tmp);
4022+#endif
4023+ return tmp;
4024+}
4025+
4026+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
4027+ unsigned long bus_offset, unsigned long bus_size,
4028+ struct drm_bo_kmap_obj *map)
4029+{
4030+ struct drm_device *dev = bo->dev;
4031+ struct drm_bo_mem_reg *mem = &bo->mem;
4032+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
4033+
4034+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
4035+ map->bo_kmap_type = bo_map_premapped;
4036+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
4037+ } else {
4038+ map->bo_kmap_type = bo_map_iomap;
4039+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
4040+ }
4041+ return (!map->virtual) ? -ENOMEM : 0;
4042+}
4043+
4044+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
4045+ unsigned long start_page, unsigned long num_pages,
4046+ struct drm_bo_kmap_obj *map)
4047+{
4048+ struct drm_device *dev = bo->dev;
4049+ struct drm_bo_mem_reg *mem = &bo->mem;
4050+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
4051+ pgprot_t prot;
4052+ struct drm_ttm *ttm = bo->ttm;
4053+ struct page *d;
4054+ int i;
4055+
4056+ BUG_ON(!ttm);
4057+
4058+ if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
4059+
4060+ /*
4061+ * We're mapping a single page, and the desired
4062+ * page protection is consistent with the bo.
4063+ */
4064+
4065+ map->bo_kmap_type = bo_map_kmap;
4066+ map->page = drm_ttm_get_page(ttm, start_page);
4067+ map->virtual = kmap(map->page);
4068+ } else {
4069+ /*
4070+		 * Populate the part we're mapping.
4071+ */
4072+
4073+ for (i = start_page; i < start_page + num_pages; ++i) {
4074+ d = drm_ttm_get_page(ttm, i);
4075+ if (!d)
4076+ return -ENOMEM;
4077+ }
4078+
4079+ /*
4080+ * We need to use vmap to get the desired page protection
4081+		 * or to make the buffer object look contiguous.
4082+ */
4083+
4084+ prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
4085+ PAGE_KERNEL :
4086+ drm_kernel_io_prot(man->drm_bus_maptype);
4087+ map->bo_kmap_type = bo_map_vmap;
4088+ map->virtual = vmap(ttm->pages + start_page,
4089+ num_pages, 0, prot);
4090+ }
4091+ return (!map->virtual) ? -ENOMEM : 0;
4092+}
4093+
4094+/*
4095+ * This function is to be used for kernel mapping of buffer objects.
4096+ * It chooses the appropriate mapping method depending on the memory type
4097+ * and caching policy the buffer currently has.
4098+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
4099+ * consumes vmalloc space. Be restrictive with such mappings.
4100+ * Mapping single pages usually returns the logical kernel address
4101+ * (which is fast),
4102+ * but may use slower temporary mappings for high memory pages or
4103+ * uncached / write-combined pages.
4104+ *
4105+ * The function fills in a drm_bo_kmap_obj which can be used to return the
4106+ * kernel virtual address of the buffer.
4107+ *
4108+ * Code servicing a non-privileged user request is only allowed to map one
4109+ * page at a time. We might need to implement a better scheme to stop such
4110+ * processes from consuming all vmalloc space.
4111+ */
4112+
4113+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
4114+ unsigned long num_pages, struct drm_bo_kmap_obj *map)
4115+{
4116+ int ret;
4117+ unsigned long bus_base;
4118+ unsigned long bus_offset;
4119+ unsigned long bus_size;
4120+
4121+ map->virtual = NULL;
4122+
4123+ if (num_pages > bo->num_pages)
4124+ return -EINVAL;
4125+ if (start_page > bo->num_pages)
4126+ return -EINVAL;
4127+#if 0
4128+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
4129+ return -EPERM;
4130+#endif
4131+ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
4132+ &bus_offset, &bus_size);
4133+
4134+ if (ret)
4135+ return ret;
4136+
4137+ if (bus_size == 0) {
4138+ return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
4139+ } else {
4140+ bus_offset += start_page << PAGE_SHIFT;
4141+ bus_size = num_pages << PAGE_SHIFT;
4142+ return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
4143+ }
4144+}
4145+EXPORT_SYMBOL(drm_bo_kmap);
4146+
4147+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
4148+{
4149+ if (!map->virtual)
4150+ return;
4151+
4152+ switch (map->bo_kmap_type) {
4153+ case bo_map_iomap:
4154+ iounmap(map->virtual);
4155+ break;
4156+ case bo_map_vmap:
4157+ vunmap(map->virtual);
4158+ break;
4159+ case bo_map_kmap:
4160+ kunmap(map->page);
4161+ break;
4162+ case bo_map_premapped:
4163+ break;
4164+ default:
4165+ BUG();
4166+ }
4167+ map->virtual = NULL;
4168+ map->page = NULL;
4169+}
4170+EXPORT_SYMBOL(drm_bo_kunmap);
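
Typical in-kernel use brackets a short CPU access between drm_bo_kmap() and drm_bo_kunmap(); the kmap object, not the raw pointer, is what must be kept for the unmap, since it records which of the four mapping methods was used. A minimal sketch that clears the first page of a buffer (illustrative only; assumes the buffer is idle):

    static int example_clear_first_page(struct drm_buffer_object *bo)
    {
            struct drm_bo_kmap_obj map;
            int ret;

            ret = drm_bo_kmap(bo, 0 /* start_page */, 1 /* num_pages */, &map);
            if (ret)
                    return ret;

            /* map.virtual is a kernel address whether the page came from
             * a TTM or from ioremapped PCI space. */
            memset(map.virtual, 0, PAGE_SIZE);
            drm_bo_kunmap(&map);
            return 0;
    }
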
4171Index: linux-2.6.28/drivers/gpu/drm/drm_bufs.c
4172===================================================================
4173--- linux-2.6.28.orig/drivers/gpu/drm/drm_bufs.c 2009-02-25 15:36:50.000000000 +0000
4174+++ linux-2.6.28/drivers/gpu/drm/drm_bufs.c 2009-02-25 15:37:02.000000000 +0000
4175@@ -435,6 +435,8 @@
4176 case _DRM_GEM:
4177 DRM_ERROR("tried to rmmap GEM object\n");
4178 break;
4179+ case _DRM_TTM:
4180+ BUG_ON(1);
4181 }
4182 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
4183
4184Index: linux-2.6.28/drivers/gpu/drm/drm_drv.c
4185===================================================================
4186--- linux-2.6.28.orig/drivers/gpu/drm/drm_drv.c 2009-02-25 15:36:50.000000000 +0000
4187+++ linux-2.6.28/drivers/gpu/drm/drm_drv.c 2009-02-25 15:37:02.000000000 +0000
4188@@ -143,6 +143,34 @@
4189 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
4190 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
4191 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
4192+
4193+ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
4194+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
4195+ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
4196+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
4197+ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
4198+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
4199+ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
4200+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
4201+
4202+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
4203+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
4204+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
4205+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
4206+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
4207+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
4208+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
4209+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
4210+
4211+ DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
4212+ DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
4213+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
4214+ DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
4215+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
4216+ DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
4217+ DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
4218+ DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
4219+ DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
4220 };
4221
4222 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
4223@@ -317,6 +345,9 @@
4224 if (dev->driver->unload)
4225 dev->driver->unload(dev);
4226
4227+ drm_bo_driver_finish(dev);
4228+ drm_fence_manager_takedown(dev);
4229+
4230 if (drm_core_has_AGP(dev) && dev->agp) {
4231 drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
4232 dev->agp = NULL;
4233@@ -324,6 +355,8 @@
4234
4235 drm_ht_remove(&dev->map_hash);
4236 drm_ctxbitmap_cleanup(dev);
4237+ drm_mm_takedown(&dev->offset_manager);
4238+ drm_ht_remove(&dev->object_hash);
4239
4240 if (drm_core_check_feature(dev, DRIVER_MODESET))
4241 drm_put_minor(&dev->control);
4242@@ -336,6 +369,17 @@
4243 DRM_ERROR("Cannot unload module\n");
4244 }
4245
4246+void drm_cleanup_pci(struct pci_dev *pdev)
4247+{
4248+ struct drm_device *dev = pci_get_drvdata(pdev);
4249+
4250+ pci_set_drvdata(pdev, NULL);
4251+ pci_release_regions(pdev);
4252+ if (dev)
4253+ drm_cleanup(dev);
4254+}
4255+EXPORT_SYMBOL(drm_cleanup_pci);
4256+
4257 void drm_exit(struct drm_driver *driver)
4258 {
4259 struct drm_device *dev, *tmp;
4260Index: linux-2.6.28/drivers/gpu/drm/drm_fence.c
4261===================================================================
4262--- /dev/null 1970-01-01 00:00:00.000000000 +0000
4263+++ linux-2.6.28/drivers/gpu/drm/drm_fence.c 2009-02-25 15:37:02.000000000 +0000
4264@@ -0,0 +1,829 @@
4265+/**************************************************************************
4266+ *
4267+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4268+ * All Rights Reserved.
4269+ *
4270+ * Permission is hereby granted, free of charge, to any person obtaining a
4271+ * copy of this software and associated documentation files (the
4272+ * "Software"), to deal in the Software without restriction, including
4273+ * without limitation the rights to use, copy, modify, merge, publish,
4274+ * distribute, sub license, and/or sell copies of the Software, and to
4275+ * permit persons to whom the Software is furnished to do so, subject to
4276+ * the following conditions:
4277+ *
4278+ * The above copyright notice and this permission notice (including the
4279+ * next paragraph) shall be included in all copies or substantial portions
4280+ * of the Software.
4281+ *
4282+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4283+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4284+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
4285+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
4286+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
4287+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
4288+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
4289+ *
4290+ **************************************************************************/
4291+/*
4292+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
4293+ */
4294+
4295+#include "drmP.h"
4296+
4297+
4298+/*
4299+ * Convenience function to be called by fence::wait methods that
4300+ * need polling.
4301+ */
4302+
4303+int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
4304+ int interruptible, uint32_t mask,
4305+ unsigned long end_jiffies)
4306+{
4307+ struct drm_device *dev = fence->dev;
4308+ struct drm_fence_manager *fm = &dev->fm;
4309+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
4310+ uint32_t count = 0;
4311+ int ret;
4312+
4313+ DECLARE_WAITQUEUE(entry, current);
4314+ add_wait_queue(&fc->fence_queue, &entry);
4315+
4316+ ret = 0;
4317+
4318+ for (;;) {
4319+ __set_current_state((interruptible) ?
4320+ TASK_INTERRUPTIBLE :
4321+ TASK_UNINTERRUPTIBLE);
4322+ if (drm_fence_object_signaled(fence, mask))
4323+ break;
4324+ if (time_after_eq(jiffies, end_jiffies)) {
4325+ ret = -EBUSY;
4326+ break;
4327+ }
4328+ if (lazy)
4329+ schedule_timeout(1);
4330+ else if ((++count & 0x0F) == 0) {
4331+ __set_current_state(TASK_RUNNING);
4332+ schedule();
4333+ __set_current_state((interruptible) ?
4334+ TASK_INTERRUPTIBLE :
4335+ TASK_UNINTERRUPTIBLE);
4336+ }
4337+ if (interruptible && signal_pending(current)) {
4338+ ret = -EAGAIN;
4339+ break;
4340+ }
4341+ }
4342+ __set_current_state(TASK_RUNNING);
4343+ remove_wait_queue(&fc->fence_queue, &entry);
4344+ return ret;
4345+}
4346+EXPORT_SYMBOL(drm_fence_wait_polling);
4347+
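+/*
+ * Sketch of a driver-side fence_driver::wait hook built on the polling
+ * helper above ("xyz_fence_wait" is hypothetical; the three second
+ * timeout mirrors the one used by drm_fence_object_wait() below):
+ *
+ *	static int xyz_fence_wait(struct drm_fence_object *fence, int lazy,
+ *				  int interruptible, uint32_t mask)
+ *	{
+ *		return drm_fence_wait_polling(fence, lazy, interruptible,
+ *					      mask, jiffies + 3 * DRM_HZ);
+ *	}
+ */
+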
4348+/*
4349+ * Typically called by the IRQ handler.
4350+ */
4351+
4352+void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
4353+ uint32_t sequence, uint32_t type, uint32_t error)
4354+{
4355+ int wake = 0;
4356+ uint32_t diff;
4357+ uint32_t relevant_type;
4358+ uint32_t new_type;
4359+ struct drm_fence_manager *fm = &dev->fm;
4360+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
4361+ struct drm_fence_driver *driver = dev->driver->fence_driver;
4362+ struct list_head *head;
4363+ struct drm_fence_object *fence, *next;
4364+ int found = 0;
4365+
4366+ if (list_empty(&fc->ring))
4367+ return;
4368+
4369+ list_for_each_entry(fence, &fc->ring, ring) {
4370+ diff = (sequence - fence->sequence) & driver->sequence_mask;
4371+ if (diff > driver->wrap_diff) {
4372+ found = 1;
4373+ break;
4374+ }
4375+ }
4376+
4377+ fc->waiting_types &= ~type;
4378+ head = (found) ? &fence->ring : &fc->ring;
4379+
4380+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
4381+ if (&fence->ring == &fc->ring)
4382+ break;
4383+
4384+ if (error) {
4385+ fence->error = error;
4386+ fence->signaled_types = fence->type;
4387+ list_del_init(&fence->ring);
4388+ wake = 1;
4389+ break;
4390+ }
4391+
4392+ if (type & DRM_FENCE_TYPE_EXE)
4393+ type |= fence->native_types;
4394+
4395+ relevant_type = type & fence->type;
4396+ new_type = (fence->signaled_types | relevant_type) ^
4397+ fence->signaled_types;
4398+
4399+ if (new_type) {
4400+ fence->signaled_types |= new_type;
4401+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
4402+ fence->base.hash.key, fence->signaled_types);
4403+
4404+ if (driver->needed_flush)
4405+ fc->pending_flush |= driver->needed_flush(fence);
4406+
4407+ if (new_type & fence->waiting_types)
4408+ wake = 1;
4409+ }
4410+
4411+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
4412+
4413+ if (!(fence->type & ~fence->signaled_types)) {
4414+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
4415+ fence->base.hash.key);
4416+ list_del_init(&fence->ring);
4417+ }
4418+ }
4419+
4420+ /*
4421+ * Reinstate lost waiting types.
4422+ */
4423+
4424+ if ((fc->waiting_types & type) != type) {
4425+ head = head->prev;
4426+ list_for_each_entry(fence, head, ring) {
4427+ if (&fence->ring == &fc->ring)
4428+ break;
4429+ diff = (fc->highest_waiting_sequence - fence->sequence) &
4430+ driver->sequence_mask;
4431+ if (diff > driver->wrap_diff)
4432+ break;
4433+
4434+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
4435+ }
4436+ }
4437+
4438+ if (wake)
4439+ wake_up_all(&fc->fence_queue);
4440+}
4441+EXPORT_SYMBOL(drm_fence_handler);
4442+
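+/*
+ * drm_fence_handler() takes no lock of its own while it walks fc->ring,
+ * so callers are expected to serialize with the fence manager lock.  A
+ * hypothetical IRQ path (sketch only) would look like:
+ *
+ *	write_lock(&dev->fm.lock);
+ *	drm_fence_handler(dev, 0, hw_sequence, DRM_FENCE_TYPE_EXE, 0);
+ *	write_unlock(&dev->fm.lock);
+ */
+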
4443+static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
4444+{
4445+ struct drm_fence_manager *fm = &dev->fm;
4446+ unsigned long flags;
4447+
4448+ write_lock_irqsave(&fm->lock, flags);
4449+ list_del_init(ring);
4450+ write_unlock_irqrestore(&fm->lock, flags);
4451+}
4452+
4453+void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
4454+{
4455+ struct drm_fence_object *tmp_fence = *fence;
4456+ struct drm_device *dev = tmp_fence->dev;
4457+ struct drm_fence_manager *fm = &dev->fm;
4458+
4459+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
4460+ *fence = NULL;
4461+ if (atomic_dec_and_test(&tmp_fence->usage)) {
4462+ drm_fence_unring(dev, &tmp_fence->ring);
4463+ DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
4464+ tmp_fence->base.hash.key);
4465+ atomic_dec(&fm->count);
4466+ BUG_ON(!list_empty(&tmp_fence->base.list));
4467+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
4468+ }
4469+}
4470+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
4471+
4472+void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
4473+{
4474+ struct drm_fence_object *tmp_fence = *fence;
4475+ struct drm_device *dev = tmp_fence->dev;
4476+ struct drm_fence_manager *fm = &dev->fm;
4477+
4478+ *fence = NULL;
4479+ if (atomic_dec_and_test(&tmp_fence->usage)) {
4480+ mutex_lock(&dev->struct_mutex);
4481+ if (atomic_read(&tmp_fence->usage) == 0) {
4482+ drm_fence_unring(dev, &tmp_fence->ring);
4483+ atomic_dec(&fm->count);
4484+ BUG_ON(!list_empty(&tmp_fence->base.list));
4485+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
4486+ }
4487+ mutex_unlock(&dev->struct_mutex);
4488+ }
4489+}
4490+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
4491+
4492+struct drm_fence_object
4493+*drm_fence_reference_locked(struct drm_fence_object *src)
4494+{
4495+ DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
4496+
4497+ atomic_inc(&src->usage);
4498+ return src;
4499+}
4500+
4501+void drm_fence_reference_unlocked(struct drm_fence_object **dst,
4502+ struct drm_fence_object *src)
4503+{
4504+ mutex_lock(&src->dev->struct_mutex);
4505+ *dst = src;
4506+ atomic_inc(&src->usage);
4507+ mutex_unlock(&src->dev->struct_mutex);
4508+}
4509+EXPORT_SYMBOL(drm_fence_reference_unlocked);
4510+
4511+static void drm_fence_object_destroy(struct drm_file *priv,
4512+ struct drm_user_object *base)
4513+{
4514+ struct drm_fence_object *fence =
4515+ drm_user_object_entry(base, struct drm_fence_object, base);
4516+
4517+ drm_fence_usage_deref_locked(&fence);
4518+}
4519+
4520+int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
4521+{
4522+ unsigned long flags;
4523+ int signaled;
4524+ struct drm_device *dev = fence->dev;
4525+ struct drm_fence_manager *fm = &dev->fm;
4526+ struct drm_fence_driver *driver = dev->driver->fence_driver;
4527+
4528+ mask &= fence->type;
4529+ read_lock_irqsave(&fm->lock, flags);
4530+ signaled = (mask & fence->signaled_types) == mask;
4531+ read_unlock_irqrestore(&fm->lock, flags);
4532+ if (!signaled && driver->poll) {
4533+ write_lock_irqsave(&fm->lock, flags);
4534+ driver->poll(dev, fence->fence_class, mask);
4535+ signaled = (mask & fence->signaled_types) == mask;
4536+ write_unlock_irqrestore(&fm->lock, flags);
4537+ }
4538+ return signaled;
4539+}
4540+EXPORT_SYMBOL(drm_fence_object_signaled);
4541+
4542+
4543+int drm_fence_object_flush(struct drm_fence_object *fence,
4544+ uint32_t type)
4545+{
4546+ struct drm_device *dev = fence->dev;
4547+ struct drm_fence_manager *fm = &dev->fm;
4548+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
4549+ struct drm_fence_driver *driver = dev->driver->fence_driver;
4550+ unsigned long irq_flags;
4551+ uint32_t saved_pending_flush;
4552+ uint32_t diff;
4553+ int call_flush;
4554+
4555+ if (type & ~fence->type) {
4556+ DRM_ERROR("Flush trying to extend fence type, "
4557+ "0x%x, 0x%x\n", type, fence->type);
4558+ return -EINVAL;
4559+ }
4560+
4561+ write_lock_irqsave(&fm->lock, irq_flags);
4562+ fence->waiting_types |= type;
4563+ fc->waiting_types |= fence->waiting_types;
4564+ diff = (fence->sequence - fc->highest_waiting_sequence) &
4565+ driver->sequence_mask;
4566+
4567+ if (diff < driver->wrap_diff)
4568+ fc->highest_waiting_sequence = fence->sequence;
4569+
4570+ /*
4571+ * fence->waiting_types has changed. Determine whether
4572+ * we need to initiate some kind of flush as a result of this.
4573+ */
4574+
4575+ saved_pending_flush = fc->pending_flush;
4576+ if (driver->needed_flush)
4577+ fc->pending_flush |= driver->needed_flush(fence);
4578+
4579+ if (driver->poll)
4580+ driver->poll(dev, fence->fence_class, fence->waiting_types);
4581+
4582+ call_flush = fc->pending_flush;
4583+ write_unlock_irqrestore(&fm->lock, irq_flags);
4584+
4585+ if (call_flush && driver->flush)
4586+ driver->flush(dev, fence->fence_class);
4587+
4588+ return 0;
4589+}
4590+EXPORT_SYMBOL(drm_fence_object_flush);
4591+
4592+/*
4593+ * Make sure old fence objects are signaled before their fence sequences are
4594+ * wrapped around and reused.
4595+ */
4596+
4597+void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
4598+ uint32_t sequence)
4599+{
4600+ struct drm_fence_manager *fm = &dev->fm;
4601+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
4602+ struct drm_fence_object *fence;
4603+ unsigned long irq_flags;
4604+ struct drm_fence_driver *driver = dev->driver->fence_driver;
4605+ int call_flush;
4606+
4607+ uint32_t diff;
4608+
4609+ write_lock_irqsave(&fm->lock, irq_flags);
4610+
4611+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
4612+ diff = (sequence - fence->sequence) & driver->sequence_mask;
4613+ if (diff <= driver->flush_diff)
4614+ break;
4615+
4616+ fence->waiting_types = fence->type;
4617+ fc->waiting_types |= fence->type;
4618+
4619+ if (driver->needed_flush)
4620+ fc->pending_flush |= driver->needed_flush(fence);
4621+ }
4622+
4623+ if (driver->poll)
4624+ driver->poll(dev, fence_class, fc->waiting_types);
4625+
4626+ call_flush = fc->pending_flush;
4627+ write_unlock_irqrestore(&fm->lock, irq_flags);
4628+
4629+ if (call_flush && driver->flush)
4630+ driver->flush(dev, fence_class);
4631+
4632+ /*
4633+ * FIXME: Should we implement a wait here for really old fences?
4634+ */
4635+
4636+}
4637+EXPORT_SYMBOL(drm_fence_flush_old);
4638+
4639+int drm_fence_object_wait(struct drm_fence_object *fence,
4640+ int lazy, int ignore_signals, uint32_t mask)
4641+{
4642+ struct drm_device *dev = fence->dev;
4643+ struct drm_fence_driver *driver = dev->driver->fence_driver;
4644+ struct drm_fence_manager *fm = &dev->fm;
4645+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
4646+ int ret = 0;
4647+ unsigned long _end = jiffies + 3 * DRM_HZ;
4648+
4649+ if (mask & ~fence->type) {
4650+ DRM_ERROR("Wait trying to extend fence type"
4651+ " 0x%08x 0x%08x\n", mask, fence->type);
4652+ BUG();
4653+ return -EINVAL;
4654+ }
4655+
4656+ if (driver->wait)
4657+ return driver->wait(fence, lazy, !ignore_signals, mask);
4658+
4659+
4660+ drm_fence_object_flush(fence, mask);
4661+ if (driver->has_irq(dev, fence->fence_class, mask)) {
4662+ if (!ignore_signals)
4663+ ret = wait_event_interruptible_timeout
4664+ (fc->fence_queue,
4665+ drm_fence_object_signaled(fence, mask),
4666+ 3 * DRM_HZ);
4667+ else
4668+ ret = wait_event_timeout
4669+ (fc->fence_queue,
4670+ drm_fence_object_signaled(fence, mask),
4671+ 3 * DRM_HZ);
4672+
4673+ if (unlikely(ret == -ERESTARTSYS))
4674+ return -EAGAIN;
4675+
4676+ if (unlikely(ret == 0))
4677+ return -EBUSY;
4678+
4679+ return 0;
4680+ }
4681+
4682+ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
4683+ _end);
4684+}
4685+EXPORT_SYMBOL(drm_fence_object_wait);
4686+
4687+
4688+
4689+int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
4690+ uint32_t fence_class, uint32_t type)
4691+{
4692+ struct drm_device *dev = fence->dev;
4693+ struct drm_fence_manager *fm = &dev->fm;
4694+ struct drm_fence_driver *driver = dev->driver->fence_driver;
4695+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
4696+ unsigned long flags;
4697+ uint32_t sequence;
4698+ uint32_t native_types;
4699+ int ret;
4700+
4701+ drm_fence_unring(dev, &fence->ring);
4702+ ret = driver->emit(dev, fence_class, fence_flags, &sequence,
4703+ &native_types);
4704+ if (ret)
4705+ return ret;
4706+
4707+ write_lock_irqsave(&fm->lock, flags);
4708+ fence->fence_class = fence_class;
4709+ fence->type = type;
4710+ fence->waiting_types = 0;
4711+ fence->signaled_types = 0;
4712+ fence->error = 0;
4713+ fence->sequence = sequence;
4714+ fence->native_types = native_types;
4715+ if (list_empty(&fc->ring))
4716+ fc->highest_waiting_sequence = sequence - 1;
4717+ list_add_tail(&fence->ring, &fc->ring);
4718+ fc->latest_queued_sequence = sequence;
4719+ write_unlock_irqrestore(&fm->lock, flags);
4720+ return 0;
4721+}
4722+EXPORT_SYMBOL(drm_fence_object_emit);
4723+
4724+static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
4725+ uint32_t type,
4726+ uint32_t fence_flags,
4727+ struct drm_fence_object *fence)
4728+{
4729+ int ret = 0;
4730+ unsigned long flags;
4731+ struct drm_fence_manager *fm = &dev->fm;
4732+
4733+ mutex_lock(&dev->struct_mutex);
4734+ atomic_set(&fence->usage, 1);
4735+ mutex_unlock(&dev->struct_mutex);
4736+
4737+ write_lock_irqsave(&fm->lock, flags);
4738+ INIT_LIST_HEAD(&fence->ring);
4739+
4740+ /*
4741+ * Avoid hitting BUG() for kernel-only fence objects.
4742+ */
4743+
4744+ INIT_LIST_HEAD(&fence->base.list);
4745+ fence->fence_class = fence_class;
4746+ fence->type = type;
4747+ fence->signaled_types = 0;
4748+ fence->waiting_types = 0;
4749+ fence->sequence = 0;
4750+ fence->error = 0;
4751+ fence->dev = dev;
4752+ write_unlock_irqrestore(&fm->lock, flags);
4753+ if (fence_flags & DRM_FENCE_FLAG_EMIT) {
4754+ ret = drm_fence_object_emit(fence, fence_flags,
4755+ fence->fence_class, type);
4756+ }
4757+ return ret;
4758+}
4759+
4760+int drm_fence_add_user_object(struct drm_file *priv,
4761+ struct drm_fence_object *fence, int shareable)
4762+{
4763+ struct drm_device *dev = priv->minor->dev;
4764+ int ret;
4765+
4766+ mutex_lock(&dev->struct_mutex);
4767+ ret = drm_add_user_object(priv, &fence->base, shareable);
4768+ if (ret)
4769+ goto out;
4770+ atomic_inc(&fence->usage);
4771+ fence->base.type = drm_fence_type;
4772+ fence->base.remove = &drm_fence_object_destroy;
4773+ DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
4774+out:
4775+ mutex_unlock(&dev->struct_mutex);
4776+ return ret;
4777+}
4778+EXPORT_SYMBOL(drm_fence_add_user_object);
4779+
4780+int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
4781+ uint32_t type, unsigned flags,
4782+ struct drm_fence_object **c_fence)
4783+{
4784+ struct drm_fence_object *fence;
4785+ int ret;
4786+ struct drm_fence_manager *fm = &dev->fm;
4787+
4788+ fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
4789+ if (!fence) {
4790+ DRM_INFO("Out of memory creating fence object.\n");
4791+ return -ENOMEM;
4792+ }
4793+ ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
4794+ if (ret) {
4795+ drm_fence_usage_deref_unlocked(&fence);
4796+ return ret;
4797+ }
4798+ *c_fence = fence;
4799+ atomic_inc(&fm->count);
4800+
4801+ return 0;
4802+}
4803+EXPORT_SYMBOL(drm_fence_object_create);
4804+
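+/*
+ * End-to-end sketch of a kernel-side fence (class 0 and the EXE type
+ * are placeholders a real driver would choose for itself):
+ *
+ *	struct drm_fence_object *fence;
+ *	int ret;
+ *
+ *	ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE,
+ *				      DRM_FENCE_FLAG_EMIT, &fence);
+ *	if (!ret) {
+ *		ret = drm_fence_object_wait(fence, 1, 0,
+ *					    DRM_FENCE_TYPE_EXE);
+ *		drm_fence_usage_deref_unlocked(&fence);
+ *	}
+ */
+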
4805+void drm_fence_manager_init(struct drm_device *dev)
4806+{
4807+ struct drm_fence_manager *fm = &dev->fm;
4808+ struct drm_fence_class_manager *fence_class;
4809+ struct drm_fence_driver *fed = dev->driver->fence_driver;
4810+ int i;
4811+ unsigned long flags;
4812+
4813+ rwlock_init(&fm->lock);
4814+ write_lock_irqsave(&fm->lock, flags);
4815+ fm->initialized = 0;
4816+ if (!fed)
4817+ goto out_unlock;
4818+
4819+ fm->initialized = 1;
4820+ fm->num_classes = fed->num_classes;
4821+ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
4822+
4823+ for (i = 0; i < fm->num_classes; ++i) {
4824+ fence_class = &fm->fence_class[i];
4825+
4826+ memset(fence_class, 0, sizeof(*fence_class));
4827+ INIT_LIST_HEAD(&fence_class->ring);
4828+ DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
4829+ }
4830+
4831+ atomic_set(&fm->count, 0);
4832+ out_unlock:
4833+ write_unlock_irqrestore(&fm->lock, flags);
4834+}
4835+
4836+void drm_fence_fill_arg(struct drm_fence_object *fence,
4837+ struct drm_fence_arg *arg)
4838+{
4839+ struct drm_device *dev = fence->dev;
4840+ struct drm_fence_manager *fm = &dev->fm;
4841+ unsigned long irq_flags;
4842+
4843+ read_lock_irqsave(&fm->lock, irq_flags);
4844+ arg->handle = fence->base.hash.key;
4845+ arg->fence_class = fence->fence_class;
4846+ arg->type = fence->type;
4847+ arg->signaled = fence->signaled_types;
4848+ arg->error = fence->error;
4849+ arg->sequence = fence->sequence;
4850+ read_unlock_irqrestore(&fm->lock, irq_flags);
4851+}
4852+EXPORT_SYMBOL(drm_fence_fill_arg);
4853+
4854+void drm_fence_manager_takedown(struct drm_device *dev)
4855+{
4856+}
4857+
4858+struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
4859+ uint32_t handle)
4860+{
4861+ struct drm_device *dev = priv->minor->dev;
4862+ struct drm_user_object *uo;
4863+ struct drm_fence_object *fence;
4864+
4865+ mutex_lock(&dev->struct_mutex);
4866+ uo = drm_lookup_user_object(priv, handle);
4867+ if (!uo || (uo->type != drm_fence_type)) {
4868+ mutex_unlock(&dev->struct_mutex);
4869+ return NULL;
4870+ }
4871+ fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
4872+ mutex_unlock(&dev->struct_mutex);
4873+ return fence;
4874+}
4875+
4876+int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
4877+{
4878+ int ret;
4879+ struct drm_fence_manager *fm = &dev->fm;
4880+ struct drm_fence_arg *arg = data;
4881+ struct drm_fence_object *fence;
4882+ ret = 0;
4883+
4884+ if (!fm->initialized) {
4885+ DRM_ERROR("The DRM driver does not support fencing.\n");
4886+ return -EINVAL;
4887+ }
4888+
4889+ if (arg->flags & DRM_FENCE_FLAG_EMIT)
4890+ LOCK_TEST_WITH_RETURN(dev, file_priv);
4891+ ret = drm_fence_object_create(dev, arg->fence_class,
4892+ arg->type, arg->flags, &fence);
4893+ if (ret)
4894+ return ret;
4895+ ret = drm_fence_add_user_object(file_priv, fence,
4896+ arg->flags &
4897+ DRM_FENCE_FLAG_SHAREABLE);
4898+ if (ret) {
4899+ drm_fence_usage_deref_unlocked(&fence);
4900+ return ret;
4901+ }
4902+
4903+ /*
4904+ * usage > 0. No need to lock dev->struct_mutex;
4905+ */
4906+
4907+ arg->handle = fence->base.hash.key;
4908+
4909+ drm_fence_fill_arg(fence, arg);
4910+ drm_fence_usage_deref_unlocked(&fence);
4911+
4912+ return ret;
4913+}
4914+
4915+int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
4916+{
4917+ int ret;
4918+ struct drm_fence_manager *fm = &dev->fm;
4919+ struct drm_fence_arg *arg = data;
4920+ struct drm_fence_object *fence;
4921+ struct drm_user_object *uo;
4922+ ret = 0;
4923+
4924+ if (!fm->initialized) {
4925+ DRM_ERROR("The DRM driver does not support fencing.\n");
4926+ return -EINVAL;
4927+ }
4928+
4929+ ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
4930+ if (ret)
4931+ return ret;
4932+ fence = drm_lookup_fence_object(file_priv, arg->handle);
4933+ drm_fence_fill_arg(fence, arg);
4934+ drm_fence_usage_deref_unlocked(&fence);
4935+
4936+ return ret;
4937+}
4938+
4939+
4940+int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
4941+{
4942+ int ret;
4943+ struct drm_fence_manager *fm = &dev->fm;
4944+ struct drm_fence_arg *arg = data;
4945+ ret = 0;
4946+
4947+ if (!fm->initialized) {
4948+ DRM_ERROR("The DRM driver does not support fencing.\n");
4949+ return -EINVAL;
4950+ }
4951+
4952+ return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
4953+}
4954+
4955+int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
4956+{
4957+ int ret;
4958+ struct drm_fence_manager *fm = &dev->fm;
4959+ struct drm_fence_arg *arg = data;
4960+ struct drm_fence_object *fence;
4961+ ret = 0;
4962+
4963+ if (!fm->initialized) {
4964+ DRM_ERROR("The DRM driver does not support fencing.\n");
4965+ return -EINVAL;
4966+ }
4967+
4968+ fence = drm_lookup_fence_object(file_priv, arg->handle);
4969+ if (!fence)
4970+ return -EINVAL;
4971+
4972+ drm_fence_fill_arg(fence, arg);
4973+ drm_fence_usage_deref_unlocked(&fence);
4974+
4975+ return ret;
4976+}
4977+
4978+int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
4979+{
4980+ int ret;
4981+ struct drm_fence_manager *fm = &dev->fm;
4982+ struct drm_fence_arg *arg = data;
4983+ struct drm_fence_object *fence;
4984+ ret = 0;
4985+
4986+ if (!fm->initialized) {
4987+ DRM_ERROR("The DRM driver does not support fencing.\n");
4988+ return -EINVAL;
4989+ }
4990+
4991+ fence = drm_lookup_fence_object(file_priv, arg->handle);
4992+ if (!fence)
4993+ return -EINVAL;
4994+ ret = drm_fence_object_flush(fence, arg->type);
4995+
4996+ drm_fence_fill_arg(fence, arg);
4997+ drm_fence_usage_deref_unlocked(&fence);
4998+
4999+ return ret;
5000+}
5001+
5002+
5003+int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
5004+{
5005+ int ret;
5006+ struct drm_fence_manager *fm = &dev->fm;
5007+ struct drm_fence_arg *arg = data;
5008+ struct drm_fence_object *fence;
5009+ ret = 0;
5010+
5011+ if (!fm->initialized) {
5012+ DRM_ERROR("The DRM driver does not support fencing.\n");
5013+ return -EINVAL;
5014+ }
5015+
5016+ fence = drm_lookup_fence_object(file_priv, arg->handle);
5017+ if (!fence)
5018+ return -EINVAL;
5019+ ret = drm_fence_object_wait(fence,
5020+ arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
5021+ 0, arg->type);
5022+
5023+ drm_fence_fill_arg(fence, arg);
5024+ drm_fence_usage_deref_unlocked(&fence);
5025+
5026+ return ret;
5027+}
5028+
5029+
5030+int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
5031+{
5032+ int ret;
5033+ struct drm_fence_manager *fm = &dev->fm;
5034+ struct drm_fence_arg *arg = data;
5035+ struct drm_fence_object *fence;
5036+ ret = 0;
5037+
5038+ if (!fm->initialized) {
5039+ DRM_ERROR("The DRM driver does not support fencing.\n");
5040+ return -EINVAL;
5041+ }
5042+
5043+ LOCK_TEST_WITH_RETURN(dev, file_priv);
5044+ fence = drm_lookup_fence_object(file_priv, arg->handle);
5045+ if (!fence)
5046+ return -EINVAL;
5047+ ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
5048+ arg->type);
5049+
5050+ drm_fence_fill_arg(fence, arg);
5051+ drm_fence_usage_deref_unlocked(&fence);
5052+
5053+ return ret;
5054+}
5055+
5056+int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
5057+{
5058+ int ret;
5059+ struct drm_fence_manager *fm = &dev->fm;
5060+ struct drm_fence_arg *arg = data;
5061+ struct drm_fence_object *fence;
5062+ ret = 0;
5063+
5064+ if (!fm->initialized) {
5065+ DRM_ERROR("The DRM driver does not support fencing.\n");
5066+ return -EINVAL;
5067+ }
5068+
5069+ if (!dev->bm.initialized) {
5070+ DRM_ERROR("Buffer object manager is not initialized\n");
5071+ return -EINVAL;
5072+ }
5073+ LOCK_TEST_WITH_RETURN(dev, file_priv);
5074+ ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
5075+ NULL, &fence);
5076+ if (ret)
5077+ return ret;
5078+
5079+ if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
5080+ ret = drm_fence_add_user_object(file_priv, fence,
5081+ arg->flags &
5082+ DRM_FENCE_FLAG_SHAREABLE);
5083+ if (ret)
5084+ return ret;
5085+ }
5086+
5087+ arg->handle = fence->base.hash.key;
5088+
5089+ drm_fence_fill_arg(fence, arg);
5090+ drm_fence_usage_deref_unlocked(&fence);
5091+
5092+ return ret;
5093+}
5094Index: linux-2.6.28/drivers/gpu/drm/drm_fops.c
5095===================================================================
5096--- linux-2.6.28.orig/drivers/gpu/drm/drm_fops.c 2009-02-25 15:36:50.000000000 +0000
5097+++ linux-2.6.28/drivers/gpu/drm/drm_fops.c 2009-02-25 15:37:02.000000000 +0000
5098@@ -1,3 +1,4 @@
5099+
5100 /**
5101 * \file drm_fops.c
5102 * File operations for DRM
5103@@ -232,6 +233,7 @@
5104 int minor_id = iminor(inode);
5105 struct drm_file *priv;
5106 int ret;
5107+ int i, j;
5108
5109 if (filp->f_flags & O_EXCL)
5110 return -EBUSY; /* No exclusive opens */
5111@@ -257,10 +259,24 @@
5112
5113 INIT_LIST_HEAD(&priv->lhead);
5114 INIT_LIST_HEAD(&priv->fbs);
5115+ INIT_LIST_HEAD(&priv->refd_objects);
5116
5117 if (dev->driver->driver_features & DRIVER_GEM)
5118 drm_gem_open(dev, priv);
5119
5120+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
5121+ ret = drm_ht_create(&priv->refd_object_hash[i],
5122+ DRM_FILE_HASH_ORDER);
5123+ if (ret)
5124+ break;
5125+ }
5126+
5127+ if (ret) {
5128+ for (j = 0; j < i; ++j)
5129+ drm_ht_remove(&priv->refd_object_hash[j]);
5130+ goto out_free;
5131+ }
5132+
5133 if (dev->driver->open) {
5134 ret = dev->driver->open(dev, priv);
5135 if (ret < 0)
5136Index: linux-2.6.28/drivers/gpu/drm/drm_irq.c
5137===================================================================
5138--- linux-2.6.28.orig/drivers/gpu/drm/drm_irq.c 2009-02-25 15:36:50.000000000 +0000
5139+++ linux-2.6.28/drivers/gpu/drm/drm_irq.c 2009-02-25 15:37:02.000000000 +0000
5140@@ -124,6 +124,7 @@
5141
5142 dev->num_crtcs = 0;
5143 }
5144+EXPORT_SYMBOL(drm_vblank_cleanup);
5145
5146 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
5147 {
5148@@ -697,7 +698,7 @@
5149 *
5150 * If a signal is not requested, then calls vblank_wait().
5151 */
5152-static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
5153+void drm_vbl_send_signals(struct drm_device *dev, int crtc)
5154 {
5155 struct drm_vbl_sig *vbl_sig, *tmp;
5156 struct list_head *vbl_sigs;
5157@@ -726,6 +727,7 @@
5158
5159 spin_unlock_irqrestore(&dev->vbl_lock, flags);
5160 }
5161+EXPORT_SYMBOL(drm_vbl_send_signals);
5162
5163 /**
5164 * drm_handle_vblank - handle a vblank event
5165Index: linux-2.6.28/drivers/gpu/drm/drm_object.c
5166===================================================================
5167--- /dev/null 1970-01-01 00:00:00.000000000 +0000
5168+++ linux-2.6.28/drivers/gpu/drm/drm_object.c 2009-02-25 15:37:02.000000000 +0000
5169@@ -0,0 +1,294 @@
5170+/**************************************************************************
5171+ *
5172+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
5173+ * All Rights Reserved.
5174+ *
5175+ * Permission is hereby granted, free of charge, to any person obtaining a
5176+ * copy of this software and associated documentation files (the
5177+ * "Software"), to deal in the Software without restriction, including
5178+ * without limitation the rights to use, copy, modify, merge, publish,
5179+ * distribute, sub license, and/or sell copies of the Software, and to
5180+ * permit persons to whom the Software is furnished to do so, subject to
5181+ * the following conditions:
5182+ *
5183+ * The above copyright notice and this permission notice (including the
5184+ * next paragraph) shall be included in all copies or substantial portions
5185+ * of the Software.
5186+ *
5187+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5188+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5189+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
5190+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
5191+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
5192+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
5193+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
5194+ *
5195+ **************************************************************************/
5196+/*
5197+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
5198+ */
5199+
5200+#include "drmP.h"
5201+
5202+int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
5203+ int shareable)
5204+{
5205+ struct drm_device *dev = priv->minor->dev;
5206+ int ret;
5207+
5208+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
5209+
5210+ /* The refcount will be bumped to 1 when we add the ref object below. */
5211+ atomic_set(&item->refcount, 0);
5212+ item->shareable = shareable;
5213+ item->owner = priv;
5214+
5215+ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
5216+ (unsigned long)item, 32, 0, 0);
5217+ if (ret)
5218+ return ret;
5219+
5220+ ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
5221+ if (ret)
5222+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
5223+
5224+ return ret;
5225+}
5226+EXPORT_SYMBOL(drm_add_user_object);
5227+
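+/*
+ * For a complete caller of drm_add_user_object(), see
+ * drm_fence_add_user_object() in drm_fence.c: it takes
+ * dev->struct_mutex, adds the object, and then fills in the type and
+ * remove() hook before dropping the mutex.
+ */
+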
5228+struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
5229+{
5230+ struct drm_device *dev = priv->minor->dev;
5231+ struct drm_hash_item *hash;
5232+ int ret;
5233+ struct drm_user_object *item;
5234+
5235+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
5236+
5237+ ret = drm_ht_find_item(&dev->object_hash, key, &hash);
5238+ if (ret)
5239+ return NULL;
5240+
5241+ item = drm_hash_entry(hash, struct drm_user_object, hash);
5242+
5243+ if (priv != item->owner) {
5244+ struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
5245+ ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
5246+ if (ret) {
5247+ DRM_ERROR("Object not registered for usage\n");
5248+ return NULL;
5249+ }
5250+ }
5251+ return item;
5252+}
5253+EXPORT_SYMBOL(drm_lookup_user_object);
5254+
5255+static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
5256+{
5257+ struct drm_device *dev = priv->minor->dev;
5258+ int ret;
5259+
5260+ if (atomic_dec_and_test(&item->refcount)) {
5261+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
5262+ BUG_ON(ret);
5263+ item->remove(priv, item);
5264+ }
5265+}
5266+
5267+static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
5268+ enum drm_ref_type action)
5269+{
5270+ int ret = 0;
5271+
5272+ switch (action) {
5273+ case _DRM_REF_USE:
5274+ atomic_inc(&ro->refcount);
5275+ break;
5276+ default:
5277+ if (!ro->ref_struct_locked) {
5278+ break;
5279+ } else {
5280+ ro->ref_struct_locked(priv, ro, action);
5281+ }
5282+ }
5283+ return ret;
5284+}
5285+
5286+int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
5287+ enum drm_ref_type ref_action)
5288+{
5289+ int ret = 0;
5290+ struct drm_ref_object *item;
5291+ struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
5292+
5293+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
5294+ if (!referenced_object->shareable && priv != referenced_object->owner) {
5295+ DRM_ERROR("Not allowed to reference this object\n");
5296+ return -EINVAL;
5297+ }
5298+
5299+ /*
5300+ * If this is not a usage reference, check that usage has been registered
5301+ * first. Otherwise strange things may happen on destruction.
5302+ */
5303+
5304+ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
5305+ item =
5306+ drm_lookup_ref_object(priv, referenced_object,
5307+ _DRM_REF_USE);
5308+ if (!item) {
5309+ DRM_ERROR
5310+ ("Object not registered for usage by this client\n");
5311+ return -EINVAL;
5312+ }
5313+ }
5314+
5315+ item = drm_lookup_ref_object(priv, referenced_object,
5316+ ref_action);
5317+ if (item != NULL) {
5318+ atomic_inc(&item->refcount);
5319+ return drm_object_ref_action(priv, referenced_object,
5320+ ref_action);
5321+ }
5322+
5323+ item = drm_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
5324+ if (item == NULL) {
5325+ DRM_ERROR("Could not allocate reference object\n");
5326+ return -ENOMEM;
5327+ }
5328+
5329+ atomic_set(&item->refcount, 1);
5330+ item->hash.key = (unsigned long)referenced_object;
5331+ ret = drm_ht_insert_item(ht, &item->hash);
5332+ if (ret) {
5333+ drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
5334+ goto out;
5335+ }
5336+ item->unref_action = ref_action;
5337+ list_add(&item->list, &priv->refd_objects);
5338+ ret = drm_object_ref_action(priv, referenced_object, ref_action);
5339+out:
5340+ return ret;
5341+}
5342+
5343+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
5344+ struct drm_user_object *referenced_object,
5345+ enum drm_ref_type ref_action)
5346+{
5347+ struct drm_hash_item *hash;
5348+ int ret;
5349+
5350+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
5351+ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
5352+ (unsigned long)referenced_object, &hash);
5353+ if (ret)
5354+ return NULL;
5355+
5356+ return drm_hash_entry(hash, struct drm_ref_object, hash);
5357+}
5358+EXPORT_SYMBOL(drm_lookup_ref_object);
5359+
5360+static void drm_remove_other_references(struct drm_file *priv,
5361+ struct drm_user_object *ro)
5362+{
5363+ int i;
5364+ struct drm_open_hash *ht;
5365+ struct drm_hash_item *hash;
5366+ struct drm_ref_object *item;
5367+
5368+ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
5369+ ht = &priv->refd_object_hash[i];
5370+ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
5371+ item = drm_hash_entry(hash, struct drm_ref_object, hash);
5372+ drm_remove_ref_object(priv, item);
5373+ }
5374+ }
5375+}
5376+
5377+void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
5378+{
5379+ int ret;
5380+ struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
5381+ struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
5382+ enum drm_ref_type unref_action;
5383+
5384+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
5385+ unref_action = item->unref_action;
5386+ if (atomic_dec_and_test(&item->refcount)) {
5387+ ret = drm_ht_remove_item(ht, &item->hash);
5388+ BUG_ON(ret);
5389+ list_del_init(&item->list);
5390+ if (unref_action == _DRM_REF_USE)
5391+ drm_remove_other_references(priv, user_object);
5392+ drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
5393+ }
5394+
5395+ switch (unref_action) {
5396+ case _DRM_REF_USE:
5397+ drm_deref_user_object(priv, user_object);
5398+ break;
5399+ default:
5400+ BUG_ON(!user_object->unref);
5401+ user_object->unref(priv, user_object, unref_action);
5402+ break;
5403+ }
5404+
5405+}
5406+EXPORT_SYMBOL(drm_remove_ref_object);
5407+
5408+int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
5409+ enum drm_object_type type, struct drm_user_object **object)
5410+{
5411+ struct drm_device *dev = priv->minor->dev;
5412+ struct drm_user_object *uo;
5413+ struct drm_hash_item *hash;
5414+ int ret;
5415+
5416+ mutex_lock(&dev->struct_mutex);
5417+ ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
5418+ if (ret) {
5419+ DRM_ERROR("Could not find user object to reference.\n");
5420+ goto out_err;
5421+ }
5422+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
5423+ if (uo->type != type) {
5424+ ret = -EINVAL;
5425+ goto out_err;
5426+ }
5427+ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
5428+ if (ret)
5429+ goto out_err;
5430+ mutex_unlock(&dev->struct_mutex);
5431+ *object = uo;
5432+ return 0;
5433+out_err:
5434+ mutex_unlock(&dev->struct_mutex);
5435+ return ret;
5436+}
5437+
5438+int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
5439+ enum drm_object_type type)
5440+{
5441+ struct drm_device *dev = priv->minor->dev;
5442+ struct drm_user_object *uo;
5443+ struct drm_ref_object *ro;
5444+ int ret;
5445+
5446+ mutex_lock(&dev->struct_mutex);
5447+ uo = drm_lookup_user_object(priv, user_token);
5448+ if (!uo || (uo->type != type)) {
5449+ ret = -EINVAL;
5450+ goto out_err;
5451+ }
5452+ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
5453+ if (!ro) {
5454+ ret = -EINVAL;
5455+ goto out_err;
5456+ }
5457+ drm_remove_ref_object(priv, ro);
5458+ mutex_unlock(&dev->struct_mutex);
5459+ return 0;
5460+out_err:
5461+ mutex_unlock(&dev->struct_mutex);
5462+ return ret;
5463+}
5464Index: linux-2.6.28/drivers/gpu/drm/drm_regman.c
5465===================================================================
5466--- /dev/null 1970-01-01 00:00:00.000000000 +0000
5467+++ linux-2.6.28/drivers/gpu/drm/drm_regman.c 2009-02-25 15:37:02.000000000 +0000
5468@@ -0,0 +1,200 @@
5469+/**************************************************************************
5470+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
5471+ * All Rights Reserved.
5472+ *
5473+ * Permission is hereby granted, free of charge, to any person obtaining a
5474+ * copy of this software and associated documentation files (the
5475+ * "Software"), to deal in the Software without restriction, including
5476+ * without limitation the rights to use, copy, modify, merge, publish,
5477+ * distribute, sub license, and/or sell copies of the Software, and to
5478+ * permit persons to whom the Software is furnished to do so, subject to
5479+ * the following conditions:
5480+ *
5481+ * The above copyright notice and this permission notice (including the
5482+ * next paragraph) shall be included in all copies or substantial portions
5483+ * of the Software.
5484+ *
5485+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5486+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5487+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
5488+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
5489+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
5490+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
5491+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
5492+ *
5493+ **************************************************************************/
5494+/*
5495+ * An allocate-fence manager implementation intended for sets of base-registers
5496+ * or tiling-registers.
5497+ */
5498+
5499+#include "drmP.h"
5500+
5501+/*
5502+ * Allocate a compatible register and put it on the unfenced list.
5503+ */
5504+
5505+int drm_regs_alloc(struct drm_reg_manager *manager,
5506+ const void *data,
5507+ uint32_t fence_class,
5508+ uint32_t fence_type,
5509+ int interruptible, int no_wait, struct drm_reg **reg)
5510+{
5511+ struct drm_reg *entry, *next_entry;
5512+ int ret;
5513+
5514+ *reg = NULL;
5515+
5516+ /*
5517+ * Search the unfenced list.
5518+ */
5519+
5520+ list_for_each_entry(entry, &manager->unfenced, head) {
5521+ if (manager->reg_reusable(entry, data)) {
5522+ entry->new_fence_type |= fence_type;
5523+ goto out;
5524+ }
5525+ }
5526+
5527+ /*
5528+ * Search the lru list.
5529+ */
5530+
5531+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
5532+ struct drm_fence_object *fence = entry->fence;
5533+ if (fence->fence_class == fence_class &&
5534+ (entry->fence_type & fence_type) == entry->fence_type &&
5535+ manager->reg_reusable(entry, data)) {
5536+ list_del(&entry->head);
5537+ entry->new_fence_type = fence_type;
5538+ list_add_tail(&entry->head, &manager->unfenced);
5539+ goto out;
5540+ }
5541+ }
5542+
5543+ /*
5544+ * Search the free list.
5545+ */
5546+
5547+ list_for_each_entry(entry, &manager->free, head) {
5548+ list_del(&entry->head);
5549+ entry->new_fence_type = fence_type;
5550+ list_add_tail(&entry->head, &manager->unfenced);
5551+ goto out;
5552+ }
5553+
5554+ if (no_wait)
5555+ return -EBUSY;
5556+
5557+ /*
5558+ * Go back to the lru list and try to expire fences.
5559+ */
5560+
5561+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
5562+ BUG_ON(!entry->fence);
5563+ ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
5564+ entry->fence_type);
5565+ if (ret)
5566+ return ret;
5567+
5568+ drm_fence_usage_deref_unlocked(&entry->fence);
5569+ list_del(&entry->head);
5570+ entry->new_fence_type = fence_type;
5571+ list_add_tail(&entry->head, &manager->unfenced);
5572+ goto out;
5573+ }
5574+
5575+ /*
5576+ * Oops. All registers are used up :(.
5577+ */
5578+
5579+ return -EBUSY;
5580+out:
5581+ *reg = entry;
5582+ return 0;
5583+}
5584+EXPORT_SYMBOL(drm_regs_alloc);
5585+
5586+void drm_regs_fence(struct drm_reg_manager *manager,
5587+ struct drm_fence_object *fence)
5588+{
5589+ struct drm_reg *entry;
5590+ struct drm_reg *next_entry;
5591+
5592+ if (!fence) {
5593+
5594+ /*
5595+ * Old fence (if any) is still valid.
5596+ * Put back on free and lru lists.
5597+ */
5598+
5599+ list_for_each_entry_safe_reverse(entry, next_entry,
5600+ &manager->unfenced, head) {
5601+ list_del(&entry->head);
5602+ list_add(&entry->head, (entry->fence) ?
5603+ &manager->lru : &manager->free);
5604+ }
5605+ } else {
5606+
5607+ /*
5608+ * Fence with a new fence and put on lru list.
5609+ */
5610+
5611+ list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
5612+ head) {
5613+ list_del(&entry->head);
5614+ if (entry->fence)
5615+ drm_fence_usage_deref_unlocked(&entry->fence);
5616+ drm_fence_reference_unlocked(&entry->fence, fence);
5617+
5618+ entry->fence_type = entry->new_fence_type;
5619+ BUG_ON((entry->fence_type & fence->type) !=
5620+ entry->fence_type);
5621+
5622+ list_add_tail(&entry->head, &manager->lru);
5623+ }
5624+ }
5625+}
5626+EXPORT_SYMBOL(drm_regs_fence);
5627+
5628+void drm_regs_free(struct drm_reg_manager *manager)
5629+{
5630+ struct drm_reg *entry;
5631+ struct drm_reg *next_entry;
5632+
5633+ drm_regs_fence(manager, NULL);
5634+
5635+ list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
5636+ list_del(&entry->head);
5637+ manager->reg_destroy(entry);
5638+ }
5639+
5640+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
5641+
5642+ (void)drm_fence_object_wait(entry->fence, 1, 1,
5643+ entry->fence_type);
5644+ list_del(&entry->head);
5645+ drm_fence_usage_deref_unlocked(&entry->fence);
5646+ manager->reg_destroy(entry);
5647+ }
5648+}
5649+EXPORT_SYMBOL(drm_regs_free);
5650+
5651+void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
5652+{
5653+ reg->fence = NULL;
5654+ list_add_tail(&reg->head, &manager->free);
5655+}
5656+EXPORT_SYMBOL(drm_regs_add);
5657+
5658+void drm_regs_init(struct drm_reg_manager *manager,
5659+ int (*reg_reusable) (const struct drm_reg *, const void *),
5660+ void (*reg_destroy) (struct drm_reg *))
5661+{
5662+ INIT_LIST_HEAD(&manager->free);
5663+ INIT_LIST_HEAD(&manager->lru);
5664+ INIT_LIST_HEAD(&manager->unfenced);
5665+ manager->reg_reusable = reg_reusable;
5666+ manager->reg_destroy = reg_destroy;
5667+}
5668+EXPORT_SYMBOL(drm_regs_init);
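+
+/*
+ * Illustrative lifecycle of the register manager (sketch only;
+ * "xyz_reusable", "xyz_destroy" and "my_reg" stand in for driver code):
+ *
+ *	drm_regs_init(&manager, xyz_reusable, xyz_destroy);
+ *	drm_regs_add(&manager, my_reg);
+ *
+ *	ret = drm_regs_alloc(&manager, data, fence_class, fence_type,
+ *			     1, 0, &reg);
+ *	...emit commands that use the register...
+ *	drm_regs_fence(&manager, fence);
+ */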
5669Index: linux-2.6.28/drivers/gpu/drm/drm_stub.c
5670===================================================================
5671--- linux-2.6.28.orig/drivers/gpu/drm/drm_stub.c 2009-02-25 15:36:50.000000000 +0000
5672+++ linux-2.6.28/drivers/gpu/drm/drm_stub.c 2009-02-25 15:37:02.000000000 +0000
5673@@ -201,6 +201,7 @@
5674 init_timer(&dev->timer);
5675 mutex_init(&dev->struct_mutex);
5676 mutex_init(&dev->ctxlist_mutex);
5677+ mutex_init(&dev->bm.evict_mutex);
5678
5679 idr_init(&dev->drw_idr);
5680
5681@@ -216,6 +217,18 @@
5682 return -ENOMEM;
5683 }
5684
5685+ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
5686+ DRM_FILE_PAGE_OFFSET_SIZE)) {
5687+ drm_ht_remove(&dev->map_hash);
5688+ return -ENOMEM;
5689+ }
5690+
5691+ if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
5692+ drm_ht_remove(&dev->map_hash);
5693+ drm_mm_takedown(&dev->offset_manager);
5694+ return -ENOMEM;
5695+ }
5696+
5697 /* the DRM has 6 basic counters */
5698 dev->counters = 6;
5699 dev->types[0] = _DRM_STAT_LOCK;
5700@@ -261,6 +274,7 @@
5701 }
5702 }
5703
5704+ drm_fence_manager_init(dev);
5705 return 0;
5706
5707 error_out_unreg:
5708@@ -409,6 +423,8 @@
5709 drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
5710 return ret;
5711 }
5712+EXPORT_SYMBOL(drm_get_dev);
5713+
5714
5715 /**
5716 * Put a device minor number.
5717Index: linux-2.6.28/drivers/gpu/drm/drm_ttm.c
5718===================================================================
5719--- /dev/null 1970-01-01 00:00:00.000000000 +0000
5720+++ linux-2.6.28/drivers/gpu/drm/drm_ttm.c 2009-02-25 15:37:02.000000000 +0000
5721@@ -0,0 +1,430 @@
5722+/**************************************************************************
5723+ *
5724+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
5725+ * All Rights Reserved.
5726+ *
5727+ * Permission is hereby granted, free of charge, to any person obtaining a
5728+ * copy of this software and associated documentation files (the
5729+ * "Software"), to deal in the Software without restriction, including
5730+ * without limitation the rights to use, copy, modify, merge, publish,
5731+ * distribute, sub license, and/or sell copies of the Software, and to
5732+ * permit persons to whom the Software is furnished to do so, subject to
5733+ * the following conditions:
5734+ *
5735+ * The above copyright notice and this permission notice (including the
5736+ * next paragraph) shall be included in all copies or substantial portions
5737+ * of the Software.
5738+ *
5739+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
5740+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
5741+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
5742+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
5743+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
5744+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
5745+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
5746+ *
5747+ **************************************************************************/
5748+/*
5749+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
5750+ */
5751+
5752+#include "drmP.h"
5753+#include <asm/agp.h>
5754+
5755+static void drm_ttm_ipi_handler(void *null)
5756+{
5757+ flush_agp_cache();
5758+}
5759+
5760+void drm_ttm_cache_flush(void)
5761+{
5762+ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
5763+ DRM_ERROR("Timed out waiting for drm cache flush.\n");
5764+}
5765+EXPORT_SYMBOL(drm_ttm_cache_flush);
5766+
5767+/*
5768+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
5769+ */
5770+
5771+static void ttm_alloc_pages(struct drm_ttm *ttm)
5772+{
5773+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
5774+ ttm->pages = NULL;
5775+
5776+ if (size <= PAGE_SIZE)
5777+ ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
5778+
5779+ if (!ttm->pages) {
5780+ ttm->pages = vmalloc_user(size);
5781+ if (ttm->pages)
5782+ ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
5783+ }
5784+}
5785+
5786+static void ttm_free_pages(struct drm_ttm *ttm)
5787+{
5788+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
5789+
5790+ if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
5791+ vfree(ttm->pages);
5792+ ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
5793+ } else {
5794+ drm_free(ttm->pages, size, DRM_MEM_TTM);
5795+ }
5796+ ttm->pages = NULL;
5797+}
5798+
5799+static struct page *drm_ttm_alloc_page(void)
5800+{
5801+ struct page *page;
5802+
5803+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
5804+ if (!page)
5805+ return NULL;
5806+ return page;
5807+}
5808+
5809+/*
5810+ * Change caching policy for the linear kernel map
5811+ * for range of pages in a ttm.
5812+ */
5813+
5814+static int drm_set_caching(struct drm_ttm *ttm, int noncached)
5815+{
5816+ int i;
5817+ struct page **cur_page;
5818+ int do_tlbflush = 0;
5819+
5820+ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
5821+ return 0;
5822+
5823+ if (noncached)
5824+ drm_ttm_cache_flush();
5825+
5826+ for (i = 0; i < ttm->num_pages; ++i) {
5827+ cur_page = ttm->pages + i;
5828+ if (*cur_page) {
5829+ if (!PageHighMem(*cur_page)) {
5830+ if (noncached) {
5831+ map_page_into_agp(*cur_page);
5832+ } else {
5833+ unmap_page_from_agp(*cur_page);
5834+ }
5835+ do_tlbflush = 1;
5836+ }
5837+ }
5838+ }
5839+ /* flush_agp_mappings() no longer exists in this kernel, so the */
5840+ /* deferred TLB flush tracked by do_tlbflush is intentionally skipped. */
5841+
5842+ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
5843+
5844+ return 0;
5845+}
5846+
5847+
5848+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
5849+{
5850+ int write;
5851+ int dirty;
5852+ struct page *page;
5853+ int i;
5854+
5855+ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
5856+ write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
5857+ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
5858+
5859+ for (i = 0; i < ttm->num_pages; ++i) {
5860+ page = ttm->pages[i];
5861+ if (page == NULL)
5862+ continue;
5863+
5864+ if (page == ttm->dummy_read_page) {
5865+ BUG_ON(write);
5866+ continue;
5867+ }
5868+
5869+ if (write && dirty && !PageReserved(page))
5870+ set_page_dirty_lock(page);
5871+
5872+ ttm->pages[i] = NULL;
5873+ put_page(page);
5874+ }
5875+}
5876+
5877+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
5878+{
5879+ int i;
5880+ struct drm_buffer_manager *bm = &ttm->dev->bm;
5881+ struct page **cur_page;
5882+
5883+ for (i = 0; i < ttm->num_pages; ++i) {
5884+ cur_page = ttm->pages + i;
5885+ if (*cur_page) {
5886+ if (page_count(*cur_page) != 1)
5887+ DRM_ERROR("Erroneous page count. Leaking pages.\n");
5888+ if (page_mapped(*cur_page))
5889+ DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
5890+ __free_page(*cur_page);
5891+ --bm->cur_pages;
5892+ }
5893+ }
5894+}
5895+
5896+/*
5897+ * Free all resources associated with a ttm.
5898+ */
5899+
5900+int drm_destroy_ttm(struct drm_ttm *ttm)
5901+{
5902+ struct drm_ttm_backend *be;
5903+
5904+ if (!ttm)
5905+ return 0;
5906+
5907+ be = ttm->be;
5908+ if (be) {
5909+ be->func->destroy(be);
5910+ ttm->be = NULL;
5911+ }
5912+
5913+ if (ttm->pages) {
5914+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
5915+ drm_set_caching(ttm, 0);
5916+
5917+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
5918+ drm_ttm_free_user_pages(ttm);
5919+ else
5920+ drm_ttm_free_alloced_pages(ttm);
5921+
5922+ ttm_free_pages(ttm);
5923+ }
5924+
5925+ return 0;
5926+}
5927+
5928+struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
5929+{
5930+ struct page *p;
5931+ struct drm_buffer_manager *bm = &ttm->dev->bm;
5932+
5933+ p = ttm->pages[index];
5934+ if (!p) {
5935+ p = drm_ttm_alloc_page();
5936+ if (!p)
5937+ return NULL;
5938+ ttm->pages[index] = p;
5939+ ++bm->cur_pages;
5940+ }
5941+ return p;
5942+}
5943+EXPORT_SYMBOL(drm_ttm_get_page);
5944+
5945+int drm_ttm_set_user(struct drm_ttm *ttm,
5946+ struct task_struct *tsk,
5947+ int write,
5948+ unsigned long start,
5949+ unsigned long num_pages,
5950+ struct page *dummy_read_page)
5951+{
5952+ struct mm_struct *mm = tsk->mm;
5953+ int ret;
5954+ int i;
5955+
5956+ BUG_ON(num_pages != ttm->num_pages);
5957+
5958+ ttm->dummy_read_page = dummy_read_page;
5959+ ttm->page_flags |= DRM_TTM_PAGE_USER |
5960+ ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
5961+
5962+
5963+ down_read(&mm->mmap_sem);
5964+ ret = get_user_pages(tsk, mm, start, num_pages,
5965+ write, 0, ttm->pages, NULL);
5966+ up_read(&mm->mmap_sem);
5967+
5968+ if (ret != num_pages && write) {
5969+ drm_ttm_free_user_pages(ttm);
5970+ return -ENOMEM;
5971+ }
5972+
5973+ for (i = 0; i < num_pages; ++i) {
5974+ if (ttm->pages[i] == NULL)
5975+ ttm->pages[i] = ttm->dummy_read_page;
5976+ }
5977+
5978+ return 0;
5979+}
5980+
5981+int drm_ttm_populate(struct drm_ttm *ttm)
5982+{
5983+ struct page *page;
5984+ unsigned long i;
5985+ struct drm_ttm_backend *be;
5986+
5987+ if (ttm->state != ttm_unpopulated)
5988+ return 0;
5989+
5990+ be = ttm->be;
5991+ for (i = 0; i < ttm->num_pages; ++i) {
5992+ page = drm_ttm_get_page(ttm, i);
5993+ if (!page)
5994+ return -ENOMEM;
5995+ }
5996+ be->func->populate(be, ttm->num_pages, ttm->pages);
5997+ ttm->state = ttm_unbound;
5998+ return 0;
5999+}
6000+
6001+static inline size_t drm_size_align(size_t size)
6002+{
6003+ size_t tmpSize = 4;
6004+ if (size > PAGE_SIZE)
6005+ return PAGE_ALIGN(size);
6006+ while (tmpSize < size)
6007+ tmpSize <<= 1;
6008+
6009+ return (size_t) tmpSize;
6010+}
6011+
6012+/*
6013+ * Calculate the estimated pinned memory usage of a ttm.
6014+ */
6015+
6016+unsigned long drm_ttm_size(struct drm_device *dev,
6017+ unsigned long num_pages,
6018+ int user_bo)
6019+{
6020+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
6021+ unsigned long tmp;
6022+
6023+ tmp = drm_size_align(sizeof(struct drm_ttm)) +
6024+ drm_size_align(num_pages * sizeof(struct page *)) +
6025+ ((user_bo) ? 0 : drm_size_align(num_pages * PAGE_SIZE));
6026+
6027+ if (bo_driver->backend_size)
6028+ tmp += bo_driver->backend_size(dev, num_pages);
6029+ else
6030+ tmp += drm_size_align(num_pages * sizeof(struct page *)) +
6031+ 3*drm_size_align(sizeof(struct drm_ttm_backend));
6032+ return tmp;
6033+}
6034+
6035+
6036+/*
6037+ * Initialize a ttm.
6038+ */
6039+
6040+struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
6041+{
6042+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
6043+ struct drm_ttm *ttm;
6044+
6045+ if (!bo_driver)
6046+ return NULL;
6047+
6048+ ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
6049+ if (!ttm)
6050+ return NULL;
6051+
6052+ ttm->dev = dev;
6053+ atomic_set(&ttm->vma_count, 0);
6054+
6055+ ttm->destroy = 0;
6056+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
6057+
6058+ ttm->page_flags = 0;
6059+
6060+ /*
6061+ * Account also for AGP module memory usage.
6062+ */
6063+
6064+ ttm_alloc_pages(ttm);
6065+ if (!ttm->pages) {
6066+ drm_destroy_ttm(ttm);
6067+ DRM_ERROR("Failed allocating page table\n");
6068+ return NULL;
6069+ }
6070+ ttm->be = bo_driver->create_ttm_backend_entry(dev);
6071+ if (!ttm->be) {
6072+ drm_destroy_ttm(ttm);
6073+ DRM_ERROR("Failed creating ttm backend entry\n");
6074+ return NULL;
6075+ }
6076+ ttm->state = ttm_unpopulated;
6077+ return ttm;
6078+}
6079+
6080+/*
6081+ * Unbind a ttm region from the aperture.
6082+ */
6083+
6084+void drm_ttm_evict(struct drm_ttm *ttm)
6085+{
6086+ struct drm_ttm_backend *be = ttm->be;
6087+ int ret;
6088+
6089+ if (ttm->state == ttm_bound) {
6090+ ret = be->func->unbind(be);
6091+ BUG_ON(ret);
6092+ }
6093+
6094+ ttm->state = ttm_evicted;
6095+}
6096+
6097+void drm_ttm_fixup_caching(struct drm_ttm *ttm)
6098+{
6099+
6100+ if (ttm->state == ttm_evicted) {
6101+ struct drm_ttm_backend *be = ttm->be;
6102+ if (be->func->needs_ub_cache_adjust(be))
6103+ drm_set_caching(ttm, 0);
6104+ ttm->state = ttm_unbound;
6105+ }
6106+}
6107+
6108+void drm_ttm_unbind(struct drm_ttm *ttm)
6109+{
6110+ if (ttm->state == ttm_bound)
6111+ drm_ttm_evict(ttm);
6112+
6113+ drm_ttm_fixup_caching(ttm);
6114+}
6115+
6116+int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
6117+{
6118+ struct drm_bo_driver *bo_driver;
6119+ int ret = 0;
6120+ struct drm_ttm_backend *be;
6121+
6122+ if (!ttm)
6123+ return -EINVAL;
6124+ if (ttm->state == ttm_bound)
6125+ return 0;
6126+
6127+ bo_driver = ttm->dev->driver->bo_driver;
6128+ be = ttm->be;
6129+ ret = drm_ttm_populate(ttm);
6130+ if (ret)
6131+ return ret;
6132+
6133+ if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
6134+ drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
6135+ else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
6136+ bo_driver->ttm_cache_flush)
6137+ bo_driver->ttm_cache_flush(ttm);
6138+
6139+ ret = be->func->bind(be, bo_mem);
6140+ if (ret) {
6141+ ttm->state = ttm_evicted;
6142+ DRM_ERROR("Couldn't bind backend.\n");
6143+ return ret;
6144+ }
6145+
6146+ ttm->state = ttm_bound;
6147+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
6148+ ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
6149+ return 0;
6150+}
6151+EXPORT_SYMBOL(drm_bind_ttm);
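Taken together, the routines above form the ttm life cycle: drm_ttm_init() allocates the page table and backend, drm_bind_ttm() populates the pages and binds them into the aperture, drm_ttm_unbind() evicts and fixes up caching, and drm_destroy_ttm() releases everything. A minimal driver-side sketch of that sequence, assuming dev has a registered bo_driver and mem describes a valid placement (error paths trimmed):

/* Sketch only: uses just the functions introduced in this patch. */
static int example_bind_cycle(struct drm_device *dev,
			      struct drm_bo_mem_reg *mem,
			      unsigned long size)
{
	struct drm_ttm *ttm;
	int ret;

	ttm = drm_ttm_init(dev, size);	/* page table + backend */
	if (!ttm)
		return -ENOMEM;

	ret = drm_bind_ttm(ttm, mem);	/* populate pages, then backend bind */
	if (ret) {
		drm_destroy_ttm(ttm);
		return ret;
	}

	drm_ttm_unbind(ttm);		/* evict and fix up caching state */
	return drm_destroy_ttm(ttm);	/* frees pages and backend */
}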
6152Index: linux-2.6.28/drivers/gpu/drm/drm_vm.c
6153===================================================================
6154--- linux-2.6.28.orig/drivers/gpu/drm/drm_vm.c 2009-02-25 15:36:50.000000000 +0000
6155+++ linux-2.6.28/drivers/gpu/drm/drm_vm.c 2009-02-25 15:37:02.000000000 +0000
6156@@ -40,6 +40,10 @@
6157
6158 static void drm_vm_open(struct vm_area_struct *vma);
6159 static void drm_vm_close(struct vm_area_struct *vma);
6160+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
6161+ struct file *filp,
6162+ drm_local_map_t *map);
6163+
6164
6165 static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
6166 {
6167@@ -270,6 +274,9 @@
6168 case _DRM_GEM:
6169 DRM_ERROR("tried to rmmap GEM object\n");
6170 break;
6171+ case _DRM_TTM:
6172+ BUG_ON(1);
6173+ break;
6174 }
6175 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
6176 }
6177@@ -650,6 +657,8 @@
6178 vma->vm_flags |= VM_RESERVED;
6179 vma->vm_page_prot = drm_dma_prot(map->type, vma);
6180 break;
6181+ case _DRM_TTM:
6182+ return drm_bo_mmap_locked(vma, filp, map);
6183 default:
6184 return -EINVAL; /* This should never happen. */
6185 }
6186@@ -674,3 +683,213 @@
6187 return ret;
6188 }
6189 EXPORT_SYMBOL(drm_mmap);
6190+
6191+/**
6192+ * buffer object vm functions.
6193+ */
6194+
6195+/**
6196+ * \c Pagefault method for buffer objects.
6197+ *
6198+ * \param vma Virtual memory area.
6199+ * \param vmf Fault descriptor; the faulting address is vmf->virtual_address.
6200+ * \return Error or refault. The pfn is manually inserted.
6201+ *
6202+ * It's important that pfns are inserted while holding the bo->mutex lock;
6203+ * otherwise we might race with unmap_mapping_range(), which is always
6204+ * called with the bo->mutex lock held.
6205+ *
6206+ * We're modifying the page attribute bits of the vma->vm_page_prot field
6207+ * without holding the mmap_sem in write mode, only in read mode.
6208+ * These bits are not used by the mm subsystem code, and we consider them
6209+ * protected by the bo->mutex lock.
6210+ */
6211+
6212+#define DRM_NOPFN_EXTRA 15 /* Fault in 16 pages at a time */
6213+
6214+int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6215+{
6216+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
6217+ unsigned long page_offset;
6218+ struct page *page = NULL;
6219+ struct drm_ttm *ttm = NULL;
6220+ struct drm_device *dev;
6221+ unsigned long pfn;
6222+ int err;
6223+ unsigned long bus_base;
6224+ unsigned long bus_offset;
6225+ unsigned long bus_size;
6226+ int i;
6227+ unsigned long ret = VM_FAULT_NOPAGE;
6228+ unsigned long address = (unsigned long)vmf->virtual_address;
6229+
6230+ if (address > vma->vm_end)
6231+ return VM_FAULT_SIGBUS;
6232+
6233+ dev = bo->dev;
6234+ err = drm_bo_read_lock(&dev->bm.bm_lock);
6235+ if (err)
6236+ return VM_FAULT_NOPAGE;
6237+
6238+ err = mutex_lock_interruptible(&bo->mutex);
6239+ if (err) {
6240+ drm_bo_read_unlock(&dev->bm.bm_lock);
6241+ return VM_FAULT_NOPAGE;
6242+ }
6243+
6244+ err = drm_bo_wait(bo, 0, 0, 0);
6245+ if (err) {
6246+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
6247+ goto out_unlock;
6248+ }
6249+
6250+ /*
6251+ * If the buffer happens to be in a non-mappable location,
6252+ * move it to a mappable one.
6253+ */
6254+
6255+ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
6256+ uint32_t new_mask = bo->mem.mask |
6257+ DRM_BO_FLAG_MAPPABLE |
6258+ DRM_BO_FLAG_FORCE_MAPPABLE;
6259+ err = drm_bo_move_buffer(bo, new_mask, 0, 0);
6260+ if (err) {
6261+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
6262+ goto out_unlock;
6263+ }
6264+ }
6265+
6266+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
6267+ &bus_size);
6268+
6269+ if (err) {
6270+ ret = VM_FAULT_SIGBUS;
6271+ goto out_unlock;
6272+ }
6273+
6274+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
6275+
6276+ if (bus_size) {
6277+ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
6278+
6279+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
6280+ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
6281+ } else {
6282+ ttm = bo->ttm;
6283+
6284+ drm_ttm_fixup_caching(ttm);
6285+ page = drm_ttm_get_page(ttm, page_offset);
6286+ if (!page) {
6287+ ret = VM_FAULT_OOM;
6288+ goto out_unlock;
6289+ }
6290+ pfn = page_to_pfn(page);
6291+ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
6292+ vm_get_page_prot(vma->vm_flags) :
6293+ drm_io_prot(_DRM_TTM, vma);
6294+ }
6295+
6296+ err = vm_insert_pfn(vma, address, pfn);
6297+ if (err) {
6298+ ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
6299+ goto out_unlock;
6300+ }
6301+
6302+ for (i = 0; i < DRM_NOPFN_EXTRA; ++i) {
6303+
6304+ if (++page_offset == bo->mem.num_pages)
6305+ break;
6306+ address = vma->vm_start + (page_offset << PAGE_SHIFT);
6307+ if (address >= vma->vm_end)
6308+ break;
6309+ if (bus_size) {
6310+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT)
6311+ + page_offset;
6312+ } else {
6313+ page = drm_ttm_get_page(ttm, page_offset);
6314+ if (!page)
6315+ break;
6316+ pfn = page_to_pfn(page);
6317+ }
6318+ if (vm_insert_pfn(vma, address, pfn))
6319+ break;
6320+ }
6321+out_unlock:
6322+ mutex_unlock(&bo->mutex);
6323+ drm_bo_read_unlock(&dev->bm.bm_lock);
6324+ return ret;
6325+}
6326+EXPORT_SYMBOL(drm_bo_vm_fault);
6327+
6328+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
6329+{
6330+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
6331+
6332+ drm_vm_open_locked(vma);
6333+ atomic_inc(&bo->usage);
6334+}
6335+
6336+/**
6337+ * \c vma open method for buffer objects.
6338+ *
6339+ * \param vma virtual memory area.
6340+ */
6341+
6342+static void drm_bo_vm_open(struct vm_area_struct *vma)
6343+{
6344+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
6345+ struct drm_device *dev = bo->dev;
6346+
6347+ mutex_lock(&dev->struct_mutex);
6348+ drm_bo_vm_open_locked(vma);
6349+ mutex_unlock(&dev->struct_mutex);
6350+}
6351+
6352+/**
6353+ * \c vma close method for buffer objects.
6354+ *
6355+ * \param vma virtual memory area.
6356+ */
6357+
6358+static void drm_bo_vm_close(struct vm_area_struct *vma)
6359+{
6360+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
6361+ struct drm_device *dev = bo->dev;
6362+
6363+ drm_vm_close(vma);
6364+ if (bo) {
6365+ mutex_lock(&dev->struct_mutex);
6366+ drm_bo_usage_deref_locked((struct drm_buffer_object **)
6367+ &vma->vm_private_data);
6368+ mutex_unlock(&dev->struct_mutex);
6369+ }
6370+ return;
6371+}
6372+
6373+static struct vm_operations_struct drm_bo_vm_ops = {
6374+ .fault = drm_bo_vm_fault,
6375+ .open = drm_bo_vm_open,
6376+ .close = drm_bo_vm_close,
6377+};
6378+
6379+/**
6380+ * mmap buffer object memory.
6381+ *
6382+ * \param vma virtual memory area.
6383+ * \param file_priv DRM file private.
6384+ * \param map The buffer object drm map.
6385+ * \return zero on success or a negative number on failure.
6386+ */
6387+
6388+int drm_bo_mmap_locked(struct vm_area_struct *vma,
6389+ struct file *filp,
6390+ drm_local_map_t *map)
6391+{
6392+ vma->vm_ops = &drm_bo_vm_ops;
6393+ vma->vm_private_data = map->handle;
6394+ vma->vm_file = filp;
6395+ vma->vm_flags |= VM_RESERVED | VM_IO;
6396+ vma->vm_flags |= VM_PFNMAP;
6397+ drm_bo_vm_open_locked(vma);
6398+ return 0;
6399+}
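Since the fault handler inserts raw pfns and drm_bo_mmap_locked() sets VM_PFNMAP, the core VM never sees struct page backing for these mappings; every access is resolved through drm_bo_vm_fault() under bo->mutex. From user space the mapping comes from a plain mmap() on the DRM fd at the buffer object's map offset. A hedged sketch, where bo_map_offset and bo_size stand in for values obtained from the buffer-object ioctls (not shown in this patch):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* fd: open DRM device node; offset/size: assumed to come from the
 * buffer-object creation/reference ioctls, which this patch omits. */
static void *example_map_bo(int fd, uint64_t bo_map_offset, size_t bo_size)
{
	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, (off_t)bo_map_offset);

	return (ptr == MAP_FAILED) ? NULL : ptr;
}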
6400Index: linux-2.6.28/drivers/gpu/drm/psb/Makefile
6401===================================================================
6402--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6403+++ linux-2.6.28/drivers/gpu/drm/psb/Makefile 2009-02-25 15:37:02.000000000 +0000
6404@@ -0,0 +1,12 @@
6405+#
6406+# Makefile for the drm device driver. This driver provides support for the
6407+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
6408+
6409+ccflags-y := -Iinclude/drm
6410+
6411+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o psb_buffer.o \
6412+ psb_gtt.o psb_fb.o psb_msvdx.o \
6413+ psb_msvdxinit.o psb_regman.o psb_reset.o psb_scene.o \
6414+ psb_schedule.o psb_xhw.o
6415+
6416+obj-$(CONFIG_DRM_PSB) += psb.o
6417Index: linux-2.6.28/drivers/gpu/drm/psb/psb_buffer.c
6418===================================================================
6419--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6420+++ linux-2.6.28/drivers/gpu/drm/psb/psb_buffer.c 2009-02-25 15:37:02.000000000 +0000
6421@@ -0,0 +1,437 @@
6422+/**************************************************************************
6423+ * Copyright (c) 2007, Intel Corporation.
6424+ * All Rights Reserved.
6425+ *
6426+ * This program is free software; you can redistribute it and/or modify it
6427+ * under the terms and conditions of the GNU General Public License,
6428+ * version 2, as published by the Free Software Foundation.
6429+ *
6430+ * This program is distributed in the hope it will be useful, but WITHOUT
6431+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6432+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6433+ * more details.
6434+ *
6435+ * You should have received a copy of the GNU General Public License along with
6436+ * this program; if not, write to the Free Software Foundation, Inc.,
6437+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6438+ *
6439+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
6440+ * develop this driver.
6441+ *
6442+ **************************************************************************/
6443+/*
6444+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
6445+ */
6446+#include "drmP.h"
6447+#include "psb_drv.h"
6448+#include "psb_schedule.h"
6449+
6450+struct drm_psb_ttm_backend {
6451+ struct drm_ttm_backend base;
6452+ struct page **pages;
6453+ unsigned int desired_tile_stride;
6454+ unsigned int hw_tile_stride;
6455+ int mem_type;
6456+ unsigned long offset;
6457+ unsigned long num_pages;
6458+};
6459+
6460+int psb_fence_types(struct drm_buffer_object *bo, uint32_t *class,
6461+ uint32_t *type)
6462+{
6463+ switch (*class) {
6464+ case PSB_ENGINE_TA:
6465+ *type = DRM_FENCE_TYPE_EXE |
6466+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
6467+ if (bo->mem.mask & PSB_BO_FLAG_TA)
6468+ *type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
6469+ if (bo->mem.mask & PSB_BO_FLAG_SCENE)
6470+ *type |= _PSB_FENCE_TYPE_SCENE_DONE;
6471+ if (bo->mem.mask & PSB_BO_FLAG_FEEDBACK)
6472+ *type |= _PSB_FENCE_TYPE_FEEDBACK;
6473+ break;
6474+ default:
6475+ *type = DRM_FENCE_TYPE_EXE;
6476+ }
6477+ return 0;
6478+}
6479+
6480+static inline size_t drm_size_align(size_t size)
6481+{
6482+ size_t tmpSize = 4;
6483+ if (size > PAGE_SIZE)
6484+ return PAGE_ALIGN(size);
6485+ while (tmpSize < size)
6486+ tmpSize <<= 1;
6487+
6488+ return (size_t) tmpSize;
6489+}
6490+
6491+/*
6492+ * Poulsbo GPU virtual space looks like this
6493+ * (We currently use only one MMU context).
6494+ *
6495+ * gatt_start = Start of GATT aperture in bus space.
6496+ * stolen_end = End of GATT populated by stolen memory in bus space.
6497+ * gatt_end = End of GATT
6498+ * twod_end = MIN(gatt_start + 256M, gatt_end)
6499+ *
6500+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling and copy operations.
6501+ * This space is not managed and is protected by the
6502+ * temp_mem mutex.
6503+ *
6504+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
6505+ *
6506+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
6507+ *
6508+ * gatt_start -> stolen_end DRM_BO_MEM_VRAM Pre-populated GATT pages.
6509+ *
6510+ * stolen_end -> twod_end DRM_BO_MEM_TT GATT memory usable by 2D engine.
6511+ *
6512+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not usable by 2D engine.
6513+ *
6514+ * gatt_end -> 0xffffffff Currently unused.
6515+ */
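The twod_end bound in the map above can be computed as in this illustrative helper (the 256M constant and the min() rule are taken from the comment; the function name is assumed, not defined in this patch):

static inline unsigned long example_twod_end(unsigned long gatt_start,
					     unsigned long gatt_end)
{
	unsigned long lim = gatt_start + (256UL << 20);	/* gatt_start + 256M */

	return (lim < gatt_end) ? lim : gatt_end;	/* MIN(...) as above */
}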
6516+
6517+int psb_init_mem_type(struct drm_device *dev, uint32_t type,
6518+ struct drm_mem_type_manager *man)
6519+{
6520+ struct drm_psb_private *dev_priv =
6521+ (struct drm_psb_private *)dev->dev_private;
6522+ struct psb_gtt *pg = dev_priv->pg;
6523+
6524+ switch (type) {
6525+ case DRM_BO_MEM_LOCAL:
6526+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6527+ _DRM_FLAG_MEMTYPE_CACHED;
6528+ man->drm_bus_maptype = 0;
6529+ break;
6530+ case DRM_PSB_MEM_KERNEL:
6531+ man->io_offset = 0x00000000;
6532+ man->io_size = 0x00000000;
6533+ man->io_addr = NULL;
6534+ man->drm_bus_maptype = _DRM_TTM;
6535+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6536+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
6537+ man->gpu_offset = PSB_MEM_KERNEL_START;
6538+ break;
6539+ case DRM_PSB_MEM_MMU:
6540+ man->io_offset = 0x00000000;
6541+ man->io_size = 0x00000000;
6542+ man->io_addr = NULL;
6543+ man->drm_bus_maptype = _DRM_TTM;
6544+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6545+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
6546+ man->gpu_offset = PSB_MEM_MMU_START;
6547+ break;
6548+ case DRM_PSB_MEM_PDS:
6549+ man->io_offset = 0x00000000;
6550+ man->io_size = 0x00000000;
6551+ man->io_addr = NULL;
6552+ man->drm_bus_maptype = _DRM_TTM;
6553+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6554+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
6555+ man->gpu_offset = PSB_MEM_PDS_START;
6556+ break;
6557+ case DRM_PSB_MEM_RASTGEOM:
6558+ man->io_offset = 0x00000000;
6559+ man->io_size = 0x00000000;
6560+ man->io_addr = NULL;
6561+ man->drm_bus_maptype = _DRM_TTM;
6562+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6563+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
6564+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
6565+ break;
6566+ case DRM_BO_MEM_VRAM:
6567+ man->io_addr = NULL;
6568+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6569+ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
6570+#ifdef PSB_WORKING_HOST_MMU_ACCESS
6571+ man->drm_bus_maptype = _DRM_AGP;
6572+ man->io_offset = pg->gatt_start;
6573+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
6574+#else
6575+ man->drm_bus_maptype = _DRM_TTM; /* Forces uncached */
6576+ man->io_offset = pg->stolen_base;
6577+ man->io_size = pg->stolen_size;
6578+#endif
6579+ man->gpu_offset = pg->gatt_start;
6580+ break;
6581+ case DRM_BO_MEM_TT: /* Mappable GATT memory */
6582+ man->io_offset = pg->gatt_start;
6583+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
6584+ man->io_addr = NULL;
6585+#ifdef PSB_WORKING_HOST_MMU_ACCESS
6586+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6587+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
6588+ man->drm_bus_maptype = _DRM_AGP;
6589+#else
6590+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6591+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
6592+ man->drm_bus_maptype = _DRM_TTM;
6593+#endif
6594+ man->gpu_offset = pg->gatt_start;
6595+ break;
6596+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
6597+ man->io_offset = pg->gatt_start;
6598+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
6599+ man->io_addr = NULL;
6600+#ifdef PSB_WORKING_HOST_MMU_ACCESS
6601+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6602+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
6603+ man->drm_bus_maptype = _DRM_AGP;
6604+#else
6605+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
6606+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
6607+ man->drm_bus_maptype = _DRM_TTM;
6608+#endif
6609+ man->gpu_offset = pg->gatt_start;
6610+ break;
6611+ default:
6612+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
6613+ return -EINVAL;
6614+ }
6615+ return 0;
6616+}
6617+
6618+uint32_t psb_evict_mask(struct drm_buffer_object *bo)
6619+{
6620+ switch (bo->mem.mem_type) {
6621+ case DRM_BO_MEM_VRAM:
6622+ return DRM_BO_FLAG_MEM_TT;
6623+ default:
6624+ return DRM_BO_FLAG_MEM_LOCAL;
6625+ }
6626+}
6627+
6628+int psb_invalidate_caches(struct drm_device *dev, uint64_t flags)
6629+{
6630+ return 0;
6631+}
6632+
6633+static int psb_move_blit(struct drm_buffer_object *bo,
6634+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
6635+{
6636+ struct drm_bo_mem_reg *old_mem = &bo->mem;
6637+ int dir = 0;
6638+
6639+ if ((old_mem->mem_type == new_mem->mem_type) &&
6640+ (new_mem->mm_node->start <
6641+ old_mem->mm_node->start + old_mem->mm_node->size)) {
6642+ dir = 1;
6643+ }
6644+
6645+ psb_emit_2d_copy_blit(bo->dev,
6646+ old_mem->mm_node->start << PAGE_SHIFT,
6647+ new_mem->mm_node->start << PAGE_SHIFT,
6648+ new_mem->num_pages, dir);
6649+
6650+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
6651+ DRM_FENCE_TYPE_EXE, 0, new_mem);
6652+}
6653+
6654+/*
6655+ * Flip destination ttm into cached-coherent GATT,
6656+ * then blit and subsequently move out again.
6657+ */
6658+
6659+static int psb_move_flip(struct drm_buffer_object *bo,
6660+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
6661+{
6662+ struct drm_device *dev = bo->dev;
6663+ struct drm_bo_mem_reg tmp_mem;
6664+ int ret;
6665+
6666+ tmp_mem = *new_mem;
6667+ tmp_mem.mm_node = NULL;
6668+ tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
6669+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
6670+
6671+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
6672+ if (ret)
6673+ return ret;
6674+ ret = drm_bind_ttm(bo->ttm, &tmp_mem);
6675+ if (ret)
6676+ goto out_cleanup;
6677+ ret = psb_move_blit(bo, 1, no_wait, &tmp_mem);
6678+ if (ret)
6679+ goto out_cleanup;
6680+
6681+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
6682+ out_cleanup:
6683+ if (tmp_mem.mm_node) {
6684+ mutex_lock(&dev->struct_mutex);
6685+ if (tmp_mem.mm_node != bo->pinned_node)
6686+ drm_mm_put_block(tmp_mem.mm_node);
6687+ tmp_mem.mm_node = NULL;
6688+ mutex_unlock(&dev->struct_mutex);
6689+ }
6690+ return ret;
6691+}
6692+
6693+int psb_move(struct drm_buffer_object *bo,
6694+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
6695+{
6696+ struct drm_bo_mem_reg *old_mem = &bo->mem;
6697+
6698+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
6699+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
6700+ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
6701+ if (psb_move_flip(bo, evict, no_wait, new_mem))
6702+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
6703+ } else {
6704+ if (psb_move_blit(bo, evict, no_wait, new_mem))
6705+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
6706+ }
6707+ return 0;
6708+}
6709+
6710+static int drm_psb_tbe_nca(struct drm_ttm_backend *backend)
6711+{
6712+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
6713+}
6714+
6715+static int drm_psb_tbe_populate(struct drm_ttm_backend *backend,
6716+ unsigned long num_pages, struct page **pages)
6717+{
6718+ struct drm_psb_ttm_backend *psb_be =
6719+ container_of(backend, struct drm_psb_ttm_backend, base);
6720+
6721+ psb_be->pages = pages;
6722+ return 0;
6723+}
6724+
6725+static int drm_psb_tbe_unbind(struct drm_ttm_backend *backend)
6726+{
6727+ struct drm_device *dev = backend->dev;
6728+ struct drm_psb_private *dev_priv =
6729+ (struct drm_psb_private *)dev->dev_private;
6730+ struct drm_psb_ttm_backend *psb_be =
6731+ container_of(backend, struct drm_psb_ttm_backend, base);
6732+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
6733+ struct drm_mem_type_manager *man = &dev->bm.man[psb_be->mem_type];
6734+
6735+ PSB_DEBUG_RENDER("MMU unbind.\n");
6736+
6737+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
6738+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
6739+ PAGE_SHIFT;
6740+
6741+ (void)psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
6742+ psb_be->num_pages,
6743+ psb_be->desired_tile_stride,
6744+ psb_be->hw_tile_stride);
6745+ }
6746+
6747+ psb_mmu_remove_pages(pd, psb_be->offset,
6748+ psb_be->num_pages,
6749+ psb_be->desired_tile_stride,
6750+ psb_be->hw_tile_stride);
6751+
6752+ return 0;
6753+}
6754+
6755+static int drm_psb_tbe_bind(struct drm_ttm_backend *backend,
6756+ struct drm_bo_mem_reg *bo_mem)
6757+{
6758+ struct drm_device *dev = backend->dev;
6759+ struct drm_psb_private *dev_priv =
6760+ (struct drm_psb_private *)dev->dev_private;
6761+ struct drm_psb_ttm_backend *psb_be =
6762+ container_of(backend, struct drm_psb_ttm_backend, base);
6763+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
6764+ struct drm_mem_type_manager *man = &dev->bm.man[bo_mem->mem_type];
6765+ int type;
6766+ int ret = 0;
6767+
6768+ psb_be->mem_type = bo_mem->mem_type;
6769+ psb_be->num_pages = bo_mem->num_pages;
6770+ psb_be->desired_tile_stride = bo_mem->desired_tile_stride;
6771+ psb_be->hw_tile_stride = bo_mem->hw_tile_stride;
6772+ psb_be->desired_tile_stride = 0; /* Overrides the above; tile strides are forced off here. */
6773+ psb_be->hw_tile_stride = 0;
6774+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
6775+ man->gpu_offset;
6776+
6777+ type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
6778+
6779+ PSB_DEBUG_RENDER("MMU bind.\n");
6780+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
6781+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
6782+ PAGE_SHIFT;
6783+
6784+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
6785+ gatt_p_offset,
6786+ psb_be->num_pages,
6787+ psb_be->desired_tile_stride,
6788+ psb_be->hw_tile_stride, type);
6789+ }
6790+
6791+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
6792+ psb_be->offset, psb_be->num_pages,
6793+ psb_be->desired_tile_stride,
6794+ psb_be->hw_tile_stride, type);
6795+ if (ret)
6796+ goto out_err;
6797+
6798+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
6799+ DRM_BE_FLAG_BOUND_CACHED : 0, DRM_BE_FLAG_BOUND_CACHED);
6800+
6801+ return 0;
6802+ out_err:
6803+ drm_psb_tbe_unbind(backend);
6804+ return ret;
6805+
6806+}
6807+
6808+static void drm_psb_tbe_clear(struct drm_ttm_backend *backend)
6809+{
6810+ struct drm_psb_ttm_backend *psb_be =
6811+ container_of(backend, struct drm_psb_ttm_backend, base);
6812+
6813+ psb_be->pages = NULL;
6814+ return;
6815+}
6816+
6817+static void drm_psb_tbe_destroy(struct drm_ttm_backend *backend)
6818+{
6819+ struct drm_psb_ttm_backend *psb_be =
6820+ container_of(backend, struct drm_psb_ttm_backend, base);
6821+
6822+ if (backend)
6823+ drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
6824+}
6825+
6826+static struct drm_ttm_backend_func psb_ttm_backend = {
6827+ .needs_ub_cache_adjust = drm_psb_tbe_nca,
6828+ .populate = drm_psb_tbe_populate,
6829+ .clear = drm_psb_tbe_clear,
6830+ .bind = drm_psb_tbe_bind,
6831+ .unbind = drm_psb_tbe_unbind,
6832+ .destroy = drm_psb_tbe_destroy,
6833+};
6834+
6835+struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev)
6836+{
6837+ struct drm_psb_ttm_backend *psb_be;
6838+
6839+ psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
6840+ if (!psb_be)
6841+ return NULL;
6842+ psb_be->pages = NULL;
6843+ psb_be->base.func = &psb_ttm_backend;
6844+ psb_be->base.dev = dev;
6845+
6846+ return &psb_be->base;
6847+}
6848+
6849+int psb_tbe_size(struct drm_device *dev, unsigned long num_pages)
6850+{
6851+ /*
6852+ * Return the size of the structures themselves and the
6853+ * estimated size of the pagedir and pagetable entries.
6854+ */
6855+
6856+ return drm_size_align(sizeof(struct drm_psb_ttm_backend)) +
6857+ 8*num_pages;
6858+}
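psb_tbe_size() thus budgets the aligned backend struct plus an assumed 8 bytes of page-directory/page-table overhead per page. A worked example under that assumption:

/*
 * For a 1024-page (4 MiB) buffer object:
 *	psb_tbe_size(dev, 1024)
 *		== drm_size_align(sizeof(struct drm_psb_ttm_backend))
 *		   + 8 * 1024			(8 KiB of table overhead)
 */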
6859Index: linux-2.6.28/drivers/gpu/drm/psb/psb_drm.h
6860===================================================================
6861--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6862+++ linux-2.6.28/drivers/gpu/drm/psb/psb_drm.h 2009-02-25 15:37:02.000000000 +0000
6863@@ -0,0 +1,370 @@
6864+/**************************************************************************
6865+ * Copyright (c) 2007, Intel Corporation.
6866+ * All Rights Reserved.
6867+ *
6868+ * This program is free software; you can redistribute it and/or modify it
6869+ * under the terms and conditions of the GNU General Public License,
6870+ * version 2, as published by the Free Software Foundation.
6871+ *
6872+ * This program is distributed in the hope it will be useful, but WITHOUT
6873+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6874+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6875+ * more details.
6876+ *
6877+ * You should have received a copy of the GNU General Public License along with
6878+ * this program; if not, write to the Free Software Foundation, Inc.,
6879+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6880+ *
6881+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
6882+ * develop this driver.
6883+ *
6884+ **************************************************************************/
6885+/*
6886+ */
6887+
6888+#ifndef _PSB_DRM_H_
6889+#define _PSB_DRM_H_
6890+
6891+#if defined(__linux__) && !defined(__KERNEL__)
6892+#include <stdint.h>
6893+#endif
6894+
6895+/*
6896+ * Intel Poulsbo driver package version.
6897+ *
6898+ */
6899+/* #define PSB_PACKAGE_VERSION "ED"__DATE__*/
6900+#define PSB_PACKAGE_VERSION "2.1.0.32L.0019"
6901+
6902+#define DRM_PSB_SAREA_MAJOR 0
6903+#define DRM_PSB_SAREA_MINOR 1
6904+#define PSB_FIXED_SHIFT 16
6905+
6906+/*
6907+ * Public memory types.
6908+ */
6909+
6910+#define DRM_PSB_MEM_MMU DRM_BO_MEM_PRIV1
6911+#define DRM_PSB_FLAG_MEM_MMU DRM_BO_FLAG_MEM_PRIV1
6912+#define DRM_PSB_MEM_PDS DRM_BO_MEM_PRIV2
6913+#define DRM_PSB_FLAG_MEM_PDS DRM_BO_FLAG_MEM_PRIV2
6914+#define DRM_PSB_MEM_APER DRM_BO_MEM_PRIV3
6915+#define DRM_PSB_FLAG_MEM_APER DRM_BO_FLAG_MEM_PRIV3
6916+#define DRM_PSB_MEM_RASTGEOM DRM_BO_MEM_PRIV4
6917+#define DRM_PSB_FLAG_MEM_RASTGEOM DRM_BO_FLAG_MEM_PRIV4
6918+#define PSB_MEM_RASTGEOM_START 0x30000000
6919+
6920+typedef int32_t psb_fixed;
6921+typedef uint32_t psb_ufixed;
6922+
6923+static inline psb_fixed psb_int_to_fixed(int a)
6924+{
6925+ return a * (1 << PSB_FIXED_SHIFT);
6926+}
6927+
6928+static inline psb_ufixed psb_unsigned_to_ufixed(unsigned int a)
6929+{
6930+ return a << PSB_FIXED_SHIFT;
6931+}
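These helpers implement 16.16 fixed point: the integer part occupies the high 16 bits. A few illustrative values, computed from the definitions above rather than taken from this patch:

/*
 *	psb_int_to_fixed(2)	  == 0x00020000
 *	psb_int_to_fixed(-1)	  == 0xffff0000	(two's complement, 32-bit)
 *	psb_unsigned_to_ufixed(3) == 0x00030000
 */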
6932+
6933+/* Status of the command sent to the gfx device. */
6934+typedef enum {
6935+ DRM_CMD_SUCCESS,
6936+ DRM_CMD_FAILED,
6937+ DRM_CMD_HANG
6938+} drm_cmd_status_t;
6939+
6940+struct drm_psb_scanout {
6941+ uint32_t buffer_id; /* DRM buffer object ID */
6942+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
6943+ uint32_t stride; /* Buffer stride in bytes */
6944+ uint32_t depth; /* Buffer depth in bits (NOT bpp) */
6945+ uint32_t width; /* Buffer width in pixels */
6946+ uint32_t height; /* Buffer height in lines */
6947+ psb_fixed transform[3][3]; /* Buffer composite transform */
6948+ /* (scaling, rot, reflect) */
6949+};
6950+
6951+#define DRM_PSB_SAREA_OWNERS 16
6952+#define DRM_PSB_SAREA_OWNER_2D 0
6953+#define DRM_PSB_SAREA_OWNER_3D 1
6954+
6955+#define DRM_PSB_SAREA_SCANOUTS 3
6956+
6957+struct drm_psb_sarea {
6958+ /* Track changes of this data structure */
6959+
6960+ uint32_t major;
6961+ uint32_t minor;
6962+
6963+ /* Last context to touch part of hw */
6964+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
6965+
6966+ /* Definition of front- and rotated buffers */
6967+ uint32_t num_scanouts;
6968+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
6969+
6970+ int pipeA_x;
6971+ int pipeA_y;
6972+ int pipeA_w;
6973+ int pipeA_h;
6974+ int pipeB_x;
6975+ int pipeB_y;
6976+ int pipeB_w;
6977+ int pipeB_h;
6978+ uint32_t msvdx_state;
6979+ uint32_t msvdx_context;
6980+};
6981+
6982+#define PSB_RELOC_MAGIC 0x67676767
6983+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
6984+#define PSB_RELOC_SHIFT_SHIFT 0
6985+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
6986+#define PSB_RELOC_ALSHIFT_SHIFT 16
6987+
6988+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
6989+ * buffer
6990+ */
6991+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
6992+ * buffer, relative to 2D
6993+ * base address
6994+ */
6995+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
6996+ * relative to PDS base address
6997+ */
6998+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
6999+ * buffer (for tiling)
7000+ */
7001+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
7002+ * relative to base reg
7003+ */
7004+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
7005+
7006+struct drm_psb_reloc {
7007+ uint32_t reloc_op;
7008+ uint32_t where; /* offset in destination buffer */
7009+ uint32_t buffer; /* Buffer reloc applies to */
7010+ uint32_t mask; /* Destination format: */
7011+ uint32_t shift; /* Destination format: */
7012+ uint32_t pre_add; /* Destination format: */
7013+ uint32_t background; /* Destination add */
7014+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
7015+ uint32_t arg0; /* Reloc-op dependent */
7016+ uint32_t arg1;
7017+};
7018+
7019+#define PSB_BO_FLAG_TA (1ULL << 48)
7020+#define PSB_BO_FLAG_SCENE (1ULL << 49)
7021+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
7022+#define PSB_BO_FLAG_USSE (1ULL << 51)
7023+
7024+#define PSB_ENGINE_2D 0
7025+#define PSB_ENGINE_VIDEO 1
7026+#define PSB_ENGINE_RASTERIZER 2
7027+#define PSB_ENGINE_TA 3
7028+#define PSB_ENGINE_HPRAST 4
7029+
7030+/*
7031+ * For this fence class we have a couple of
7032+ * fence types.
7033+ */
7034+
7035+#define _PSB_FENCE_EXE_SHIFT 0
7036+#define _PSB_FENCE_TA_DONE_SHIFT 1
7037+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
7038+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
7039+#define _PSB_FENCE_FEEDBACK_SHIFT 4
7040+
7041+#define _PSB_ENGINE_TA_FENCE_TYPES 5
7042+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
7043+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
7044+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
7045+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
7046+
7047+#define PSB_ENGINE_HPRAST 4
7048+#define PSB_NUM_ENGINES 5
7049+
7050+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
7051+#define PSB_TA_FLAG_LASTPASS (1 << 1)
7052+
7053+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
7054+
7055+struct drm_psb_scene {
7056+ int handle_valid;
7057+ uint32_t handle;
7058+ uint32_t w;
7059+ uint32_t h;
7060+ uint32_t num_buffers;
7061+};
7062+
7063+struct drm_psb_hw_info
7064+{
7065+ uint32_t rev_id;
7066+ uint32_t caps;
7067+};
7068+
7069+typedef struct drm_psb_cmdbuf_arg {
7070+ uint64_t buffer_list; /* List of buffers to validate */
7071+ uint64_t clip_rects; /* See i915 counterpart */
7072+ uint64_t scene_arg;
7073+ uint64_t fence_arg;
7074+
7075+ uint32_t ta_flags;
7076+
7077+ uint32_t ta_handle; /* TA reg-value pairs */
7078+ uint32_t ta_offset;
7079+ uint32_t ta_size;
7080+
7081+ uint32_t oom_handle;
7082+ uint32_t oom_offset;
7083+ uint32_t oom_size;
7084+
7085+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
7086+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
7087+ uint32_t cmdbuf_size;
7088+
7089+ uint32_t reloc_handle; /* Reloc buffer object */
7090+ uint32_t reloc_offset;
7091+ uint32_t num_relocs;
7092+
7093+ int32_t damage; /* Damage front buffer with cliprects */
7094+ /* Not implemented yet */
7095+ uint32_t fence_flags;
7096+ uint32_t engine;
7097+
7098+ /*
7099+ * Feedback;
7100+ */
7101+
7102+ uint32_t feedback_ops;
7103+ uint32_t feedback_handle;
7104+ uint32_t feedback_offset;
7105+ uint32_t feedback_breakpoints;
7106+ uint32_t feedback_size;
7107+} drm_psb_cmdbuf_arg_t;
7108+
7109+struct drm_psb_xhw_init_arg {
7110+ uint32_t operation;
7111+ uint32_t buffer_handle;
7112+};
7113+
7114+/*
7115+ * Feedback components:
7116+ */
7117+
7118+/*
7119+ * Vistest component. The number of these in the feedback buffer
7120+ * equals the number of vistest breakpoints + 1.
7121+ * This is currently the only feedback component.
7122+ */
7123+
7124+struct drm_psb_vistest {
7125+ uint32_t vt[8];
7126+};
7127+
7128+#define PSB_HW_COOKIE_SIZE 16
7129+#define PSB_HW_FEEDBACK_SIZE 8
7130+#define PSB_HW_OOM_CMD_SIZE 6
7131+
7132+struct drm_psb_xhw_arg {
7133+ uint32_t op;
7134+ int ret;
7135+ uint32_t irq_op;
7136+ uint32_t issue_irq;
7137+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
7138+ union {
7139+ struct {
7140+ uint32_t w;
7141+ uint32_t h;
7142+ uint32_t size;
7143+ uint32_t clear_p_start;
7144+ uint32_t clear_num_pages;
7145+ } si;
7146+ struct {
7147+ uint32_t fire_flags;
7148+ uint32_t hw_context;
7149+ uint32_t offset;
7150+ uint32_t engine;
7151+ uint32_t flags;
7152+ uint32_t rca;
7153+ uint32_t num_oom_cmds;
7154+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
7155+ } sb;
7156+ struct {
7157+ uint32_t pages;
7158+ uint32_t size;
7159+ } bi;
7160+ struct {
7161+ uint32_t bca;
7162+ uint32_t rca;
7163+ uint32_t flags;
7164+ } oom;
7165+ struct {
7166+ uint32_t pt_offset;
7167+ uint32_t param_offset;
7168+ uint32_t flags;
7169+ } bl;
7170+ struct {
7171+ uint32_t value;
7172+ } cl;
7173+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
7174+ } arg;
7175+};
7176+
7177+#define DRM_PSB_CMDBUF 0x00
7178+#define DRM_PSB_XHW_INIT 0x01
7179+#define DRM_PSB_XHW 0x02
7180+#define DRM_PSB_SCENE_UNREF 0x03
7181+/* Controlling the kernel modesetting buffers */
7182+#define DRM_PSB_KMS_OFF 0x04
7183+#define DRM_PSB_KMS_ON 0x05
7184+#define DRM_PSB_HW_INFO 0x06
7185+
7186+#define PSB_XHW_INIT 0x00
7187+#define PSB_XHW_TAKEDOWN 0x01
7188+
7189+#define PSB_XHW_FIRE_RASTER 0x00
7190+#define PSB_XHW_SCENE_INFO 0x01
7191+#define PSB_XHW_SCENE_BIND_FIRE 0x02
7192+#define PSB_XHW_TA_MEM_INFO 0x03
7193+#define PSB_XHW_RESET_DPM 0x04
7194+#define PSB_XHW_OOM 0x05
7195+#define PSB_XHW_TERMINATE 0x06
7196+#define PSB_XHW_VISTEST 0x07
7197+#define PSB_XHW_RESUME 0x08
7198+#define PSB_XHW_TA_MEM_LOAD 0x09
7199+#define PSB_XHW_CHECK_LOCKUP 0x0a
7200+
7201+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
7202+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
7203+#define PSB_SCENE_FLAG_SETUP (1 << 2)
7204+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
7205+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
7206+
7207+#define PSB_TA_MEM_FLAG_TA (1 << 0)
7208+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
7209+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
7210+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
7211+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
7212+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
7213+
7214+/*Raster fire will deallocate memory */
7215+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
7216+/*Isp reset needed due to change in ZLS format */
7217+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
7218+/*These are set by Xpsb. */
7219+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
7220+/*The task has had at least one OOM and Xpsb will
7221+ send back messages on each fire. */
7222+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
7223+
7224+#define PSB_SCENE_ENGINE_TA 0
7225+#define PSB_SCENE_ENGINE_RASTER 1
7226+#define PSB_SCENE_NUM_ENGINES 2
7227+
7228+struct drm_psb_dev_info_arg {
7229+ uint32_t num_use_attribute_registers;
7230+};
7231+#define DRM_PSB_DEVINFO 0x01
7232+
7233+#endif
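User space reaches these driver-private ioctl numbers through libdrm's drmCommand helpers, which add DRM_COMMAND_BASE to the index for you. A hedged sketch of querying DRM_PSB_HW_INFO, assuming an open, authenticated DRM fd for the psb node (error handling trimmed):

#include <xf86drm.h>
#include "psb_drm.h"

/* Sketch: returns 0 on success, negative errno on failure. */
static int example_query_hw_info(int fd, struct drm_psb_hw_info *info)
{
	/* drmCommandRead() issues DRM_IOR(DRM_COMMAND_BASE + index, ...). */
	return drmCommandRead(fd, DRM_PSB_HW_INFO, info, sizeof(*info));
}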
7234Index: linux-2.6.28/drivers/gpu/drm/psb/psb_drv.c
7235===================================================================
7236--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7237+++ linux-2.6.28/drivers/gpu/drm/psb/psb_drv.c 2009-02-25 15:37:02.000000000 +0000
7238@@ -0,0 +1,1028 @@
7239+/**************************************************************************
7240+ * Copyright (c) 2007, Intel Corporation.
7241+ * All Rights Reserved.
7242+ *
7243+ * This program is free software; you can redistribute it and/or modify it
7244+ * under the terms and conditions of the GNU General Public License,
7245+ * version 2, as published by the Free Software Foundation.
7246+ *
7247+ * This program is distributed in the hope it will be useful, but WITHOUT
7248+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7249+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7250+ * more details.
7251+ *
7252+ * You should have received a copy of the GNU General Public License along with
7253+ * this program; if not, write to the Free Software Foundation, Inc.,
7254+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7255+ *
7256+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
7257+ * develop this driver.
7258+ *
7259+ **************************************************************************/
7260+/*
7261+ */
7262+
7263+#include "drmP.h"
7264+#include "drm.h"
7265+#include "psb_drm.h"
7266+#include "psb_drv.h"
7267+#include "psb_reg.h"
7268+#include "../i915/i915_reg.h"
7269+#include "psb_msvdx.h"
7270+#include "drm_pciids.h"
7271+#include "psb_scene.h"
7272+#include "drm_crtc.h"
7273+#include "drm_crtc_helper.h"
7274+#include <linux/cpu.h>
7275+#include <linux/notifier.h>
7276+#include <linux/fb.h>
7277+
7278+extern int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, uint32_t maxY);
7279+
7280+int drm_psb_debug = 0;
7281+EXPORT_SYMBOL(drm_psb_debug);
7282+static int drm_psb_trap_pagefaults = 0;
7283+static int drm_psb_clock_gating = 0;
7284+static int drm_psb_ta_mem_size = 32 * 1024;
7285+int drm_psb_disable_vsync = 1;
7286+int drm_psb_no_fb = 0;
7287+int drm_psb_force_pipeb = 0;
7288+char *psb_init_mode;
7289+
7290+
7291+MODULE_PARM_DESC(debug, "Enable debug output");
7292+MODULE_PARM_DESC(clock_gating, "clock gating");
7293+MODULE_PARM_DESC(no_fb, "Disable FBdev");
7294+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
7295+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
7296+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
7297+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
7298+MODULE_PARM_DESC(mode, "initial mode name");
7299+MODULE_PARM_DESC(xres, "initial mode width");
7300+MODULE_PARM_DESC(yres, "initial mode height");
7301+
7302+module_param_named(debug, drm_psb_debug, int, 0600);
7303+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
7304+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
7305+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
7306+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
7307+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
7308+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
7309+module_param_named(mode, psb_init_mode, charp, 0600);
7310+
7311+static struct pci_device_id pciidlist[] = {
7312+ psb_PCI_IDS
7313+};
7314+
7315+#define DRM_PSB_CMDBUF_IOCTL DRM_IOW(DRM_PSB_CMDBUF, \
7316+ struct drm_psb_cmdbuf_arg)
7317+#define DRM_PSB_XHW_INIT_IOCTL DRM_IOR(DRM_PSB_XHW_INIT, \
7318+ struct drm_psb_xhw_init_arg)
7319+#define DRM_PSB_XHW_IOCTL DRM_IO(DRM_PSB_XHW)
7320+
7321+#define DRM_PSB_SCENE_UNREF_IOCTL DRM_IOWR(DRM_PSB_SCENE_UNREF, \
7322+ struct drm_psb_scene)
7323+#define DRM_PSB_HW_INFO_IOCTL DRM_IOR(DRM_PSB_HW_INFO, \
7324+ struct drm_psb_hw_info)
7325+
7326+#define DRM_PSB_KMS_OFF_IOCTL DRM_IO(DRM_PSB_KMS_OFF)
7327+#define DRM_PSB_KMS_ON_IOCTL DRM_IO(DRM_PSB_KMS_ON)
7328+
7329+static struct drm_ioctl_desc psb_ioctls[] = {
7330+ DRM_IOCTL_DEF(DRM_PSB_CMDBUF_IOCTL, psb_cmdbuf_ioctl, DRM_AUTH),
7331+ DRM_IOCTL_DEF(DRM_PSB_XHW_INIT_IOCTL, psb_xhw_init_ioctl,
7332+ DRM_ROOT_ONLY),
7333+ DRM_IOCTL_DEF(DRM_PSB_XHW_IOCTL, psb_xhw_ioctl, DRM_ROOT_ONLY),
7334+ DRM_IOCTL_DEF(DRM_PSB_SCENE_UNREF_IOCTL, drm_psb_scene_unref_ioctl,
7335+ DRM_AUTH),
7336+ DRM_IOCTL_DEF(DRM_PSB_KMS_OFF_IOCTL, psbfb_kms_off_ioctl,
7337+ DRM_ROOT_ONLY),
7338+ DRM_IOCTL_DEF(DRM_PSB_KMS_ON_IOCTL, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
7339+ DRM_IOCTL_DEF(DRM_PSB_HW_INFO_IOCTL, psb_hw_info_ioctl, DRM_AUTH),
7340+};
7341+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
7342+
7343+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
7344+
7345+static int dri_library_name(struct drm_device *dev, char *buf)
7346+{
7347+ return snprintf(buf, PAGE_SIZE, "psb\n");
7348+}
7349+
7350+static void psb_set_uopt(struct drm_psb_uopt *uopt)
7351+{
7352+ uopt->clock_gating = drm_psb_clock_gating;
7353+}
7354+
7355+static void psb_lastclose(struct drm_device *dev)
7356+{
7357+ struct drm_psb_private *dev_priv =
7358+ (struct drm_psb_private *)dev->dev_private;
7359+
7360+ if (!dev->dev_private)
7361+ return;
7362+
7363+ mutex_lock(&dev->struct_mutex);
7364+ if (dev_priv->ta_mem)
7365+ psb_ta_mem_unref_devlocked(&dev_priv->ta_mem);
7366+ mutex_unlock(&dev->struct_mutex);
7367+ mutex_lock(&dev_priv->cmdbuf_mutex);
7368+ if (dev_priv->buffers) {
7369+ vfree(dev_priv->buffers);
7370+ dev_priv->buffers = NULL;
7371+ }
7372+ mutex_unlock(&dev_priv->cmdbuf_mutex);
7373+}
7374+
7375+static void psb_do_takedown(struct drm_device *dev)
7376+{
7377+ struct drm_psb_private *dev_priv =
7378+ (struct drm_psb_private *)dev->dev_private;
7379+
7380+ mutex_lock(&dev->struct_mutex);
7381+ if (dev->bm.initialized) {
7382+ if (dev_priv->have_mem_rastgeom) {
7383+ drm_bo_clean_mm(dev, DRM_PSB_MEM_RASTGEOM);
7384+ dev_priv->have_mem_rastgeom = 0;
7385+ }
7386+ if (dev_priv->have_mem_mmu) {
7387+ drm_bo_clean_mm(dev, DRM_PSB_MEM_MMU);
7388+ dev_priv->have_mem_mmu = 0;
7389+ }
7390+ if (dev_priv->have_mem_aper) {
7391+ drm_bo_clean_mm(dev, DRM_PSB_MEM_APER);
7392+ dev_priv->have_mem_aper = 0;
7393+ }
7394+ if (dev_priv->have_tt) {
7395+ drm_bo_clean_mm(dev, DRM_BO_MEM_TT);
7396+ dev_priv->have_tt = 0;
7397+ }
7398+ if (dev_priv->have_vram) {
7399+ drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM);
7400+ dev_priv->have_vram = 0;
7401+ }
7402+ }
7403+ mutex_unlock(&dev->struct_mutex);
7404+
7405+ if (dev_priv->has_msvdx)
7406+ psb_msvdx_uninit(dev);
7407+
7408+ if (dev_priv->comm) {
7409+ kunmap(dev_priv->comm_page);
7410+ dev_priv->comm = NULL;
7411+ }
7412+ if (dev_priv->comm_page) {
7413+ __free_page(dev_priv->comm_page);
7414+ dev_priv->comm_page = NULL;
7415+ }
7416+}
7417+
7418+void psb_clockgating(struct drm_psb_private *dev_priv)
7419+{
7420+ uint32_t clock_gating;
7421+
7422+ if (dev_priv->uopt.clock_gating == 1) {
7423+ PSB_DEBUG_INIT("Disabling clock gating.\n");
7424+
7425+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
7426+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
7427+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
7428+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
7429+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
7430+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
7431+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
7432+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
7433+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
7434+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
7435+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
7436+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
7437+
7438+ } else if (dev_priv->uopt.clock_gating == 2) {
7439+ PSB_DEBUG_INIT("Enabling clock gating.\n");
7440+
7441+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
7442+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
7443+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
7444+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
7445+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
7446+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
7447+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
7448+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
7449+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
7450+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
7451+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
7452+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
7453+ } else
7454+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
7455+
7456+#ifdef FIX_TG_2D_CLOCKGATE
7457+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
7458+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
7459+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
7460+#endif
7461+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
7462+ (void)PSB_RSGX32(PSB_CR_CLKGATECTL);
7463+}
7464+
7465+static int psb_master_create(struct drm_device *dev, struct drm_master *master)
7466+{
7467+ struct drm_i915_master_private *master_priv;
7468+
7469+ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
7470+ if (!master_priv)
7471+ return -ENOMEM;
7472+
7473+ master->driver_priv = master_priv;
7474+ return 0;
7475+}
7476+
7477+static void psb_master_destroy(struct drm_device *dev, struct drm_master *master)
7478+{
7479+ struct drm_i915_master_private *master_priv = master->driver_priv;
7480+
7481+ if (!master_priv)
7482+ return;
7483+
7484+ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
7485+
7486+ master->driver_priv = NULL;
7487+}
7488+
7489+
7490+static int psb_do_init(struct drm_device *dev)
7491+{
7492+ struct drm_psb_private *dev_priv =
7493+ (struct drm_psb_private *)dev->dev_private;
7494+ struct psb_gtt *pg = dev_priv->pg;
7495+
7496+ uint32_t stolen_gtt;
7497+ uint32_t tt_start;
7498+ uint32_t tt_pages;
7499+
7500+ int ret = -ENOMEM;
7501+
7502+ DRM_ERROR("Debug is 0x%08x\n", drm_psb_debug);
7503+
7504+ dev_priv->ta_mem_pages =
7505+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, PAGE_SIZE) >> PAGE_SHIFT;
7506+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
7507+ if (!dev_priv->comm_page)
7508+ goto out_err;
7509+
7510+ dev_priv->comm = kmap(dev_priv->comm_page);
7511+ memset((void *)dev_priv->comm, 0, PAGE_SIZE);
7512+
7513+ dev_priv->has_msvdx = 1;
7514+ if (psb_msvdx_init(dev))
7515+ dev_priv->has_msvdx = 0;
7516+
7517+ /*
7518+ * Initialize sequence numbers for the different command
7519+ * submission mechanisms.
7520+ */
7521+
7522+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
7523+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
7524+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
7525+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
7526+
7527+ if (pg->gatt_start & 0x0FFFFFFF) {
7528+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
7529+ ret = -EINVAL;
7530+ goto out_err;
7531+ }
7532+
7533+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
7534+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
7535+ stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
7536+
7537+ dev_priv->gatt_free_offset = pg->gatt_start +
7538+ (stolen_gtt << PAGE_SHIFT) * 1024;
7539+
7540+ /*
7541+ * Insert a cache-coherent communications page in mmu space
7542+ * just after the stolen area. Will be used for fencing etc.
7543+ */
7544+
7545+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
7546+ dev_priv->gatt_free_offset += PAGE_SIZE;
7547+
7548+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
7549+ &dev_priv->comm_page,
7550+ dev_priv->comm_mmu_offset, 1, 0, 0,
7551+ PSB_MMU_CACHED_MEMORY);
7552+
7553+ if (ret)
7554+ goto out_err;
7555+
7556+ if (1 || drm_debug) { /* Always print core info for now. */
7557+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
7558+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
7559+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
7560+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
7561+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
7562+ _PSB_CC_REVISION_MAJOR_SHIFT,
7563+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
7564+ _PSB_CC_REVISION_MINOR_SHIFT);
7565+ DRM_INFO
7566+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
7567+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
7568+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
7569+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
7570+ _PSB_CC_REVISION_DESIGNER_SHIFT);
7571+ }
7572+
7573+ spin_lock_init(&dev_priv->irqmask_lock);
7574+ dev_priv->fence0_irq_on = 0;
7575+
7576+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
7577+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
7578+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
7579+ tt_pages -= tt_start >> PAGE_SHIFT;
7580+
7581+ mutex_lock(&dev->struct_mutex);
7582+
7583+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
7584+ pg->stolen_size >> PAGE_SHIFT)) {
7585+ dev_priv->have_vram = 1;
7586+ }
7587+
7588+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_TT, tt_start >> PAGE_SHIFT,
7589+ tt_pages)) {
7590+ dev_priv->have_tt = 1;
7591+ }
7592+
7593+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_MMU, 0x00000000,
7594+ (pg->gatt_start -
7595+ PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
7596+ dev_priv->have_mem_mmu = 1;
7597+ }
7598+
7599+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
7600+ (PSB_MEM_MMU_START -
7601+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
7602+ dev_priv->have_mem_rastgeom = 1;
7603+ }
7604+#if 0
7605+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
7606+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
7607+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT)) {
7608+ dev_priv->have_mem_aper = 1;
7609+ }
7610+ }
7611+#endif
7612+
7613+ mutex_unlock(&dev->struct_mutex);
7614+
7615+ return 0;
7616+ out_err:
7617+ psb_do_takedown(dev);
7618+ return ret;
7619+}
7620+
7621+static int psb_driver_unload(struct drm_device *dev)
7622+{
7623+ struct drm_psb_private *dev_priv =
7624+ (struct drm_psb_private *)dev->dev_private;
7625+
7626+ intel_modeset_cleanup(dev);
7627+
7628+ if (dev_priv) {
7629+ psb_watchdog_takedown(dev_priv);
7630+ psb_do_takedown(dev);
7631+ psb_xhw_takedown(dev_priv);
7632+ psb_scheduler_takedown(&dev_priv->scheduler);
7633+
7634+ mutex_lock(&dev->struct_mutex);
7635+ if (dev_priv->have_mem_pds) {
7636+ drm_bo_clean_mm(dev, DRM_PSB_MEM_PDS);
7637+ dev_priv->have_mem_pds = 0;
7638+ }
7639+ if (dev_priv->have_mem_kernel) {
7640+ drm_bo_clean_mm(dev, DRM_PSB_MEM_KERNEL);
7641+ dev_priv->have_mem_kernel = 0;
7642+ }
7643+ mutex_unlock(&dev->struct_mutex);
7644+
7645+ (void)drm_bo_driver_finish(dev);
7646+
7647+ if (dev_priv->pf_pd) {
7648+ psb_mmu_free_pagedir(dev_priv->pf_pd);
7649+ dev_priv->pf_pd = NULL;
7650+ }
7651+ if (dev_priv->mmu) {
7652+ struct psb_gtt *pg = dev_priv->pg;
7653+
7654+ down_read(&pg->sem);
7655+ psb_mmu_remove_pfn_sequence(
7656+ psb_mmu_get_default_pd(dev_priv->mmu),
7657+ pg->gatt_start,
7658+ pg->stolen_size >> PAGE_SHIFT);
7659+
7660+ up_read(&pg->sem);
7661+ psb_mmu_driver_takedown(dev_priv->mmu);
7662+ dev_priv->mmu = NULL;
7663+ }
7664+ psb_gtt_takedown(dev_priv->pg, 1);
7665+ if (dev_priv->scratch_page) {
7666+ __free_page(dev_priv->scratch_page);
7667+ dev_priv->scratch_page = NULL;
7668+ }
7669+ psb_takedown_use_base(dev_priv);
7670+ if (dev_priv->common.regs) {
7671+ iounmap(dev_priv->common.regs);
7672+ dev_priv->common.regs = NULL;
7673+ }
7674+ if (dev_priv->sgx_reg) {
7675+ iounmap(dev_priv->sgx_reg);
7676+ dev_priv->sgx_reg = NULL;
7677+ }
7678+ if (dev_priv->msvdx_reg) {
7679+ iounmap(dev_priv->msvdx_reg);
7680+ dev_priv->msvdx_reg = NULL;
7681+ }
7682+
7683+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
7684+ dev->dev_private = NULL;
7685+ }
7686+ return 0;
7687+}
7688+
7689+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
7690+{
7691+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7692+ struct drm_device *dev = fb->dev;
7693+
7694+ //if (fb->fbdev)
7695+ // intelfb_remove(dev, fb);
7696+
7697+ drm_framebuffer_cleanup(fb);
7698+ mutex_lock(&dev->struct_mutex);
7699+ drm_gem_object_unreference(intel_fb->obj);
7700+ mutex_unlock(&dev->struct_mutex);
7701+
7702+ kfree(intel_fb);
7703+}
7704+
7705+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7706+ struct drm_file *file_priv,
7707+ unsigned int *handle)
7708+{
7709+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7710+ struct drm_gem_object *object = intel_fb->obj;
7711+
7712+ return drm_gem_handle_create(file_priv, object, handle);
7713+}
7714+
7715+static const struct drm_framebuffer_funcs psb_fb_funcs = {
7716+ .destroy = psb_user_framebuffer_destroy,
7717+ .create_handle = psb_user_framebuffer_create_handle,
7718+};
7719+
7720+int psb_framebuffer_create(struct drm_device *dev,
7721+ struct drm_mode_fb_cmd *mode_cmd,
7722+ struct drm_framebuffer **fb,
7723+ struct drm_gem_object *obj)
7724+{
7725+ struct intel_framebuffer *intel_fb;
7726+ int ret;
7727+
7728+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7729+ if (!intel_fb)
7730+ return -ENOMEM;
7731+
7732+ ret = drm_framebuffer_init(dev, &intel_fb->base, &psb_fb_funcs);
7733+ if (ret) {
7734+ DRM_ERROR("framebuffer init failed %d\n", ret);
7735+ return ret;
7736+ }
7737+
7738+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
7739+
7740+ intel_fb->obj = obj;
7741+
7742+ *fb = &intel_fb->base;
7743+
7744+ return 0;
7745+}
7746+
7747+
7748+static struct drm_framebuffer *
7749+psb_user_framebuffer_create(struct drm_device *dev,
7750+ struct drm_file *filp,
7751+ struct drm_mode_fb_cmd *mode_cmd)
7752+{
7753+ struct drm_gem_object *obj;
7754+ struct drm_framebuffer *fb;
7755+ int ret;
7756+
7757+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
7758+ if (!obj)
7759+ return NULL;
7760+
7761+ ret = psb_framebuffer_create(dev, mode_cmd, &fb, obj);
7762+ if (ret) {
7763+ drm_gem_object_unreference(obj);
7764+ return NULL;
7765+ }
7766+
7767+ return fb;
7768+}
7769+
7770+
7771+int psbfb_probe2(struct drm_device *dev)
7772+{
7773+ return 0;
7774+}
7775+
7776+static const struct drm_mode_config_funcs psb_mode_funcs = {
7777+ .fb_create = psb_user_framebuffer_create,
7778+ .fb_changed = psbfb_probe2,
7779+};
7780+
7781+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
7782+{
7783+ struct drm_psb_private *dev_priv;
7784+ unsigned long resource_start;
7785+ struct psb_gtt *pg;
7786+ int ret = -ENOMEM;
7787+
7788+ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
7789+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
7790+ if (dev_priv == NULL)
7791+ return -ENOMEM;
7792+
7793+ mutex_init(&dev_priv->temp_mem);
7794+ mutex_init(&dev_priv->cmdbuf_mutex);
7795+ mutex_init(&dev_priv->reset_mutex);
7796+ psb_init_disallowed();
7797+
7798+ atomic_set(&dev_priv->msvdx_mmu_invaldc, 0);
7799+
7800+#ifdef FIX_TG_16
7801+ atomic_set(&dev_priv->lock_2d, 0);
7802+ atomic_set(&dev_priv->ta_wait_2d, 0);
7803+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
7804+ atomic_set(&dev_priv->waiters_2d, 0);
7805+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
7806+#else
7807+ mutex_init(&dev_priv->mutex_2d);
7808+#endif
7809+
7810+ spin_lock_init(&dev_priv->reloc_lock);
7811+
7812+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
7813+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
7814+
7815+ dev->dev_private = (void *)dev_priv;
7816+ dev_priv->chipset = chipset;
7817+ psb_set_uopt(&dev_priv->uopt);
7818+
7819+ psb_watchdog_init(dev_priv);
7820+ psb_scheduler_init(dev, &dev_priv->scheduler);
7821+
7822+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
7823+
7824+ dev_priv->msvdx_reg =
7825+ ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
7826+ if (!dev_priv->msvdx_reg)
7827+ goto out_err;
7828+
7829+ dev_priv->common.regs =
7830+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
7831+ if (!dev_priv->common.regs)
7832+ goto out_err;
7833+
7834+ dev_priv->sgx_reg =
7835+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
7836+ if (!dev_priv->sgx_reg)
7837+ goto out_err;
7838+
7839+ psb_clockgating(dev_priv);
7840+ if (psb_init_use_base(dev_priv, 3, 13))
7841+ goto out_err;
7842+
7843+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
7844+ if (!dev_priv->scratch_page)
7845+ goto out_err;
7846+
7847+ dev_priv->pg = psb_gtt_alloc(dev);
7848+ if (!dev_priv->pg)
7849+ goto out_err;
7850+
7851+ ret = psb_gtt_init(dev_priv->pg, 0);
7852+ if (ret)
7853+ goto out_err;
7854+
7855+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
7856+ drm_psb_trap_pagefaults, 0,
7857+ &dev_priv->msvdx_mmu_invaldc);
7858+ if (!dev_priv->mmu)
7859+ goto out_err;
7860+
7861+ pg = dev_priv->pg;
7862+
7863+ /*
7864+ * Make sgx MMU aware of the stolen memory area we call VRAM.
7865+ */
7866+
7867+ down_read(&pg->sem);
7868+ ret =
7869+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
7870+ pg->stolen_base >> PAGE_SHIFT,
7871+ pg->gatt_start,
7872+ pg->stolen_size >> PAGE_SHIFT, 0);
7873+ up_read(&pg->sem);
7874+ if (ret)
7875+ goto out_err;
7876+
7877+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
7878+ if (!dev_priv->pf_pd)
7879+ goto out_err;
7880+
7881+ /*
7882+ * Make all presumably unused requestors page-fault by making them
7883+ * use context 1 which does not have any valid mappings.
7884+ */
7885+
7886+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
7887+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
7888+ PSB_RSGX32(PSB_CR_BIF_BANK1);
7889+
7890+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
7891+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
7892+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
7893+
7894+ psb_init_2d(dev_priv);
7895+
7896+ ret = drm_bo_driver_init(dev);
7897+ if (ret)
7898+ goto out_err;
7899+
7900+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_KERNEL, 0x00000000,
7901+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
7902+ >> PAGE_SHIFT);
7903+ if (ret)
7904+ goto out_err;
7905+ dev_priv->have_mem_kernel = 1;
7906+
7907+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_PDS, 0x00000000,
7908+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
7909+ >> PAGE_SHIFT);
7910+ if (ret)
7911+ goto out_err;
7912+ dev_priv->have_mem_pds = 1;
7913+
7914+ ret = psb_do_init(dev);
7915+ if (ret)
7916+ goto out_err;
7917+
7918+ ret = psb_xhw_init(dev);
7919+ if (ret)
7920+ goto out_err;
7921+
7922+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
7923+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
7924+
7925+ intel_modeset_init(dev);
7926+
7927+ dev->mode_config.funcs = (void *)&psb_mode_funcs;
7928+
7929+ drm_helper_initial_config(dev, false);
7930+
7931+ return 0;
7932+ out_err:
7933+ psb_driver_unload(dev);
7934+ return ret;
7935+}
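
Every failure path after the first allocation funnels to out_err, which calls psb_driver_unload() on a partially constructed device; the unload path therefore has to check each resource before releasing it. A sketch of that tolerant-teardown shape (field names from the structure used above; the ordering is an assumption):

    /* Sketch only: unload must cope with any prefix of the load sequence. */
    static void example_partial_teardown(struct drm_psb_private *dev_priv)
    {
            if (dev_priv->sgx_reg) {
                    iounmap(dev_priv->sgx_reg);
                    dev_priv->sgx_reg = NULL;
            }
            if (dev_priv->msvdx_reg) {
                    iounmap(dev_priv->msvdx_reg);
                    dev_priv->msvdx_reg = NULL;
            }
            if (dev_priv->common.regs) {
                    iounmap(dev_priv->common.regs);
                    dev_priv->common.regs = NULL;
            }
    }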
7936+
7937+int psb_driver_device_is_agp(struct drm_device *dev)
7938+{
7939+ return 0;
7940+}
7941+
7942+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
7943+{
7944+ struct drm_psb_private *dev_priv =
7945+ (struct drm_psb_private *)dev->dev_private;
7946+ struct drm_fence_manager *fm = &dev->fm;
7947+ struct drm_fence_class_manager *fc = &fm->fence_class[PSB_ENGINE_VIDEO];
7948+ struct drm_fence_object *fence;
7949+ int ret = 0;
7950+ int signaled = 0;
7951+ int count = 0;
7952+ unsigned long _end = jiffies + 3 * DRM_HZ;
7953+
7954+ PSB_DEBUG_GENERAL("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
7955+
7956+ /* Set the MSVDX reset flag here. */
7957+ dev_priv->msvdx_needs_reset = 1;
7958+
7959+ /* Ensure that all pending IRQs are serviced. */
7960+ list_for_each_entry(fence, &fc->ring, ring) {
7961+ count++;
7962+ do {
7963+ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
7964+ (signaled =
7965+ drm_fence_object_signaled(fence,
7966+ DRM_FENCE_TYPE_EXE)));
7967+ if (signaled)
7968+ break;
7969+ if (time_after_eq(jiffies, _end))
7970+ PSB_DEBUG_GENERAL
7971+ ("MSVDXACPI: fence %p not signaled after 3 secs; suspending anyway\n",
7972+ fence);
7973+ } while (ret == -EINTR);
7974+
7975+ }
7976+
7977+ /* Issue software reset; suspend proceeds even if it times out */
7978+ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
7979+
7980+ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
7981+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
7982+
7983+ PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
7984+ count);
7985+ return 0;
7986+}
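
The fence-draining loop above is a bounded, signal-tolerant wait: DRM_WAIT_ON sleeps up to 3 * DRM_HZ, is retried when interrupted, and the jiffies deadline guarantees suspend proceeds even if a fence never signals. The same shape in plain wait-queue terms (a sketch; condition_met() and queue stand in for the fence check and fence queue):

    unsigned long deadline = jiffies + 3 * HZ;
    long ret;

    do {
            ret = wait_event_interruptible_timeout(queue, condition_met(),
                                                   3 * HZ);
            if (ret > 0)
                    break;                  /* condition satisfied */
            if (time_after_eq(jiffies, deadline))
                    break;                  /* give up; suspend anyway */
    } while (ret == -ERESTARTSYS);          /* interrupted: wait again */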
7987+
7988+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
7989+{
7990+ struct drm_device *dev = pci_get_drvdata(pdev);
7991+ struct drm_psb_private *dev_priv =
7992+ (struct drm_psb_private *)dev->dev_private;
7993+ struct drm_connector *output;
7994+
7995+ if (drm_psb_no_fb == 0)
7996+ psbfb_suspend(dev);
7997+ else {
7998+ if (num_registered_fb) {
7999+ list_for_each_entry(output, &dev->mode_config.connector_list, head) {
8000+ //if(output->encoder->crtc != NULL)
8001+ // intel_crtc_mode_save(output->encoder->crtc);
8002+ //if(output->funcs->save)
8003+ // output->funcs->save(output);
8004+ }
8005+ }
8007+ }
8008+
8009+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
8010+ (void)psb_idle_3d(dev);
8011+ (void)psb_idle_2d(dev);
8012+ flush_scheduled_work();
8013+
8014+ psb_takedown_use_base(dev_priv);
8015+
8016+ if (dev_priv->has_msvdx)
8017+ psb_prepare_msvdx_suspend(dev);
8018+
8019+ pci_save_state(pdev);
8020+ pci_disable_device(pdev);
8021+ pci_set_power_state(pdev, PCI_D3hot);
8022+
8023+ return 0;
8024+}
8025+
8026+static int psb_resume(struct pci_dev *pdev)
8027+{
8028+ struct drm_device *dev = pci_get_drvdata(pdev);
8029+ struct drm_psb_private *dev_priv =
8030+ (struct drm_psb_private *)dev->dev_private;
8031+ struct psb_gtt *pg = dev_priv->pg;
8032+ struct drm_connector *output;
8033+ int ret;
8034+
8035+ pci_set_power_state(pdev, PCI_D0);
8036+ pci_restore_state(pdev);
8037+ ret = pci_enable_device(pdev);
8038+ if (ret)
8039+ return ret;
8040+
8041+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
8042+ dev_priv->msvdx_needs_reset = 1;
8043+
8044+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
8045+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
8046+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
8047+
8048+ /*
8049+ * The GTT page tables are probably not saved.
8050+ * However, TT and VRAM is empty at this point.
8051+ */
8052+
8053+ psb_gtt_init(dev_priv->pg, 1);
8054+
8055+ /*
8056+ * The SGX loses its register contents.
8057+ * Restore BIF registers. The MMU page tables are
8058+ * "normal" pages, so their contents should be kept.
8059+ */
8060+
8061+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
8062+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
8063+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
8064+ PSB_RSGX32(PSB_CR_BIF_BANK1);
8065+
8066+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
8067+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
8068+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
8069+
8070+ /*
8071+ * 2D Base registers..
8072+ */
8073+ psb_init_2d(dev_priv);
8074+
8075+ if (drm_psb_no_fb == 0) {
8076+ list_for_each_entry(output, &dev->mode_config.connector_list, head) {
8077+ if(output->encoder->crtc != NULL)
8078+ drm_crtc_helper_set_mode(output->encoder->crtc, &output->encoder->crtc->mode,
8079+ output->encoder->crtc->x, output->encoder->crtc->y, NULL);
8080+ }
8081+ }
8082+
8083+ /*
8084+ * Persistent 3D base registers and USSE base registers.
8085+ */
8086+
8087+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
8088+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
8089+ psb_init_use_base(dev_priv, 3, 13);
8090+
8091+ /*
8092+ * Now, re-initialize the 3D engine.
8093+ */
8094+
8095+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
8096+
8097+ psb_scheduler_ta_mem_check(dev_priv);
8098+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
8099+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
8100+ PSB_TA_MEM_FLAG_TA |
8101+ PSB_TA_MEM_FLAG_RASTER |
8102+ PSB_TA_MEM_FLAG_HOSTA |
8103+ PSB_TA_MEM_FLAG_HOSTD |
8104+ PSB_TA_MEM_FLAG_INIT,
8105+ dev_priv->ta_mem->ta_memory->offset,
8106+ dev_priv->ta_mem->hw_data->offset,
8107+ dev_priv->ta_mem->hw_cookie);
8108+ }
8109+
8110+ if (drm_psb_no_fb == 0)
8111+ psbfb_resume(dev);
8112+
8113+ else {
8114+ if (num_registered_fb) {
8115+ struct fb_info *fb_info = registered_fb[0];
8116+ list_for_each_entry(output, &dev->mode_config.connector_list, head) {
8117+ //if(output->encoder->crtc != NULL)
8118+ // intel_crtc_mode_restore(output->encoder->crtc);
8119+ }
8120+ if (fb_info) {
8121+ fb_set_suspend(fb_info, 0);
8122+ printk(KERN_DEBUG "psb: fb resume complete\n");
8123+ }
8124+ }
8125+ }
8126+
8130+ return 0;
8131+}
8132+
8133+/* always available as we are SIGIO'd */
8134+static unsigned int psb_poll(struct file *filp, struct poll_table_struct *wait)
8135+{
8136+ return (POLLIN | POLLRDNORM);
8137+}
8138+
8139+static int psb_release(struct inode *inode, struct file *filp)
8140+{
8141+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
8142+ struct drm_device *dev = file_priv->minor->dev;
8143+ struct drm_psb_private *dev_priv =
8144+ (struct drm_psb_private *)dev->dev_private;
8145+
8146+ if (dev_priv && dev_priv->xhw_file) {
8147+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
8148+ }
8149+ return drm_release(inode, filp);
8150+}
8151+
8152+extern struct drm_fence_driver psb_fence_driver;
8153+
8154+/*
8155+ * Use this memory type priority if no eviction is needed.
8156+ */
8157+static uint32_t psb_mem_prios[] = { DRM_BO_MEM_VRAM,
8158+ DRM_BO_MEM_TT,
8159+ DRM_PSB_MEM_KERNEL,
8160+ DRM_PSB_MEM_MMU,
8161+ DRM_PSB_MEM_RASTGEOM,
8162+ DRM_PSB_MEM_PDS,
8163+ DRM_PSB_MEM_APER,
8164+ DRM_BO_MEM_LOCAL
8165+};
8166+
8167+/*
8168+ * Use this memory type priority if need to evict.
8169+ */
8170+static uint32_t psb_busy_prios[] = { DRM_BO_MEM_TT,
8171+ DRM_BO_MEM_VRAM,
8172+ DRM_PSB_MEM_KERNEL,
8173+ DRM_PSB_MEM_MMU,
8174+ DRM_PSB_MEM_RASTGEOM,
8175+ DRM_PSB_MEM_PDS,
8176+ DRM_PSB_MEM_APER,
8177+ DRM_BO_MEM_LOCAL
8178+};
8179+
8180+static struct drm_bo_driver psb_bo_driver = {
8181+ .mem_type_prio = psb_mem_prios,
8182+ .mem_busy_prio = psb_busy_prios,
8183+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
8184+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
8185+ .create_ttm_backend_entry = drm_psb_tbe_init,
8186+ .fence_type = psb_fence_types,
8187+ .invalidate_caches = psb_invalidate_caches,
8188+ .init_mem_type = psb_init_mem_type,
8189+ .evict_mask = psb_evict_mask,
8190+ .move = psb_move,
8191+ .backend_size = psb_tbe_size,
8192+ .command_stream_barrier = NULL,
8193+};
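
The two priority arrays give the buffer manager an ordered list of placements to try: psb_mem_prios when free space suffices, psb_busy_prios when something must be evicted (TT first, so VRAM scanout buffers survive longer). Conceptually the consumer walks the list in order; a sketch of that walk (example_space_in() is a hypothetical helper, the real loop lives inside the drm_bo core):

    static uint32_t example_pick_placement(int must_evict)
    {
            const uint32_t *prios = must_evict ? psb_busy_prios : psb_mem_prios;
            unsigned n = must_evict ? ARRAY_SIZE(psb_busy_prios)
                                    : ARRAY_SIZE(psb_mem_prios);
            unsigned i;

            for (i = 0; i < n; ++i)
                    if (example_space_in(prios[i]))  /* hypothetical helper */
                            return prios[i];
            return DRM_BO_MEM_LOCAL;                 /* last resort */
    }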
8194+
8195+static struct drm_driver driver = {
8196+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
8197+ DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
8198+ .load = psb_driver_load,
8199+ .unload = psb_driver_unload,
8200+ .dri_library_name = dri_library_name,
8201+ .get_reg_ofs = drm_core_get_reg_ofs,
8202+ .ioctls = psb_ioctls,
8203+ .device_is_agp = psb_driver_device_is_agp,
8204+ .get_vblank_counter = psb_get_vblank_counter,
8205+ .enable_vblank = psb_enable_vblank,
8206+ .disable_vblank = psb_disable_vblank,
8207+ .irq_preinstall = psb_irq_preinstall,
8208+ .irq_postinstall = psb_irq_postinstall,
8209+ .irq_uninstall = psb_irq_uninstall,
8210+ .irq_handler = psb_irq_handler,
8211+ .master_create = psb_master_create,
8212+ .master_destroy = psb_master_destroy,
8213+ .fb_probe = psbfb_probe,
8214+ .fb_remove = psbfb_remove,
8215+ .firstopen = NULL,
8216+ .lastclose = psb_lastclose,
8217+ .fops = {
8218+ .owner = THIS_MODULE,
8219+ .open = drm_open,
8220+ .release = psb_release,
8221+ .ioctl = drm_ioctl,
8222+ .mmap = drm_mmap,
8223+ .poll = psb_poll,
8224+ .fasync = drm_fasync,
8225+ },
8226+ .pci_driver = {
8227+ .name = DRIVER_NAME,
8228+ .id_table = pciidlist,
8229+ .probe = probe,
8230+ .remove = __devexit_p(drm_cleanup_pci),
8231+ .resume = psb_resume,
8232+ .suspend = psb_suspend,
8233+ },
8234+ .fence_driver = &psb_fence_driver,
8235+ .bo_driver = &psb_bo_driver,
8236+ .name = DRIVER_NAME,
8237+ .desc = DRIVER_DESC,
8238+ .date = PSB_DRM_DRIVER_DATE,
8239+ .major = PSB_DRM_DRIVER_MAJOR,
8240+ .minor = PSB_DRM_DRIVER_MINOR,
8241+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
8242+};
8243+
8244+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8245+{
8246+ return drm_get_dev(pdev, ent, &driver);
8247+}
8248+
8249+static int __init psb_init(void)
8250+{
8251+ driver.num_ioctls = psb_max_ioctl;
8252+
8253+ return drm_init(&driver);
8254+}
8255+
8256+static void __exit psb_exit(void)
8257+{
8258+ drm_exit(&driver);
8259+}
8260+
8261+module_init(psb_init);
8262+module_exit(psb_exit);
8263+
8264+MODULE_AUTHOR(DRIVER_AUTHOR);
8265+MODULE_DESCRIPTION(DRIVER_DESC);
8266+MODULE_LICENSE("GPL");
8267Index: linux-2.6.28/drivers/gpu/drm/psb/psb_drv.h
8268===================================================================
8269--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8270+++ linux-2.6.28/drivers/gpu/drm/psb/psb_drv.h 2009-02-25 15:37:02.000000000 +0000
8271@@ -0,0 +1,549 @@
8272+/**************************************************************************
8273+ * Copyright (c) 2007, Intel Corporation.
8274+ * All Rights Reserved.
8275+ *
8276+ * This program is free software; you can redistribute it and/or modify it
8277+ * under the terms and conditions of the GNU General Public License,
8278+ * version 2, as published by the Free Software Foundation.
8279+ *
8280+ * This program is distributed in the hope it will be useful, but WITHOUT
8281+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8282+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8283+ * more details.
8284+ *
8285+ * You should have received a copy of the GNU General Public License along with
8286+ * this program; if not, write to the Free Software Foundation, Inc.,
8287+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8288+ *
8289+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
8290+ * develop this driver.
8291+ *
8292+ **************************************************************************/
8295+#ifndef _PSB_DRV_H_
8296+#define _PSB_DRV_H_
8297+
8298+#include "drmP.h"
8299+#include "psb_drm.h"
8300+#include "psb_reg.h"
8301+#include "psb_schedule.h"
8302+#include "psb_priv.h"
8303+#include "../i915/intel_drv.h"
8304+
8305+
8306+enum {
8307+ CHIP_PSB_8108 = 0,
8308+ CHIP_PSB_8109 = 1
8309+};
8310+
8311+/*
8312+ * Hardware bugfixes
8313+ */
8314+
8315+#define FIX_TG_16
8316+#define FIX_TG_2D_CLOCKGATE
8317+
8318+#define DRIVER_NAME "psb"
8319+#define DRIVER_DESC "DRM driver for the Intel GMA500"
8320+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
8321+
8322+#define PSB_DRM_DRIVER_DATE "20080613"
8323+#define PSB_DRM_DRIVER_MAJOR 4
8324+#define PSB_DRM_DRIVER_MINOR 12
8325+#define PSB_DRM_DRIVER_PATCHLEVEL 0
8326+
8327+#define PSB_VDC_OFFSET 0x00000000
8328+#define PSB_VDC_SIZE 0x00080000
8329+#define PSB_SGX_SIZE 0x8000
8330+#define PSB_SGX_OFFSET 0x00040000
8331+#define PSB_MMIO_RESOURCE 0
8332+#define PSB_GATT_RESOURCE 2
8333+#define PSB_GTT_RESOURCE 3
8334+#define PSB_GMCH_CTRL 0x52
8335+#define PSB_BSM 0x5C
8336+#define _PSB_GMCH_ENABLED 0x4
8337+#define PSB_PGETBL_CTL 0x2020
8338+#define _PSB_PGETBL_ENABLED 0x00000001
8339+#define PSB_SGX_2D_SLAVE_PORT 0x4000
8340+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
8341+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
8342+#define PSB_NUM_VALIDATE_BUFFERS 1024
8343+#define PSB_MEM_KERNEL_START 0x10000000
8344+#define PSB_MEM_PDS_START 0x20000000
8345+#define PSB_MEM_MMU_START 0x40000000
8346+
8347+#define DRM_PSB_MEM_KERNEL DRM_BO_MEM_PRIV0
8348+#define DRM_PSB_FLAG_MEM_KERNEL DRM_BO_FLAG_MEM_PRIV0
8349+
8350+/*
8351+ * Flags for external memory type field.
8352+ */
8353+
8354+#define PSB_MSVDX_OFFSET 0x50000 /* MSVDX base offset */
8355+#define PSB_MSVDX_SIZE 0x8000 /* MSVDX MMIO region: 0x50000 - 0x57fff (32KB) */
8356+
8357+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
8358+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
8359+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
8360+
8361+/*
8362+ * PTE's and PDE's
8363+ */
8364+
8365+#define PSB_PDE_MASK 0x003FFFFF
8366+#define PSB_PDE_SHIFT 22
8367+#define PSB_PTE_SHIFT 12
8368+
8369+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
8370+#define PSB_PTE_WO 0x0002 /* Write only */
8371+#define PSB_PTE_RO 0x0004 /* Read only */
8372+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
8373+
8374+/*
8375+ * VDC registers and bits
8376+ */
8377+#define PSB_HWSTAM 0x2098
8378+#define PSB_INSTPM 0x20C0
8379+#define PSB_INT_IDENTITY_R 0x20A4
8380+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
8381+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
8382+#define _PSB_IRQ_SGX_FLAG (1<<18)
8383+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
8384+#define PSB_INT_MASK_R 0x20A8
8385+#define PSB_INT_ENABLE_R 0x20A0
8386+#define PSB_PIPEASTAT 0x70024
8387+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
8388+#define _PSB_VBLANK_CLEAR (1 << 1)
8389+#define PSB_PIPEBSTAT 0x71024
8390+
8391+#define _PSB_MMU_ER_MASK 0x0001FF00
8392+#define _PSB_MMU_ER_HOST (1 << 16)
8393+#define GPIOA 0x5010
8394+#define GPIOB 0x5014
8395+#define GPIOC 0x5018
8396+#define GPIOD 0x501c
8397+#define GPIOE 0x5020
8398+#define GPIOF 0x5024
8399+#define GPIOG 0x5028
8400+#define GPIOH 0x502c
8401+#define GPIO_CLOCK_DIR_MASK (1 << 0)
8402+#define GPIO_CLOCK_DIR_IN (0 << 1)
8403+#define GPIO_CLOCK_DIR_OUT (1 << 1)
8404+#define GPIO_CLOCK_VAL_MASK (1 << 2)
8405+#define GPIO_CLOCK_VAL_OUT (1 << 3)
8406+#define GPIO_CLOCK_VAL_IN (1 << 4)
8407+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
8408+#define GPIO_DATA_DIR_MASK (1 << 8)
8409+#define GPIO_DATA_DIR_IN (0 << 9)
8410+#define GPIO_DATA_DIR_OUT (1 << 9)
8411+#define GPIO_DATA_VAL_MASK (1 << 10)
8412+#define GPIO_DATA_VAL_OUT (1 << 11)
8413+#define GPIO_DATA_VAL_IN (1 << 12)
8414+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
8415+
8416+#define VCLK_DIVISOR_VGA0 0x6000
8417+#define VCLK_DIVISOR_VGA1 0x6004
8418+#define VCLK_POST_DIV 0x6010
8419+
8420+#define I915_READ(reg) readl(dev_priv->common.regs + (reg))
8421+#define I915_WRITE(reg, val) writel((val), dev_priv->common.regs + (reg))
8422+
8423+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
8424+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
8425+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
8426+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
8427+#define PSB_COMM_USER_IRQ (1024 >> 2)
8428+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
8429+#define PSB_COMM_FW (2048 >> 2)
8430+
8431+#define PSB_UIRQ_VISTEST 1
8432+#define PSB_UIRQ_OOM_REPLY 2
8433+#define PSB_UIRQ_FIRE_TA_REPLY 3
8434+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
8435+
8436+#define PSB_2D_SIZE (256*1024*1024)
8437+#define PSB_MAX_RELOC_PAGES 1024
8438+
8439+#define PSB_LOW_REG_OFFS 0x0204
8440+#define PSB_HIGH_REG_OFFS 0x0600
8441+
8442+#define PSB_NUM_VBLANKS 2
8443+
8457+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
8458+
8459+/*
8460+ * User options.
8461+ */
8462+
8463+
8464+struct psb_gtt {
8465+ struct drm_device *dev;
8466+ int initialized;
8467+ uint32_t gatt_start;
8468+ uint32_t gtt_start;
8469+ uint32_t gtt_phys_start;
8470+ unsigned gtt_pages;
8471+ unsigned gatt_pages;
8472+ uint32_t stolen_base;
8473+ uint32_t pge_ctl;
8474+ u16 gmch_ctrl;
8475+ unsigned long stolen_size;
8476+ uint32_t *gtt_map;
8477+ struct rw_semaphore sem;
8478+};
8479+
8480+struct psb_use_base {
8481+ struct list_head head;
8482+ struct drm_fence_object *fence;
8483+ unsigned int reg;
8484+ unsigned long offset;
8485+ unsigned int dm;
8486+};
8487+
8488+struct psb_buflist_item;
8489+
8490+struct psb_msvdx_cmd_queue {
8491+ struct list_head head;
8492+ void *cmd;
8493+ unsigned long cmd_size;
8494+ uint32_t sequence;
8495+};
8496+
8497+
8498+struct psb_mmu_driver;
8499+
8500+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
8501+ int trap_pagefaults,
8502+ int invalid_type,
8503+ atomic_t *msvdx_mmu_invaldc);
8504+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
8505+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver);
8506+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
8507+ uint32_t gtt_start, uint32_t gtt_pages);
8508+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
8509+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
8510+ int trap_pagefaults,
8511+ int invalid_type);
8512+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
8513+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
8514+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
8515+ unsigned long address,
8516+ uint32_t num_pages);
8517+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
8518+ uint32_t start_pfn,
8519+ unsigned long address,
8520+ uint32_t num_pages, int type);
8521+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
8522+ unsigned long *pfn);
8523+
8524+/*
8525+ * Enable / disable MMU for different requestors.
8526+ */
8527+
8528+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
8529+ uint32_t mask);
8530+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
8531+ uint32_t mask);
8532+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
8533+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
8534+ unsigned long address, uint32_t num_pages,
8535+ uint32_t desired_tile_stride,
8536+ uint32_t hw_tile_stride, int type);
8537+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
8538+ uint32_t num_pages,
8539+ uint32_t desired_tile_stride,
8540+ uint32_t hw_tile_stride);
8541+/*
8542+ * psb_sgx.c
8543+ */
8544+
8545+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
8546+ uint32_t sequence);
8547+extern void psb_init_2d(struct drm_psb_private *dev_priv);
8548+extern int psb_idle_2d(struct drm_device *dev);
8549+extern int psb_idle_3d(struct drm_device *dev);
8550+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
8551+ uint32_t src_offset,
8552+ uint32_t dst_offset, uint32_t pages,
8553+ int direction);
8554+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
8555+ struct drm_file *file_priv);
8556+extern int psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
8557+ unsigned int cmds);
8558+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
8559+ struct drm_buffer_object *cmd_buffer,
8560+ unsigned long cmd_offset,
8561+ unsigned long cmd_size, int engine,
8562+ uint32_t * copy_buffer);
8563+extern void psb_fence_or_sync(struct drm_file *priv,
8564+ int engine,
8565+ struct drm_psb_cmdbuf_arg *arg,
8566+ struct drm_fence_arg *fence_arg,
8567+ struct drm_fence_object **fence_p);
8568+extern void psb_init_disallowed(void);
8569+
8570+/*
8571+ * psb_irq.c
8572+ */
8573+
8574+extern u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
8575+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
8576+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
8577+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
8578+extern void psb_irq_preinstall(struct drm_device *dev);
8579+extern int psb_irq_postinstall(struct drm_device *dev);
8580+extern void psb_irq_uninstall(struct drm_device *dev);
8581+
8582+/*
8583+ * psb_fence.c
8584+ */
8585+
8586+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
8587+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
8588+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
8589+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
8590+ uint32_t class);
8591+extern int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
8592+ uint32_t flags, uint32_t * sequence,
8593+ uint32_t * native_type);
8594+extern void psb_fence_error(struct drm_device *dev,
8595+ uint32_t class,
8596+ uint32_t sequence, uint32_t type, int error);
8597+
8598+/* MSVDX support */
8599+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
8600+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
8601+extern int psb_hw_info_ioctl(struct drm_device *dev, void *data,
8602+ struct drm_file *file_priv);
8603+
8604+/*
8605+ * psb_buffer.c
8606+ */
8607+extern struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev);
8608+extern int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
8609+ uint32_t * type);
8610+extern uint32_t psb_evict_mask(struct drm_buffer_object *bo);
8611+extern int psb_invalidate_caches(struct drm_device *dev, uint64_t flags);
8612+extern int psb_init_mem_type(struct drm_device *dev, uint32_t type,
8613+ struct drm_mem_type_manager *man);
8614+extern int psb_move(struct drm_buffer_object *bo,
8615+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
8616+extern int psb_tbe_size(struct drm_device *dev, unsigned long num_pages);
8617+
8618+/*
8619+ * psb_gtt.c
8620+ */
8621+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
8622+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
8623+ unsigned offset_pages, unsigned num_pages,
8624+ unsigned desired_tile_stride,
8625+ unsigned hw_tile_stride, int type);
8626+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
8627+ unsigned num_pages,
8628+ unsigned desired_tile_stride,
8629+ unsigned hw_tile_stride);
8630+
8631+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
8632+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
8633+
8634+/*
8635+ * psb_fb.c
8636+ */
8637+extern int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
8638+extern int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
8639+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
8640+ struct drm_file *file_priv);
8641+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
8642+ struct drm_file *file_priv);
8643+extern void psbfb_suspend(struct drm_device *dev);
8644+extern void psbfb_resume(struct drm_device *dev);
8645+
8646+/*
8647+ * psb_reset.c
8648+ */
8649+
8650+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
8651+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
8652+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
8653+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
8654+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
8655+
8656+/*
8657+ * psb_regman.c
8658+ */
8659+
8660+extern void psb_takedown_use_base(struct drm_psb_private *dev_priv);
8661+extern int psb_grab_use_base(struct drm_psb_private *dev_priv,
8662+ unsigned long dev_virtual,
8663+ unsigned long size,
8664+ unsigned int data_master,
8665+ uint32_t fence_class,
8666+ uint32_t fence_type,
8667+ int no_wait,
8668+ int ignore_signals,
8669+ int *r_reg, uint32_t * r_offset);
8670+extern int psb_init_use_base(struct drm_psb_private *dev_priv,
8671+ unsigned int reg_start, unsigned int reg_num);
8672+
8673+/*
8674+ * psb_xhw.c
8675+ */
8676+
8677+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
8678+ struct drm_file *file_priv);
8679+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
8680+ struct drm_file *file_priv);
8681+extern int psb_xhw_init(struct drm_device *dev);
8682+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
8683+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
8684+ struct drm_file *file_priv, int closing);
8685+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
8686+ struct psb_xhw_buf *buf,
8687+ uint32_t fire_flags,
8688+ uint32_t hw_context,
8689+ uint32_t * cookie,
8690+ uint32_t * oom_cmds,
8691+ uint32_t num_oom_cmds,
8692+ uint32_t offset,
8693+ uint32_t engine, uint32_t flags);
8694+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
8695+ struct psb_xhw_buf *buf, uint32_t fire_flags);
8696+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
8697+ struct psb_xhw_buf *buf,
8698+ uint32_t w,
8699+ uint32_t h,
8700+ uint32_t * hw_cookie,
8701+ uint32_t * bo_size,
8702+ uint32_t * clear_p_start,
8703+ uint32_t * clear_num_pages);
8704+
8705+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
8706+ struct psb_xhw_buf *buf);
8707+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
8708+ struct psb_xhw_buf *buf, uint32_t * value);
8709+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
8710+ struct psb_xhw_buf *buf,
8711+ uint32_t pages,
8712+ uint32_t * hw_cookie, uint32_t * size);
8713+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
8714+ struct psb_xhw_buf *buf, uint32_t * cookie);
8715+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
8716+ struct psb_xhw_buf *buf,
8717+ uint32_t * cookie,
8718+ uint32_t * bca,
8719+ uint32_t * rca, uint32_t * flags);
8720+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
8721+ struct psb_xhw_buf *buf);
8722+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
8723+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
8724+ struct psb_xhw_buf *buf);
8725+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
8726+ struct psb_xhw_buf *buf, uint32_t * cookie);
8727+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
8728+ struct psb_xhw_buf *buf,
8729+ uint32_t flags,
8730+ uint32_t param_offset,
8731+ uint32_t pt_offset, uint32_t * hw_cookie);
8732+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
8733+ struct psb_xhw_buf *buf);
8734+
8735+extern void psb_i2c_init(struct drm_psb_private *dev_priv);
8736+
8737+/*
8738+ * psb_schedule.c: HW bug fixing.
8739+ */
8740+
8741+#ifdef FIX_TG_16
8742+
8743+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
8744+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
8745+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
8746+
8747+#else
8748+
8749+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
8750+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
8751+
8752+#endif
8753+
8754+/*
8755+ * Utilities
8756+ */
8757+
8758+#define PSB_WVDC32(_val, _offs) I915_WRITE(_offs, _val)
8759+#define PSB_RVDC32(_offs) I915_READ(_offs)
8760+
8761+#define PSB_ALIGN_TO(_val, _align) \
8762+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
8763+#define PSB_WSGX32(_val, _offs) \
8764+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
8765+#define PSB_RSGX32(_offs) \
8766+ ioread32(dev_priv->sgx_reg + (_offs))
8767+#define PSB_WMSVDX32(_val, _offs) \
8768+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
8769+#define PSB_RMSVDX32(_offs) \
8770+ ioread32(dev_priv->msvdx_reg + (_offs))
8771+
8772+#define PSB_ALPL(_val, _base) \
8773+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
8774+#define PSB_ALPLM(_val, _base) \
8775+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
8776+
8777+#define PSB_D_RENDER (1 << 16)
8778+
8779+#define PSB_D_GENERAL (1 << 0)
8780+#define PSB_D_INIT (1 << 1)
8781+#define PSB_D_IRQ (1 << 2)
8782+#define PSB_D_FW (1 << 3)
8783+#define PSB_D_PERF (1 << 4)
8784+#define PSB_D_TMP (1 << 5)
8785+#define PSB_D_RELOC (1 << 6)
8786+
8787+extern int drm_psb_debug;
8788+extern int drm_psb_no_fb;
8789+extern int drm_psb_disable_vsync;
8790+
8791+#define PSB_DEBUG_FW(_fmt, _arg...) \
8792+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
8793+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
8794+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
8795+#define PSB_DEBUG_INIT(_fmt, _arg...) \
8796+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
8797+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
8798+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
8799+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
8800+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
8801+#define PSB_DEBUG_PERF(_fmt, _arg...) \
8802+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
8803+#define PSB_DEBUG_TMP(_fmt, _arg...) \
8804+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
8805+#define PSB_DEBUG_RELOC(_fmt, _arg...) \
8806+ PSB_DEBUG(PSB_D_RELOC, _fmt, ##_arg)
8807+
8808+#if DRM_DEBUG_CODE
8809+#define PSB_DEBUG(_flag, _fmt, _arg...) \
8810+ do { \
8811+ if (unlikely((_flag) & drm_psb_debug)) \
8812+ printk(KERN_DEBUG \
8813+ "[psb:0x%02x:%s] " _fmt , _flag, \
8814+ __FUNCTION__ , ##_arg); \
8815+ } while (0)
8816+#else
8817+#define PSB_DEBUG(_flag, _fmt, _arg...) do { } while (0)
8818+#endif
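
Usage sketch for the debug macros: each PSB_DEBUG_* wrapper selects one PSB_D_* bit, and output appears only when that bit is set in the drm_psb_debug module parameter (and DRM_DEBUG_CODE is compiled in). The printed value here is illustrative:

    /* Prints only when drm_psb_debug has PSB_D_INIT set. */
    PSB_DEBUG_INIT("SGX registers mapped at %p\n", dev_priv->sgx_reg);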
8819+
8820+#endif
8821Index: linux-2.6.28/drivers/gpu/drm/psb/psb_fb.c
8822===================================================================
8823--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8824+++ linux-2.6.28/drivers/gpu/drm/psb/psb_fb.c 2009-02-25 15:37:02.000000000 +0000
8825@@ -0,0 +1,1219 @@
8826+/**************************************************************************
8827+ * Copyright (c) 2007, Intel Corporation.
8828+ * All Rights Reserved.
8829+ *
8830+ * This program is free software; you can redistribute it and/or modify it
8831+ * under the terms and conditions of the GNU General Public License,
8832+ * version 2, as published by the Free Software Foundation.
8833+ *
8834+ * This program is distributed in the hope it will be useful, but WITHOUT
8835+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8836+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8837+ * more details.
8838+ *
8839+ * You should have received a copy of the GNU General Public License along with
8840+ * this program; if not, write to the Free Software Foundation, Inc.,
8841+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8842+ *
8843+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
8844+ * develop this driver.
8845+ *
8846+ **************************************************************************/
8847+
8848+#include <linux/module.h>
8849+#include <linux/kernel.h>
8850+#include <linux/errno.h>
8851+#include <linux/string.h>
8852+#include <linux/mm.h>
8853+#include <linux/tty.h>
8854+#include <linux/slab.h>
8855+#include <linux/delay.h>
8856+#include <linux/fb.h>
8857+#include <linux/init.h>
8858+#include <linux/console.h>
8859+
8860+#include "drmP.h"
8861+#include "drm.h"
8862+#include "drm_crtc.h"
8863+#include "psb_drv.h"
8864+
8865+struct psbfb_vm_info {
8866+ struct drm_buffer_object *bo;
8867+ struct address_space *f_mapping;
8868+ struct mutex vm_mutex;
8869+ atomic_t refcount;
8870+};
8871+
8872+struct psbfb_par {
8873+ struct drm_device *dev;
8874+ struct drm_crtc *crtc;
8875+ struct drm_connector *output;
8876+ struct psbfb_vm_info *vi;
8877+ int dpms_state;
8878+};
8879+
8880+static void psbfb_vm_info_deref(struct psbfb_vm_info **vi)
8881+{
8882+ struct psbfb_vm_info *tmp = *vi;
8883+ *vi = NULL;
8884+ if (atomic_dec_and_test(&tmp->refcount)) {
8885+ drm_bo_usage_deref_unlocked(&tmp->bo);
8886+ drm_free(tmp, sizeof(*tmp), DRM_MEM_MAPS);
8887+ }
8888+}
8889+
8890+static struct psbfb_vm_info *psbfb_vm_info_ref(struct psbfb_vm_info *vi)
8891+{
8892+ atomic_inc(&vi->refcount);
8893+ return vi;
8894+}
8895+
8896+static struct psbfb_vm_info *psbfb_vm_info_create(void)
8897+{
8898+ struct psbfb_vm_info *vi;
8899+
8900+ vi = drm_calloc(1, sizeof(*vi), DRM_MEM_MAPS);
8901+ if (!vi)
8902+ return NULL;
8903+
8904+ mutex_init(&vi->vm_mutex);
8905+ atomic_set(&vi->refcount, 1);
8906+ return vi;
8907+}
8908+
8909+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
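
CMAP_TOHW rescales a 16-bit fbdev colour component to the hardware field width with rounding; for example, mapping to a 5-bit field:

    /*
     * CMAP_TOHW(0xFFFF, 5) == ((0xFFFF << 5) + 0x7FFF - 0xFFFF) >> 16 == 31
     * CMAP_TOHW(0x0000, 5) == (0x7FFF - 0) >> 16                      == 0
     * i.e. full scale maps to the field maximum, zero stays zero.
     */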
8910+
8911+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
8912+ unsigned blue, unsigned transp, struct fb_info *info)
8913+{
8914+ struct psbfb_par *par = info->par;
8915+ struct drm_crtc *crtc = par->crtc;
8916+ uint32_t v;
8917+
8918+ if (!crtc->fb)
8919+ return -ENOMEM;
8920+
8921+ if (regno > 255)
8922+ return 1;
8923+
8924+ if (crtc->fb->depth == 8) {
8925+ intel_crtc_fb_gamma_set(crtc, red, green, blue, regno);
8926+ return 0;
8927+ }
8928+
8929+ red = CMAP_TOHW(red, info->var.red.length);
8930+ blue = CMAP_TOHW(blue, info->var.blue.length);
8931+ green = CMAP_TOHW(green, info->var.green.length);
8932+ transp = CMAP_TOHW(transp, info->var.transp.length);
8933+
8934+ v = (red << info->var.red.offset) |
8935+ (green << info->var.green.offset) |
8936+ (blue << info->var.blue.offset) |
8937+ (transp << info->var.transp.offset);
8938+
8939+ switch (crtc->fb->bits_per_pixel) {
8940+ case 16:
8941+ case 24:
8942+ case 32:
8943+ ((uint32_t *) info->pseudo_palette)[regno] = v;
8944+ break;
8945+ }
8948+
8949+ return 0;
8950+}
8951+
8952+static int psbfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
8953+{
8954+ struct psbfb_par *par = info->par;
8955+ //struct drm_device *dev = par->dev;
8956+ struct drm_framebuffer *fb = par->crtc->fb;
8957+ //struct drm_display_mode *drm_mode;
8958+ //struct drm_connector *output;
8959+ int depth;
8960+ int pitch;
8961+ int bpp = var->bits_per_pixel;
8962+
8963+ if (!fb)
8964+ return -ENOMEM;
8965+
8966+ if (!var->pixclock)
8967+ return -EINVAL;
8968+
8969+ /* don't support virtuals for now */
8970+ if (var->xres_virtual > var->xres)
8971+ return -EINVAL;
8972+
8973+ if (var->yres_virtual > var->yres)
8974+ return -EINVAL;
8975+
8976+ switch (bpp) {
8977+ case 8:
8978+ depth = 8;
8979+ break;
8980+ case 16:
8981+ depth = (var->green.length == 6) ? 16 : 15;
8982+ break;
8983+ case 24: /* assume this is 32bpp / depth 24 */
8984+ bpp = 32;
8985+ /* fallthrough */
8986+ case 32:
8987+ depth = (var->transp.length > 0) ? 32 : 24;
8988+ break;
8989+ default:
8990+ return -EINVAL;
8991+ }
8992+
8993+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
8994+
8995+ /* Check that we can resize */
8996+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
8997+ /* Need to resize the fb object.
8998+ * But the generic fbdev code doesn't really understand
8999+ * that we can do this. So disable for now.
9000+ */
9001+ DRM_INFO("Can't support requested size, too big!\n");
9002+ return -EINVAL;
9003+ }
9004+
9005+ switch (depth) {
9006+ case 8:
9007+ var->red.offset = 0;
9008+ var->green.offset = 0;
9009+ var->blue.offset = 0;
9010+ var->red.length = 8;
9011+ var->green.length = 8;
9012+ var->blue.length = 8;
9013+ var->transp.length = 0;
9014+ var->transp.offset = 0;
9015+ break;
9016+ case 15:
9017+ var->red.offset = 10;
9018+ var->green.offset = 5;
9019+ var->blue.offset = 0;
9020+ var->red.length = 5;
9021+ var->green.length = 5;
9022+ var->blue.length = 5;
9023+ var->transp.length = 1;
9024+ var->transp.offset = 15;
9025+ break;
9026+ case 16:
9027+ var->red.offset = 11;
9028+ var->green.offset = 5;
9029+ var->blue.offset = 0;
9030+ var->red.length = 5;
9031+ var->green.length = 6;
9032+ var->blue.length = 5;
9033+ var->transp.length = 0;
9034+ var->transp.offset = 0;
9035+ break;
9036+ case 24:
9037+ var->red.offset = 16;
9038+ var->green.offset = 8;
9039+ var->blue.offset = 0;
9040+ var->red.length = 8;
9041+ var->green.length = 8;
9042+ var->blue.length = 8;
9043+ var->transp.length = 0;
9044+ var->transp.offset = 0;
9045+ break;
9046+ case 32:
9047+ var->red.offset = 16;
9048+ var->green.offset = 8;
9049+ var->blue.offset = 0;
9050+ var->red.length = 8;
9051+ var->green.length = 8;
9052+ var->blue.length = 8;
9053+ var->transp.length = 8;
9054+ var->transp.offset = 24;
9055+ break;
9056+ default:
9057+ return -EINVAL;
9058+ }
9059+
9060+ return 0;
9061+}
9062+
9063+static int psbfb_move_fb_bo(struct fb_info *info, struct drm_buffer_object *bo,
9064+ uint64_t mem_type_flags)
9065+{
9066+ struct psbfb_par *par;
9067+ loff_t holelen;
9068+ int ret;
9069+
9070+ /*
9071+ * Kill all user-space mappings of this device. They will be
9072+ * faulted back using nopfn when accessed.
9073+ */
9074+
9075+ par = info->par;
9076+ holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
9077+ mutex_lock(&par->vi->vm_mutex);
9078+ if (par->vi->f_mapping) {
9079+ unmap_mapping_range(par->vi->f_mapping, 0, holelen, 1);
9080+ }
9081+
9082+ ret = drm_bo_do_validate(bo,
9083+ mem_type_flags,
9084+ DRM_BO_MASK_MEM |
9085+ DRM_BO_FLAG_NO_EVICT,
9086+ DRM_BO_HINT_DONT_FENCE, 0, 1, NULL);
9087+
9088+ mutex_unlock(&par->vi->vm_mutex);
9089+ return ret;
9090+}
9091+
9092+/* this will let fbcon do the mode init */
9093+static int psbfb_set_par(struct fb_info *info)
9094+{
9095+ struct psbfb_par *par = info->par;
9096+ struct drm_framebuffer *fb = par->crtc->fb;
9097+ struct drm_device *dev = par->dev;
9098+ struct drm_display_mode *drm_mode;
9099+ struct fb_var_screeninfo *var = &info->var;
9100+ struct drm_psb_private *dev_priv = dev->dev_private;
9101+ //struct drm_connector *output;
9102+ int pitch;
9103+ int depth;
9104+ int bpp = var->bits_per_pixel;
9105+
9106+ if (!fb)
9107+ return -ENOMEM;
9108+
9109+ switch (bpp) {
9110+ case 8:
9111+ depth = 8;
9112+ break;
9113+ case 16:
9114+ depth = (var->green.length == 6) ? 16 : 15;
9115+ break;
9116+ case 24: /* assume this is 32bpp / depth 24 */
9117+ bpp = 32;
9118+ /* fallthrough */
9119+ case 32:
9120+ depth = (var->transp.length > 0) ? 32 : 24;
9121+ break;
9122+ default:
9123+ return -EINVAL;
9124+ }
9125+
9126+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
9127+
9128+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
9129+ /* Need to resize the fb object.
9130+ * But the generic fbdev code doesn't really understand
9131+ * that we can do this. So disable for now.
9132+ */
9133+ DRM_INFO("Can't support requested size, too big!\n");
9134+ return -EINVAL;
9135+ }
9136+
9137+ fb->offset = fb->bo->offset - dev_priv->pg->gatt_start;
9138+ fb->width = var->xres;
9139+ fb->height = var->yres;
9140+ fb->bits_per_pixel = bpp;
9141+ fb->pitch = pitch;
9142+ fb->depth = depth;
9143+
9144+ info->fix.line_length = fb->pitch;
9145+ info->fix.visual =
9146+ (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
9147+
9148+ /* some fbdev's apps don't want these to change */
9149+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
9150+
9151+ /* We have to re-set the output base address because fb->bo
9152+ may have been moved by the previous drm_bo_do_validate().
9153+ Otherwise the screen may go black when leaving the X
9154+ session and re-entering the console. */
9155+ info->screen_base = fb->kmap.virtual;
9156+
9157+ /* Should we walk the output's modelist or just create our own ???
9158+ * For now, we create and destroy a mode based on the incoming
9159+ * parameters. But there's commented out code below which scans
9160+ * the output list too.
9161+ */
9162+
9163+ drm_mode = drm_mode_create(dev);
9164+ drm_mode->hdisplay = var->xres;
9165+ drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
9166+ drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
9167+ drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
9168+ drm_mode->vdisplay = var->yres;
9169+ drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
9170+ drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
9171+ drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
9172+ drm_mode->clock = PICOS2KHZ(var->pixclock);
9173+ drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
9174+ drm_mode_set_name(drm_mode);
9175+ drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
9176+
9177+
9178+ if (!drm_crtc_helper_set_mode(par->crtc, drm_mode, 0, 0, NULL))
9179+ return -EINVAL;
9180+
9181+ /* Have to destroy our created mode if we're not searching the mode
9182+ * list for it.
9183+ */
9184+ drm_mode_destroy(dev, drm_mode);
9185+
9186+ return 0;
9187+}
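
The mode built above follows the standard fbdev-to-DRM timing translation: each sync edge is the previous edge plus a margin or sync length. With made-up panel numbers (xres = 1024, right_margin = 24, hsync_len = 136, left_margin = 160):

    /*
     * hsync_start = 1024 + 24  = 1048
     * hsync_end   = 1048 + 136 = 1184
     * htotal      = 1184 + 160 = 1344
     * (the vertical chain works the same way with the upper/lower margins)
     */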
9188+
9189+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);
9190+
9191+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
9192+ uint32_t dst_offset, uint32_t dst_stride,
9193+ uint32_t dst_format, uint16_t dst_x,
9194+ uint16_t dst_y, uint16_t size_x,
9195+ uint16_t size_y, uint32_t fill)
9196+{
9197+ uint32_t buffer[10];
9198+ uint32_t *buf;
9199+ int ret;
9200+
9201+ buf = buffer;
9202+
9203+ *buf++ = PSB_2D_FENCE_BH;
9204+
9205+ *buf++ =
9206+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
9207+ PSB_2D_DST_STRIDE_SHIFT);
9208+ *buf++ = dst_offset;
9209+
9210+ *buf++ =
9211+ PSB_2D_BLIT_BH |
9212+ PSB_2D_ROT_NONE |
9213+ PSB_2D_COPYORDER_TL2BR |
9214+ PSB_2D_DSTCK_DISABLE |
9215+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
9216+
9217+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
9218+ *buf++ =
9219+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
9220+ PSB_2D_DST_YSTART_SHIFT);
9221+ *buf++ =
9222+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
9223+ PSB_2D_DST_YSIZE_SHIFT);
9224+ *buf++ = PSB_2D_FLUSH_BH;
9225+
9226+ psb_2d_lock(dev_priv);
9227+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
9228+ psb_2d_unlock(dev_priv);
9229+
9230+ return ret;
9231+}
9232+
9233+static void psbfb_fillrect_accel(struct fb_info *info,
9234+ const struct fb_fillrect *r)
9235+{
9236+ struct psbfb_par *par = info->par;
9237+ struct drm_framebuffer *fb = par->crtc->fb;
9238+ struct drm_psb_private *dev_priv = par->dev->dev_private;
9239+ uint32_t offset;
9240+ uint32_t stride;
9241+ uint32_t format;
9242+
9243+ if (!fb)
9244+ return;
9245+
9246+ offset = fb->offset;
9247+ stride = fb->pitch;
9248+
9249+ switch (fb->depth) {
9250+ case 8:
9251+ format = PSB_2D_DST_332RGB;
9252+ break;
9253+ case 15:
9254+ format = PSB_2D_DST_555RGB;
9255+ break;
9256+ case 16:
9257+ format = PSB_2D_DST_565RGB;
9258+ break;
9259+ case 24:
9260+ case 32:
9261+ /* this is wrong but since we don't do blending it's okay */
9262+ format = PSB_2D_DST_8888ARGB;
9263+ break;
9264+ default:
9265+ /* software fallback */
9266+ cfb_fillrect(info, r);
9267+ return;
9268+ }
9269+
9270+ psb_accel_2d_fillrect(dev_priv,
9271+ offset, stride, format,
9272+ r->dx, r->dy, r->width, r->height, r->color);
9273+}
9274+
9275+static void psbfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
9276+{
9277+ if (info->state != FBINFO_STATE_RUNNING)
9278+ return;
9279+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
9280+ cfb_fillrect(info, rect);
9281+ return;
9282+ }
9283+ if (in_interrupt() || in_atomic()) {
9284+ /*
9285+ * Catch case when we're shutting down.
9286+ */
9287+ cfb_fillrect(info, rect);
9288+ return;
9289+ }
9290+ psbfb_fillrect_accel(info, rect);
9291+}
9292+
9293+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
9294+{
9295+ if (xdir < 0)
9296+ return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
9297+ PSB_2D_COPYORDER_TR2BL;
9298+ else
9299+ return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
9300+ PSB_2D_COPYORDER_TL2BR;
9301+}
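
Direction selection matters for overlapping on-screen copies: the blitter must read each pixel before the copy overwrites it, so traversal starts from the corner that is written last. For example, dragging a region 10 pixels right and down gives xdir = src_x - dst_x = -10 and ydir = -10, which selects PSB_2D_COPYORDER_BR2TL; that is why psb_accel_2d_copy() then offsets the start coordinates to the bottom-right corner.

    /*
     * xdir <  0, ydir <  0  ->  BR2TL  (copy down/right: start bottom-right)
     * xdir <  0, ydir >= 0  ->  TR2BL
     * xdir >= 0, ydir <  0  ->  BL2TR
     * xdir >= 0, ydir >= 0  ->  TL2BR  (copy up/left: start top-left)
     */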
9302+
9303+/*
9304+ * @srcOffset in bytes
9305+ * @srcStride in bytes
9306+ * @srcFormat psb 2D format defines
9307+ * @dstOffset in bytes
9308+ * @dstStride in bytes
9309+ * @dstFormat psb 2D format defines
9310+ * @srcX offset in pixels
9311+ * @srcY offset in pixels
9312+ * @dstX offset in pixels
9313+ * @dstY offset in pixels
9314+ * @sizeX of the copied area
9315+ * @sizeY of the copied area
9316+ */
9317+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
9318+ uint32_t src_offset, uint32_t src_stride,
9319+ uint32_t src_format, uint32_t dst_offset,
9320+ uint32_t dst_stride, uint32_t dst_format,
9321+ uint16_t src_x, uint16_t src_y, uint16_t dst_x,
9322+ uint16_t dst_y, uint16_t size_x, uint16_t size_y)
9323+{
9324+ uint32_t blit_cmd;
9325+ uint32_t buffer[10];
9326+ uint32_t *buf;
9327+ uint32_t direction;
9328+ int ret;
9329+
9330+ buf = buffer;
9331+
9332+ direction = psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
9333+
9334+ if (direction == PSB_2D_COPYORDER_BR2TL ||
9335+ direction == PSB_2D_COPYORDER_TR2BL) {
9336+ src_x += size_x - 1;
9337+ dst_x += size_x - 1;
9338+ }
9339+ if (direction == PSB_2D_COPYORDER_BR2TL ||
9340+ direction == PSB_2D_COPYORDER_BL2TR) {
9341+ src_y += size_y - 1;
9342+ dst_y += size_y - 1;
9343+ }
9344+
9345+ blit_cmd =
9346+ PSB_2D_BLIT_BH |
9347+ PSB_2D_ROT_NONE |
9348+ PSB_2D_DSTCK_DISABLE |
9349+ PSB_2D_SRCCK_DISABLE |
9350+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
9351+
9352+ *buf++ = PSB_2D_FENCE_BH;
9353+ *buf++ =
9354+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
9355+ PSB_2D_DST_STRIDE_SHIFT);
9356+ *buf++ = dst_offset;
9357+ *buf++ =
9358+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
9359+ PSB_2D_SRC_STRIDE_SHIFT);
9360+ *buf++ = src_offset;
9361+ *buf++ =
9362+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) | (src_y
9363+ <<
9364+ PSB_2D_SRCOFF_YSTART_SHIFT);
9365+ *buf++ = blit_cmd;
9366+ *buf++ =
9367+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
9368+ PSB_2D_DST_YSTART_SHIFT);
9369+ *buf++ =
9370+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
9371+ PSB_2D_DST_YSIZE_SHIFT);
9372+ *buf++ = PSB_2D_FLUSH_BH;
9373+
9374+ psb_2d_lock(dev_priv);
9375+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
9376+ psb_2d_unlock(dev_priv);
9377+ return ret;
9378+}
9379+
9380+static void psbfb_copyarea_accel(struct fb_info *info,
9381+ const struct fb_copyarea *a)
9382+{
9383+ struct psbfb_par *par = info->par;
9384+ struct drm_framebuffer *fb = par->crtc->fb;
9385+ struct drm_psb_private *dev_priv = par->dev->dev_private;
9386+ uint32_t offset;
9387+ uint32_t stride;
9388+ uint32_t src_format;
9389+ uint32_t dst_format;
9390+
9391+ if (!fb)
9392+ return;
9393+
9394+ offset = fb->offset;
9395+ stride = fb->pitch;
9396+
9397+ if (a->width == 8 || a->height == 8) {
9398+ psb_2d_lock(dev_priv);
9399+ psb_idle_2d(par->dev);
9400+ psb_2d_unlock(dev_priv);
9401+ cfb_copyarea(info, a);
9402+ return;
9403+ }
9404+
9405+ switch (fb->depth) {
9406+ case 8:
9407+ src_format = PSB_2D_SRC_332RGB;
9408+ dst_format = PSB_2D_DST_332RGB;
9409+ break;
9410+ case 15:
9411+ src_format = PSB_2D_SRC_555RGB;
9412+ dst_format = PSB_2D_DST_555RGB;
9413+ break;
9414+ case 16:
9415+ src_format = PSB_2D_SRC_565RGB;
9416+ dst_format = PSB_2D_DST_565RGB;
9417+ break;
9418+ case 24:
9419+ case 32:
9420+ /* this is wrong but since we don't do blending it's okay */
9421+ src_format = PSB_2D_SRC_8888ARGB;
9422+ dst_format = PSB_2D_DST_8888ARGB;
9423+ break;
9424+ default:
9425+ /* software fallback */
9426+ cfb_copyarea(info, a);
9427+ return;
9428+ }
9429+
9430+ psb_accel_2d_copy(dev_priv,
9431+ offset, stride, src_format,
9432+ offset, stride, dst_format,
9433+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
9434+}
9435+
9436+static void psbfb_copyarea(struct fb_info *info,
9437+ const struct fb_copyarea *region)
9438+{
9439+ if (info->state != FBINFO_STATE_RUNNING)
9440+ return;
9441+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
9442+ cfb_copyarea(info, region);
9443+ return;
9444+ }
9445+ if (in_interrupt() || in_atomic()) {
9446+ /*
9447+ * Catch case when we're shutting down.
9448+ */
9449+ cfb_copyarea(info, region);
9450+ return;
9451+ }
9452+
9453+ psbfb_copyarea_accel(info, region);
9454+}
9455+
9456+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
9457+{
9458+ if (info->state != FBINFO_STATE_RUNNING)
9459+ return;
9460+
9461+ /* No accelerated imageblit path yet; every case falls back to cfb. */
9462+ cfb_imageblit(info, image);
9463+}
9471+
9472+static int psbfb_blank(int blank_mode, struct fb_info *info)
9473+{
9474+ int dpms_mode;
9475+ struct psbfb_par *par = info->par;
9476+ struct drm_connector *output;
9477+ struct drm_crtc_helper_funcs *crtc_funcs;
9478+
9479+ par->dpms_state = blank_mode;
9480+
9481+ switch (blank_mode) {
9482+ case FB_BLANK_UNBLANK:
9483+ dpms_mode = DRM_MODE_DPMS_ON;
9484+ break;
9485+ case FB_BLANK_NORMAL:
9486+ if (!par->crtc)
9487+ return 0;
9488+ crtc_funcs = par->crtc->helper_private;
9489+
9490+ (*crtc_funcs->dpms)(par->crtc, DRM_MODE_DPMS_STANDBY);
9491+ return 0;
9492+ case FB_BLANK_HSYNC_SUSPEND:
9493+ default:
9494+ dpms_mode = DRM_MODE_DPMS_STANDBY;
9495+ break;
9496+ case FB_BLANK_VSYNC_SUSPEND:
9497+ dpms_mode = DRM_MODE_DPMS_SUSPEND;
9498+ break;
9499+ case FB_BLANK_POWERDOWN:
9500+ dpms_mode = DRM_MODE_DPMS_OFF;
9501+ break;
9502+ }
9503+
9504+ if (!par->crtc)
9505+ return 0;
9506+
9507+ crtc_funcs = par->crtc->helper_private;
9508+
9509+ list_for_each_entry(output, &par->dev->mode_config.connector_list, head) {
9510+ if (output->encoder->crtc == par->crtc)
9511+ (*output->funcs->dpms)(output, dpms_mode);
9512+ }
9513+
9514+ return 0;
9515+}
9516+
9517+
9518+static int psbfb_kms_off(struct drm_device *dev, int suspend)
9519+{
9520+ struct drm_framebuffer *fb = NULL;
9521+ struct drm_buffer_object *bo = NULL;
9522+ struct drm_psb_private *dev_priv = dev->dev_private;
9523+ int ret = 0;
9524+
9525+ DRM_DEBUG("psbfb_kms_off\n");
9526+
9527+ mutex_lock(&dev->mode_config.mutex);
9528+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
9529+ struct fb_info *info = fb->fbdev;
9530+ struct psbfb_par *par = info->par;
9531+ int save_dpms_state;
9532+
9533+ if (suspend)
9534+ fb_set_suspend(info, 1);
9535+ else
9536+ info->state &= ~FBINFO_STATE_RUNNING;
9537+
9538+ info->screen_base = NULL;
9539+
9540+ bo = fb->bo;
9541+
9542+ if (!bo)
9543+ continue;
9544+
9545+ drm_bo_kunmap(&fb->kmap);
9546+
9547+ /*
9548+ * We don't take the 2D lock here as we assume that the
9549+ * 2D engine will eventually idle anyway.
9550+ */
9551+
9552+ if (!suspend) {
9553+ uint32_t dummy2 = 0;
9554+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
9555+ &dummy2, &dummy2);
9556+ psb_2d_lock(dev_priv);
9557+ (void)psb_idle_2d(dev);
9558+ psb_2d_unlock(dev_priv);
9559+ } else
9560+ psb_idle_2d(dev);
9561+
9562+ save_dpms_state = par->dpms_state;
9563+ psbfb_blank(FB_BLANK_NORMAL, info);
9564+ par->dpms_state = save_dpms_state;
9565+
9566+ ret = psbfb_move_fb_bo(info, bo, DRM_BO_FLAG_MEM_LOCAL);
9567+
9568+ if (ret)
9569+ goto out_err;
9570+ }
9571+ out_err:
9572+ mutex_unlock(&dev->mode_config.mutex);
9573+
9574+ return ret;
9575+}
9576+
9577+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
9578+ struct drm_file *file_priv)
9579+{
9580+ int ret;
9581+
9582+ acquire_console_sem();
9583+ ret = psbfb_kms_off(dev, 0);
9584+ release_console_sem();
9585+
9586+ return ret;
9587+}
9588+
9589+static int psbfb_kms_on(struct drm_device *dev, int resume)
9590+{
9591+ struct drm_framebuffer *fb = NULL;
9592+ struct drm_buffer_object *bo = NULL;
9593+ struct drm_psb_private *dev_priv = dev->dev_private;
9594+ int ret = 0;
9595+ int dummy;
9596+
9597+ DRM_DEBUG("psbfb_kms_on\n");
9598+
9599+ if (!resume) {
9600+ uint32_t dummy2 = 0;
9601+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
9602+ &dummy2, &dummy2);
9603+ psb_2d_lock(dev_priv);
9604+ (void)psb_idle_2d(dev);
9605+ psb_2d_unlock(dev_priv);
9606+ } else
9607+ psb_idle_2d(dev);
9608+
9609+ mutex_lock(&dev->mode_config.mutex);
9610+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
9611+ struct fb_info *info = fb->fbdev;
9612+ struct psbfb_par *par = info->par;
9613+
9614+ bo = fb->bo;
9615+ if (!bo)
9616+ continue;
9617+
9618+ ret = psbfb_move_fb_bo(info, bo,
9619+ DRM_BO_FLAG_MEM_TT |
9620+ DRM_BO_FLAG_MEM_VRAM |
9621+ DRM_BO_FLAG_NO_EVICT);
9622+ if (ret)
9623+ goto out_err;
9624+
9625+ ret = drm_bo_kmap(bo, 0, bo->num_pages, &fb->kmap);
9626+ if (ret)
9627+ goto out_err;
9628+
9629+ info->screen_base = drm_bmo_virtual(&fb->kmap, &dummy);
9630+ fb->offset = bo->offset - dev_priv->pg->gatt_start;
9631+
9632+ if (ret)
9633+ goto out_err;
9634+
9635+ if (resume)
9636+ fb_set_suspend(info, 0);
9637+ else
9638+ info->state |= FBINFO_STATE_RUNNING;
9639+
9640+ /*
9641+ * Re-run modesetting here, since the VDS scanout offset may
9642+ * have changed.
9643+ */
9644+
9645+ if (par->crtc->enabled) {
9646+ psbfb_set_par(info);
9647+ psbfb_blank(par->dpms_state, info);
9648+ }
9649+ }
9650+ out_err:
9651+ mutex_unlock(&dev->mode_config.mutex);
9652+
9653+ return ret;
9654+}
9655+
9656+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
9657+ struct drm_file *file_priv)
9658+{
9659+ int ret;
9660+
9661+ acquire_console_sem();
9662+ ret = psbfb_kms_on(dev, 0);
9663+ release_console_sem();
9664+
9665+ drm_helper_disable_unused_functions(dev);
9666+
9667+ return ret;
9668+}
9669+
9670+void psbfb_suspend(struct drm_device *dev)
9671+{
9672+ acquire_console_sem();
9673+ psbfb_kms_off(dev, 1);
9674+ release_console_sem();
9675+}
9676+
9677+void psbfb_resume(struct drm_device *dev)
9678+{
9679+ acquire_console_sem();
9680+ psbfb_kms_on(dev, 1);
9681+ release_console_sem();
9682+
9683+ drm_helper_disable_unused_functions(dev);
9684+}
9685+
9686+/*
9687+ * FIXME: Before kernel inclusion, migrate nopfn to fault.
9688+ * Also, these should be the default vm ops for buffer object type fbs.
9689+ */
9690+
9691+extern unsigned long drm_bo_vm_fault(struct vm_area_struct *vma,
9692+ struct vm_fault *vmf);
9693+
9694+/*
9695+ * This wrapper is a bit ugly and is here because we need access to a mutex
9696+ * that we can lock both around nopfn and around unmap_mapping_range + move.
9697+ * Normally, this would've been done using the bo mutex, but unfortunately
9698+ * we cannot lock it around drm_bo_do_validate(), since that would imply
9699+ * recursive locking.
9700+ */
9701+
9702+static int psbfb_fault(struct vm_area_struct *vma,
9703+ struct vm_fault *vmf)
9704+{
9705+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
9706+ struct vm_area_struct tmp_vma;
9707+ int ret;
9708+
9709+ mutex_lock(&vi->vm_mutex);
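+	/*
+	 * drm_bo_vm_fault() expects vm_private_data to be the buffer object,
+	 * so hand it a throwaway copy of the vma with that field swapped.
+	 */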
9710+ tmp_vma = *vma;
9711+ tmp_vma.vm_private_data = vi->bo;
9712+ ret = drm_bo_vm_fault(&tmp_vma, vmf);
9713+ mutex_unlock(&vi->vm_mutex);
9714+ return ret;
9715+}
9716+
9717+static void psbfb_vm_open(struct vm_area_struct *vma)
9718+{
9719+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
9720+
9721+ atomic_inc(&vi->refcount);
9722+}
9723+
9724+static void psbfb_vm_close(struct vm_area_struct *vma)
9725+{
9726+ psbfb_vm_info_deref((struct psbfb_vm_info **)&vma->vm_private_data);
9727+}
9728+
9729+static struct vm_operations_struct psbfb_vm_ops = {
9730+ .fault = psbfb_fault,
9731+ .open = psbfb_vm_open,
9732+ .close = psbfb_vm_close,
9733+};
9734+
9735+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
9736+{
9737+ struct psbfb_par *par = info->par;
9738+ struct drm_framebuffer *fb = par->crtc->fb;
9739+ struct drm_buffer_object *bo = fb->bo;
9740+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
9741+ unsigned long offset = vma->vm_pgoff;
9742+
9743+ if (vma->vm_pgoff != 0)
9744+ return -EINVAL;
9747+ if (offset + size > bo->num_pages)
9748+ return -EINVAL;
9749+
9750+ mutex_lock(&par->vi->vm_mutex);
9751+ if (!par->vi->f_mapping)
9752+ par->vi->f_mapping = vma->vm_file->f_mapping;
9753+ mutex_unlock(&par->vi->vm_mutex);
9754+
9755+ vma->vm_private_data = psbfb_vm_info_ref(par->vi);
9756+
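+	/*
+	 * The backing buffer object may move between VRAM and system memory,
+	 * so map by pfn at fault time instead of remapping the range up front.
+	 */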
9757+ vma->vm_ops = &psbfb_vm_ops;
9758+ vma->vm_flags |= VM_PFNMAP;
9759+
9760+ return 0;
9761+}
9762+
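+/*
+ * Called by the fb layer before CPU access to the framebuffer; drain the
+ * 2D engine so software and accelerated drawing do not overlap.
+ */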
9763+int psbfb_sync(struct fb_info *info)
9764+{
9765+ struct psbfb_par *par = info->par;
9766+ struct drm_psb_private *dev_priv = par->dev->dev_private;
9767+
9768+ psb_2d_lock(dev_priv);
9769+ psb_idle_2d(par->dev);
9770+ psb_2d_unlock(dev_priv);
9771+
9772+ return 0;
9773+}
9774+
9775+static struct fb_ops psbfb_ops = {
9776+ .owner = THIS_MODULE,
9777+ .fb_check_var = psbfb_check_var,
9778+ .fb_set_par = psbfb_set_par,
9779+ .fb_setcolreg = psbfb_setcolreg,
9780+ .fb_fillrect = psbfb_fillrect,
9781+ .fb_copyarea = psbfb_copyarea,
9782+ .fb_imageblit = psbfb_imageblit,
9783+ .fb_mmap = psbfb_mmap,
9784+ .fb_sync = psbfb_sync,
9785+ .fb_blank = psbfb_blank,
9786+};
9787+
9788+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
9789+{
9790+ drm_framebuffer_cleanup(fb);
9791+ kfree(fb);
9792+}
9793+
9794+static const struct drm_framebuffer_funcs psb_fb_funcs = {
9795+ .destroy = psb_user_framebuffer_destroy,
9796+};
9797+
9798+int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
9799+{
9800+ struct fb_info *info;
9801+ struct psbfb_par *par;
9802+ struct device *device = &dev->pdev->dev;
9803+ struct drm_framebuffer *fb;
9804+ struct drm_display_mode *mode = crtc->desired_mode;
9805+ struct drm_psb_private *dev_priv =
9806+ (struct drm_psb_private *)dev->dev_private;
9807+ struct drm_buffer_object *fbo = NULL;
9808+ int ret;
9809+ int is_iomem;
9810+
9811+ if (drm_psb_no_fb) {
9812+ /* need to do this as the DRM will disable the output */
9813+ crtc->enabled = 1;
9814+ return 0;
9815+ }
9816+
9817+ fb = kzalloc(sizeof(struct drm_framebuffer), GFP_KERNEL);
9818+ if (!fb)
9819+ return -ENOMEM;
9820+
9821+	ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
9822+	if (ret) {
9823+		DRM_ERROR("failed to initialize framebuffer.\n");
9824+		kfree(fb);
9825+		return ret;
9826+	}
9827+ crtc->fb = fb;
9828+
9829+ fb->width = mode->hdisplay;
9830+ fb->height = mode->vdisplay;
9831+
9832+ fb->bits_per_pixel = 32;
9833+ fb->depth = 24;
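+	/* Bytes per scanline, rounded up to 64-byte alignment. */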
9834+ fb->pitch =
9835+ ((fb->width * ((fb->bits_per_pixel + 1) / 8)) + 0x3f) & ~0x3f;
9836+
9837+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
9838+ if (!info) {
9839+ kfree(fb);
9840+ return -ENOMEM;
9841+ }
9842+
9843+ ret = drm_buffer_object_create(dev,
9844+ fb->pitch * fb->height,
9845+ drm_bo_type_kernel,
9846+ DRM_BO_FLAG_READ |
9847+ DRM_BO_FLAG_WRITE |
9848+ DRM_BO_FLAG_MEM_TT |
9849+ DRM_BO_FLAG_MEM_VRAM |
9850+ DRM_BO_FLAG_NO_EVICT,
9851+ DRM_BO_HINT_DONT_FENCE, 0, 0, &fbo);
9852+ if (ret || !fbo) {
9853+ DRM_ERROR("failed to allocate framebuffer\n");
9854+ goto out_err0;
9855+ }
9856+
9857+ fb->offset = fbo->offset - dev_priv->pg->gatt_start;
9858+ fb->bo = fbo;
9859+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
9860+ fb->height, fb->offset, fbo);
9861+
9862+ fb->fbdev = info;
9863+
9864+ par = info->par;
9865+
9866+ par->dev = dev;
9867+ par->crtc = crtc;
9868+ par->vi = psbfb_vm_info_create();
9869+ if (!par->vi)
9870+ goto out_err1;
9871+
9872+ mutex_lock(&dev->struct_mutex);
9873+ par->vi->bo = fbo;
9874+ atomic_inc(&fbo->usage);
9875+ mutex_unlock(&dev->struct_mutex);
9876+
9877+ par->vi->f_mapping = NULL;
9878+ info->fbops = &psbfb_ops;
9879+
9880+ strcpy(info->fix.id, "psbfb");
9881+ info->fix.type = FB_TYPE_PACKED_PIXELS;
9882+ info->fix.visual = FB_VISUAL_DIRECTCOLOR;
9883+ info->fix.type_aux = 0;
9884+ info->fix.xpanstep = 1;
9885+ info->fix.ypanstep = 1;
9886+ info->fix.ywrapstep = 0;
9887+ info->fix.accel = FB_ACCEL_NONE; /* ??? */
9889+ info->fix.mmio_start = 0;
9890+ info->fix.mmio_len = 0;
9891+ info->fix.line_length = fb->pitch;
9892+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
9893+ info->fix.smem_len = info->fix.line_length * fb->height;
9894+
9895+ info->flags = FBINFO_DEFAULT |
9896+ FBINFO_PARTIAL_PAN_OK /*| FBINFO_MISC_ALWAYS_SETPAR */ ;
9897+
9898+ ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
9899+ if (ret) {
9900+ DRM_ERROR("error mapping fb: %d\n", ret);
9901+ goto out_err2;
9902+ }
9903+
9904+ info->screen_base = drm_bmo_virtual(&fb->kmap, &is_iomem);
9905+ memset(info->screen_base, 0x00, fb->pitch*fb->height);
9906+ info->screen_size = info->fix.smem_len; /* FIXME */
9907+ info->pseudo_palette = fb->pseudo_palette;
9908+ info->var.xres_virtual = fb->width;
9909+ info->var.yres_virtual = fb->height;
9910+ info->var.bits_per_pixel = fb->bits_per_pixel;
9911+ info->var.xoffset = 0;
9912+ info->var.yoffset = 0;
9913+ info->var.activate = FB_ACTIVATE_NOW;
9914+ info->var.height = -1;
9915+ info->var.width = -1;
9916+ info->var.vmode = FB_VMODE_NONINTERLACED;
9917+
9918+ info->var.xres = mode->hdisplay;
9919+ info->var.right_margin = mode->hsync_start - mode->hdisplay;
9920+ info->var.hsync_len = mode->hsync_end - mode->hsync_start;
9921+ info->var.left_margin = mode->htotal - mode->hsync_end;
9922+ info->var.yres = mode->vdisplay;
9923+ info->var.lower_margin = mode->vsync_start - mode->vdisplay;
9924+ info->var.vsync_len = mode->vsync_end - mode->vsync_start;
9925+ info->var.upper_margin = mode->vtotal - mode->vsync_end;
9926+ info->var.pixclock = 10000000 / mode->htotal * 1000 /
9927+ mode->vtotal * 100;
9928+ /* avoid overflow */
9929+ info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
9930+
9931+ info->pixmap.size = 64 * 1024;
9932+ info->pixmap.buf_align = 8;
9933+ info->pixmap.access_align = 32;
9934+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
9935+ info->pixmap.scan_align = 1;
9936+
9937+ DRM_DEBUG("fb depth is %d\n", fb->depth);
9938+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
9939+ switch (fb->depth) {
9940+ case 8:
9941+ info->var.red.offset = 0;
9942+ info->var.green.offset = 0;
9943+ info->var.blue.offset = 0;
9944+ info->var.red.length = 8; /* 8bit DAC */
9945+ info->var.green.length = 8;
9946+ info->var.blue.length = 8;
9947+ info->var.transp.offset = 0;
9948+ info->var.transp.length = 0;
9949+ break;
9950+ case 15:
9951+ info->var.red.offset = 10;
9952+ info->var.green.offset = 5;
9953+ info->var.blue.offset = 0;
9954+ info->var.red.length = info->var.green.length =
9955+ info->var.blue.length = 5;
9956+ info->var.transp.offset = 15;
9957+ info->var.transp.length = 1;
9958+ break;
9959+ case 16:
9960+ info->var.red.offset = 11;
9961+ info->var.green.offset = 5;
9962+ info->var.blue.offset = 0;
9963+ info->var.red.length = 5;
9964+ info->var.green.length = 6;
9965+ info->var.blue.length = 5;
9966+ info->var.transp.offset = 0;
9967+ break;
9968+ case 24:
9969+ info->var.red.offset = 16;
9970+ info->var.green.offset = 8;
9971+ info->var.blue.offset = 0;
9972+ info->var.red.length = info->var.green.length =
9973+ info->var.blue.length = 8;
9974+ info->var.transp.offset = 0;
9975+ info->var.transp.length = 0;
9976+ break;
9977+ case 32:
9978+ info->var.red.offset = 16;
9979+ info->var.green.offset = 8;
9980+ info->var.blue.offset = 0;
9981+ info->var.red.length = info->var.green.length =
9982+ info->var.blue.length = 8;
9983+ info->var.transp.offset = 24;
9984+ info->var.transp.length = 8;
9985+ break;
9986+ default:
9987+ break;
9988+ }
9989+
9990+ if (register_framebuffer(info) < 0)
9991+ goto out_err3;
9992+
9993+ if (psbfb_check_var(&info->var, info) < 0)
9994+ goto out_err4;
9995+
9996+ psbfb_set_par(info);
9997+
9998+ DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id);
9999+
10000+ return 0;
10001+ out_err4:
10002+ unregister_framebuffer(info);
10003+ out_err3:
10004+ drm_bo_kunmap(&fb->kmap);
10005+ out_err2:
10006+ psbfb_vm_info_deref(&par->vi);
10007+ out_err1:
10008+ drm_bo_usage_deref_unlocked(&fb->bo);
10009+ out_err0:
10010+ drm_framebuffer_cleanup(fb);
10011+ framebuffer_release(info);
10012+ crtc->fb = NULL;
10013+ return -EINVAL;
10014+}
10015+
10016+EXPORT_SYMBOL(psbfb_probe);
10017+
10018+int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
10019+{
10020+ struct drm_framebuffer *fb;
10021+ struct fb_info *info;
10022+ struct psbfb_par *par;
10023+
10024+ if (drm_psb_no_fb)
10025+ return 0;
10026+
10027+ fb = crtc->fb;
10028+ info = fb->fbdev;
10029+
10030+ if (info) {
10031+ unregister_framebuffer(info);
10032+ drm_bo_kunmap(&fb->kmap);
10033+ par = info->par;
10034+ if (par)
10035+ psbfb_vm_info_deref(&par->vi);
10036+ drm_bo_usage_deref_unlocked(&fb->bo);
10037+ drm_framebuffer_cleanup(fb);
10038+ framebuffer_release(info);
10039+ }
10040+ return 0;
10041+}
10042+
10043+EXPORT_SYMBOL(psbfb_remove);
10044+
10045Index: linux-2.6.28/drivers/gpu/drm/psb/psb_fence.c
10046===================================================================
10047--- /dev/null 1970-01-01 00:00:00.000000000 +0000
10048+++ linux-2.6.28/drivers/gpu/drm/psb/psb_fence.c 2009-02-25 15:37:02.000000000 +0000
10049@@ -0,0 +1,285 @@
10050+/**************************************************************************
10051+ * Copyright (c) 2007, Intel Corporation.
10052+ * All Rights Reserved.
10053+ *
10054+ * This program is free software; you can redistribute it and/or modify it
10055+ * under the terms and conditions of the GNU General Public License,
10056+ * version 2, as published by the Free Software Foundation.
10057+ *
10058+ * This program is distributed in the hope it will be useful, but WITHOUT
10059+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10060+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10061+ * more details.
10062+ *
10063+ * You should have received a copy of the GNU General Public License along with
10064+ * this program; if not, write to the Free Software Foundation, Inc.,
10065+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
10066+ *
10067+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
10068+ * develop this driver.
10069+ *
10070+ **************************************************************************/
10071+/*
10072+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
10073+ */
10074+
10075+#include "drmP.h"
10076+#include "psb_drv.h"
10077+
10078+static void psb_poll_ta(struct drm_device *dev, uint32_t waiting_types)
10079+{
10080+ struct drm_psb_private *dev_priv =
10081+ (struct drm_psb_private *)dev->dev_private;
10082+ struct drm_fence_driver *driver = dev->driver->fence_driver;
10083+ uint32_t cur_flag = 1;
10084+ uint32_t flags = 0;
10085+ uint32_t sequence = 0;
10086+ uint32_t remaining = 0xFFFFFFFF;
10087+ uint32_t diff;
10088+
10089+ struct psb_scheduler *scheduler;
10090+ struct psb_scheduler_seq *seq;
10091+ struct drm_fence_class_manager *fc =
10092+ &dev->fm.fence_class[PSB_ENGINE_TA];
10093+
10094+ if (unlikely(!dev_priv))
10095+ return;
10096+
10097+ scheduler = &dev_priv->scheduler;
10098+ seq = scheduler->seq;
10099+
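+	/*
+	 * Walk the per-type scheduler sequence slots, batching type flags
+	 * that share a sequence number so drm_fence_handler() is called
+	 * once per sequence rather than once per flag.
+	 */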
10100+ while (likely(waiting_types & remaining)) {
10101+ if (!(waiting_types & cur_flag))
10102+ goto skip;
10103+ if (seq->reported)
10104+ goto skip;
10105+ if (flags == 0)
10106+ sequence = seq->sequence;
10107+ else if (sequence != seq->sequence) {
10108+ drm_fence_handler(dev, PSB_ENGINE_TA,
10109+ sequence, flags, 0);
10110+ sequence = seq->sequence;
10111+ flags = 0;
10112+ }
10113+ flags |= cur_flag;
10114+
10115+ /*
10116+ * Sequence may not have ended up on the ring yet.
10117+ * In that case, report it but don't mark it as
10118+ * reported. A subsequent poll will report it again.
10119+ */
10120+
10121+ diff = (fc->latest_queued_sequence - sequence) &
10122+ driver->sequence_mask;
10123+ if (diff < driver->wrap_diff)
10124+ seq->reported = 1;
10125+
10126+ skip:
10127+ cur_flag <<= 1;
10128+ remaining <<= 1;
10129+ seq++;
10130+ }
10131+
10132+ if (flags) {
10133+ drm_fence_handler(dev, PSB_ENGINE_TA, sequence, flags, 0);
10134+ }
10135+}
10136+
10137+static void psb_poll_other(struct drm_device *dev, uint32_t fence_class,
10138+ uint32_t waiting_types)
10139+{
10140+ struct drm_psb_private *dev_priv =
10141+ (struct drm_psb_private *)dev->dev_private;
10142+ struct drm_fence_manager *fm = &dev->fm;
10143+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
10144+ uint32_t sequence;
10145+
10146+ if (unlikely(!dev_priv))
10147+ return;
10148+
10149+ if (waiting_types) {
10150+ if (fence_class == PSB_ENGINE_VIDEO)
10151+ sequence = dev_priv->msvdx_current_sequence;
10152+ else
10153+ sequence = dev_priv->comm[fence_class << 4];
10154+
10155+ drm_fence_handler(dev, fence_class, sequence,
10156+ DRM_FENCE_TYPE_EXE, 0);
10157+
10158+ switch (fence_class) {
10159+ case PSB_ENGINE_2D:
10160+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
10161+ psb_2D_irq_off(dev_priv);
10162+ dev_priv->fence0_irq_on = 0;
10163+ } else if (!dev_priv->fence0_irq_on
10164+ && fc->waiting_types) {
10165+ psb_2D_irq_on(dev_priv);
10166+ dev_priv->fence0_irq_on = 1;
10167+ }
10168+ break;
10169+#if 0
10170+ /*
10171+ * FIXME: MSVDX irq switching
10172+ */
10173+
10174+ case PSB_ENGINE_VIDEO:
10175+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
10176+ psb_msvdx_irq_off(dev_priv);
10177+ dev_priv->fence2_irq_on = 0;
10178+ } else if (!dev_priv->fence2_irq_on
10179+ && fc->pending_exe_flush) {
10180+ psb_msvdx_irq_on(dev_priv);
10181+ dev_priv->fence2_irq_on = 1;
10182+ }
10183+ break;
10184+#endif
10185+ default:
10186+ return;
10187+ }
10188+ }
10189+}
10190+
10191+static void psb_fence_poll(struct drm_device *dev,
10192+ uint32_t fence_class, uint32_t waiting_types)
10193+{
10194+ switch (fence_class) {
10195+ case PSB_ENGINE_TA:
10196+ psb_poll_ta(dev, waiting_types);
10197+ break;
10198+ default:
10199+ psb_poll_other(dev, fence_class, waiting_types);
10200+ break;
10201+ }
10202+}
10203+
10204+void psb_fence_error(struct drm_device *dev,
10205+ uint32_t fence_class,
10206+ uint32_t sequence, uint32_t type, int error)
10207+{
10208+ struct drm_fence_manager *fm = &dev->fm;
10209+ unsigned long irq_flags;
10210+
10211+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
10212+ write_lock_irqsave(&fm->lock, irq_flags);
10213+ drm_fence_handler(dev, fence_class, sequence, type, error);
10214+ write_unlock_irqrestore(&fm->lock, irq_flags);
10215+}
10216+
10217+int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
10218+ uint32_t flags, uint32_t * sequence,
10219+ uint32_t * native_type)
10220+{
10221+ struct drm_psb_private *dev_priv =
10222+ (struct drm_psb_private *)dev->dev_private;
10223+ uint32_t seq = 0;
10224+ int ret;
10225+
10226+ if (!dev_priv)
10227+ return -EINVAL;
10228+
10229+ if (fence_class >= PSB_NUM_ENGINES)
10230+ return -EINVAL;
10231+
10232+ switch (fence_class) {
10233+ case PSB_ENGINE_2D:
10234+ spin_lock(&dev_priv->sequence_lock);
10235+ seq = ++dev_priv->sequence[fence_class];
10236+ spin_unlock(&dev_priv->sequence_lock);
10237+ ret = psb_blit_sequence(dev_priv, seq);
10238+ if (ret)
10239+ return ret;
10240+ break;
10241+ case PSB_ENGINE_VIDEO:
10242+ spin_lock(&dev_priv->sequence_lock);
10243+ seq = ++dev_priv->sequence[fence_class];
10244+ spin_unlock(&dev_priv->sequence_lock);
10245+ break;
10246+ default:
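+		/*
+		 * The remaining engines (e.g. the TA) have no emit mechanism
+		 * here; simply report the current sequence number.
+		 */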
10247+ spin_lock(&dev_priv->sequence_lock);
10248+ seq = dev_priv->sequence[fence_class];
10249+ spin_unlock(&dev_priv->sequence_lock);
10250+ }
10251+
10252+ *sequence = seq;
10253+ *native_type = DRM_FENCE_TYPE_EXE;
10254+
10255+ return 0;
10256+}
10257+
10258+uint32_t psb_fence_advance_sequence(struct drm_device * dev,
10259+ uint32_t fence_class)
10260+{
10261+ struct drm_psb_private *dev_priv =
10262+ (struct drm_psb_private *)dev->dev_private;
10263+ uint32_t sequence;
10264+
10265+ spin_lock(&dev_priv->sequence_lock);
10266+ sequence = ++dev_priv->sequence[fence_class];
10267+ spin_unlock(&dev_priv->sequence_lock);
10268+
10269+ return sequence;
10270+}
10271+
10272+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
10273+{
10274+ struct drm_fence_manager *fm = &dev->fm;
10275+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
10276+
10277+#ifdef FIX_TG_16
10278+ if (fence_class == 0) {
10279+ struct drm_psb_private *dev_priv =
10280+ (struct drm_psb_private *)dev->dev_private;
10281+
10282+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
10283+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
10284+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
10285+ _PSB_C2B_STATUS_BUSY) == 0))
10286+ psb_resume_ta_2d_idle(dev_priv);
10287+ }
10288+#endif
10289+ write_lock(&fm->lock);
10290+ psb_fence_poll(dev, fence_class, fc->waiting_types);
10291+ write_unlock(&fm->lock);
10292+}
10293+
10294+static int psb_fence_wait(struct drm_fence_object *fence,
10295+ int lazy, int interruptible, uint32_t mask)
10296+{
10297+ struct drm_device *dev = fence->dev;
10298+ struct drm_fence_class_manager *fc =
10299+ &dev->fm.fence_class[fence->fence_class];
10300+ int ret = 0;
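+	/* TA jobs may legitimately run far longer than 2D or video jobs. */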
10301+ unsigned long timeout = DRM_HZ *
10302+ ((fence->fence_class == PSB_ENGINE_TA) ? 30 : 3);
10303+
10304+ drm_fence_object_flush(fence, mask);
10305+ if (interruptible)
10306+ ret = wait_event_interruptible_timeout
10307+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
10308+ timeout);
10309+ else
10310+ ret = wait_event_timeout
10311+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
10312+ timeout);
10313+
10314+ if (unlikely(ret == -ERESTARTSYS))
10315+ return -EAGAIN;
10316+
10317+ if (unlikely(ret == 0))
10318+ return -EBUSY;
10319+
10320+ return 0;
10321+}
10322+
10323+struct drm_fence_driver psb_fence_driver = {
10324+ .num_classes = PSB_NUM_ENGINES,
10325+ .wrap_diff = (1 << 30),
10326+ .flush_diff = (1 << 29),
10327+ .sequence_mask = 0xFFFFFFFFU,
10328+ .has_irq = NULL,
10329+ .emit = psb_fence_emit_sequence,
10330+ .flush = NULL,
10331+ .poll = psb_fence_poll,
10332+ .needed_flush = NULL,
10333+ .wait = psb_fence_wait
10334+};
10335Index: linux-2.6.28/drivers/gpu/drm/psb/psb_gtt.c
10336===================================================================
10337--- /dev/null 1970-01-01 00:00:00.000000000 +0000
10338+++ linux-2.6.28/drivers/gpu/drm/psb/psb_gtt.c 2009-02-25 15:37:02.000000000 +0000
10339@@ -0,0 +1,253 @@
10340+/**************************************************************************
10341+ * Copyright (c) 2007, Intel Corporation.
10342+ * All Rights Reserved.
10343+ *
10344+ * This program is free software; you can redistribute it and/or modify it
10345+ * under the terms and conditions of the GNU General Public License,
10346+ * version 2, as published by the Free Software Foundation.
10347+ *
10348+ * This program is distributed in the hope it will be useful, but WITHOUT
10349+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10350+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10351+ * more details.
10352+ *
10353+ * You should have received a copy of the GNU General Public License along with
10354+ * this program; if not, write to the Free Software Foundation, Inc.,
10355+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
10356+ *
10357+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
10358+ * develop this driver.
10359+ *
10360+ **************************************************************************/
10361+/*
10362+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
10363+ */
10364+#include "drmP.h"
10365+#include "psb_drv.h"
10366+
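+/*
+ * Build a GTT entry: page frame number in the upper bits, valid/cached/
+ * read-only/write-only attribute bits in the low bits.
+ */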
10367+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
10368+{
10369+ uint32_t mask = PSB_PTE_VALID;
10370+
10371+ if (type & PSB_MMU_CACHED_MEMORY)
10372+ mask |= PSB_PTE_CACHED;
10373+ if (type & PSB_MMU_RO_MEMORY)
10374+ mask |= PSB_PTE_RO;
10375+ if (type & PSB_MMU_WO_MEMORY)
10376+ mask |= PSB_PTE_WO;
10377+
10378+ return (pfn << PAGE_SHIFT) | mask;
10379+}
10380+
10381+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
10382+{
10383+ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
10384+
10385+ if (!tmp)
10386+ return NULL;
10387+
10388+ init_rwsem(&tmp->sem);
10389+ tmp->dev = dev;
10390+
10391+ return tmp;
10392+}
10393+
10394+void psb_gtt_takedown(struct psb_gtt *pg, int free)
10395+{
10396+	struct drm_psb_private *dev_priv;
10397+
10398+	if (!pg)
10399+		return;
10400+	dev_priv = pg->dev->dev_private;
+
10401+ if (pg->gtt_map) {
10402+ iounmap(pg->gtt_map);
10403+ pg->gtt_map = NULL;
10404+ }
10405+ if (pg->initialized) {
10406+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
10407+ pg->gmch_ctrl);
10408+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
10409+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
10410+ }
10411+ if (free)
10412+ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
10413+}
10414+
10415+int psb_gtt_init(struct psb_gtt *pg, int resume)
10416+{
10417+ struct drm_device *dev = pg->dev;
10418+ struct drm_psb_private *dev_priv = dev->dev_private;
10419+ unsigned gtt_pages;
10420+ unsigned long stolen_size;
10421+ unsigned i, num_pages;
10422+ unsigned pfn_base;
10423+
10424+ int ret = 0;
10425+ uint32_t pte;
10426+
10429+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
10430+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
10431+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
10432+
10435+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
10436+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
10437+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
10438+
10441+ pg->initialized = 1;
10442+
10443+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
10444+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
10445+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
10446+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
10447+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
10448+ >> PAGE_SHIFT;
10449+
10451+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
10452+ stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
10453+
10456+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
10457+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
10458+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
10459+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
10460+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
10461+
10462+	if (resume && ((gtt_pages != pg->gtt_pages) ||
10463+		       (stolen_size != pg->stolen_size))) {
10464+ DRM_ERROR("GTT resume error.\n");
10465+ ret = -EINVAL;
10466+ goto out_err;
10467+ }
10468+
10471+ pg->gtt_pages = gtt_pages;
10472+ pg->stolen_size = stolen_size;
10473+ pg->gtt_map =
10474+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
10475+ if (!pg->gtt_map) {
10476+ DRM_ERROR("Failure to map gtt.\n");
10477+ ret = -ENOMEM;
10478+ goto out_err;
10479+ }
10480+
10483+ /*
10484+ * insert stolen pages.
10485+ */
10486+
10487+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
10488+ num_pages = stolen_size >> PAGE_SHIFT;
10489+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
10490+ num_pages, pfn_base);
10491+ for (i = 0; i < num_pages; ++i) {
10492+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
10493+ iowrite32(pte, pg->gtt_map + i);
10494+ }
10495+
10498+	/*
10499+	 * Point remaining GTT entries at a scratch page to absorb strays.
10500+	 */
10501+
10502+ pfn_base = page_to_pfn(dev_priv->scratch_page);
10503+ pte = psb_gtt_mask_pte(pfn_base, 0);
10504+ PSB_DEBUG_INIT("Initializing the rest of a total "
10505+ "of %d gtt pages.\n", pg->gatt_pages);
10506+
10509+ for (; i < pg->gatt_pages; ++i)
10510+ iowrite32(pte, pg->gtt_map + i);
10511+ (void)ioread32(pg->gtt_map + i - 1);
10512+
10515+ return 0;
10516+
10517+ out_err:
10518+ psb_gtt_takedown(pg, 0);
10519+ return ret;
10520+}
10521+
10522+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
10523+ unsigned offset_pages, unsigned num_pages,
10524+ unsigned desired_tile_stride, unsigned hw_tile_stride,
10525+ int type)
10526+{
10527+ unsigned rows = 1;
10528+ unsigned add;
10529+ unsigned row_add;
10530+ unsigned i;
10531+ unsigned j;
10532+ uint32_t *cur_page = NULL;
10533+ uint32_t pte;
10534+
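+	/*
+	 * Tiled buffers are written one row at a time, each row covering
+	 * desired_tile_stride pages; untiled buffers are a single row.
+	 */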
10535+ if (hw_tile_stride)
10536+ rows = num_pages / desired_tile_stride;
10537+ else
10538+ desired_tile_stride = num_pages;
10539+
10540+ add = desired_tile_stride;
10541+ row_add = hw_tile_stride;
10542+
10543+ down_read(&pg->sem);
10544+ for (i = 0; i < rows; ++i) {
10545+ cur_page = pg->gtt_map + offset_pages;
10546+ for (j = 0; j < desired_tile_stride; ++j) {
10547+ pte = psb_gtt_mask_pte(page_to_pfn(*pages++), type);
10548+ iowrite32(pte, cur_page++);
10549+ }
10550+ offset_pages += add;
10551+ }
10552+ (void)ioread32(cur_page - 1);
10553+ up_read(&pg->sem);
10554+
10555+ return 0;
10556+}
10557+
10558+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
10559+ unsigned num_pages, unsigned desired_tile_stride,
10560+ unsigned hw_tile_stride)
10561+{
10562+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
10563+ unsigned rows = 1;
10564+ unsigned add;
10565+ unsigned row_add;
10566+ unsigned i;
10567+ unsigned j;
10568+ uint32_t *cur_page = NULL;
10569+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
10570+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
10571+
10572+ if (hw_tile_stride)
10573+ rows = num_pages / desired_tile_stride;
10574+ else
10575+ desired_tile_stride = num_pages;
10576+
10577+ add = desired_tile_stride;
10578+ row_add = hw_tile_stride;
10579+
10580+ down_read(&pg->sem);
10581+ for (i = 0; i < rows; ++i) {
10582+ cur_page = pg->gtt_map + offset_pages;
10583+ for (j = 0; j < desired_tile_stride; ++j) {
10584+ iowrite32(pte, cur_page++);
10585+ }
10586+ offset_pages += add;
10587+ }
10588+ (void)ioread32(cur_page - 1);
10589+ up_read(&pg->sem);
10590+
10591+ return 0;
10592+}
10593Index: linux-2.6.28/drivers/gpu/drm/psb/psb_irq.c
10594===================================================================
10595--- /dev/null 1970-01-01 00:00:00.000000000 +0000
10596+++ linux-2.6.28/drivers/gpu/drm/psb/psb_irq.c 2009-02-25 15:37:02.000000000 +0000
10597@@ -0,0 +1,519 @@
10598+/**************************************************************************
10599+ * Copyright (c) 2007, Intel Corporation.
10600+ * All Rights Reserved.
10601+ *
10602+ * This program is free software; you can redistribute it and/or modify it
10603+ * under the terms and conditions of the GNU General Public License,
10604+ * version 2, as published by the Free Software Foundation.
10605+ *
10606+ * This program is distributed in the hope it will be useful, but WITHOUT
10607+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10608+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10609+ * more details.
10610+ *
10611+ * You should have received a copy of the GNU General Public License along with
10612+ * this program; if not, write to the Free Software Foundation, Inc.,
10613+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
10614+ *
10615+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
10616+ * develop this driver.
10617+ *
10618+ **************************************************************************/
10619+/*
10620+ */
10621+
10622+#include "drmP.h"
10623+#include "psb_drv.h"
10624+#include "psb_reg.h"
10625+#include "psb_msvdx.h"
10626+#include "../i915/i915_reg.h"
10627+
10628+/*
10629+ * Video display controller interrupt.
10630+ */
10631+
10632+static inline u32
10633+psb_pipestat(int pipe)
10634+{
10635+ if (pipe == 0)
10636+ return PIPEASTAT;
10637+ if (pipe == 1)
10638+ return PIPEBSTAT;
10639+ BUG();
10640+}
10641+
10642+void
10643+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
10644+{
10647+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
10648+ u32 reg = psb_pipestat(pipe);
10649+
10650+ dev_priv->pipestat[pipe] |= mask;
10651+ /* Enable the interrupt, clear any pending status */
10652+ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
10653+ (void) I915_READ(reg);
10654+ }
10655+}
10656+
10657+void
10658+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
10659+{
10662+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
10663+ u32 reg = psb_pipestat(pipe);
10664+
10665+ dev_priv->pipestat[pipe] &= ~mask;
10666+ I915_WRITE(reg, dev_priv->pipestat[pipe]);
10667+ (void) I915_READ(reg);
10668+ }
10669+}
10670+
10671+
10672+/**
10673+ * i915_pipe_enabled - check if a pipe is enabled
10674+ * @dev: DRM device
10675+ * @pipe: pipe to check
10676+ *
10677+ * Reading certain registers when the pipe is disabled can hang the chip.
10678+ * Use this routine to make sure the PLL is running and the pipe is active
10679+ * before reading such registers if unsure.
10680+ */
10681+static int
10682+i915_pipe_enabled(struct drm_device *dev, int pipe)
10683+{
10684+ struct drm_psb_private *dev_priv = dev->dev_private;
10685+ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
10686+
10687+ if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
10688+ return 1;
10689+
10690+ return 0;
10691+}
10692+
10693+/* Called from drm generic code, passed a 'crtc', which
10694+ * we use as a pipe index
10695+ */
10696+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
10697+{
10698+ struct drm_psb_private *dev_priv = dev->dev_private;
10699+ unsigned long high_frame;
10700+ unsigned long low_frame;
10701+ u32 high1, high2, low, count;
10702+
10703+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
10704+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
10705+
10706+ if (!i915_pipe_enabled(dev, pipe)) {
10707+ DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
10708+ return 0;
10709+ }
10710+
10711+ /*
10712+ * High & low register fields aren't synchronized, so make sure
10713+ * we get a low value that's stable across two reads of the high
10714+ * register.
10715+ */
10716+ do {
10717+ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
10718+ PIPE_FRAME_HIGH_SHIFT);
10719+ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
10720+ PIPE_FRAME_LOW_SHIFT);
10721+ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
10722+ PIPE_FRAME_HIGH_SHIFT);
10723+ } while (high1 != high2);
10724+
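+	/* Combine the stable high bits with the 8 low frame-count bits. */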
10725+ count = (high1 << 8) | low;
10726+
10727+ return count;
10728+}
10729+
10730+/* Called from drm generic code, passed 'crtc' which
10731+ * we use as a pipe index
10732+ */
10733+int psb_enable_vblank(struct drm_device *dev, int pipe)
10734+{
10735+ struct drm_psb_private *dev_priv = dev->dev_private;
10736+ unsigned long irqflags;
10737+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
10738+ u32 pipeconf;
10739+
10740+ pipeconf = I915_READ(pipeconf_reg);
10741+ if (!(pipeconf & PIPEACONF_ENABLE))
10742+ return -EINVAL;
10743+
10744+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
10745+ psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
10746+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
10747+ return 0;
10748+}
10749+
10750+/* Called from drm generic code, passed 'crtc' which
10751+ * we use as a pipe index
10752+ */
10753+void psb_disable_vblank(struct drm_device *dev, int pipe)
10754+{
10755+ struct drm_psb_private *dev_priv = dev->dev_private;
10756+ unsigned long irqflags;
10757+
10758+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
10759+ psb_disable_pipestat(dev_priv, pipe,
10760+ PIPE_VBLANK_INTERRUPT_ENABLE |
10761+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
10762+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
10763+}
10764+
10765+
10766+
10767+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
10768+{
10769+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t pipe_stats;
10770+ int wake = 0;
10771+
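+	/*
+	 * For each pipe that signalled vsync: acknowledge the status bits,
+	 * bump its vblank counter, and wake waiters once all are acked.
+	 */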
10772+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
10773+ pipe_stats = PSB_RVDC32(PSB_PIPEASTAT);
10774+ atomic_inc(&dev->_vblank_count[0]);
10775+ wake = 1;
10776+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
10777+ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
10778+ }
10779+
10780+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
10781+ pipe_stats = PSB_RVDC32(PSB_PIPEBSTAT);
10782+ atomic_inc(&dev->_vblank_count[1]);
10783+ wake = 1;
10784+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
10785+ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
10786+ }
10787+
10788+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
10789+ (void)PSB_RVDC32(PSB_INT_IDENTITY_R);
10790+ DRM_READMEMORYBARRIER();
10791+
10792+ if (wake) {
10793+ int i;
10794+ DRM_WAKEUP(dev->vbl_queue);
10795+
10796+ for (i = 0; i < 2; i++)
10797+ drm_vbl_send_signals(dev, i);
10798+ }
10799+}
10800+
10801+/*
10802+ * SGX interrupt source 1.
10803+ */
10804+
10805+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
10806+ uint32_t sgx_stat2)
10807+{
10808+ struct drm_psb_private *dev_priv = dev->dev_private;
10809+
10810+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
10811+ DRM_WAKEUP(&dev_priv->event_2d_queue);
10812+ psb_fence_handler(dev, 0);
10813+ }
10814+
10815+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
10816+ psb_print_pagefault(dev_priv);
10817+
10818+ psb_scheduler_handler(dev_priv, sgx_stat);
10819+}
10820+
10821+/*
10822+ * MSVDX interrupt.
10823+ */
10824+static void psb_msvdx_interrupt(struct drm_device *dev, uint32_t msvdx_stat)
10825+{
10826+ struct drm_psb_private *dev_priv =
10827+ (struct drm_psb_private *)dev->dev_private;
10828+
10829+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
10830+		/* Ideally we should never get here */
10831+ PSB_DEBUG_GENERAL
10832+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MMU FAULT)\n",
10833+ msvdx_stat, dev_priv->fence2_irq_on);
10834+
10835+ /* Pause MMU */
10836+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
10837+ MSVDX_MMU_CONTROL0);
10838+ DRM_WRITEMEMORYBARRIER();
10839+
10840+		/* Clear this interrupt bit only */
10841+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
10842+ MSVDX_INTERRUPT_CLEAR);
10843+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
10844+ DRM_READMEMORYBARRIER();
10845+
10846+ dev_priv->msvdx_needs_reset = 1;
10847+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
10848+ PSB_DEBUG_GENERAL
10849+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MTX)\n",
10850+ msvdx_stat, dev_priv->fence2_irq_on);
10851+
10852+		/* Clear all interrupt bits */
10853+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
10854+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
10855+ DRM_READMEMORYBARRIER();
10856+
10857+ psb_msvdx_mtx_interrupt(dev);
10858+ }
10859+}
10860+
10861+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
10862+{
10863+ struct drm_device *dev = (struct drm_device *)arg;
10864+ struct drm_psb_private *dev_priv =
10865+ (struct drm_psb_private *)dev->dev_private;
10866+
10867+ uint32_t vdc_stat;
10868+ uint32_t sgx_stat;
10869+ uint32_t sgx_stat2;
10870+ uint32_t msvdx_stat;
10871+ int handled = 0;
10872+
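+	/*
+	 * Snapshot and acknowledge all interrupt sources under the mask
+	 * lock, then dispatch to the handlers with the lock dropped.
+	 */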
10873+ spin_lock(&dev_priv->irqmask_lock);
10874+
10875+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
10876+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
10877+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
10878+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
10879+
10880+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
10881+ sgx_stat &= dev_priv->sgx_irq_mask;
10882+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
10883+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
10884+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
10885+
10886+ vdc_stat &= dev_priv->vdc_irq_mask;
10887+ spin_unlock(&dev_priv->irqmask_lock);
10888+
10889+ if (msvdx_stat) {
10890+ psb_msvdx_interrupt(dev, msvdx_stat);
10891+ handled = 1;
10892+ }
10893+
10894+ if (vdc_stat) {
10895+ /* MSVDX IRQ status is part of vdc_irq_mask */
10896+ psb_vdc_interrupt(dev, vdc_stat);
10897+ handled = 1;
10898+ }
10899+
10900+ if (sgx_stat || sgx_stat2) {
10901+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
10902+ handled = 1;
10903+ }
10904+
10905+ if (!handled) {
10906+ return IRQ_NONE;
10907+ }
10908+
10909+ return IRQ_HANDLED;
10910+}
10911+
10912+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
10913+{
10914+ unsigned long mtx_int = 0;
10915+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
10916+
10917+	/* Clear MTX interrupt */
10918+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
10919+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
10920+}
10921+
10922+void psb_irq_preinstall(struct drm_device *dev)
10923+{
10924+ struct drm_psb_private *dev_priv =
10925+ (struct drm_psb_private *)dev->dev_private;
10926+ spin_lock(&dev_priv->irqmask_lock);
10927+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
10928+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
10929+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
10930+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
10931+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
10932+
10933+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
10934+ _PSB_CE_DPM_3D_MEM_FREE |
10935+ _PSB_CE_TA_FINISHED |
10936+ _PSB_CE_DPM_REACHED_MEM_THRESH |
10937+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
10938+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
10939+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
10940+
10941+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
10942+
10943+ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG;
10944+
10945+ if (!drm_psb_disable_vsync)
10946+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
10947+ _PSB_VSYNC_PIPEB_FLAG;
10948+
10949+	/* Clear MTX interrupt */
10950+ {
10951+ unsigned long mtx_int = 0;
10952+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
10953+ CR_MTX_IRQ, 1);
10954+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
10955+ }
10956+ spin_unlock(&dev_priv->irqmask_lock);
10957+}
10958+
10959+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
10960+{
10961+	/* Enable MTX interrupt to host */
10962+ unsigned long enables = 0;
10963+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
10964+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
10965+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
10966+}
10967+
10968+int psb_irq_postinstall(struct drm_device *dev)
10969+{
10970+ struct drm_psb_private *dev_priv =
10971+ (struct drm_psb_private *)dev->dev_private;
10972+ unsigned long irqflags;
10973+
10974+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
10975+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
10976+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
10977+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
10978+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
10979+ /****MSVDX IRQ Setup...*****/
10980+		/* Enable MTX interrupt to host */
10981+ {
10982+ unsigned long enables = 0;
10983+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
10984+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
10985+ CR_MTX_IRQ, 1);
10986+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
10987+ }
10988+ dev_priv->irq_enabled = 1;
10989+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
10990+ return 0;
10991+}
10992+
10993+void psb_irq_uninstall(struct drm_device *dev)
10994+{
10995+ struct drm_psb_private *dev_priv =
10996+ (struct drm_psb_private *)dev->dev_private;
10997+ unsigned long irqflags;
10998+
10999+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
11000+
11001+ dev_priv->sgx_irq_mask = 0x00000000;
11002+ dev_priv->sgx2_irq_mask = 0x00000000;
11003+ dev_priv->vdc_irq_mask = 0x00000000;
11004+
11005+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
11006+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
11007+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
11008+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
11009+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
11010+ wmb();
11011+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
11012+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), PSB_CR_EVENT_HOST_CLEAR);
11013+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), PSB_CR_EVENT_HOST_CLEAR2);
11014+
11015+ /****MSVDX IRQ Setup...*****/
11016+ /* Clear interrupt enabled flag */
11017+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
11018+
11019+ dev_priv->irq_enabled = 0;
11020+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
11021+
11022+}
11023+
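+/*
+ * The 2D-complete interrupt enable is reference counted; the final
+ * psb_2D_irq_off() disables the source and clears anything left pending.
+ */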
11024+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
11025+{
11026+ unsigned long irqflags;
11027+ uint32_t old_mask;
11028+ uint32_t cleared_mask;
11029+
11030+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
11031+ --dev_priv->irqen_count_2d;
11032+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
11033+
11034+ old_mask = dev_priv->sgx_irq_mask;
11035+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
11036+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
11037+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
11038+
11039+ cleared_mask = (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
11040+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
11041+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
11042+ }
11043+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
11044+}
11045+
11046+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
11047+{
11048+ unsigned long irqflags;
11049+
11050+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
11051+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
11052+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
11053+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
11054+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
11055+ }
11056+ ++dev_priv->irqen_count_2d;
11057+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
11058+}
11059+#if 0
11060+static int psb_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
11061+ atomic_t * counter, int crtc)
11062+{
11063+ unsigned int cur_vblank;
11064+ int ret = 0;
11065+
11066+ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
11067+ (((cur_vblank = atomic_read(counter))
11068+ - *sequence) <= (1 << 23)));
11069+
11070+ *sequence = cur_vblank;
11071+
11072+ return ret;
11073+}
11074+
11075+int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence)
11076+{
11077+ int ret;
11078+
11079+ ret = psb_vblank_do_wait(dev, sequence, &dev->_vblank_count[0], 0);
11080+ return ret;
11081+}
11082+
11083+int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
11084+{
11085+ int ret;
11086+
11087+ ret = psb_vblank_do_wait(dev, sequence, &dev->_vblank_count[1], 1);
11088+ return ret;
11089+}
11090+#endif
11091+
11092+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
11093+{
11094+ unsigned long irqflags;
11095+
11096+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
11097+ if (dev_priv->irq_enabled) {
11098+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
11099+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
11100+ (void)PSB_RSGX32(PSB_INT_ENABLE_R);
11101+ }
11102+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
11103+}
11104+
11105+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
11106+{
11107+ unsigned long irqflags;
11108+
11109+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
11110+ if (dev_priv->irq_enabled) {
11111+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
11112+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
11113+ (void)PSB_RSGX32(PSB_INT_ENABLE_R);
11114+ }
11115+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
11116+}
11117Index: linux-2.6.28/drivers/gpu/drm/psb/psb_mmu.c
11118===================================================================
11119--- /dev/null 1970-01-01 00:00:00.000000000 +0000
11120+++ linux-2.6.28/drivers/gpu/drm/psb/psb_mmu.c 2009-02-25 15:37:02.000000000 +0000
11121@@ -0,0 +1,1037 @@
11122+/**************************************************************************
11123+ * Copyright (c) 2007, Intel Corporation.
11124+ * All Rights Reserved.
11125+ *
11126+ * This program is free software; you can redistribute it and/or modify it
11127+ * under the terms and conditions of the GNU General Public License,
11128+ * version 2, as published by the Free Software Foundation.
11129+ *
11130+ * This program is distributed in the hope it will be useful, but WITHOUT
11131+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11132+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11133+ * more details.
11134+ *
11135+ * You should have received a copy of the GNU General Public License along with
11136+ * this program; if not, write to the Free Software Foundation, Inc.,
11137+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
11138+ *
11139+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
11140+ * develop this driver.
11141+ *
11142+ **************************************************************************/
11143+#include "drmP.h"
11144+#include "psb_drv.h"
11145+#include "psb_reg.h"
11146+
11147+/*
11148+ * Code for the SGX MMU:
11149+ */
11150+
11151+/*
11152+ * clflush on one processor only:
11153+ * clflush should apparently flush the cache line on all processors in an
11154+ * SMP system.
11155+ */
11156+
11157+/*
11158+ * kmap atomic:
11159+ * The usage of the slots must be completely encapsulated within a spinlock, and
11160+ * no other functions that may be using the locks for other purposes may be
11161+ * called from within the locked region.
11162+ * Since the slots are per processor, this will guarantee that we are the only
11163+ * user.
11164+ */
11165+
11166+/*
11167+ * TODO: Inserting ptes from an interrupt handler:
11168+ * This may be desirable for some SGX functionality where the GPU can fault in
11169+ * needed pages. For that, we need to make an atomic insert_pages function that
11170+ * may fail.
11171+ * If it fails, the caller needs to insert the page using a workqueue function,
11172+ * but on average it should be fast.
11173+ */
11174+
11175+struct psb_mmu_driver {
11176+ /* protects driver- and pd structures. Always take in read mode
11177+ * before taking the page table spinlock.
11178+ */
11179+ struct rw_semaphore sem;
11180+
11181+ /* protects page tables, directory tables and pt tables.
11182+ * and pt structures.
11183+ */
11184+ spinlock_t lock;
11185+
11186+ atomic_t needs_tlbflush;
11187+ atomic_t *msvdx_mmu_invaldc;
11188+ uint8_t __iomem *register_map;
11189+ struct psb_mmu_pd *default_pd;
11190+ uint32_t bif_ctrl;
11191+ int has_clflush;
11192+ int clflush_add;
11193+ unsigned long clflush_mask;
11194+};
11195+
11196+struct psb_mmu_pd;
11197+
11198+struct psb_mmu_pt {
11199+ struct psb_mmu_pd *pd;
11200+ uint32_t index;
11201+ uint32_t count;
11202+ struct page *p;
11203+ uint32_t *v;
11204+};
11205+
11206+struct psb_mmu_pd {
11207+ struct psb_mmu_driver *driver;
11208+ int hw_context;
11209+ struct psb_mmu_pt **tables;
11210+ struct page *p;
11211+ struct page *dummy_pt;
11212+ struct page *dummy_page;
11213+ uint32_t pd_mask;
11214+ uint32_t invalid_pde;
11215+ uint32_t invalid_pte;
11216+};
11217+
11218+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
11219+{
11220+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
11221+}
11222+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
11223+{
11224+ return (offset >> PSB_PDE_SHIFT);
11225+}
11226+
11227+#if defined(CONFIG_X86)
11228+static inline void psb_clflush(void *addr)
11229+{
11230+ __asm__ __volatile__("clflush (%0)\n"::"r"(addr):"memory");
11231+}
11232+
11233+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
11234+{
11235+ if (!driver->has_clflush)
11236+ return;
11237+
11238+ mb();
11239+ psb_clflush(addr);
11240+ mb();
11241+}
11242+#else
11243+
11244+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
11245+{
11246+}
11247+
11248+#endif
11249+
11250+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
11251+ uint32_t val, uint32_t offset)
11252+{
11253+ iowrite32(val, d->register_map + offset);
11254+}
11255+
11256+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
11257+ uint32_t offset)
11258+{
11259+ return ioread32(d->register_map + offset);
11260+}
11261+
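+/*
+ * Pulse the INVALDC bit to invalidate the BIF directory cache.
+ * Callers must hold the driver semaphore in write mode.
+ */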
11262+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
11263+{
11264+ if (atomic_read(&driver->needs_tlbflush) || force) {
11265+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
11266+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
11267+ PSB_CR_BIF_CTRL);
11268+ wmb();
11269+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
11270+ PSB_CR_BIF_CTRL);
11271+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
11272+ if (driver->msvdx_mmu_invaldc)
11273+ atomic_set(driver->msvdx_mmu_invaldc, 1);
11274+ }
11275+ atomic_set(&driver->needs_tlbflush, 0);
11276+}
11277+
11278+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
11279+{
11280+ down_write(&driver->sem);
11281+ psb_mmu_flush_pd_locked(driver, force);
11282+ up_write(&driver->sem);
11283+}
11284+
11285+void psb_mmu_flush(struct psb_mmu_driver *driver)
11286+{
11287+ uint32_t val;
11288+
11289+ down_write(&driver->sem);
11290+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
11291+ if (atomic_read(&driver->needs_tlbflush))
11292+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
11293+ PSB_CR_BIF_CTRL);
11294+ else
11295+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
11296+ PSB_CR_BIF_CTRL);
11297+ wmb();
11298+ psb_iowrite32(driver,
11299+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
11300+ PSB_CR_BIF_CTRL);
11301+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
11302+ atomic_set(&driver->needs_tlbflush, 0);
11303+ if (driver->msvdx_mmu_invaldc)
11304+ atomic_set(driver->msvdx_mmu_invaldc, 1);
11305+ up_write(&driver->sem);
11306+}
11307+
11308+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
11309+{
11310+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
11311+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
11312+
11313+ drm_ttm_cache_flush();
11314+ down_write(&pd->driver->sem);
11315+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), offset);
11316+ wmb();
11317+ psb_mmu_flush_pd_locked(pd->driver, 1);
11318+ pd->hw_context = hw_context;
11319+ up_write(&pd->driver->sem);
11320+
11321+}
11322+
11323+static inline unsigned long psb_pd_addr_end(unsigned long addr,
11324+ unsigned long end)
11325+{
11326+
11327+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
11328+ return (addr < end) ? addr : end;
11329+}
11330+
11331+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
11332+{
11333+ uint32_t mask = PSB_PTE_VALID;
11334+
11335+ if (type & PSB_MMU_CACHED_MEMORY)
11336+ mask |= PSB_PTE_CACHED;
11337+ if (type & PSB_MMU_RO_MEMORY)
11338+ mask |= PSB_PTE_RO;
11339+ if (type & PSB_MMU_WO_MEMORY)
11340+ mask |= PSB_PTE_WO;
11341+
11342+ return (pfn << PAGE_SHIFT) | mask;
11343+}
11344+
11345+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
11346+ int trap_pagefaults, int invalid_type)
11347+{
11348+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
11349+ uint32_t *v;
11350+ int i;
11351+
11352+ if (!pd)
11353+ return NULL;
11354+
11355+ pd->p = alloc_page(GFP_DMA32);
11356+ if (!pd->p)
11357+ goto out_err1;
11358+ pd->dummy_pt = alloc_page(GFP_DMA32);
11359+ if (!pd->dummy_pt)
11360+ goto out_err2;
11361+ pd->dummy_page = alloc_page(GFP_DMA32);
11362+ if (!pd->dummy_page)
11363+ goto out_err3;
11364+
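+	/*
+	 * Unless pagefaults should trap, point invalid PDEs/PTEs at dummy
+	 * pages so stray GPU accesses are absorbed harmlessly.
+	 */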
11365+ if (!trap_pagefaults) {
11366+ pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
11367+ invalid_type |
11368+ PSB_MMU_CACHED_MEMORY);
11369+ pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
11370+ invalid_type |
11371+ PSB_MMU_CACHED_MEMORY);
11372+ } else {
11373+ pd->invalid_pde = 0;
11374+ pd->invalid_pte = 0;
11375+ }
11376+
11377+ v = kmap(pd->dummy_pt);
11378+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
11379+ v[i] = pd->invalid_pte;
11380+ }
11381+ kunmap(pd->dummy_pt);
11382+
11383+ v = kmap(pd->p);
11384+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
11385+ v[i] = pd->invalid_pde;
11386+ }
11387+ kunmap(pd->p);
11388+
11389+ clear_page(kmap(pd->dummy_page));
11390+ kunmap(pd->dummy_page);
11391+
11392+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
11393+ if (!pd->tables)
11394+ goto out_err4;
11395+
11396+ pd->hw_context = -1;
11397+ pd->pd_mask = PSB_PTE_VALID;
11398+ pd->driver = driver;
11399+
11400+ return pd;
11401+
11402+ out_err4:
11403+ __free_page(pd->dummy_page);
11404+ out_err3:
11405+ __free_page(pd->dummy_pt);
11406+ out_err2:
11407+ __free_page(pd->p);
11408+ out_err1:
11409+ kfree(pd);
11410+ return NULL;
11411+}
11412+
11413+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
11414+{
11415+ __free_page(pt->p);
11416+ kfree(pt);
11417+}
11418+
11419+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
11420+{
11421+ struct psb_mmu_driver *driver = pd->driver;
11422+ struct psb_mmu_pt *pt;
11423+ int i;
11424+
11425+ down_write(&driver->sem);
11426+ if (pd->hw_context != -1) {
11427+ psb_iowrite32(driver, 0,
11428+ PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
11429+ psb_mmu_flush_pd_locked(driver, 1);
11430+ }
11431+
11432+ /* Should take the spinlock here, but we don't need to do that
11433+ since we have the semaphore in write mode. */
11434+
11435+ for (i = 0; i < 1024; ++i) {
11436+ pt = pd->tables[i];
11437+ if (pt)
11438+ psb_mmu_free_pt(pt);
11439+ }
11440+
11441+ vfree(pd->tables);
11442+ __free_page(pd->dummy_page);
11443+ __free_page(pd->dummy_pt);
11444+ __free_page(pd->p);
11445+ kfree(pd);
11446+ up_write(&driver->sem);
11447+}
11448+
11449+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
11450+{
11451+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
11452+ void *v;
11453+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
11454+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
11455+ spinlock_t *lock = &pd->driver->lock;
11456+ uint8_t *clf;
11457+ uint32_t *ptes;
11458+ int i;
11459+
11460+ if (!pt)
11461+ return NULL;
11462+
11463+ pt->p = alloc_page(GFP_DMA32);
11464+ if (!pt->p) {
11465+ kfree(pt);
11466+ return NULL;
11467+ }
11468+
11469+ spin_lock(lock);
11470+
11471+ v = kmap_atomic(pt->p, KM_USER0);
11472+ clf = (uint8_t *) v;
11473+ ptes = (uint32_t *) v;
11474+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
11475+ *ptes++ = pd->invalid_pte;
11476+ }
11477+
11478+#if defined(CONFIG_X86)
11479+ if (pd->driver->has_clflush && pd->hw_context != -1) {
11480+ mb();
11481+ for (i = 0; i < clflush_count; ++i) {
11482+ psb_clflush(clf);
11483+ clf += clflush_add;
11484+ }
11485+ mb();
11486+ }
11487+#endif
11488+ kunmap_atomic(v, KM_USER0);
11489+ spin_unlock(lock);
11490+
11491+ pt->count = 0;
11492+ pt->pd = pd;
11493+ pt->index = 0;
11494+
11495+ return pt;
11496+}
11497+
11498+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
11499+ unsigned long addr)
11500+{
11501+ uint32_t index = psb_mmu_pd_index(addr);
11502+ struct psb_mmu_pt *pt;
11503+ volatile uint32_t *v;
11504+ spinlock_t *lock = &pd->driver->lock;
11505+
11506+ spin_lock(lock);
11507+ pt = pd->tables[index];
11508+ while (!pt) {
11509+ spin_unlock(lock);
11510+ pt = psb_mmu_alloc_pt(pd);
11511+ if (!pt)
11512+ return NULL;
11513+ spin_lock(lock);
11514+
11515+ if (pd->tables[index]) {
11516+ spin_unlock(lock);
11517+ psb_mmu_free_pt(pt);
11518+ spin_lock(lock);
11519+ pt = pd->tables[index];
11520+ continue;
11521+ }
11522+
11523+ v = kmap_atomic(pd->p, KM_USER0);
11524+ pd->tables[index] = pt;
11525+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
11526+ pt->index = index;
11527+ kunmap_atomic((void *)v, KM_USER0);
11528+
11529+ if (pd->hw_context != -1) {
11530+ psb_mmu_clflush(pd->driver, (void *)&v[index]);
11531+ atomic_set(&pd->driver->needs_tlbflush, 1);
11532+ }
11533+ }
11534+ pt->v = kmap_atomic(pt->p, KM_USER0);
11535+ return pt;
11536+}
11537+
11538+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
11539+ unsigned long addr)
11540+{
11541+ uint32_t index = psb_mmu_pd_index(addr);
11542+ struct psb_mmu_pt *pt;
11543+ spinlock_t *lock = &pd->driver->lock;
11544+
11545+ spin_lock(lock);
11546+ pt = pd->tables[index];
11547+ if (!pt) {
11548+ spin_unlock(lock);
11549+ return NULL;
11550+ }
11551+ pt->v = kmap_atomic(pt->p, KM_USER0);
11552+ return pt;
11553+}
11554+
11555+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
11556+{
11557+ struct psb_mmu_pd *pd = pt->pd;
11558+ volatile uint32_t *v;
11559+
11560+ kunmap_atomic(pt->v, KM_USER0);
11561+ if (pt->count == 0) {
11562+ v = kmap_atomic(pd->p, KM_USER0);
11563+ v[pt->index] = pd->invalid_pde;
11564+ pd->tables[pt->index] = NULL;
11565+
11566+ if (pd->hw_context != -1) {
11567+ psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
11568+ atomic_set(&pd->driver->needs_tlbflush, 1);
11569+ }
11570+			kunmap_atomic((void *) v, KM_USER0);
11571+ spin_unlock(&pd->driver->lock);
11572+ psb_mmu_free_pt(pt);
11573+ return;
11574+ }
11575+ spin_unlock(&pd->driver->lock);
11576+}
11577+
11578+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
11579+ uint32_t pte)
11580+{
11581+ pt->v[psb_mmu_pt_index(addr)] = pte;
11582+}
11583+
11584+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
11585+ unsigned long addr)
11586+{
11587+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
11588+}
11589+
11590+#if 0
11591+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
11592+ uint32_t mmu_offset)
11593+{
11594+ uint32_t *v;
11595+ uint32_t pfn;
11596+
11597+ v = kmap_atomic(pd->p, KM_USER0);
11598+ if (!v) {
11599+ printk(KERN_INFO "Could not kmap pde page.\n");
11600+ return 0;
11601+ }
11602+ pfn = v[psb_mmu_pd_index(mmu_offset)];
11603+ // printk(KERN_INFO "pde is 0x%08x\n",pfn);
11604+ kunmap_atomic(v, KM_USER0);
11605+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
11606+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
11607+ mmu_offset, pfn);
11608+ }
11609+ v = ioremap(pfn & 0xFFFFF000, 4096);
11610+ if (!v) {
11611+ printk(KERN_INFO "Could not kmap pte page.\n");
11612+ return 0;
11613+ }
11614+ pfn = v[psb_mmu_pt_index(mmu_offset)];
11615+ // printk(KERN_INFO "pte is 0x%08x\n",pfn);
11616+ iounmap(v);
11617+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
11618+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
11619+ mmu_offset, pfn);
11620+ }
11621+ return pfn >> PAGE_SHIFT;
11622+}
11623+
11624+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
11625+ uint32_t mmu_offset, uint32_t gtt_pages)
11626+{
11627+ uint32_t start;
11628+ uint32_t next;
11629+
11630+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
11631+ mmu_offset, gtt_pages);
11632+ down_read(&pd->driver->sem);
11633+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
11634+ mmu_offset += PAGE_SIZE;
11635+ gtt_pages -= 1;
11636+ while (gtt_pages--) {
11637+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
11638+ if (next != start + 1) {
11639+ printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n",
11640+ start, next);
11641+ }
11642+ start = next;
11643+ mmu_offset += PAGE_SIZE;
11644+ }
11645+ up_read(&pd->driver->sem);
11646+}
11647+
11648+#endif
11649+
11650+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
11651+ uint32_t mmu_offset, uint32_t gtt_start,
11652+ uint32_t gtt_pages)
11653+{
11654+ uint32_t *v;
11655+ uint32_t start = psb_mmu_pd_index(mmu_offset);
11656+ struct psb_mmu_driver *driver = pd->driver;
11657+
11658+ down_read(&driver->sem);
11659+ spin_lock(&driver->lock);
11660+
11661+ v = kmap_atomic(pd->p, KM_USER0);
11662+ v += start;
11663+
11664+ while (gtt_pages--) {
11665+ *v++ = gtt_start | pd->pd_mask;
11666+ gtt_start += PAGE_SIZE;
11667+ }
11668+
11669+ drm_ttm_cache_flush();
11670+ kunmap_atomic(v, KM_USER0);
11671+ spin_unlock(&driver->lock);
11672+
11673+ if (pd->hw_context != -1)
11674+ atomic_set(&pd->driver->needs_tlbflush, 1);
11675+
11676+ up_read(&pd->driver->sem);
11677+ psb_mmu_flush_pd(pd->driver, 0);
11678+}
11679+
11680+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
11681+{
11682+ struct psb_mmu_pd *pd;
11683+
11684+ down_read(&driver->sem);
11685+ pd = driver->default_pd;
11686+ up_read(&driver->sem);
11687+
11688+ return pd;
11689+}
11690+
11691+/* Returns the physical address of the PD shared by sgx/msvdx */
11692+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
11693+{
11694+ struct psb_mmu_pd *pd;
11695+
11696+ pd = psb_mmu_get_default_pd(driver);
11697+	return page_to_pfn(pd->p) << PAGE_SHIFT;
11698+}
11699+
11700+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
11701+{
11702+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
11703+ psb_mmu_free_pagedir(driver->default_pd);
11704+ kfree(driver);
11705+}
11706+
11707+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
11708+ int trap_pagefaults,
11709+ int invalid_type,
11710+ atomic_t *msvdx_mmu_invaldc)
11711+{
11712+ struct psb_mmu_driver *driver;
11713+
11714+	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
11715+
11716+ if (!driver)
11717+ return NULL;
11718+
11719+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
11720+ invalid_type);
11721+ if (!driver->default_pd)
11722+ goto out_err1;
11723+
11724+ spin_lock_init(&driver->lock);
11725+ init_rwsem(&driver->sem);
11726+ down_write(&driver->sem);
11727+ driver->register_map = registers;
11728+ atomic_set(&driver->needs_tlbflush, 1);
11729+ driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
11730+
11731+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
11732+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
11733+ PSB_CR_BIF_CTRL);
11734+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
11735+ PSB_CR_BIF_CTRL);
11736+
11737+ driver->has_clflush = 0;
11738+
11739+#if defined(CONFIG_X86)
11740+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
11741+ uint32_t tfms, misc, cap0, cap4, clflush_size;
11742+
11743+ /*
11744+ * clflush size is determined at kernel setup for x86_64 but not for
11745+ * i386. We have to do it here.
11746+ */
11747+
11748+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
11749+ clflush_size = ((misc >> 8) & 0xff) * 8;
11750+ driver->has_clflush = 1;
11751+ driver->clflush_add =
11752+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
11753+ driver->clflush_mask = driver->clflush_add - 1;
11754+ driver->clflush_mask = ~driver->clflush_mask;
11755+ }
11756+#endif
11757+
11758+ up_write(&driver->sem);
11759+ return driver;
11760+
11761+ out_err1:
11762+ kfree(driver);
11763+ return NULL;
11764+}
11765+
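The CPUID read above does for i386 what the kernel already does at setup on x86_64. A standalone sketch of the same computation, assuming an x86 target and GCC/Clang's <cpuid.h> (the page size constant and print are illustration only): CPUID leaf 1 returns the CLFLUSH line size in EBX bits 15:8, in 8-byte units.

#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint32_t clflush_size, clflush_add;
	const uint32_t page_size = 4096;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	clflush_size = ((ebx >> 8) & 0xff) * 8;	/* bytes per cache line */

	/* Same scaling as the driver: the stride between flushes, in the
	 * units psb_mmu_flush_ptes() and psb_mmu_alloc_pt() expect. */
	clflush_add = page_size * clflush_size / sizeof(uint32_t);

	printf("clflush line: %u bytes, clflush_add: %u\n",
	       clflush_size, clflush_add);
	return 0;
}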
11766+#if defined(CONFIG_X86)
11767+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
11768+ uint32_t num_pages, uint32_t desired_tile_stride,
11769+ uint32_t hw_tile_stride)
11770+{
11771+ struct psb_mmu_pt *pt;
11772+ uint32_t rows = 1;
11773+ uint32_t i;
11774+ unsigned long addr;
11775+ unsigned long end;
11776+ unsigned long next;
11777+ unsigned long add;
11778+ unsigned long row_add;
11779+ unsigned long clflush_add = pd->driver->clflush_add;
11780+ unsigned long clflush_mask = pd->driver->clflush_mask;
11781+
11782+ if (!pd->driver->has_clflush) {
11783+ drm_ttm_cache_flush();
11784+ return;
11785+ }
11786+
11787+ if (hw_tile_stride)
11788+ rows = num_pages / desired_tile_stride;
11789+ else
11790+ desired_tile_stride = num_pages;
11791+
11792+ add = desired_tile_stride << PAGE_SHIFT;
11793+ row_add = hw_tile_stride << PAGE_SHIFT;
11794+ mb();
11795+ for (i = 0; i < rows; ++i) {
11796+
11797+ addr = address;
11798+ end = addr + add;
11799+
11800+ do {
11801+ next = psb_pd_addr_end(addr, end);
11802+ pt = psb_mmu_pt_map_lock(pd, addr);
11803+ if (!pt)
11804+ continue;
11805+ do {
11806+ psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
11807+ } while (addr += clflush_add,
11808+ (addr & clflush_mask) < next);
11809+
11810+ psb_mmu_pt_unmap_unlock(pt);
11811+ } while (addr = next, next != end);
11812+ address += row_add;
11813+ }
11814+ mb();
11815+}
11816+#else
11817+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
11818+ uint32_t num_pages, uint32_t desired_tile_stride,
11819+ uint32_t hw_tile_stride)
11820+{
11821+ drm_ttm_cache_flush();
11822+}
11823+#endif
11824+
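The tiled walk in psb_mmu_flush_ptes() above (and in psb_mmu_remove_pages() and psb_mmu_insert_pages() below) splits num_pages into rows of desired_tile_stride pages and steps the base address by hw_tile_stride pages per row. A small sketch of just that arithmetic, assuming a 4 KiB page and made-up stride values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t num_pages = 64, desired_tile_stride = 16, hw_tile_stride = 32;
	uint32_t rows = hw_tile_stride ? num_pages / desired_tile_stride : 1;
	unsigned long add = (unsigned long)desired_tile_stride << PAGE_SHIFT;
	unsigned long row_add = (unsigned long)hw_tile_stride << PAGE_SHIFT;
	unsigned long addr = 0x100000;
	uint32_t i;

	/* 64 pages as 4 rows of 16: each row covers 64 KiB of mappings,
	 * but successive rows start 128 KiB apart in the virtual space. */
	for (i = 0; i < rows; ++i) {
		printf("row %u: 0x%08lx..0x%08lx\n", i, addr, addr + add);
		addr += row_add;
	}
	return 0;
}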
11825+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
11826+ unsigned long address, uint32_t num_pages)
11827+{
11828+ struct psb_mmu_pt *pt;
11829+ unsigned long addr;
11830+ unsigned long end;
11831+ unsigned long next;
11832+ unsigned long f_address = address;
11833+
11834+ down_read(&pd->driver->sem);
11835+
11836+ addr = address;
11837+ end = addr + (num_pages << PAGE_SHIFT);
11838+
11839+ do {
11840+ next = psb_pd_addr_end(addr, end);
11841+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
11842+ if (!pt)
11843+ goto out;
11844+ do {
11845+ psb_mmu_invalidate_pte(pt, addr);
11846+ --pt->count;
11847+ } while (addr += PAGE_SIZE, addr < next);
11848+ psb_mmu_pt_unmap_unlock(pt);
11849+
11850+ } while (addr = next, next != end);
11851+
11852+ out:
11853+ if (pd->hw_context != -1)
11854+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
11855+
11856+ up_read(&pd->driver->sem);
11857+
11858+ if (pd->hw_context != -1)
11859+ psb_mmu_flush(pd->driver);
11860+
11861+ return;
11862+}
11863+
11864+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
11865+ uint32_t num_pages, uint32_t desired_tile_stride,
11866+ uint32_t hw_tile_stride)
11867+{
11868+ struct psb_mmu_pt *pt;
11869+ uint32_t rows = 1;
11870+ uint32_t i;
11871+ unsigned long addr;
11872+ unsigned long end;
11873+ unsigned long next;
11874+ unsigned long add;
11875+ unsigned long row_add;
11876+ unsigned long f_address = address;
11877+
11878+ if (hw_tile_stride)
11879+ rows = num_pages / desired_tile_stride;
11880+ else
11881+ desired_tile_stride = num_pages;
11882+
11883+ add = desired_tile_stride << PAGE_SHIFT;
11884+ row_add = hw_tile_stride << PAGE_SHIFT;
11885+
11886+ down_read(&pd->driver->sem);
11887+
11888+ /* Make sure we only need to flush this processor's cache */
11889+
11890+ for (i = 0; i < rows; ++i) {
11891+
11892+ addr = address;
11893+ end = addr + add;
11894+
11895+ do {
11896+ next = psb_pd_addr_end(addr, end);
11897+ pt = psb_mmu_pt_map_lock(pd, addr);
11898+ if (!pt)
11899+ continue;
11900+ do {
11901+ psb_mmu_invalidate_pte(pt, addr);
11902+ --pt->count;
11903+
11904+ } while (addr += PAGE_SIZE, addr < next);
11905+ psb_mmu_pt_unmap_unlock(pt);
11906+
11907+ } while (addr = next, next != end);
11908+ address += row_add;
11909+ }
11910+ if (pd->hw_context != -1)
11911+ psb_mmu_flush_ptes(pd, f_address, num_pages,
11912+ desired_tile_stride, hw_tile_stride);
11913+
11914+ up_read(&pd->driver->sem);
11915+
11916+ if (pd->hw_context != -1)
11917+ psb_mmu_flush(pd->driver);
11918+}
11919+
11920+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
11921+ unsigned long address, uint32_t num_pages,
11922+ int type)
11923+{
11924+ struct psb_mmu_pt *pt;
11925+ uint32_t pte;
11926+ unsigned long addr;
11927+ unsigned long end;
11928+ unsigned long next;
11929+ unsigned long f_address = address;
11930+ int ret = -ENOMEM;
11931+
11932+ down_read(&pd->driver->sem);
11933+
11934+ addr = address;
11935+ end = addr + (num_pages << PAGE_SHIFT);
11936+
11937+ do {
11938+ next = psb_pd_addr_end(addr, end);
11939+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
11940+ if (!pt) {
11941+ ret = -ENOMEM;
11942+ goto out;
11943+ }
11944+ do {
11945+ pte = psb_mmu_mask_pte(start_pfn++, type);
11946+ psb_mmu_set_pte(pt, addr, pte);
11947+ pt->count++;
11948+ } while (addr += PAGE_SIZE, addr < next);
11949+ psb_mmu_pt_unmap_unlock(pt);
11950+
11951+ } while (addr = next, next != end);
11952+ ret = 0;
11953+
11954+ out:
11955+ if (pd->hw_context != -1)
11956+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
11957+
11958+ up_read(&pd->driver->sem);
11959+
11960+ if (pd->hw_context != -1)
11961+ psb_mmu_flush(pd->driver);
11962+
11963+	return ret;
11964+}
11965+
11966+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
11967+ unsigned long address, uint32_t num_pages,
11968+ uint32_t desired_tile_stride, uint32_t hw_tile_stride,
11969+ int type)
11970+{
11971+ struct psb_mmu_pt *pt;
11972+ uint32_t rows = 1;
11973+ uint32_t i;
11974+ uint32_t pte;
11975+ unsigned long addr;
11976+ unsigned long end;
11977+ unsigned long next;
11978+ unsigned long add;
11979+ unsigned long row_add;
11980+ unsigned long f_address = address;
11981+ int ret = -ENOMEM;
11982+
11983+ if (hw_tile_stride) {
11984+ if (num_pages % desired_tile_stride != 0)
11985+ return -EINVAL;
11986+ rows = num_pages / desired_tile_stride;
11987+ } else {
11988+ desired_tile_stride = num_pages;
11989+ }
11990+
11991+ add = desired_tile_stride << PAGE_SHIFT;
11992+ row_add = hw_tile_stride << PAGE_SHIFT;
11993+
11994+ down_read(&pd->driver->sem);
11995+
11996+ for (i = 0; i < rows; ++i) {
11997+
11998+ addr = address;
11999+ end = addr + add;
12000+
12001+ do {
12002+ next = psb_pd_addr_end(addr, end);
12003+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
12004+ if (!pt)
12005+ goto out;
12006+ do {
12007+ pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
12008+ type);
12009+ psb_mmu_set_pte(pt, addr, pte);
12010+ pt->count++;
12011+ } while (addr += PAGE_SIZE, addr < next);
12012+ psb_mmu_pt_unmap_unlock(pt);
12013+
12014+ } while (addr = next, next != end);
12015+
12016+ address += row_add;
12017+ }
12018+ ret = 0;
12019+ out:
12020+ if (pd->hw_context != -1)
12021+ psb_mmu_flush_ptes(pd, f_address, num_pages,
12022+ desired_tile_stride, hw_tile_stride);
12023+
12024+ up_read(&pd->driver->sem);
12025+
12026+ if (pd->hw_context != -1)
12027+ psb_mmu_flush(pd->driver);
12028+
12029+	return ret;
12030+}
12031+
12032+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
12033+{
12034+ mask &= _PSB_MMU_ER_MASK;
12035+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
12036+ PSB_CR_BIF_CTRL);
12037+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
12038+}
12039+
12040+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
12041+{
12042+ mask &= _PSB_MMU_ER_MASK;
12043+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
12044+ PSB_CR_BIF_CTRL);
12045+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
12046+}
12047+
12048+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
12049+ unsigned long *pfn)
12050+{
12051+ int ret;
12052+ struct psb_mmu_pt *pt;
12053+ uint32_t tmp;
12054+ spinlock_t *lock = &pd->driver->lock;
12055+
12056+ down_read(&pd->driver->sem);
12057+ pt = psb_mmu_pt_map_lock(pd, virtual);
12058+ if (!pt) {
12059+ uint32_t *v;
12060+
12061+ spin_lock(lock);
12062+ v = kmap_atomic(pd->p, KM_USER0);
12063+ tmp = v[psb_mmu_pd_index(virtual)];
12064+ kunmap_atomic(v, KM_USER0);
12065+ spin_unlock(lock);
12066+
12067+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
12068+ !(pd->invalid_pte & PSB_PTE_VALID)) {
12069+ ret = -EINVAL;
12070+ goto out;
12071+ }
12072+ ret = 0;
12073+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
12074+ goto out;
12075+ }
12076+ tmp = pt->v[psb_mmu_pt_index(virtual)];
12077+ if (!(tmp & PSB_PTE_VALID)) {
12078+ ret = -EINVAL;
12079+ } else {
12080+ ret = 0;
12081+ *pfn = tmp >> PAGE_SHIFT;
12082+ }
12083+ psb_mmu_pt_unmap_unlock(pt);
12084+ out:
12085+ up_read(&pd->driver->sem);
12086+ return ret;
12087+}
12088+
12089+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
12090+{
12091+ struct page *p;
12092+ unsigned long pfn;
12093+ int ret = 0;
12094+ struct psb_mmu_pd *pd;
12095+ uint32_t *v;
12096+ uint32_t *vmmu;
12097+
12098+ pd = driver->default_pd;
12099+	if (!pd) {
12100+		printk(KERN_WARNING "Could not get default pd\n");
12101+		return;
12101+	}
12102+
12103+ p = alloc_page(GFP_DMA32);
12104+
12105+ if (!p) {
12106+ printk(KERN_WARNING "Failed allocating page\n");
12107+ return;
12108+ }
12109+
12110+ v = kmap(p);
12111+ memset(v, 0x67, PAGE_SIZE);
12112+
12113+ pfn = (offset >> PAGE_SHIFT);
12114+
12115+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0,
12116+ PSB_MMU_CACHED_MEMORY);
12117+ if (ret) {
12118+ printk(KERN_WARNING "Failed inserting mmu page\n");
12119+ goto out_err1;
12120+ }
12121+
12122+ /* Ioremap the page through the GART aperture */
12123+
12124+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
12125+ if (!vmmu) {
12126+ printk(KERN_WARNING "Failed ioremapping page\n");
12127+ goto out_err2;
12128+ }
12129+
12130+ /* Read from the page with mmu disabled. */
12131+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
12132+
12133+ /* Enable the mmu for host accesses and read again. */
12134+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
12135+
12136+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
12137+ ioread32(vmmu));
12138+ *v = 0x15243705;
12139+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
12140+ ioread32(vmmu));
12141+ iowrite32(0x16243355, vmmu);
12142+ (void)ioread32(vmmu);
12143+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
12144+
12145+ printk(KERN_INFO "Int stat is 0x%08x\n",
12146+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
12147+ printk(KERN_INFO "Fault is 0x%08x\n",
12148+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
12149+
12150+ /* Disable MMU for host accesses and clear page fault register */
12151+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
12152+ iounmap(vmmu);
12153+ out_err2:
12154+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
12155+ out_err1:
12156+ kunmap(p);
12157+ __free_page(p);
12158+}
12159Index: linux-2.6.28/drivers/gpu/drm/psb/psb_msvdx.c
12160===================================================================
12161--- /dev/null 1970-01-01 00:00:00.000000000 +0000
12162+++ linux-2.6.28/drivers/gpu/drm/psb/psb_msvdx.c 2009-02-25 15:37:02.000000000 +0000
12163@@ -0,0 +1,671 @@
12164+/**
12165+ * file psb_msvdx.c
12166+ * MSVDX I/O operations and IRQ handling
12167+ *
12168+ */
12169+
12170+/**************************************************************************
12171+ *
12172+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
12173+ * Copyright (c) Imagination Technologies Limited, UK
12174+ * All Rights Reserved.
12175+ *
12176+ * Permission is hereby granted, free of charge, to any person obtaining a
12177+ * copy of this software and associated documentation files (the
12178+ * "Software"), to deal in the Software without restriction, including
12179+ * without limitation the rights to use, copy, modify, merge, publish,
12180+ * distribute, sub license, and/or sell copies of the Software, and to
12181+ * permit persons to whom the Software is furnished to do so, subject to
12182+ * the following conditions:
12183+ *
12184+ * The above copyright notice and this permission notice (including the
12185+ * next paragraph) shall be included in all copies or substantial portions
12186+ * of the Software.
12187+ *
12188+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12189+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12190+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
12191+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
12192+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
12193+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
12194+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
12195+ *
12196+ **************************************************************************/
12197+
12198+#include "drmP.h"
12199+#include "drm_os_linux.h"
12200+#include "psb_drv.h"
12201+#include "psb_drm.h"
12202+#include "psb_msvdx.h"
12203+
12204+#include <asm/io.h>
12205+#include <linux/delay.h>
12206+
12207+#ifndef list_first_entry
12208+#define list_first_entry(ptr, type, member) \
12209+ list_entry((ptr)->next, type, member)
12210+#endif
12211+
12212+static int psb_msvdx_send (struct drm_device *dev, void *cmd,
12213+ unsigned long cmd_size);
12214+
12215+int
12216+psb_msvdx_dequeue_send (struct drm_device *dev)
12217+{
12218+ struct drm_psb_private *dev_priv = dev->dev_private;
12219+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
12220+ int ret = 0;
12221+
12222+ if (list_empty (&dev_priv->msvdx_queue))
12223+ {
12224+ PSB_DEBUG_GENERAL ("MSVDXQUE: msvdx list empty.\n");
12225+ dev_priv->msvdx_busy = 0;
12226+ return -EINVAL;
12227+ }
12228+ msvdx_cmd =
12229+ list_first_entry (&dev_priv->msvdx_queue, struct psb_msvdx_cmd_queue,
12230+ head);
12231+ PSB_DEBUG_GENERAL ("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
12232+ ret = psb_msvdx_send (dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
12233+ if (ret)
12234+ {
12235+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
12236+ ret = -EINVAL;
12237+ }
12238+ list_del (&msvdx_cmd->head);
12239+ kfree (msvdx_cmd->cmd);
12240+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
12241+ return ret;
12242+}
12243+
12244+int
12245+psb_msvdx_map_command (struct drm_device *dev,
12246+ struct drm_buffer_object *cmd_buffer,
12247+ unsigned long cmd_offset, unsigned long cmd_size,
12248+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
12249+{
12250+ struct drm_psb_private *dev_priv = dev->dev_private;
12251+ int ret = 0;
12252+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
12253+ unsigned long cmd_size_remaining;
12254+ struct drm_bo_kmap_obj cmd_kmap;
12255+ void *cmd, *tmp, *cmd_start;
12256+ int is_iomem;
12257+
12258+ /* command buffers may not exceed page boundary */
12259+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
12260+ return -EINVAL;
12261+
12262+ ret = drm_bo_kmap (cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
12263+
12264+ if (ret)
12265+ {
12266+ PSB_DEBUG_GENERAL ("MSVDXQUE:ret:%d\n", ret);
12267+ return ret;
12268+ }
12269+
12270+ cmd_start =
12271+ (void *) drm_bmo_virtual (&cmd_kmap, &is_iomem) + cmd_page_offset;
12272+ cmd = cmd_start;
12273+ cmd_size_remaining = cmd_size;
12274+
12275+ while (cmd_size_remaining > 0)
12276+ {
12277+ uint32_t mmu_ptd;
12278+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
12279+ uint32_t cur_cmd_id = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_ID);
12280+ PSB_DEBUG_GENERAL
12281+ ("cmd start at %08x cur_cmd_size = %d cur_cmd_id = %02x fence = %08x\n",
12282+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
12283+ if ((cur_cmd_size % sizeof (uint32_t))
12284+ || (cur_cmd_size > cmd_size_remaining))
12285+ {
12286+ ret = -EINVAL;
12287+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
12288+ goto out;
12289+ }
12290+
12291+ switch (cur_cmd_id)
12292+ {
12293+ case VA_MSGID_RENDER:
12294+ /* Fence ID */
12295+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_FENCE_VALUE, sequence);
12296+
12297+ mmu_ptd = psb_get_default_pd_addr (dev_priv->mmu);
12298+ if (atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc, 1, 0) == 1)
12299+ {
12300+ mmu_ptd |= 1;
12301+ PSB_DEBUG_GENERAL ("MSVDX: Setting MMU invalidate flag\n");
12302+ }
12303+ /* PTD */
12304+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
12305+ break;
12306+
12307+ default:
12308+ /* Msg not supported */
12309+ ret = -EINVAL;
12310+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
12311+ goto out;
12312+ }
12313+
12314+ cmd += cur_cmd_size;
12315+ cmd_size_remaining -= cur_cmd_size;
12316+ }
12317+
12318+ if (copy_cmd)
12319+ {
12320+ PSB_DEBUG_GENERAL
12321+ ("MSVDXQUE: psb_msvdx_map_command copying command...\n");
12322+ tmp = drm_calloc (1, cmd_size, DRM_MEM_DRIVER);
12323+ if (tmp == NULL)
12324+ {
12325+ ret = -ENOMEM;
12326+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
12327+ goto out;
12328+ }
12329+ memcpy (tmp, cmd_start, cmd_size);
12330+ *msvdx_cmd = tmp;
12331+ }
12332+ else
12333+ {
12334+ PSB_DEBUG_GENERAL
12335+ ("MSVDXQUE: psb_msvdx_map_command did NOT copy command...\n");
12336+ ret = psb_msvdx_send (dev, cmd_start, cmd_size);
12337+ if (ret)
12338+ {
12339+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
12340+ ret = -EINVAL;
12341+ }
12342+ }
12343+
12344+out:
12345+ drm_bo_kunmap (&cmd_kmap);
12346+
12347+ return ret;
12348+}
12349+
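The page-boundary check at the top of psb_msvdx_map_command() is simple offset arithmetic: a command starting at cmd_offset within a buffer object must end within the same page. A compilable sketch of the check, assuming the 4 KiB PAGE_SIZE/PAGE_MASK values of i386 (the helper name is hypothetical):

#include <stdio.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int fits_in_page(unsigned long cmd_offset, unsigned long cmd_size)
{
	unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;

	/* Mirrors the driver: reject only if the command crosses the
	 * page boundary; ending exactly at the boundary is allowed. */
	return cmd_size + cmd_page_offset <= PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", fits_in_page(0x1F00, 0x100));	/* 1: ends at page end */
	printf("%d\n", fits_in_page(0x1F00, 0x101));	/* 0: crosses boundary */
	return 0;
}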
12350+int
12351+psb_submit_video_cmdbuf (struct drm_device *dev,
12352+ struct drm_buffer_object *cmd_buffer,
12353+ unsigned long cmd_offset, unsigned long cmd_size,
12354+ struct drm_fence_object *fence)
12355+{
12356+ struct drm_psb_private *dev_priv = dev->dev_private;
12357+ uint32_t sequence = fence->sequence;
12358+ unsigned long irq_flags;
12359+ int ret = 0;
12360+
12361+ mutex_lock (&dev_priv->msvdx_mutex);
12362+ psb_schedule_watchdog (dev_priv);
12363+
12364+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
12365+ if (dev_priv->msvdx_needs_reset)
12366+ {
12367+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
12368+ PSB_DEBUG_GENERAL ("MSVDX: Needs reset\n");
12369+ if (psb_msvdx_reset (dev_priv))
12370+ {
12371+ mutex_unlock (&dev_priv->msvdx_mutex);
12372+ ret = -EBUSY;
12373+ PSB_DEBUG_GENERAL ("MSVDX: Reset failed\n");
12374+ return ret;
12375+ }
12376+ PSB_DEBUG_GENERAL ("MSVDX: Reset ok\n");
12377+ dev_priv->msvdx_needs_reset = 0;
12378+ dev_priv->msvdx_busy = 0;
12379+ dev_priv->msvdx_start_idle = 0;
12380+
12381+ psb_msvdx_init (dev);
12382+ psb_msvdx_irq_preinstall (dev_priv);
12383+ psb_msvdx_irq_postinstall (dev_priv);
12384+ PSB_DEBUG_GENERAL ("MSVDX: Init ok\n");
12385+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
12386+ }
12387+
12388+ if (!dev_priv->msvdx_busy)
12389+ {
12390+ dev_priv->msvdx_busy = 1;
12391+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
12392+ PSB_DEBUG_GENERAL
12393+ ("MSVDXQUE: nothing in the queue sending sequence:%08x..\n",
12394+ sequence);
12395+ ret =
12396+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
12397+ NULL, sequence, 0);
12398+ if (ret)
12399+ {
12400+ mutex_unlock (&dev_priv->msvdx_mutex);
12401+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
12402+ return ret;
12403+ }
12404+ }
12405+ else
12406+ {
12407+ struct psb_msvdx_cmd_queue *msvdx_cmd;
12408+ void *cmd = NULL;
12409+
12410+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
12411+ /*queue the command to be sent when the h/w is ready */
12412+ PSB_DEBUG_GENERAL ("MSVDXQUE: queueing sequence:%08x..\n", sequence);
12413+ msvdx_cmd =
12414+ drm_calloc (1, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
12415+ if (msvdx_cmd == NULL)
12416+ {
12417+ mutex_unlock (&dev_priv->msvdx_mutex);
12418+ PSB_DEBUG_GENERAL ("MSVDXQUE: Out of memory...\n");
12419+ return -ENOMEM;
12420+ }
12421+
12422+ ret =
12423+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
12424+ &cmd, sequence, 1);
12425+ if (ret)
12426+ {
12427+ mutex_unlock (&dev_priv->msvdx_mutex);
12428+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
12429+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue),
12430+ DRM_MEM_DRIVER);
12431+ return ret;
12432+ }
12433+ msvdx_cmd->cmd = cmd;
12434+ msvdx_cmd->cmd_size = cmd_size;
12435+ msvdx_cmd->sequence = sequence;
12436+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
12437+ list_add_tail (&msvdx_cmd->head, &dev_priv->msvdx_queue);
12438+ if (!dev_priv->msvdx_busy)
12439+ {
12440+ dev_priv->msvdx_busy = 1;
12441+ PSB_DEBUG_GENERAL ("MSVDXQUE: Need immediate dequeue\n");
12442+ psb_msvdx_dequeue_send (dev);
12443+ }
12444+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
12445+ }
12446+ mutex_unlock (&dev_priv->msvdx_mutex);
12447+ return ret;
12448+}
12449+
12450+int
12451+psb_msvdx_send (struct drm_device *dev, void *cmd, unsigned long cmd_size)
12452+{
12453+ int ret = 0;
12454+ struct drm_psb_private *dev_priv = dev->dev_private;
12455+
12456+ while (cmd_size > 0)
12457+ {
12458+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
12459+ if (cur_cmd_size > cmd_size)
12460+ {
12461+ ret = -EINVAL;
12462+ PSB_DEBUG_GENERAL
12463+ ("MSVDX: cmd_size = %d cur_cmd_size = %d\n",
12464+ (int) cmd_size, cur_cmd_size);
12465+ goto out;
12466+ }
12467+ /* Send the message to h/w */
12468+ ret = psb_mtx_send (dev_priv, cmd);
12469+ if (ret)
12470+ {
12471+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
12472+ goto out;
12473+ }
12474+ cmd += cur_cmd_size;
12475+ cmd_size -= cur_cmd_size;
12476+ }
12477+
12478+out:
12479+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
12480+ return ret;
12481+}
12482+
12483+int
12484+psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg)
12485+{
12486+
12487+ static uint32_t padMessage[FWRK_PADMSG_SIZE];
12488+
12489+ const uint32_t *pui32Msg = (uint32_t *) pvMsg;
12490+ uint32_t msgNumWords, wordsFree, readIndex, writeIndex;
12491+ int ret = 0;
12492+
12493+ PSB_DEBUG_GENERAL ("MSVDX: psb_mtx_send\n");
12494+
12495+ /* we need clocks enabled before we touch VEC local ram */
12496+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
12497+
12498+ msgNumWords = (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_SIZE) + 3) / 4;
12499+
12500+ if (msgNumWords > NUM_WORDS_MTX_BUF)
12501+ {
12502+ ret = -EINVAL;
12503+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
12504+ goto out;
12505+ }
12506+
12507+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_RD_INDEX);
12508+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
12509+
12510+ if (writeIndex + msgNumWords > NUM_WORDS_MTX_BUF)
12511+ { /* message would wrap, need to send a pad message */
12512+ BUG_ON (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_ID) == FWRK_MSGID_PADDING); /* Shouldn't happen for a PAD message itself */
12513+    /* if the read pointer is at zero then we must wait for it to change, otherwise the write
12514+     * pointer will equal the read pointer, which should only happen when the buffer is empty
12515+     *
12516+     * This will only happen if we try to overfill the queue; queue management should make
12517+     * sure this never happens in the first place.
12518+     */
12519+ BUG_ON (0 == readIndex);
12520+ if (0 == readIndex)
12521+ {
12522+ ret = -EINVAL;
12523+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
12524+ goto out;
12525+ }
12526+ /* Send a pad message */
12527+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_SIZE,
12528+ (NUM_WORDS_MTX_BUF - writeIndex) << 2);
12529+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_ID, FWRK_MSGID_PADDING);
12530+ psb_mtx_send (dev_priv, padMessage);
12531+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
12532+ }
12533+
12534+ wordsFree =
12535+ (writeIndex >=
12536+ readIndex) ? NUM_WORDS_MTX_BUF - (writeIndex -
12537+ readIndex) : readIndex - writeIndex;
12538+
12539+ BUG_ON (msgNumWords > wordsFree);
12540+ if (msgNumWords > wordsFree)
12541+ {
12542+ ret = -EINVAL;
12543+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
12544+ goto out;
12545+ }
12546+
12547+ while (msgNumWords > 0)
12548+ {
12549+ PSB_WMSVDX32 (*pui32Msg++, MSVDX_COMMS_TO_MTX_BUF + (writeIndex << 2));
12550+ msgNumWords--;
12551+ writeIndex++;
12552+ if (NUM_WORDS_MTX_BUF == writeIndex)
12553+ {
12554+ writeIndex = 0;
12555+ }
12556+ }
12557+ PSB_WMSVDX32 (writeIndex, MSVDX_COMMS_TO_MTX_WRT_INDEX);
12558+
12559+ /* Make sure clocks are enabled before we kick */
12560+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
12561+
12562+ /* signal an interrupt to let the mtx know there is a new message */
12563+ PSB_WMSVDX32 (1, MSVDX_MTX_KICKI);
12564+
12565+out:
12566+ return ret;
12567+}
12568+
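The free-space computation and pad-message trick in psb_mtx_send() are the standard single-producer ring idiom: the write index must never catch up to the read index while data is pending, and a message that would wrap is preceded by a pad that burns the words up to the end of the buffer. A self-contained sketch of just that accounting (NUM_WORDS and the sample indices are hypothetical; the driver uses NUM_WORDS_MTX_BUF = 100):

#include <stdint.h>
#include <stdio.h>

#define NUM_WORDS 100

static uint32_t words_free(uint32_t rd, uint32_t wr)
{
	/* Same formula as psb_mtx_send(): distance from write to read,
	 * going forward around the ring. */
	return (wr >= rd) ? NUM_WORDS - (wr - rd) : rd - wr;
}

int main(void)
{
	uint32_t rd = 10, wr = 95, msg_words = 8;

	if (wr + msg_words > NUM_WORDS) {
		/* Message would wrap: queue a pad of (NUM_WORDS - wr)
		 * words first, after which writing restarts at 0. */
		printf("pad %u words, then write at 0\n", NUM_WORDS - wr);
		wr = 0;
	}
	printf("free after pad: %u words\n", words_free(rd, wr));
	return 0;
}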
12569+/*
12570+ * MSVDX MTX interrupt
12571+ */
12572+void
12573+psb_msvdx_mtx_interrupt (struct drm_device *dev)
12574+{
12575+ static uint32_t msgBuffer[128];
12576+ uint32_t readIndex, writeIndex;
12577+ uint32_t msgNumWords, msgWordOffset;
12578+ struct drm_psb_private *dev_priv =
12579+ (struct drm_psb_private *) dev->dev_private;
12580+
12581+  /* If clocks are disabled, enable them before attempting to read from VLR */
12582+ if (PSB_RMSVDX32 (MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
12583+ {
12584+ PSB_DEBUG_GENERAL
12585+	("MSVDX: Warning - Clocks disabled when Interrupt set\n");
12586+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
12587+ }
12588+
12589+ for (;;)
12590+ {
12591+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_RD_INDEX);
12592+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_WRT_INDEX);
12593+
12594+ if (readIndex != writeIndex)
12595+ {
12596+ msgWordOffset = 0;
12597+
12598+ msgBuffer[msgWordOffset] =
12599+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
12600+
12601+	  msgNumWords = (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_SIZE) + 3) / 4;	/* round up to whole words */
12602+
12603+ /*ASSERT(msgNumWords <= sizeof(msgBuffer) / sizeof(uint32_t)); */
12604+
12605+ if (++readIndex >= NUM_WORDS_HOST_BUF)
12606+ readIndex = 0;
12607+
12608+ for (msgWordOffset++; msgWordOffset < msgNumWords; msgWordOffset++)
12609+ {
12610+ msgBuffer[msgWordOffset] =
12611+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
12612+
12613+ if (++readIndex >= NUM_WORDS_HOST_BUF)
12614+ {
12615+ readIndex = 0;
12616+ }
12617+ }
12618+
12619+ /* Update the Read index */
12620+ PSB_WMSVDX32 (readIndex, MSVDX_COMMS_TO_HOST_RD_INDEX);
12621+
12622+ if (!dev_priv->msvdx_needs_reset)
12623+ switch (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID))
12624+ {
12625+ case VA_MSGID_CMD_HW_PANIC:
12626+ case VA_MSGID_CMD_FAILED:
12627+ {
12628+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
12629+ FW_VA_CMD_FAILED_FENCE_VALUE);
12630+ uint32_t ui32FaultStatus = MEMIO_READ_FIELD (msgBuffer,
12631+ FW_VA_CMD_FAILED_IRQSTATUS);
12632+
12633+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC )
12634+ PSB_DEBUG_GENERAL
12635+ ("MSVDX: VA_MSGID_CMD_HW_PANIC: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
12636+ ui32Fence, ui32FaultStatus);
12637+ else
12638+ PSB_DEBUG_GENERAL
12639+ ("MSVDX: VA_MSGID_CMD_FAILED: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
12640+ ui32Fence, ui32FaultStatus);
12641+
12642+ dev_priv->msvdx_needs_reset = 1;
12643+
12644+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC)
12645+ {
12646+		  if (dev_priv->msvdx_current_sequence -
12647+		      dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
12649+ dev_priv->msvdx_current_sequence++;
12650+ PSB_DEBUG_GENERAL
12651+ ("MSVDX: Fence ID missing, assuming %08x\n",
12652+ dev_priv->msvdx_current_sequence);
12653+ }
12654+ else
12655+ dev_priv->msvdx_current_sequence = ui32Fence;
12656+
12657+	      psb_fence_error (dev, PSB_ENGINE_VIDEO,
12658+			       dev_priv->msvdx_current_sequence,
12659+			       DRM_FENCE_TYPE_EXE, DRM_CMD_FAILED);
12662+
12663+ /* Flush the command queue */
12664+ psb_msvdx_flush_cmd_queue (dev);
12665+
12666+ goto isrExit;
12667+ break;
12668+ }
12669+ case VA_MSGID_CMD_COMPLETED:
12670+ {
12671+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
12672+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
12673+ uint32_t ui32Flags =
12674+ MEMIO_READ_FIELD (msgBuffer, FW_VA_CMD_COMPLETED_FLAGS);
12675+
12676+ PSB_DEBUG_GENERAL
12677+ ("msvdx VA_MSGID_CMD_COMPLETED: FenceID: %08x, flags: 0x%x\n",
12678+ ui32Fence, ui32Flags);
12679+ dev_priv->msvdx_current_sequence = ui32Fence;
12680+
12681+ psb_fence_handler (dev, PSB_ENGINE_VIDEO);
12682+
12683+
12684+ if (ui32Flags & FW_VA_RENDER_HOST_INT)
12685+ {
12686+ /*Now send the next command from the msvdx cmd queue */
12687+ psb_msvdx_dequeue_send (dev);
12688+ goto isrExit;
12689+ }
12690+ break;
12691+ }
12692+ case VA_MSGID_ACK:
12693+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_ACK\n");
12694+ break;
12695+
12696+ case VA_MSGID_TEST1:
12697+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST1\n");
12698+ break;
12699+
12700+ case VA_MSGID_TEST2:
12701+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST2\n");
12702+ break;
12703+ /* Don't need to do anything with these messages */
12704+
12705+ case VA_MSGID_DEBLOCK_REQUIRED:
12706+ {
12707+ uint32_t ui32ContextId = MEMIO_READ_FIELD (msgBuffer,
12708+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
12709+
12710+	      /* The BE will now be locked. */
12711+
12712+ /* Unblock rendec by reading the mtx2mtx end of slice */
12713+ (void) PSB_RMSVDX32 (MSVDX_RENDEC_READ_DATA);
12714+
12715+ PSB_DEBUG_GENERAL
12716+ ("msvdx VA_MSGID_DEBLOCK_REQUIRED Context=%08x\n",
12717+ ui32ContextId);
12718+ goto isrExit;
12719+ break;
12720+ }
12721+
12722+ default:
12723+ {
12724+ PSB_DEBUG_GENERAL
12725+ ("ERROR: msvdx Unknown message from MTX \n");
12726+ }
12727+ break;
12728+
12729+ }
12730+ }
12731+ else
12732+ {
12733+ /* Get out of here if nothing */
12734+ break;
12735+ }
12736+ }
12737+isrExit:
12738+
12739+#if 1
12740+ if (!dev_priv->msvdx_busy)
12741+ {
12742+ /* check that clocks are enabled before reading VLR */
12743+ if( PSB_RMSVDX32( MSVDX_MAN_CLK_ENABLE ) != (clk_enable_all) )
12744+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
12745+
12746+ /* If the firmware says the hardware is idle and the CCB is empty then we can power down */
12747+ {
12748+ uint32_t ui32FWStatus = PSB_RMSVDX32( MSVDX_COMMS_FW_STATUS );
12749+ uint32_t ui32CCBRoff = PSB_RMSVDX32 ( MSVDX_COMMS_TO_MTX_RD_INDEX );
12750+ uint32_t ui32CCBWoff = PSB_RMSVDX32 ( MSVDX_COMMS_TO_MTX_WRT_INDEX );
12751+
12752+ if( (ui32FWStatus & MSVDX_FW_STATUS_HW_IDLE) && (ui32CCBRoff == ui32CCBWoff))
12753+ {
12754+ PSB_DEBUG_GENERAL("MSVDX_CLOCK: Setting clock to minimal...\n");
12755+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
12756+ }
12757+ }
12758+ }
12759+#endif
12760+ DRM_MEMORYBARRIER ();
12761+}
12762+
12763+void
12764+psb_msvdx_lockup (struct drm_psb_private *dev_priv,
12765+ int *msvdx_lockup, int *msvdx_idle)
12766+{
12767+ unsigned long irq_flags;
12768+// struct psb_scheduler *scheduler = &dev_priv->scheduler;
12769+
12770+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
12771+ *msvdx_lockup = 0;
12772+ *msvdx_idle = 1;
12773+
12774+ if (!dev_priv->has_msvdx)
12775+ {
12776+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
12777+ return;
12778+ }
12779+#if 0
12780+ PSB_DEBUG_GENERAL ("MSVDXTimer: current_sequence:%d "
12781+ "last_sequence:%d and last_submitted_sequence :%d\n",
12782+ dev_priv->msvdx_current_sequence,
12783+ dev_priv->msvdx_last_sequence,
12784+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
12785+#endif
12786+ if (dev_priv->msvdx_current_sequence -
12787+ dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
12788+ {
12789+
12790+ if (dev_priv->msvdx_current_sequence == dev_priv->msvdx_last_sequence)
12791+ {
12792+ PSB_DEBUG_GENERAL
12793+ ("MSVDXTimer: msvdx locked-up for sequence:%d\n",
12794+ dev_priv->msvdx_current_sequence);
12795+ *msvdx_lockup = 1;
12796+ }
12797+ else
12798+ {
12799+ PSB_DEBUG_GENERAL ("MSVDXTimer: msvdx responded fine so far...\n");
12800+ dev_priv->msvdx_last_sequence = dev_priv->msvdx_current_sequence;
12801+ *msvdx_idle = 0;
12802+ }
12803+ if (dev_priv->msvdx_start_idle)
12804+ dev_priv->msvdx_start_idle = 0;
12805+ }
12806+ else
12807+ {
12808+ if (dev_priv->msvdx_needs_reset == 0)
12809+ {
12810+ if (dev_priv->msvdx_start_idle && (dev_priv->msvdx_finished_sequence == dev_priv->msvdx_current_sequence))
12811+ {
12812+ //if (dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME >= jiffies)
12813+ if (time_after_eq(jiffies, dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME))
12814+ {
12815+		  printk(KERN_INFO "MSVDX: setting clock to 0 in %s\n", __FUNCTION__);
12816+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
12817+ dev_priv->msvdx_needs_reset = 1;
12818+ }
12819+ else
12820+ {
12821+ *msvdx_idle = 0;
12822+ }
12823+ }
12824+ else
12825+ {
12826+ dev_priv->msvdx_start_idle = 1;
12827+ dev_priv->msvdx_idle_start_jiffies = jiffies;
12828+ dev_priv->msvdx_finished_sequence = dev_priv->msvdx_current_sequence;
12829+ *msvdx_idle = 0;
12830+ }
12831+ }
12832+ }
12833+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
12834+}
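The `current - submitted > 0x0FFFFFFF` test in psb_msvdx_lockup() is an unsigned-wraparound comparison: sequence numbers are 32-bit and only ever increment, so if the completed sequence is behind the submitted one the unsigned difference wraps to a huge value, meaning work is still outstanding. A minimal sketch of the idiom (function and variable names are illustrative only):

#include <stdint.h>
#include <stdio.h>

static int work_outstanding(uint32_t completed, uint32_t submitted)
{
	/* If completed lags submitted, the unsigned difference wraps to
	 * a huge value; 0x0FFFFFFF is the driver's slack margin. */
	return (completed - submitted) > 0x0FFFFFFFu;
}

int main(void)
{
	printf("%d\n", work_outstanding(5, 7));		/* 1: pending */
	printf("%d\n", work_outstanding(7, 7));		/* 0: idle */
	printf("%d\n", work_outstanding(0xFFFFFFFEu,
					0x00000002u));	/* 1: pending across wrap */
	return 0;
}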
12835Index: linux-2.6.28/drivers/gpu/drm/psb/psb_msvdx.h
12836===================================================================
12837--- /dev/null 1970-01-01 00:00:00.000000000 +0000
12838+++ linux-2.6.28/drivers/gpu/drm/psb/psb_msvdx.h 2009-02-25 15:37:02.000000000 +0000
12839@@ -0,0 +1,564 @@
12840+/**************************************************************************
12841+ *
12842+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
12843+ * Copyright (c) Imagination Technologies Limited, UK
12844+ * All Rights Reserved.
12845+ *
12846+ * Permission is hereby granted, free of charge, to any person obtaining a
12847+ * copy of this software and associated documentation files (the
12848+ * "Software"), to deal in the Software without restriction, including
12849+ * without limitation the rights to use, copy, modify, merge, publish,
12850+ * distribute, sub license, and/or sell copies of the Software, and to
12851+ * permit persons to whom the Software is furnished to do so, subject to
12852+ * the following conditions:
12853+ *
12854+ * The above copyright notice and this permission notice (including the
12855+ * next paragraph) shall be included in all copies or substantial portions
12856+ * of the Software.
12857+ *
12858+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12859+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12860+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
12861+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
12862+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
12863+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
12864+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
12865+ *
12866+ **************************************************************************/
12867+
12868+#ifndef _PSB_MSVDX_H_
12869+#define _PSB_MSVDX_H_
12870+
12871+#define assert(expr) \
12872+	do { if (unlikely(!(expr))) \
12873+		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
12874+		       #expr, __FILE__, __FUNCTION__, __LINE__); \
12875+	} while (0)
12876+
12877+#define PSB_ASSERT(x) assert (x)
12878+#define IMG_ASSERT(x) assert (x)
12879+
12880+#include "psb_drv.h"
12881+int
12882+psb_wait_for_register (struct drm_psb_private *dev_priv,
12883+ uint32_t ui32Offset,
12884+ uint32_t ui32Value, uint32_t ui32Enable);
12885+
12886+void psb_msvdx_mtx_interrupt (struct drm_device *dev);
12887+int psb_msvdx_init (struct drm_device *dev);
12888+int psb_msvdx_uninit (struct drm_device *dev);
12889+int psb_msvdx_reset (struct drm_psb_private *dev_priv);
12890+uint32_t psb_get_default_pd_addr (struct psb_mmu_driver *driver);
12891+int psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg);
12892+void psb_msvdx_irq_preinstall (struct drm_psb_private *dev_priv);
12893+void psb_msvdx_irq_postinstall (struct drm_psb_private *dev_priv);
12894+void psb_msvdx_flush_cmd_queue (struct drm_device *dev);
12895+extern void psb_msvdx_lockup (struct drm_psb_private *dev_priv,
12896+ int *msvdx_lockup, int *msvdx_idle);
12897+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2 /* Non-Optimal Invalidation is not default */
12898+#define FW_VA_RENDER_HOST_INT 0x00004000
12899+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
12900+
12901+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
12902+
12903+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
12904+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
12905+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
12906+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
12907+
12908+
12909+#define POULSBO_D0 0x5
12910+#define POULSBO_D1 0x6
12911+#define PSB_REVID_OFFSET 0x8
12912+
12913+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 /* There is no work currently underway on the hardware*/
12914+
12915+#define clk_enable_all (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
12916+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
12917+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
12918+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
12919+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
12920+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
12921+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
12922+
12923+#define clk_enable_minimal (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
12924+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
12925+
12926+#define clk_enable_auto (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
12927+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
12928+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
12929+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
12930+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
12931+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
12932+	MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
12933+
12934+#define msvdx_sw_reset_all (MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
12935+	MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
12936+	MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
12937+	MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
12938+	MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
12939+
12940+
12941+#define PCI_PORT5_REG80_FFUSE 0xD0058000
12942+#define MTX_CODE_BASE (0x80900000)
12943+#define MTX_DATA_BASE (0x82880000)
12944+#define PC_START_ADDRESS (0x80900000)
12945+
12946+#define MTX_CORE_CODE_MEM (0x10 )
12947+#define MTX_CORE_DATA_MEM (0x18 )
12948+
12949+#define MTX_INTERNAL_REG( R_SPECIFIER , U_SPECIFIER ) ( ((R_SPECIFIER)<<4) | (U_SPECIFIER) )
12950+#define MTX_PC MTX_INTERNAL_REG( 0 , 5 )
12951+
12952+#define RENDEC_A_SIZE ( 2 * 1024* 1024 )
12953+#define RENDEC_B_SIZE ( RENDEC_A_SIZE / 4 )
12954+
12955+#define MEMIO_READ_FIELD(vpMem, field) \
12956+ ((uint32_t)(((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & field##_MASK) >> field##_SHIFT))
12957+
12958+#define MEMIO_WRITE_FIELD(vpMem, field, ui32Value) \
12959+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
12960+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & (field##_TYPE)~field##_MASK) | \
12961+ (field##_TYPE)(( (uint32_t) (ui32Value) << field##_SHIFT) & field##_MASK);
12962+
12963+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, ui32Value) \
12964+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
12965+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) | \
12966+ (field##_TYPE) (( (uint32_t) (ui32Value) << field##_SHIFT)) );
12967+
12968+#define REGIO_READ_FIELD(ui32RegValue, reg, field) \
12969+ ((ui32RegValue & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
12970+
12971+#define REGIO_WRITE_FIELD(ui32RegValue, reg, field, ui32Value) \
12972+ (ui32RegValue) = \
12973+ ((ui32RegValue) & ~(reg##_##field##_MASK)) | \
12974+ (((ui32Value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
12975+
12976+#define REGIO_WRITE_FIELD_LITE(ui32RegValue, reg, field, ui32Value) \
12977+ (ui32RegValue) = \
12978+ ( (ui32RegValue) | ( (ui32Value) << (reg##_##field##_SHIFT) ) );
12979+
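The MEMIO_*_FIELD macros above describe a message layout entirely through #defines: each field token gets _TYPE, _OFFSET, _MASK and _SHIFT suffixes pasted onto it. A compilable sketch of the expansion with a hypothetical field family in the same style as FWRK_GENMSG_SIZE (the sketch uses uintptr_t for the address arithmetic, where the driver's uint32_t cast assumes a 32-bit target):

#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD_TYPE   uint8_t
#define DEMO_FIELD_MASK   (0xFF)
#define DEMO_FIELD_SHIFT  (0)
#define DEMO_FIELD_OFFSET (0x0001)

#define DEMO_READ_FIELD(vpMem, field) \
	((uint32_t)(((*((field##_TYPE *)((uintptr_t)(vpMem) + field##_OFFSET))) \
		& field##_MASK) >> field##_SHIFT))

int main(void)
{
	uint8_t msg[4] = { 0x08, 0x2A, 0, 0 };

	/* Expands to: read the uint8_t at msg + 1, mask it, shift it. */
	printf("field = 0x%02x\n", DEMO_READ_FIELD(msg, DEMO_FIELD));
	return 0;
}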
12980+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK (0x00000001)
12981+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK (0x00000002)
12982+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK (0x00000004)
12983+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK (0x00000008)
12984+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK (0x00000010)
12985+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK (0x00000020)
12986+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK (0x00000040)
12987+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK (0x00040000)
12988+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK (0x00080000)
12989+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK (0x00100000)
12990+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK (0x00200000)
12991+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
12992+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK (0x00010000)
12993+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK (0x00100000)
12994+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK (0x01000000)
12995+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK (0x10000000)
12996+
12997+/* MTX registers */
12998+#define MSVDX_MTX_ENABLE (0x0000)
12999+#define MSVDX_MTX_KICKI (0x0088)
13000+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
13001+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
13002+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
13003+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
13004+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
13005+#define MSVDX_MTX_SOFT_RESET (0x0200)
13006+
13007+/* MSVDX registers */
13008+#define MSVDX_CONTROL (0x0600)
13009+#define MSVDX_INTERRUPT_CLEAR (0x060C)
13010+#define MSVDX_INTERRUPT_STATUS (0x0608)
13011+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
13012+#define MSVDX_MMU_CONTROL0 (0x0680)
13013+#define MSVDX_MTX_RAM_BANK (0x06F0)
13014+#define MSVDX_MAN_CLK_ENABLE (0x0620)
13015+
13016+/* RENDEC registers */
13017+#define MSVDX_RENDEC_CONTROL0 (0x0868)
13018+#define MSVDX_RENDEC_CONTROL1 (0x086C)
13019+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
13020+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
13021+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
13022+#define MSVDX_RENDEC_READ_DATA (0x0898)
13023+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
13024+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
13025+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
13026+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
13027+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
13028+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
13029+
13030+/*
13031+ * This defines the MSVDX communication buffer
13032+ */
13033+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
13034+#define NUM_WORDS_HOST_BUF (100) /*!< Host buffer size (in 32-bit words) */
13035+#define NUM_WORDS_MTX_BUF (100) /*!< MTX buffer size (in 32-bit words) */
13036+
13037+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
13038+
13039+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
13040+#define MSVDX_COMMS_SCRATCH (MSVDX_COMMS_AREA_ADDR - 0x08)
13041+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
13042+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
13043+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
13044+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
13045+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
13046+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
13047+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
13048+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
13049+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
13050+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
13051+#define MSVDX_COMMS_TO_MTX_BUF (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
13052+
13053+#define MSVDX_COMMS_AREA_END (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
13054+
13055+#if (MSVDX_COMMS_AREA_END != 0x03000)
13056+#error "MSVDX comms area must end at 0x03000"
13057+#endif
13058+
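The compile-time check above is plain arithmetic on the layout defined here: MSVDX_COMMS_TO_HOST_BUF = 0x2cc0 + 0x20 = 0x2ce0; the host ring occupies NUM_WORDS_HOST_BUF << 2 = 0x190 bytes, so MSVDX_COMMS_TO_MTX_BUF = 0x2e70; adding another 0x190 bytes gives MSVDX_COMMS_AREA_END = 0x3000. Note that MSVDX_COMMS_AREA_END is computed from NUM_WORDS_HOST_BUF rather than NUM_WORDS_MTX_BUF; both are 100, so the sum works out, but the #error would only catch a change to the host buffer size.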
13059+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
13060+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
13061+
13062+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
13063+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
13064+
13065+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
13066+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
13067+
13068+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
13069+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
13070+
13071+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
13072+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
13073+
13074+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
13075+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
13076+
13077+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
13078+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
13079+
13080+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
13081+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
13082+
13083+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
13084+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
13085+
13086+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
13087+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
13088+
13089+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
13090+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
13091+
13092+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
13093+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
13094+
13095+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
13096+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
13097+
13098+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
13099+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
13100+
13101+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
13102+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
13103+
13104+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
13105+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
13106+
13107+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
13108+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
13109+
13110+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
13111+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
13112+
13113+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
13114+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
13115+
13116+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
13117+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
13118+
13119+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80) /*!< Start of parser specific Host->MTX messages. */
13120+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0) /*!< Start of parser specific MTX->Host messages. */
13121+#define FWRK_MSGID_PADDING ( 0 )
13122+
13123+#define FWRK_GENMSG_SIZE_TYPE uint8_t
13124+#define FWRK_GENMSG_SIZE_MASK (0xFF)
13125+#define FWRK_GENMSG_SIZE_SHIFT (0)
13126+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
13127+#define FWRK_GENMSG_ID_TYPE uint8_t
13128+#define FWRK_GENMSG_ID_MASK (0xFF)
13129+#define FWRK_GENMSG_ID_SHIFT (0)
13130+#define FWRK_GENMSG_ID_OFFSET (0x0001)
13131+#define FWRK_PADMSG_SIZE (2)
13132+
13133+/*!
13134+******************************************************************************
13135+ This type defines the framework specified message ids
13136+******************************************************************************/
13137+enum
13138+{
13139+ /*! Sent by the DXVA driver on the host to the mtx firmware.
13140+ */
13141+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
13142+ VA_MSGID_RENDER,
13143+ VA_MSGID_DEBLOCK,
13144+ VA_MSGID_OOLD,
13145+
13146+ /* Test Messages */
13147+ VA_MSGID_TEST1,
13148+ VA_MSGID_TEST2,
13149+
13150+ /*! Sent by the mtx firmware to itself.
13151+ */
13152+ VA_MSGID_RENDER_MC_INTERRUPT,
13153+
13154+ /*! Sent by the DXVA firmware on the MTX to the host.
13155+ */
13156+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
13157+ VA_MSGID_CMD_COMPLETED_BATCH,
13158+ VA_MSGID_DEBLOCK_REQUIRED,
13159+ VA_MSGID_TEST_RESPONCE,
13160+ VA_MSGID_ACK,
13161+
13162+ VA_MSGID_CMD_FAILED,
13163+ VA_MSGID_CMD_UNSUPPORTED,
13164+ VA_MSGID_CMD_HW_PANIC,
13165+};
13166+
13167+/* MSVDX Firmware interface */
13168+
13169+#define FW_VA_RENDER_SIZE (32)
13170+
13171+// FW_VA_RENDER MSG_SIZE
13172+#define FW_VA_RENDER_MSG_SIZE_ALIGNMENT (1)
13173+#define FW_VA_RENDER_MSG_SIZE_TYPE uint8_t
13174+#define FW_VA_RENDER_MSG_SIZE_MASK (0xFF)
13175+#define FW_VA_RENDER_MSG_SIZE_LSBMASK (0xFF)
13176+#define FW_VA_RENDER_MSG_SIZE_OFFSET (0x0000)
13177+#define FW_VA_RENDER_MSG_SIZE_SHIFT (0)
13178+
13179+// FW_VA_RENDER ID
13180+#define FW_VA_RENDER_ID_ALIGNMENT (1)
13181+#define FW_VA_RENDER_ID_TYPE uint8_t
13182+#define FW_VA_RENDER_ID_MASK (0xFF)
13183+#define FW_VA_RENDER_ID_LSBMASK (0xFF)
13184+#define FW_VA_RENDER_ID_OFFSET (0x0001)
13185+#define FW_VA_RENDER_ID_SHIFT (0)
13186+
13187+// FW_VA_RENDER BUFFER_SIZE
13188+#define FW_VA_RENDER_BUFFER_SIZE_ALIGNMENT (2)
13189+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
13190+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
13191+#define FW_VA_RENDER_BUFFER_SIZE_LSBMASK (0x0FFF)
13192+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
13193+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
13194+
13195+// FW_VA_RENDER MMUPTD
13196+#define FW_VA_RENDER_MMUPTD_ALIGNMENT (4)
13197+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
13198+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
13199+#define FW_VA_RENDER_MMUPTD_LSBMASK (0xFFFFFFFF)
13200+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
13201+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
13202+
13203+// FW_VA_RENDER LLDMA_ADDRESS
13204+#define FW_VA_RENDER_LLDMA_ADDRESS_ALIGNMENT (4)
13205+#define FW_VA_RENDER_LLDMA_ADDRESS_TYPE uint32_t
13206+#define FW_VA_RENDER_LLDMA_ADDRESS_MASK (0xFFFFFFFF)
13207+#define FW_VA_RENDER_LLDMA_ADDRESS_LSBMASK (0xFFFFFFFF)
13208+#define FW_VA_RENDER_LLDMA_ADDRESS_OFFSET (0x0008)
13209+#define FW_VA_RENDER_LLDMA_ADDRESS_SHIFT (0)
13210+
13211+// FW_VA_RENDER CONTEXT
13212+#define FW_VA_RENDER_CONTEXT_ALIGNMENT (4)
13213+#define FW_VA_RENDER_CONTEXT_TYPE uint32_t
13214+#define FW_VA_RENDER_CONTEXT_MASK (0xFFFFFFFF)
13215+#define FW_VA_RENDER_CONTEXT_LSBMASK (0xFFFFFFFF)
13216+#define FW_VA_RENDER_CONTEXT_OFFSET (0x000C)
13217+#define FW_VA_RENDER_CONTEXT_SHIFT (0)
13218+
13219+// FW_VA_RENDER FENCE_VALUE
13220+#define FW_VA_RENDER_FENCE_VALUE_ALIGNMENT (4)
13221+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
13222+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
13223+#define FW_VA_RENDER_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
13224+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
13225+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
13226+
13227+// FW_VA_RENDER OPERATING_MODE
13228+#define FW_VA_RENDER_OPERATING_MODE_ALIGNMENT (4)
13229+#define FW_VA_RENDER_OPERATING_MODE_TYPE uint32_t
13230+#define FW_VA_RENDER_OPERATING_MODE_MASK (0xFFFFFFFF)
13231+#define FW_VA_RENDER_OPERATING_MODE_LSBMASK (0xFFFFFFFF)
13232+#define FW_VA_RENDER_OPERATING_MODE_OFFSET (0x0014)
13233+#define FW_VA_RENDER_OPERATING_MODE_SHIFT (0)
13234+
13235+// FW_VA_RENDER FIRST_MB_IN_SLICE
13236+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_ALIGNMENT (2)
13237+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_TYPE uint16_t
13238+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_MASK (0xFFFF)
13239+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_LSBMASK (0xFFFF)
13240+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_OFFSET (0x0018)
13241+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_SHIFT (0)
13242+
13243+// FW_VA_RENDER LAST_MB_IN_FRAME
13244+#define FW_VA_RENDER_LAST_MB_IN_FRAME_ALIGNMENT (2)
13245+#define FW_VA_RENDER_LAST_MB_IN_FRAME_TYPE uint16_t
13246+#define FW_VA_RENDER_LAST_MB_IN_FRAME_MASK (0xFFFF)
13247+#define FW_VA_RENDER_LAST_MB_IN_FRAME_LSBMASK (0xFFFF)
13248+#define FW_VA_RENDER_LAST_MB_IN_FRAME_OFFSET (0x001A)
13249+#define FW_VA_RENDER_LAST_MB_IN_FRAME_SHIFT (0)
13250+
13251+// FW_VA_RENDER FLAGS
13252+#define FW_VA_RENDER_FLAGS_ALIGNMENT (4)
13253+#define FW_VA_RENDER_FLAGS_TYPE uint32_t
13254+#define FW_VA_RENDER_FLAGS_MASK (0xFFFFFFFF)
13255+#define FW_VA_RENDER_FLAGS_LSBMASK (0xFFFFFFFF)
13256+#define FW_VA_RENDER_FLAGS_OFFSET (0x001C)
13257+#define FW_VA_RENDER_FLAGS_SHIFT (0)
13258+
13259+#define FW_VA_CMD_COMPLETED_SIZE (12)
13260+
13261+// FW_VA_CMD_COMPLETED MSG_SIZE
13262+#define FW_VA_CMD_COMPLETED_MSG_SIZE_ALIGNMENT (1)
13263+#define FW_VA_CMD_COMPLETED_MSG_SIZE_TYPE uint8_t
13264+#define FW_VA_CMD_COMPLETED_MSG_SIZE_MASK (0xFF)
13265+#define FW_VA_CMD_COMPLETED_MSG_SIZE_LSBMASK (0xFF)
13266+#define FW_VA_CMD_COMPLETED_MSG_SIZE_OFFSET (0x0000)
13267+#define FW_VA_CMD_COMPLETED_MSG_SIZE_SHIFT (0)
13268+
13269+// FW_VA_CMD_COMPLETED ID
13270+#define FW_VA_CMD_COMPLETED_ID_ALIGNMENT (1)
13271+#define FW_VA_CMD_COMPLETED_ID_TYPE uint8_t
13272+#define FW_VA_CMD_COMPLETED_ID_MASK (0xFF)
13273+#define FW_VA_CMD_COMPLETED_ID_LSBMASK (0xFF)
13274+#define FW_VA_CMD_COMPLETED_ID_OFFSET (0x0001)
13275+#define FW_VA_CMD_COMPLETED_ID_SHIFT (0)
13276+
13277+// FW_VA_CMD_COMPLETED FENCE_VALUE
13278+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_ALIGNMENT (4)
13279+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
13280+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
13281+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
13282+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
13283+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
13284+
13285+// FW_VA_CMD_COMPLETED FLAGS
13286+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
13287+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
13288+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
13289+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
13290+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
13291+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
13292+
13293+#define FW_VA_CMD_FAILED_SIZE (12)
13294+
13295+// FW_VA_CMD_FAILED MSG_SIZE
13296+#define FW_VA_CMD_FAILED_MSG_SIZE_ALIGNMENT (1)
13297+#define FW_VA_CMD_FAILED_MSG_SIZE_TYPE uint8_t
13298+#define FW_VA_CMD_FAILED_MSG_SIZE_MASK (0xFF)
13299+#define FW_VA_CMD_FAILED_MSG_SIZE_LSBMASK (0xFF)
13300+#define FW_VA_CMD_FAILED_MSG_SIZE_OFFSET (0x0000)
13301+#define FW_VA_CMD_FAILED_MSG_SIZE_SHIFT (0)
13302+
13303+// FW_VA_CMD_FAILED ID
13304+#define FW_VA_CMD_FAILED_ID_ALIGNMENT (1)
13305+#define FW_VA_CMD_FAILED_ID_TYPE uint8_t
13306+#define FW_VA_CMD_FAILED_ID_MASK (0xFF)
13307+#define FW_VA_CMD_FAILED_ID_LSBMASK (0xFF)
13308+#define FW_VA_CMD_FAILED_ID_OFFSET (0x0001)
13309+#define FW_VA_CMD_FAILED_ID_SHIFT (0)
13310+
13311+// FW_VA_CMD_FAILED FLAGS
13312+#define FW_VA_CMD_FAILED_FLAGS_ALIGNMENT (2)
13313+#define FW_VA_CMD_FAILED_FLAGS_TYPE uint16_t
13314+#define FW_VA_CMD_FAILED_FLAGS_MASK (0xFFFF)
13315+#define FW_VA_CMD_FAILED_FLAGS_LSBMASK (0xFFFF)
13316+#define FW_VA_CMD_FAILED_FLAGS_OFFSET (0x0002)
13317+#define FW_VA_CMD_FAILED_FLAGS_SHIFT (0)
13318+
13319+// FW_VA_CMD_FAILED FENCE_VALUE
13320+#define FW_VA_CMD_FAILED_FENCE_VALUE_ALIGNMENT (4)
13321+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
13322+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
13323+#define FW_VA_CMD_FAILED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
13324+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
13325+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
13326+
13327+// FW_VA_CMD_FAILED IRQSTATUS
13328+#define FW_VA_CMD_FAILED_IRQSTATUS_ALIGNMENT (4)
13329+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
13330+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
13331+#define FW_VA_CMD_FAILED_IRQSTATUS_LSBMASK (0xFFFFFFFF)
13332+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
13333+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
13334+
13335+#define FW_VA_DEBLOCK_REQUIRED_SIZE (8)
13336+
13337+// FW_VA_DEBLOCK_REQUIRED MSG_SIZE
13338+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_ALIGNMENT (1)
13339+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_TYPE uint8_t
13340+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_MASK (0xFF)
13341+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_LSBMASK (0xFF)
13342+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_OFFSET (0x0000)
13343+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_SHIFT (0)
13344+
13345+// FW_VA_DEBLOCK_REQUIRED ID
13346+#define FW_VA_DEBLOCK_REQUIRED_ID_ALIGNMENT (1)
13347+#define FW_VA_DEBLOCK_REQUIRED_ID_TYPE uint8_t
13348+#define FW_VA_DEBLOCK_REQUIRED_ID_MASK (0xFF)
13349+#define FW_VA_DEBLOCK_REQUIRED_ID_LSBMASK (0xFF)
13350+#define FW_VA_DEBLOCK_REQUIRED_ID_OFFSET (0x0001)
13351+#define FW_VA_DEBLOCK_REQUIRED_ID_SHIFT (0)
13352+
13353+// FW_VA_DEBLOCK_REQUIRED CONTEXT
13354+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_ALIGNMENT (4)
13355+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
13356+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
13357+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_LSBMASK (0xFFFFFFFF)
13358+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
13359+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
13360+
13361+#define FW_VA_HW_PANIC_SIZE (12)
13362+
13363+// FW_VA_HW_PANIC FLAGS
13364+#define FW_VA_HW_PANIC_FLAGS_ALIGNMENT (2)
13365+#define FW_VA_HW_PANIC_FLAGS_TYPE uint16_t
13366+#define FW_VA_HW_PANIC_FLAGS_MASK (0xFFFF)
13367+#define FW_VA_HW_PANIC_FLAGS_LSBMASK (0xFFFF)
13368+#define FW_VA_HW_PANIC_FLAGS_OFFSET (0x0002)
13369+#define FW_VA_HW_PANIC_FLAGS_SHIFT (0)
13370+
13371+// FW_VA_HW_PANIC MSG_SIZE
13372+#define FW_VA_HW_PANIC_MSG_SIZE_ALIGNMENT (1)
13373+#define FW_VA_HW_PANIC_MSG_SIZE_TYPE uint8_t
13374+#define FW_VA_HW_PANIC_MSG_SIZE_MASK (0xFF)
13375+#define FW_VA_HW_PANIC_MSG_SIZE_LSBMASK (0xFF)
13376+#define FW_VA_HW_PANIC_MSG_SIZE_OFFSET (0x0000)
13377+#define FW_VA_HW_PANIC_MSG_SIZE_SHIFT (0)
13378+
13379+// FW_VA_HW_PANIC ID
13380+#define FW_VA_HW_PANIC_ID_ALIGNMENT (1)
13381+#define FW_VA_HW_PANIC_ID_TYPE uint8_t
13382+#define FW_VA_HW_PANIC_ID_MASK (0xFF)
13383+#define FW_VA_HW_PANIC_ID_LSBMASK (0xFF)
13384+#define FW_VA_HW_PANIC_ID_OFFSET (0x0001)
13385+#define FW_VA_HW_PANIC_ID_SHIFT (0)
13386+
13387+// FW_VA_HW_PANIC FENCE_VALUE
13388+#define FW_VA_HW_PANIC_FENCE_VALUE_ALIGNMENT (4)
13389+#define FW_VA_HW_PANIC_FENCE_VALUE_TYPE uint32_t
13390+#define FW_VA_HW_PANIC_FENCE_VALUE_MASK (0xFFFFFFFF)
13391+#define FW_VA_HW_PANIC_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
13392+#define FW_VA_HW_PANIC_FENCE_VALUE_OFFSET (0x0004)
13393+#define FW_VA_HW_PANIC_FENCE_VALUE_SHIFT (0)
13394+
13395+// FW_VA_HW_PANIC IRQSTATUS
13396+#define FW_VA_HW_PANIC_IRQSTATUS_ALIGNMENT (4)
13397+#define FW_VA_HW_PANIC_IRQSTATUS_TYPE uint32_t
13398+#define FW_VA_HW_PANIC_IRQSTATUS_MASK (0xFFFFFFFF)
13399+#define FW_VA_HW_PANIC_IRQSTATUS_LSBMASK (0xFFFFFFFF)
13400+#define FW_VA_HW_PANIC_IRQSTATUS_OFFSET (0x0008)
13401+#define FW_VA_HW_PANIC_IRQSTATUS_SHIFT (0)
13402+
13403+#endif
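
Editorial note: the *_OFFSET/_MASK/_SHIFT triplets above describe where each field sits inside a firmware message. As a minimal illustration (a sketch, not code from this patch; the helper name is hypothetical), a word-aligned 32-bit field such as the completed-command fence value could be extracted like this:

/*
 * Hypothetical helper, for illustration only: messages are arrays of
 * 32-bit words, and OFFSET is a byte offset. For a word-aligned 32-bit
 * field this reduces to indexing the word array, masking, and shifting.
 */
static inline uint32_t fw_msg_read_field(const uint32_t *msg,
					 unsigned int offset,
					 uint32_t mask, unsigned int shift)
{
	return (msg[offset >> 2] & mask) >> shift;
}

/* Example: the fence value carried in a FW_VA_CMD_COMPLETED message */
uint32_t fence = fw_msg_read_field(msg,
				   FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET,
				   FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK,
				   FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT);
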
13404Index: linux-2.6.28/drivers/gpu/drm/psb/psb_msvdxinit.c
13405===================================================================
13406--- /dev/null 1970-01-01 00:00:00.000000000 +0000
13407+++ linux-2.6.28/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-02-25 15:37:02.000000000 +0000
13408@@ -0,0 +1,625 @@
13409+/**
13410+ * file psb_msvdxinit.c
13411+ * MSVDX initialization and MTX firmware upload
13412+ *
13413+ */
13414+
13415+/**************************************************************************
13416+ *
13417+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
13418+ * Copyright (c) Imagination Technologies Limited, UK
13419+ * All Rights Reserved.
13420+ *
13421+ * Permission is hereby granted, free of charge, to any person obtaining a
13422+ * copy of this software and associated documentation files (the
13423+ * "Software"), to deal in the Software without restriction, including
13424+ * without limitation the rights to use, copy, modify, merge, publish,
13425+ * distribute, sub license, and/or sell copies of the Software, and to
13426+ * permit persons to whom the Software is furnished to do so, subject to
13427+ * the following conditions:
13428+ *
13429+ * The above copyright notice and this permission notice (including the
13430+ * next paragraph) shall be included in all copies or substantial portions
13431+ * of the Software.
13432+ *
13433+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13434+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13435+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
13436+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
13437+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
13438+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
13439+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
13440+ *
13441+ **************************************************************************/
13442+
13443+#include "drmP.h"
13444+#include "drm.h"
13445+#include "psb_drv.h"
13446+#include "psb_msvdx.h"
13447+#include <linux/firmware.h>
13448+
13449+/*MSVDX FW header*/
13450+struct msvdx_fw
13451+{
13452+ uint32_t ver;
13453+ uint32_t text_size;
13454+ uint32_t data_size;
13455+ uint32_t data_location;
13456+};
13457+
13458+int
13459+psb_wait_for_register (struct drm_psb_private *dev_priv,
13460+ uint32_t ui32Offset,
13461+ uint32_t ui32Value, uint32_t ui32Enable)
13462+{
13463+ uint32_t ui32Temp;
13464+ uint32_t ui32PollCount = 1000;
13465+ while (ui32PollCount)
13466+ {
13467+ ui32Temp = PSB_RMSVDX32 (ui32Offset);
13468+ if (ui32Value == (ui32Temp & ui32Enable)) /* All the bits are reset */
13469+ return 0; /* So exit */
13470+
13471+ /* Wait a bit */
13472+ DRM_UDELAY (100);
13473+ ui32PollCount--;
13474+ }
13475+ PSB_DEBUG_GENERAL
13476+ ("MSVDX: Timeout while waiting for register %08x: expecting %08x (mask %08x), got %08x\n",
13477+ ui32Offset, ui32Value, ui32Enable, ui32Temp);
13478+ return 1;
13479+}
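
/* Editorial note (not part of the patch): with 1000 polls separated by
 * DRM_UDELAY(100), the effective timeout of psb_wait_for_register() is
 * roughly 1000 * 100 us = 100 ms before the failure path is taken. */
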
13480+
13481+int
13482+psb_poll_mtx_irq (struct drm_psb_private *dev_priv)
13483+{
13484+ int ret = 0;
13485+ uint32_t MtxInt = 0;
13486+ REGIO_WRITE_FIELD_LITE (MtxInt, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
13487+
13488+ ret = psb_wait_for_register (dev_priv, MSVDX_INTERRUPT_STATUS, MtxInt, /* Required value */
13489+ MtxInt /* Enabled bits */ );
13490+ if (ret)
13491+ {
13492+ PSB_DEBUG_GENERAL
13493+ ("MSVDX: Error Mtx did not return int within a resonable time\n");
13494+
13495+ return ret;
13496+ }
13497+
13498+ PSB_DEBUG_GENERAL ("MSVDX: Got MTX Int\n");
13499+
13500+ /* Got it so clear the bit */
13501+ PSB_WMSVDX32 (MtxInt, MSVDX_INTERRUPT_CLEAR);
13502+
13503+ return ret;
13504+}
13505+
13506+void
13507+psb_write_mtx_core_reg (struct drm_psb_private *dev_priv,
13508+ const uint32_t ui32CoreRegister,
13509+ const uint32_t ui32Val)
13510+{
13511+ uint32_t ui32Reg = 0;
13512+
13513+ /* Put data in MTX_RW_DATA */
13514+ PSB_WMSVDX32 (ui32Val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
13515+
13516+ /* DREADY is set to 0 and request a write */
13517+ ui32Reg = ui32CoreRegister;
13518+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
13519+ MTX_RNW, 0);
13520+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
13521+ MTX_DREADY, 0);
13522+ PSB_WMSVDX32 (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
13523+
13524+ psb_wait_for_register (dev_priv, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK, /* Required Value */
13525+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
13526+}
13527+
13528+void
13529+psb_upload_fw (struct drm_psb_private *dev_priv, const uint32_t ui32DataMem,
13530+ uint32_t ui32RamBankSize, uint32_t ui32Address,
13531+ const unsigned int uiWords, const uint32_t * const pui32Data)
13532+{
13533+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
13534+ (uint32_t) ~ 0;
13535+ uint32_t ui32AccessControl;
13536+
13537+ /* Save the access control register... */
13538+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
13539+
13540+ /* Wait for MCMSTAT to become idle (value 1) */
13541+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
13542+ 0xffffffff /* Enables */ );
13543+
13544+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
13545+ {
13546+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
13547+
13548+ if (ui32RamId != ui32CurrBank)
13549+ {
13550+ ui32Addr = ui32Address >> 2;
13551+
13552+ ui32Ctrl = 0;
13553+
13554+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
13555+ MSVDX_MTX_RAM_ACCESS_CONTROL,
13556+ MTX_MCMID, ui32RamId);
13557+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
13558+ MSVDX_MTX_RAM_ACCESS_CONTROL,
13559+ MTX_MCM_ADDR, ui32Addr);
13560+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
13561+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
13562+
13563+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
13564+
13565+ ui32CurrBank = ui32RamId;
13566+ }
13567+ ui32Address += 4;
13568+
13569+ PSB_WMSVDX32 (pui32Data[ui32Loop], MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
13570+
13571+ /* Wait for MCMSTAT to become idle (value 1) */
13572+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
13573+ 0xffffffff /* Enables */ );
13574+ }
13575+ PSB_DEBUG_GENERAL ("MSVDX: Upload done\n");
13576+
13577+ /* Restore the access control register... */
13578+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
13579+}
13580+
13581+static int
13582+psb_verify_fw (struct drm_psb_private *dev_priv,
13583+ const uint32_t ui32RamBankSize,
13584+ const uint32_t ui32DataMem, uint32_t ui32Address,
13585+ const uint32_t uiWords, const uint32_t * const pui32Data)
13586+{
13587+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
13588+ (uint32_t) ~ 0;
13589+ uint32_t ui32AccessControl;
13590+ int ret = 0;
13591+
13592+ /* Save the access control register... */
13593+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
13594+
13595+ /* Wait for MCMSTAT to become idle (value 1) */
13596+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
13597+ 0xffffffff /* Enables */ );
13598+
13599+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
13600+ {
13601+ uint32_t ui32ReadBackVal;
13602+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
13603+
13604+ if (ui32RamId != ui32CurrBank)
13605+ {
13606+ ui32Addr = ui32Address >> 2;
13607+ ui32Ctrl = 0;
13608+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
13609+ MSVDX_MTX_RAM_ACCESS_CONTROL,
13610+ MTX_MCMID, ui32RamId);
13611+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
13612+ MSVDX_MTX_RAM_ACCESS_CONTROL,
13613+ MTX_MCM_ADDR, ui32Addr);
13614+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
13615+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
13616+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
13617+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMR, 1);
13618+
13619+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
13620+
13621+ ui32CurrBank = ui32RamId;
13622+ }
13623+ ui32Address += 4;
13624+
13625+ /* Wait for MCMSTAT to become idle (value 1) */
13626+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
13627+ 0xffffffff /* Enables */ );
13628+
13629+ ui32ReadBackVal = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
13630+ if (pui32Data[ui32Loop] != ui32ReadBackVal)
13631+ {
13632+ DRM_ERROR
13633+ ("psb: Firmware validation fails at index=%08x\n", ui32Loop);
13634+ ret = 1;
13635+ break;
13636+ }
13637+ }
13638+
13639+ /* Restore the access control register... */
13640+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
13641+
13642+ return ret;
13643+}
13644+
13645+static uint32_t *
13646+msvdx_get_fw (struct drm_device *dev,
13647+ const struct firmware **raw, uint8_t * name)
13648+{
13649+ int rc;
13650+ int *ptr = NULL;
13651+
13652+ rc = request_firmware (raw, name, &dev->pdev->dev);
13653+ if (rc < 0)
13654+ {
13655+ DRM_ERROR ("MSVDX: %s request_firmware failed: Reason %d\n", name, rc);
13656+ return NULL;
13657+ }
13658+
13659+ if ((*raw)->size < sizeof (struct msvdx_fw))
13660+ {
13661+ PSB_DEBUG_GENERAL ("MSVDX: %s is not the correct size (%zd)\n",
13662+ name, (*raw)->size);
13663+ return NULL;
13664+ }
13665+
13666+ ptr = (int *) ((*raw))->data;
13667+
13668+ if (!ptr)
13669+ {
13670+ PSB_DEBUG_GENERAL ("MSVDX: Failed to load %s\n", name);
13671+ return NULL;
13672+ }
13673+ /*another sanity check... */
13674+ if ((*raw)->size !=
13675+ (sizeof (struct msvdx_fw) +
13676+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
13677+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->data_size))
13678+ {
13679+ PSB_DEBUG_GENERAL ("MSVDX: %s is not the correct size (%zd)\n",
13680+ name, (*raw)->size);
13681+ return NULL;
13682+ }
13683+ return ptr;
13684+}
13685+
13686+static int
13687+psb_setup_fw (struct drm_device *dev)
13688+{
13689+ struct drm_psb_private *dev_priv = dev->dev_private;
13690+ int ret = 0;
13691+
13692+ uint32_t ram_bank_size;
13693+ struct msvdx_fw *fw;
13694+ uint32_t *fw_ptr = NULL;
13695+ uint32_t *text_ptr = NULL;
13696+ uint32_t *data_ptr = NULL;
13697+ const struct firmware *raw = NULL;
13698+ /* todo: Assert the clock is on; if not, turn it on to upload the code */
13699+
13700+ PSB_DEBUG_GENERAL ("MSVDX: psb_setup_fw\n");
13701+
13702+ /* Reset MTX */
13703+ PSB_WMSVDX32 (MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK, MSVDX_MTX_SOFT_RESET);
13704+
13705+ /* Initialise the communication control area to 0 */
13706+ if(dev_priv->psb_rev_id >= POULSBO_D1)
13707+ {
13708+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1 or later revision.\n");
13709+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1, MSVDX_COMMS_OFFSET_FLAGS);
13710+ }
13711+ else
13712+ {
13713+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0 or earlier revision.\n");
13714+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0, MSVDX_COMMS_OFFSET_FLAGS);
13715+ }
13716+
13717+ PSB_WMSVDX32 (0, MSVDX_COMMS_MSG_COUNTER);
13718+ PSB_WMSVDX32 (0, MSVDX_COMMS_SIGNATURE);
13719+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_RD_INDEX);
13720+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
13721+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_RD_INDEX);
13722+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
13723+ PSB_WMSVDX32 (0, MSVDX_COMMS_FW_STATUS);
13724+
13725+ /* read register bank size */
13726+ {
13727+ uint32_t ui32BankSize, ui32Reg;
13728+ ui32Reg = PSB_RMSVDX32 (MSVDX_MTX_RAM_BANK);
13729+ ui32BankSize =
13730+ REGIO_READ_FIELD (ui32Reg, MSVDX_MTX_RAM_BANK, CR_MTX_RAM_BANK_SIZE);
13731+ ram_bank_size = (uint32_t) (1 << (ui32BankSize + 2));
13732+ }
13733+
13734+ PSB_DEBUG_GENERAL ("MSVDX: RAM bank size = %d bytes\n", ram_bank_size);
13735+
13736+ fw_ptr = msvdx_get_fw (dev, &raw, "msvdx_fw.bin");
13737+
13738+ if (!fw_ptr)
13739+ {
13740+ DRM_ERROR ("psb: No valid msvdx_fw.bin firmware found.\n");
13741+ ret = 1;
13742+ goto out;
13743+ }
13744+
13745+ fw = (struct msvdx_fw *) fw_ptr;
13746+ if (fw->ver != 0x02)
13747+ {
13748+ DRM_ERROR
13749+ ("psb: msvdx_fw.bin firmware version mismatch, got version=%02x expected version=%02x\n",
13750+ fw->ver, 0x02);
13751+ ret = 1;
13752+ goto out;
13753+ }
13754+
13755+ text_ptr = (uint32_t *) ((uint8_t *) fw_ptr + sizeof (struct msvdx_fw));
13756+ data_ptr = text_ptr + fw->text_size;
13757+
13758+ PSB_DEBUG_GENERAL ("MSVDX: Retrieved pointers for firmware\n");
13759+ PSB_DEBUG_GENERAL ("MSVDX: text_size: %d\n", fw->text_size);
13760+ PSB_DEBUG_GENERAL ("MSVDX: data_size: %d\n", fw->data_size);
13761+ PSB_DEBUG_GENERAL ("MSVDX: data_location: 0x%x\n", fw->data_location);
13762+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of text: 0x%x\n", *text_ptr);
13763+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of data: 0x%x\n", *data_ptr);
13764+
13765+ PSB_DEBUG_GENERAL ("MSVDX: Uploading firmware\n");
13766+ psb_upload_fw (dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
13767+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr);
13768+ psb_upload_fw (dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
13769+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr);
13770+
13771+ /* todo: Verify code upload, possibly only in debug builds */
13772+ if (psb_verify_fw
13773+ (dev_priv, ram_bank_size, MTX_CORE_CODE_MEM,
13774+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr))
13775+ {
13776+ /* Firmware code upload failed */
13777+ ret = 1;
13778+ goto out;
13779+ }
13780+ if (psb_verify_fw
13781+ (dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
13782+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr))
13783+ {
13784+ /* Firmware data upload failed */
13785+ ret = 1;
13786+ goto out;
13787+ }
13788+
13789+ /* -- Set starting PC address */
13790+ psb_write_mtx_core_reg (dev_priv, MTX_PC, PC_START_ADDRESS);
13791+
13792+ /* -- Turn on the thread */
13793+ PSB_WMSVDX32 (MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
13794+
13795+ /* Wait for the signature value to be written back */
13796+ ret = psb_wait_for_register (dev_priv, MSVDX_COMMS_SIGNATURE, MSVDX_COMMS_SIGNATURE_VALUE, /* Required value */
13797+ 0xffffffff /* Enabled bits */ );
13798+ if (ret)
13799+ {
13800+ DRM_ERROR ("psb: MSVDX firmware fails to initialize.\n");
13801+ goto out;
13802+ }
13803+
13804+ PSB_DEBUG_GENERAL ("MSVDX: MTX Initial indications OK\n");
13805+ PSB_DEBUG_GENERAL ("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
13806+ MSVDX_COMMS_AREA_ADDR);
13807+out:
13808+ if (raw)
13809+ {
13810+ PSB_DEBUG_GENERAL ("MSVDX: Releasing firmware resources...\n");
13811+ release_firmware (raw);
13812+ }
13813+ return ret;
13814+}
13815+
13816+static void
13817+psb_free_ccb (struct drm_buffer_object **ccb)
13818+{
13819+ drm_bo_usage_deref_unlocked (ccb);
13820+ *ccb = NULL;
13821+}
13822+
13823+/*******************************************************************************
13824+
13825+ @Function psb_msvdx_reset
13826+
13827+ @Description
13828+
13829+ Reset chip and disable interrupts.
13830+
13831+ @Input dev_priv - device private structure
13832+
13833+ @Return 0 - Success
13834+ 1 - Failure
13835+
13836+******************************************************************************/
13837+int
13838+psb_msvdx_reset (struct drm_psb_private *dev_priv)
13839+{
13840+ int ret = 0;
13841+
13842+ /* Issue software reset */
13843+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
13844+
13845+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0, /* Required value */
13846+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK
13847+ /* Enabled bits */ );
13848+
13849+ if (!ret)
13850+ {
13851+ /* Clear interrupt enabled flag */
13852+ PSB_WMSVDX32 (0, MSVDX_HOST_INTERRUPT_ENABLE);
13853+
13854+ /* Clear any pending interrupt flags */
13855+ PSB_WMSVDX32 (0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
13856+ }
13857+
13858+ mutex_destroy (&dev_priv->msvdx_mutex);
13859+
13860+ return ret;
13861+}
13862+
13863+static int
13864+psb_allocate_ccb (struct drm_device *dev,
13865+ struct drm_buffer_object **ccb,
13866+ uint32_t * base_addr, int size)
13867+{
13868+ int ret;
13869+ struct drm_bo_kmap_obj tmp_kmap;
13870+ int is_iomem;
13871+
13872+ ret = drm_buffer_object_create (dev, size,
13873+ drm_bo_type_kernel,
13874+ DRM_BO_FLAG_READ |
13875+ DRM_PSB_FLAG_MEM_KERNEL |
13876+ DRM_BO_FLAG_NO_EVICT,
13877+ DRM_BO_HINT_DONT_FENCE, 0, 0, ccb);
13878+ if (ret)
13879+ {
13880+ PSB_DEBUG_GENERAL ("Failed to allocate CCB.\n");
13881+ *ccb = NULL;
13882+ return 1;
13883+ }
13884+
13885+ ret = drm_bo_kmap (*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
13886+ if (ret)
13887+ {
13888+ PSB_DEBUG_GENERAL ("drm_bo_kmap failed ret: %d\n", ret);
13889+ drm_bo_usage_deref_unlocked (ccb);
13890+ *ccb = NULL;
13891+ return 1;
13892+ }
13893+
13894+ memset (drm_bmo_virtual (&tmp_kmap, &is_iomem), 0, size);
13895+ drm_bo_kunmap (&tmp_kmap);
13896+
13897+ *base_addr = (*ccb)->offset;
13898+ return 0;
13899+}
13900+
13901+int
13902+psb_msvdx_init (struct drm_device *dev)
13903+{
13904+ struct drm_psb_private *dev_priv = dev->dev_private;
13905+ uint32_t ui32Cmd;
13906+ int ret;
13907+
13908+ PSB_DEBUG_GENERAL ("MSVDX: psb_msvdx_init\n");
13909+
13910+ /*Initialize command msvdx queueing */
13911+ INIT_LIST_HEAD (&dev_priv->msvdx_queue);
13912+ mutex_init (&dev_priv->msvdx_mutex);
13913+ spin_lock_init (&dev_priv->msvdx_lock);
13914+ dev_priv->msvdx_busy = 0;
13915+
13916+ /*figure out the stepping*/
13917+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &dev_priv->psb_rev_id );
13918+
13919+ /* Enable Clocks */
13920+ PSB_DEBUG_GENERAL ("Enabling clocks\n");
13921+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
13922+
13923+ /* Enable MMU by removing all bypass bits */
13924+ PSB_WMSVDX32 (0, MSVDX_MMU_CONTROL0);
13925+
13926+ PSB_DEBUG_GENERAL ("MSVDX: Setting up RENDEC\n");
13927+ /* Allocate device virtual memory as required by rendec.... */
13928+ if (!dev_priv->ccb0)
13929+ {
13930+ ret =
13931+ psb_allocate_ccb (dev, &dev_priv->ccb0,
13932+ &dev_priv->base_addr0, RENDEC_A_SIZE);
13933+ if (ret)
13934+ goto err_exit;
13935+ }
13936+
13937+ if (!dev_priv->ccb1)
13938+ {
13939+ ret =
13940+ psb_allocate_ccb (dev, &dev_priv->ccb1,
13941+ &dev_priv->base_addr1, RENDEC_B_SIZE);
13942+ if (ret)
13943+ goto err_exit;
13944+ }
13945+
13946+ PSB_DEBUG_GENERAL ("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
13947+ dev_priv->base_addr0, dev_priv->base_addr1);
13948+
13949+ PSB_WMSVDX32 (dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
13950+ PSB_WMSVDX32 (dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
13951+
13952+ ui32Cmd = 0;
13953+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
13954+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
13955+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
13956+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
13957+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE);
13958+
13959+ ui32Cmd = 0;
13960+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
13961+ RENDEC_DECODE_START_SIZE, 0);
13962+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_W, 1);
13963+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_R, 1);
13964+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
13965+ RENDEC_EXTERNAL_MEMORY, 1);
13966+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL1);
13967+
13968+ ui32Cmd = 0x00101010;
13969+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT0);
13970+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT1);
13971+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT2);
13972+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT3);
13973+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT4);
13974+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT5);
13975+
13976+ ui32Cmd = 0;
13977+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, 1);
13978+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL0);
13979+
13980+ ret = psb_setup_fw (dev);
13981+ if (ret)
13982+ goto err_exit;
13983+
13984+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
13985+
13986+ return 0;
13987+
13988+err_exit:
13989+ if (dev_priv->ccb0)
13990+ psb_free_ccb (&dev_priv->ccb0);
13991+ if (dev_priv->ccb1)
13992+ psb_free_ccb (&dev_priv->ccb1);
13993+
13994+ return 1;
13995+}
13996+
13997+int
13998+psb_msvdx_uninit (struct drm_device *dev)
13999+{
14000+ struct drm_psb_private *dev_priv = dev->dev_private;
14001+
14002+ /*Reset MSVDX chip */
14003+ psb_msvdx_reset (dev_priv);
14004+
14005+// PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
14006+ printk("set the msvdx clock to 0 in %s\n", __FUNCTION__);
14007+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
14008+
14009+ /*Clean up resources...*/
14010+ if (dev_priv->ccb0)
14011+ psb_free_ccb (&dev_priv->ccb0);
14012+ if (dev_priv->ccb1)
14013+ psb_free_ccb (&dev_priv->ccb1);
14014+
14015+ return 0;
14016+}
14017+
14018+int psb_hw_info_ioctl(struct drm_device *dev, void *data,
14019+ struct drm_file *file_priv)
14020+{
14021+ struct drm_psb_private *dev_priv = dev->dev_private;
14022+ struct drm_psb_hw_info *hw_info = data;
14023+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
14024+
14025+ hw_info->rev_id = dev_priv->psb_rev_id;
14026+
14027+ /*read the fuse info to determine the caps*/
14028+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
14029+ pci_read_config_dword(pci_root, 0xD4, &hw_info->caps);
14030+
14031+ PSB_DEBUG_GENERAL("MSVDX: PSB caps: 0x%x\n", hw_info->caps);
14032+ return 0;
14033+}
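
Editorial note: for reference, the sanity check in msvdx_get_fw() above implies the following msvdx_fw.bin image layout (a sketch inferred from that check, not part of the patch):

/*
 * Layout of msvdx_fw.bin as implied by the size check in msvdx_get_fw():
 *
 *   offset 0                      struct msvdx_fw header
 *                                 (ver, text_size, data_size, data_location)
 *   sizeof(struct msvdx_fw)       text_size 32-bit words -> MTX_CORE_CODE_MEM
 *   + 4 * text_size               data_size 32-bit words -> MTX_CORE_DATA_MEM
 */
size_t expected = sizeof(struct msvdx_fw)
		+ sizeof(uint32_t) * fw->text_size
		+ sizeof(uint32_t) * fw->data_size;	/* must equal raw->size */
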
14034Index: linux-2.6.28/drivers/gpu/drm/psb/psb_reg.h
14035===================================================================
14036--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14037+++ linux-2.6.28/drivers/gpu/drm/psb/psb_reg.h 2009-02-25 15:37:02.000000000 +0000
14038@@ -0,0 +1,562 @@
14039+/**************************************************************************
14040+ *
14041+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
14042+ * Copyright (c) 2007, Intel Corporation.
14043+ * All Rights Reserved.
14044+ *
14045+ * This program is free software; you can redistribute it and/or modify it
14046+ * under the terms and conditions of the GNU General Public License,
14047+ * version 2, as published by the Free Software Foundation.
14048+ *
14049+ * This program is distributed in the hope it will be useful, but WITHOUT
14050+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14051+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14052+ * more details.
14053+ *
14054+ * You should have received a copy of the GNU General Public License along with
14055+ * this program; if not, write to the Free Software Foundation, Inc.,
14056+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
14057+ *
14058+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
14059+ * develop this driver.
14060+ *
14061+ **************************************************************************/
14062+/*
14063+ */
14064+#ifndef _PSB_REG_H_
14065+#define _PSB_REG_H_
14066+
14067+#define PSB_CR_CLKGATECTL 0x0000
14068+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
14069+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
14070+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
14071+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
14072+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
14073+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
14074+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
14075+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
14076+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
14077+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
14078+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
14079+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
14080+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
14081+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
14082+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
14083+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
14084+
14085+#define PSB_CR_CORE_ID 0x0010
14086+#define _PSB_CC_ID_ID_SHIFT (16)
14087+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
14088+#define _PSB_CC_ID_CONFIG_SHIFT (0)
14089+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
14090+
14091+#define PSB_CR_CORE_REVISION 0x0014
14092+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
14093+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
14094+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
14095+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
14096+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
14097+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
14098+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
14099+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
14100+
14101+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
14102+
14103+#define PSB_CR_SOFT_RESET 0x0080
14104+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
14105+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
14106+#define _PSB_CS_RESET_USE_RESET (1 << 4)
14107+#define _PSB_CS_RESET_TA_RESET (1 << 3)
14108+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
14109+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
14110+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
14111+
14112+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
14113+
14114+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
14115+
14116+#define PSB_CR_EVENT_STATUS2 0x0118
14117+
14118+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
14119+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
14120+
14121+#define PSB_CR_EVENT_STATUS 0x012C
14122+
14123+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
14124+
14125+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
14126+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
14127+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
14128+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
14129+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
14130+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
14131+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
14132+#define _PSB_CE_SW_EVENT (1 << 14)
14133+#define _PSB_CE_TA_FINISHED (1 << 13)
14134+#define _PSB_CE_TA_TERMINATE (1 << 12)
14135+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
14136+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
14137+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
14138+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
14139+
14140+
14141+#define PSB_USE_OFFSET_MASK 0x0007FFFF
14142+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
14143+#define PSB_CR_USE_CODE_BASE0 0x0A0C
14144+#define PSB_CR_USE_CODE_BASE1 0x0A10
14145+#define PSB_CR_USE_CODE_BASE2 0x0A14
14146+#define PSB_CR_USE_CODE_BASE3 0x0A18
14147+#define PSB_CR_USE_CODE_BASE4 0x0A1C
14148+#define PSB_CR_USE_CODE_BASE5 0x0A20
14149+#define PSB_CR_USE_CODE_BASE6 0x0A24
14150+#define PSB_CR_USE_CODE_BASE7 0x0A28
14151+#define PSB_CR_USE_CODE_BASE8 0x0A2C
14152+#define PSB_CR_USE_CODE_BASE9 0x0A30
14153+#define PSB_CR_USE_CODE_BASE10 0x0A34
14154+#define PSB_CR_USE_CODE_BASE11 0x0A38
14155+#define PSB_CR_USE_CODE_BASE12 0x0A3C
14156+#define PSB_CR_USE_CODE_BASE13 0x0A40
14157+#define PSB_CR_USE_CODE_BASE14 0x0A44
14158+#define PSB_CR_USE_CODE_BASE15 0x0A48
14159+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
14160+#define _PSB_CUC_BASE_DM_SHIFT (25)
14161+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
14162+#define _PSB_CUC_BASE_ADDR_SHIFT (0) // 1024-bit aligned address?
14163+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
14164+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
14165+#define _PSB_CUC_DM_VERTEX (0)
14166+#define _PSB_CUC_DM_PIXEL (1)
14167+#define _PSB_CUC_DM_RESERVED (2)
14168+#define _PSB_CUC_DM_EDM (3)
14169+
14170+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
14171+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) // 1MB aligned address
14172+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
14173+
14174+#define PSB_CR_EVENT_KICKER 0x0AC4
14175+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) // 128-bit aligned address
14176+
14177+#define PSB_CR_EVENT_KICK 0x0AC8
14178+#define _PSB_CE_KICK_NOW (1 << 0)
14179+
14180+
14181+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
14182+
14183+#define PSB_CR_BIF_CTRL 0x0C00
14184+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
14185+#define _PSB_CB_CTRL_INVALDC (1 << 3)
14186+#define _PSB_CB_CTRL_FLUSH (1 << 2)
14187+
14188+#define PSB_CR_BIF_INT_STAT 0x0C04
14189+
14190+#define PSB_CR_BIF_FAULT 0x0C08
14191+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
14192+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
14193+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
14194+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
14195+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
14196+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
14197+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
14198+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
14199+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
14200+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
14201+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
14202+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
14203+
14204+#define PSB_CR_BIF_BANK0 0x0C78
14205+
14206+#define PSB_CR_BIF_BANK1 0x0C7C
14207+
14208+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
14209+
14210+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
14211+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
14212+
14213+#define PSB_CR_2D_SOCIF 0x0E18
14214+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
14215+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
14216+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
14217+
14218+#define PSB_CR_2D_BLIT_STATUS 0x0E04
14219+#define _PSB_C2B_STATUS_BUSY (1 << 24)
14220+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
14221+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
14222+
14223+/*
14224+ * 2D defs.
14225+ */
14226+
14227+/*
14228+ * 2D Slave Port Data : Block Header's Object Type
14229+ */
14230+
14231+#define PSB_2D_CLIP_BH (0x00000000)
14232+#define PSB_2D_PAT_BH (0x10000000)
14233+#define PSB_2D_CTRL_BH (0x20000000)
14234+#define PSB_2D_SRC_OFF_BH (0x30000000)
14235+#define PSB_2D_MASK_OFF_BH (0x40000000)
14236+#define PSB_2D_RESERVED1_BH (0x50000000)
14237+#define PSB_2D_RESERVED2_BH (0x60000000)
14238+#define PSB_2D_FENCE_BH (0x70000000)
14239+#define PSB_2D_BLIT_BH (0x80000000)
14240+#define PSB_2D_SRC_SURF_BH (0x90000000)
14241+#define PSB_2D_DST_SURF_BH (0xA0000000)
14242+#define PSB_2D_PAT_SURF_BH (0xB0000000)
14243+#define PSB_2D_SRC_PAL_BH (0xC0000000)
14244+#define PSB_2D_PAT_PAL_BH (0xD0000000)
14245+#define PSB_2D_MASK_SURF_BH (0xE0000000)
14246+#define PSB_2D_FLUSH_BH (0xF0000000)
14247+
14248+/*
14249+ * Clip Definition block (PSB_2D_CLIP_BH)
14250+ */
14251+#define PSB_2D_CLIPCOUNT_MAX (1)
14252+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
14253+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
14254+#define PSB_2D_CLIPCOUNT_SHIFT (0)
14255+// clip rectangle min & max
14256+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
14257+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
14258+#define PSB_2D_CLIP_XMAX_SHIFT (12)
14259+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
14260+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
14261+#define PSB_2D_CLIP_XMIN_SHIFT (0)
14262+// clip rectangle offset
14263+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
14264+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
14265+#define PSB_2D_CLIP_YMAX_SHIFT (12)
14266+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
14267+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
14268+#define PSB_2D_CLIP_YMIN_SHIFT (0)
14269+
14270+/*
14271+ * Pattern Control (PSB_2D_PAT_BH)
14272+ */
14273+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
14274+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
14275+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
14276+#define PSB_2D_PAT_WIDTH_SHIFT (5)
14277+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
14278+#define PSB_2D_PAT_YSTART_SHIFT (10)
14279+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
14280+#define PSB_2D_PAT_XSTART_SHIFT (15)
14281+
14282+/*
14283+ * 2D Control block (PSB_2D_CTRL_BH)
14284+ */
14285+// Present Flags
14286+#define PSB_2D_SRCCK_CTRL (0x00000001)
14287+#define PSB_2D_DSTCK_CTRL (0x00000002)
14288+#define PSB_2D_ALPHA_CTRL (0x00000004)
14289+// Colour Key Colour (SRC/DST)
14290+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
14291+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
14292+#define PSB_2D_CK_COL_SHIFT (0)
14293+// Colour Key Mask (SRC/DST)
14294+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
14295+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
14296+#define PSB_2D_CK_MASK_SHIFT (0)
14297+// Alpha Control (Alpha/RGB)
14298+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
14299+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
14300+#define PSB_2D_GBLALPHA_SHIFT (12)
14301+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
14302+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
14303+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
14304+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
14305+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
14306+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
14307+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
14308+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
14309+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
14310+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
14311+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
14312+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
14313+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
14314+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
14315+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
14316+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
14317+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
14318+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
14319+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
14320+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
14321+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
14322+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
14323+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
14324+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
14325+
14326+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
14327+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
14328+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
14329+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
14330+
14331+/*
14332+ *Source Offset (PSB_2D_SRC_OFF_BH)
14333+ */
14334+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
14335+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
14336+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
14337+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
14338+
14339+/*
14340+ * Mask Offset (PSB_2D_MASK_OFF_BH)
14341+ */
14342+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
14343+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
14344+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
14345+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
14346+
14347+/*
14348+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
14349+ */
14350+
14351+/*
14352+ *Blit Rectangle (PSB_2D_BLIT_BH)
14353+ */
14354+
14355+#define PSB_2D_ROT_MASK (3<<25)
14356+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
14357+#define PSB_2D_ROT_NONE (0<<25)
14358+#define PSB_2D_ROT_90DEGS (1<<25)
14359+#define PSB_2D_ROT_180DEGS (2<<25)
14360+#define PSB_2D_ROT_270DEGS (3<<25)
14361+
14362+#define PSB_2D_COPYORDER_MASK (3<<23)
14363+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
14364+#define PSB_2D_COPYORDER_TL2BR (0<<23)
14365+#define PSB_2D_COPYORDER_BR2TL (1<<23)
14366+#define PSB_2D_COPYORDER_TR2BL (2<<23)
14367+#define PSB_2D_COPYORDER_BL2TR (3<<23)
14368+
14369+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
14370+#define PSB_2D_DSTCK_DISABLE (0x00000000)
14371+#define PSB_2D_DSTCK_PASS (0x00200000)
14372+#define PSB_2D_DSTCK_REJECT (0x00400000)
14373+
14374+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
14375+#define PSB_2D_SRCCK_DISABLE (0x00000000)
14376+#define PSB_2D_SRCCK_PASS (0x00080000)
14377+#define PSB_2D_SRCCK_REJECT (0x00100000)
14378+
14379+#define PSB_2D_CLIP_ENABLE (0x00040000)
14380+
14381+#define PSB_2D_ALPHA_ENABLE (0x00020000)
14382+
14383+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
14384+#define PSB_2D_PAT_MASK (0x00010000)
14385+#define PSB_2D_USE_PAT (0x00010000)
14386+#define PSB_2D_USE_FILL (0x00000000)
14387+/*
14388+ * Tungsten Graphics note on rop codes: If rop A and rop B are
14389+ * identical, the mask surface will not be read and need not be
14390+ * set up.
14391+ */
14392+
14393+#define PSB_2D_ROP3B_MASK (0x0000FF00)
14394+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
14395+#define PSB_2D_ROP3B_SHIFT (8)
14396+// rop code A
14397+#define PSB_2D_ROP3A_MASK (0x000000FF)
14398+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
14399+#define PSB_2D_ROP3A_SHIFT (0)
14400+
14401+#define PSB_2D_ROP4_MASK (0x0000FFFF)
14402+/*
14403+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
14404+ * Fill Colour RGBA8888
14405+ */
14406+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
14407+#define PSB_2D_FILLCOLOUR_SHIFT (0)
14408+/*
14409+ * DWORD1: (Always Present)
14410+ * X Start (Dest)
14411+ * Y Start (Dest)
14412+ */
14413+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
14414+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
14415+#define PSB_2D_DST_XSTART_SHIFT (12)
14416+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
14417+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
14418+#define PSB_2D_DST_YSTART_SHIFT (0)
14419+/*
14420+ * DWORD2: (Always Present)
14421+ * X Size (Dest)
14422+ * Y Size (Dest)
14423+ */
14424+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
14425+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
14426+#define PSB_2D_DST_XSIZE_SHIFT (12)
14427+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
14428+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
14429+#define PSB_2D_DST_YSIZE_SHIFT (0)
14430+
14431+/*
14432+ * Source Surface (PSB_2D_SRC_SURF_BH)
14433+ */
14434+/*
14435+ * WORD 0
14436+ */
14437+
14438+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
14439+#define PSB_2D_SRC_1_PAL (0x00000000)
14440+#define PSB_2D_SRC_2_PAL (0x00008000)
14441+#define PSB_2D_SRC_4_PAL (0x00010000)
14442+#define PSB_2D_SRC_8_PAL (0x00018000)
14443+#define PSB_2D_SRC_8_ALPHA (0x00020000)
14444+#define PSB_2D_SRC_4_ALPHA (0x00028000)
14445+#define PSB_2D_SRC_332RGB (0x00030000)
14446+#define PSB_2D_SRC_4444ARGB (0x00038000)
14447+#define PSB_2D_SRC_555RGB (0x00040000)
14448+#define PSB_2D_SRC_1555ARGB (0x00048000)
14449+#define PSB_2D_SRC_565RGB (0x00050000)
14450+#define PSB_2D_SRC_0888ARGB (0x00058000)
14451+#define PSB_2D_SRC_8888ARGB (0x00060000)
14452+#define PSB_2D_SRC_8888UYVY (0x00068000)
14453+#define PSB_2D_SRC_RESERVED (0x00070000)
14454+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
14455+
14456+
14457+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
14458+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
14459+#define PSB_2D_SRC_STRIDE_SHIFT (0)
14460+/*
14461+ * WORD 1 - Base Address
14462+ */
14463+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
14464+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
14465+#define PSB_2D_SRC_ADDR_SHIFT (2)
14466+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
14467+
14468+/*
14469+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
14470+ */
14471+/*
14472+ * WORD 0
14473+ */
14474+
14475+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
14476+#define PSB_2D_PAT_1_PAL (0x00000000)
14477+#define PSB_2D_PAT_2_PAL (0x00008000)
14478+#define PSB_2D_PAT_4_PAL (0x00010000)
14479+#define PSB_2D_PAT_8_PAL (0x00018000)
14480+#define PSB_2D_PAT_8_ALPHA (0x00020000)
14481+#define PSB_2D_PAT_4_ALPHA (0x00028000)
14482+#define PSB_2D_PAT_332RGB (0x00030000)
14483+#define PSB_2D_PAT_4444ARGB (0x00038000)
14484+#define PSB_2D_PAT_555RGB (0x00040000)
14485+#define PSB_2D_PAT_1555ARGB (0x00048000)
14486+#define PSB_2D_PAT_565RGB (0x00050000)
14487+#define PSB_2D_PAT_0888ARGB (0x00058000)
14488+#define PSB_2D_PAT_8888ARGB (0x00060000)
14489+
14490+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
14491+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
14492+#define PSB_2D_PAT_STRIDE_SHIFT (0)
14493+/*
14494+ * WORD 1 - Base Address
14495+ */
14496+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
14497+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
14498+#define PSB_2D_PAT_ADDR_SHIFT (2)
14499+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
14500+
14501+/*
14502+ * Destination Surface (PSB_2D_DST_SURF_BH)
14503+ */
14504+/*
14505+ * WORD 0
14506+ */
14507+
14508+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
14509+#define PSB_2D_DST_332RGB (0x00030000)
14510+#define PSB_2D_DST_4444ARGB (0x00038000)
14511+#define PSB_2D_DST_555RGB (0x00040000)
14512+#define PSB_2D_DST_1555ARGB (0x00048000)
14513+#define PSB_2D_DST_565RGB (0x00050000)
14514+#define PSB_2D_DST_0888ARGB (0x00058000)
14515+#define PSB_2D_DST_8888ARGB (0x00060000)
14516+#define PSB_2D_DST_8888AYUV (0x00070000)
14517+
14518+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
14519+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
14520+#define PSB_2D_DST_STRIDE_SHIFT (0)
14521+/*
14522+ * WORD 1 - Base Address
14523+ */
14524+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
14525+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
14526+#define PSB_2D_DST_ADDR_SHIFT (2)
14527+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
14528+
14529+/*
14530+ * Mask Surface (PSB_2D_MASK_SURF_BH)
14531+ */
14532+/*
14533+ * WORD 0
14534+ */
14535+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
14536+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
14537+#define PSB_2D_MASK_STRIDE_SHIFT (0)
14538+/*
14539+ * WORD 1 - Base Address
14540+ */
14541+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
14542+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
14543+#define PSB_2D_MASK_ADDR_SHIFT (2)
14544+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
14545+
14546+/*
14547+ * Source Palette (PSB_2D_SRC_PAL_BH)
14548+ */
14549+
14550+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
14551+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
14552+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
14553+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
14554+
14555+/*
14556+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
14557+ */
14558+
14559+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
14560+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
14561+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
14562+#define PSB_2D_PATPAL_BYTEALIGN (1024)
14563+
14564+/*
14565+ * Rop3 Codes (2 LS bytes)
14566+ */
14567+
14568+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
14569+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
14570+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
14571+#define PSB_2D_ROP3_BLACKNESS (0x0000)
14572+#define PSB_2D_ROP3_SRC (0xCC)
14573+#define PSB_2D_ROP3_PAT (0xF0)
14574+#define PSB_2D_ROP3_DST (0xAA)
14575+
14576+
14577+/*
14578+ * Sizes.
14579+ */
14580+
14581+#define PSB_SCENE_HW_COOKIE_SIZE 16
14582+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
14583+
14584+/*
14585+ * Scene stuff.
14586+ */
14587+
14588+#define PSB_NUM_HW_SCENES 2
14589+
14590+/*
14591+ * Scheduler completion actions.
14592+ */
14593+
14594+#define PSB_RASTER_BLOCK 0
14595+#define PSB_RASTER 1
14596+#define PSB_RETURN 2
14597+#define PSB_TA 3
14598+
14599+
14600+#endif
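
Editorial note: to see how the 2D block-header defines above compose, here is a sketch (not part of the patch) of a solid-fill blit header. Pattern control selects the fill colour, and both ROP3 codes are set to PAT so that, per the Tungsten Graphics note above, the mask surface is never read:

uint32_t blit_bh = PSB_2D_BLIT_BH |
		   PSB_2D_ROT_NONE |
		   PSB_2D_COPYORDER_TL2BR |
		   PSB_2D_USE_FILL |
		   (PSB_2D_ROP3_PAT << PSB_2D_ROP3B_SHIFT) |
		   (PSB_2D_ROP3_PAT << PSB_2D_ROP3A_SHIFT);
/* Since pattern control == use fill, the following DWORD0 carries the
 * RGBA8888 fill colour; DWORD1/DWORD2 then carry the destination X/Y
 * start and size, packed with the PSB_2D_DST_* masks and shifts above. */
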
14601Index: linux-2.6.28/drivers/gpu/drm/psb/psb_regman.c
14602===================================================================
14603--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14604+++ linux-2.6.28/drivers/gpu/drm/psb/psb_regman.c 2009-02-25 15:37:02.000000000 +0000
14605@@ -0,0 +1,175 @@
14606+/**************************************************************************
14607+ * Copyright (c) 2007, Intel Corporation.
14608+ * All Rights Reserved.
14609+ *
14610+ * This program is free software; you can redistribute it and/or modify it
14611+ * under the terms and conditions of the GNU General Public License,
14612+ * version 2, as published by the Free Software Foundation.
14613+ *
14614+ * This program is distributed in the hope it will be useful, but WITHOUT
14615+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14616+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14617+ * more details.
14618+ *
14619+ * You should have received a copy of the GNU General Public License along with
14620+ * this program; if not, write to the Free Software Foundation, Inc.,
14621+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
14622+ *
14623+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
14624+ * develop this driver.
14625+ *
14626+ **************************************************************************/
14627+/*
14628+ */
14629+
14630+#include "drmP.h"
14631+#include "psb_drv.h"
14632+
14633+struct psb_use_reg {
14634+ struct drm_reg reg;
14635+ struct drm_psb_private *dev_priv;
14636+ uint32_t reg_seq;
14637+ uint32_t base;
14638+ uint32_t data_master;
14639+};
14640+
14641+struct psb_use_reg_data {
14642+ uint32_t base;
14643+ uint32_t size;
14644+ uint32_t data_master;
14645+};
14646+
14647+static int psb_use_reg_reusable(const struct drm_reg *reg, const void *data)
14648+{
14649+ struct psb_use_reg *use_reg =
14650+ container_of(reg, struct psb_use_reg, reg);
14651+ struct psb_use_reg_data *use_data = (struct psb_use_reg_data *)data;
14652+
14653+ return ((use_reg->base <= use_data->base) &&
14654+ (use_reg->base + PSB_USE_OFFSET_SIZE >
14655+ use_data->base + use_data->size) &&
14656+ use_reg->data_master == use_data->data_master);
14657+}
14658+
14659+static int psb_use_reg_set(struct psb_use_reg *use_reg,
14660+ const struct psb_use_reg_data *use_data)
14661+{
14662+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
14663+
14664+ if (use_reg->reg.fence == NULL)
14665+ use_reg->data_master = use_data->data_master;
14666+
14667+ if (use_reg->reg.fence == NULL &&
14668+ !psb_use_reg_reusable(&use_reg->reg, (const void *)use_data)) {
14669+
14670+ use_reg->base = use_data->base & ~PSB_USE_OFFSET_MASK;
14671+ use_reg->data_master = use_data->data_master;
14672+
14673+ if (!psb_use_reg_reusable(&use_reg->reg,
14674+ (const void *)use_data)) {
14675+ DRM_ERROR("USE base mechanism didn't support "
14676+ "buffer size or alignment\n");
14677+ return -EINVAL;
14678+ }
14679+
14680+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
14681+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
14682+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
14683+ }
14684+ return 0;
14685+
14686+}
14687+
14688+int psb_grab_use_base(struct drm_psb_private *dev_priv,
14689+ unsigned long base,
14690+ unsigned long size,
14691+ unsigned int data_master,
14692+ uint32_t fence_class,
14693+ uint32_t fence_type,
14694+ int no_wait,
14695+ int interruptible, int *r_reg, uint32_t * r_offset)
14696+{
14697+ struct psb_use_reg_data use_data = {
14698+ .base = base,
14699+ .size = size,
14700+ .data_master = data_master
14701+ };
14702+ int ret;
14703+
14704+ struct drm_reg *reg;
14705+ struct psb_use_reg *use_reg;
14706+
14707+ ret = drm_regs_alloc(&dev_priv->use_manager,
14708+ (const void *)&use_data,
14709+ fence_class,
14710+ fence_type, interruptible, no_wait, &reg);
14711+ if (ret)
14712+ return ret;
14713+
14714+ use_reg = container_of(reg, struct psb_use_reg, reg);
14715+ ret = psb_use_reg_set(use_reg, &use_data);
14716+
14717+ if (ret)
14718+ return ret;
14719+
14720+ *r_reg = use_reg->reg_seq;
14721+ *r_offset = base - use_reg->base;
14722+
14723+ return 0;
14724+}
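/*
 * A minimal usage sketch for psb_grab_use_base(), assuming a
 * hypothetical caller in the command-buffer validator (bo_offset,
 * bo_size and emit_use_code_base() are illustrative names, not part of
 * this patch):
 *
 *	int reg;
 *	uint32_t offset;
 *	int ret = psb_grab_use_base(dev_priv, bo_offset, bo_size,
 *				    _PSB_CUC_DM_PIXEL, fence_class,
 *				    DRM_FENCE_TYPE_EXE, 0, 1,
 *				    &reg, &offset);
 *	if (!ret)
 *		emit_use_code_base(cmds, reg, offset);
 *
 * The register remains bound to this range until the fence attached to
 * its drm_reg entry (see reg.fence in psb_use_reg_set()) signals, after
 * which drm_regs_alloc() may hand it out again.
 */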
14725+
14726+static void psb_use_reg_destroy(struct drm_reg *reg)
14727+{
14728+ struct psb_use_reg *use_reg =
14729+ container_of(reg, struct psb_use_reg, reg);
14730+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
14731+
14732+ PSB_WSGX32(PSB_ALPL(0, _PSB_CUC_BASE_ADDR),
14733+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
14734+
14735+ drm_free(use_reg, sizeof(*use_reg), DRM_MEM_DRIVER);
14736+}
14737+
14738+int psb_init_use_base(struct drm_psb_private *dev_priv,
14739+ unsigned int reg_start, unsigned int reg_num)
14740+{
14741+ struct psb_use_reg *use_reg;
14742+ int i;
14743+ int ret = 0;
14744+
14745+ mutex_lock(&dev_priv->cmdbuf_mutex);
14746+
14747+ drm_regs_init(&dev_priv->use_manager,
14748+ &psb_use_reg_reusable, &psb_use_reg_destroy);
14749+
14750+ for (i = reg_start; i < reg_start + reg_num; ++i) {
14751+ use_reg = drm_calloc(1, sizeof(*use_reg), DRM_MEM_DRIVER);
14752+ if (!use_reg) {
14753+ ret = -ENOMEM;
14754+ goto out;
14755+ }
14756+
14757+ use_reg->dev_priv = dev_priv;
14758+ use_reg->reg_seq = i;
14759+ use_reg->base = 0;
14760+ use_reg->data_master = _PSB_CUC_DM_PIXEL;
14761+
14762+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
14763+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
14764+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
14765+
14766+ drm_regs_add(&dev_priv->use_manager, &use_reg->reg);
14767+ }
14768+ out:
14769+ mutex_unlock(&dev_priv->cmdbuf_mutex);
14770+
14771+ return ret;
14772+
14773+}
14774+
14775+void psb_takedown_use_base(struct drm_psb_private *dev_priv)
14776+{
14777+ mutex_lock(&dev_priv->cmdbuf_mutex);
14778+ drm_regs_free(&dev_priv->use_manager);
14779+ mutex_unlock(&dev_priv->cmdbuf_mutex);
14780+}
14781Index: linux-2.6.28/drivers/gpu/drm/psb/psb_reset.c
14782===================================================================
14783--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14784+++ linux-2.6.28/drivers/gpu/drm/psb/psb_reset.c 2009-02-25 15:37:02.000000000 +0000
14785@@ -0,0 +1,374 @@
14786+/**************************************************************************
14787+ * Copyright (c) 2007, Intel Corporation.
14788+ * All Rights Reserved.
14789+ *
14790+ * This program is free software; you can redistribute it and/or modify it
14791+ * under the terms and conditions of the GNU General Public License,
14792+ * version 2, as published by the Free Software Foundation.
14793+ *
14794+ * This program is distributed in the hope it will be useful, but WITHOUT
14795+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14796+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14797+ * more details.
14798+ *
14799+ * You should have received a copy of the GNU General Public License along with
14800+ * this program; if not, write to the Free Software Foundation, Inc.,
14801+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
14802+ *
14803+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
14804+ * develop this driver.
14805+ *
14806+ **************************************************************************/
14807+/*
14808+ * Authors:
14809+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
14810+ */
14811+
14812+#include "drmP.h"
14813+#include "psb_drv.h"
14814+#include "psb_reg.h"
14815+#include "psb_scene.h"
14816+#include "psb_msvdx.h"
14817+
14818+#define PSB_2D_TIMEOUT_MSEC 100
14819+
14820+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
14821+{
14822+ uint32_t val;
14823+
14824+ val = _PSB_CS_RESET_BIF_RESET |
14825+ _PSB_CS_RESET_DPM_RESET |
14826+ _PSB_CS_RESET_TA_RESET |
14827+ _PSB_CS_RESET_USE_RESET |
14828+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
14829+
14830+ if (reset_2d)
14831+ val |= _PSB_CS_RESET_TWOD_RESET;
14832+
14833+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
14834+ (void)PSB_RSGX32(PSB_CR_SOFT_RESET);
14835+
14836+ msleep(1);
14837+
14838+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
14839+ wmb();
14840+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
14841+ PSB_CR_BIF_CTRL);
14842+ wmb();
14843+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
14844+
14845+ msleep(1);
14846+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
14847+ PSB_CR_BIF_CTRL);
14848+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
14849+}
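/*
 * The reset sequence above follows the usual MMIO ordering discipline:
 * each PSB_WSGX32() write is posted by a PSB_RSGX32() read-back or a
 * wmb() before the msleep() settle time starts, and the BIF fault-clear
 * bit is pulsed (set, settle, clear) rather than left asserted.
 */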
14850+
14851+void psb_print_pagefault(struct drm_psb_private *dev_priv)
14852+{
14853+ uint32_t val;
14854+ uint32_t addr;
14855+
14856+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
14857+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
14858+
14859+ if (val) {
14860+ if (val & _PSB_CBI_STAT_PF_N_RW)
14861+ DRM_ERROR("Poulsbo MMU page fault:\n");
14862+ else
14863+ DRM_ERROR("Poulsbo MMU read / write "
14864+ "protection fault:\n");
14865+
14866+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
14867+ DRM_ERROR("\tCache requestor.\n");
14868+ if (val & _PSB_CBI_STAT_FAULT_TA)
14869+ DRM_ERROR("\tTA requestor.\n");
14870+ if (val & _PSB_CBI_STAT_FAULT_VDM)
14871+ DRM_ERROR("\tVDM requestor.\n");
14872+ if (val & _PSB_CBI_STAT_FAULT_2D)
14873+ DRM_ERROR("\t2D requestor.\n");
14874+ if (val & _PSB_CBI_STAT_FAULT_PBE)
14875+ DRM_ERROR("\tPBE requestor.\n");
14876+ if (val & _PSB_CBI_STAT_FAULT_TSP)
14877+ DRM_ERROR("\tTSP requestor.\n");
14878+ if (val & _PSB_CBI_STAT_FAULT_ISP)
14879+ DRM_ERROR("\tISP requestor.\n");
14880+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
14881+ DRM_ERROR("\tUSSEPDS requestor.\n");
14882+ if (val & _PSB_CBI_STAT_FAULT_HOST)
14883+ DRM_ERROR("\tHost requestor.\n");
14884+
14885+ DRM_ERROR("\tMMU failing address is 0x%08x.\n", (unsigned)addr);
14886+ }
14887+}
14888+
14889+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
14890+{
14891+ struct timer_list *wt = &dev_priv->watchdog_timer;
14892+ unsigned long irq_flags;
14893+
14894+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
14895+ if (dev_priv->timer_available && !timer_pending(wt)) {
14896+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
14897+ add_timer(wt);
14898+ }
14899+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
14900+}
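/*
 * Re-arm idiom: timer_available is a gate that the reset workers clear
 * before tearing state down and set again once watching the hardware is
 * safe, while timer_pending() keeps the single timer from being
 * double-added. Because the lock is taken irqsave, callers may invoke
 * this opportunistically, including from interrupt context.
 */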
14901+
14902+#if 0
14903+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
14904+ unsigned int engine, int *lockup, int *idle)
14905+{
14906+ uint32_t received_seq;
14907+
14908+ received_seq = dev_priv->comm[engine << 4];
14909+ spin_lock(&dev_priv->sequence_lock);
14910+ *idle = (received_seq == dev_priv->sequence[engine]);
14911+ spin_unlock(&dev_priv->sequence_lock);
14912+
14913+ if (*idle) {
14914+ dev_priv->idle[engine] = 1;
14915+ *lockup = 0;
14916+ return;
14917+ }
14918+
14919+ if (dev_priv->idle[engine]) {
14920+ dev_priv->idle[engine] = 0;
14921+ dev_priv->last_sequence[engine] = received_seq;
14922+ *lockup = 0;
14923+ return;
14924+ }
14925+
14926+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
14927+}
14928+
14929+#endif
14930+static void psb_watchdog_func(unsigned long data)
14931+{
14932+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
14933+ int lockup;
14934+ int msvdx_lockup;
14935+ int msvdx_idle;
14936+ int lockup_2d;
14937+ int idle_2d;
14938+ int idle;
14939+ unsigned long irq_flags;
14940+
14941+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
14942+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
14943+#if 0
14944+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
14945+#else
14946+ lockup_2d = 0;
14947+ idle_2d = 1;
14948+#endif
14949+ if (lockup || msvdx_lockup || lockup_2d) {
14950+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
14951+ dev_priv->timer_available = 0;
14952+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
14953+ if (lockup) {
14954+ psb_print_pagefault(dev_priv);
14955+ schedule_work(&dev_priv->watchdog_wq);
14956+ }
14957+ if (msvdx_lockup)
14958+ schedule_work(&dev_priv->msvdx_watchdog_wq);
14959+ }
14960+ if (!idle || !msvdx_idle || !idle_2d)
14961+ psb_schedule_watchdog(dev_priv);
14962+}
14963+
14964+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
14965+{
14966+ struct drm_psb_private *dev_priv = dev->dev_private;
14967+ struct psb_msvdx_cmd_queue *msvdx_cmd;
14968+ struct list_head *list, *next;
14969+ /* Flush the msvdx cmd queue and signal all fences in the queue. */
14970+ list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
14971+ msvdx_cmd = list_entry(list, struct psb_msvdx_cmd_queue, head);
14972+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
14973+ msvdx_cmd->sequence);
14974+ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
14975+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
14976+ dev_priv->msvdx_current_sequence,
14977+ DRM_FENCE_TYPE_EXE, DRM_CMD_HANG);
14978+ list_del(list);
14979+ kfree(msvdx_cmd->cmd);
14980+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
14981+ DRM_MEM_DRIVER);
14982+ }
14983+}
14984+
14985+static void psb_msvdx_reset_wq(struct work_struct *work)
14986+{
14987+ struct drm_psb_private *dev_priv =
14988+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
14989+
14990+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
14991+ unsigned long irq_flags;
14992+
14993+ mutex_lock(&dev_priv->msvdx_mutex);
14994+ dev_priv->msvdx_needs_reset = 1;
14995+ dev_priv->msvdx_current_sequence++;
14996+ PSB_DEBUG_GENERAL
14997+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
14998+ dev_priv->msvdx_current_sequence);
14999+
15000+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
15001+ dev_priv->msvdx_current_sequence, DRM_FENCE_TYPE_EXE,
15002+ DRM_CMD_HANG);
15003+
15004+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
15005+ dev_priv->timer_available = 1;
15006+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
15007+
15008+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
15009+ psb_msvdx_flush_cmd_queue(scheduler->dev);
15010+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
15011+
15012+ psb_schedule_watchdog(dev_priv);
15013+ mutex_unlock(&dev_priv->msvdx_mutex);
15014+}
15015+
15016+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
15017+{
15018+ struct psb_xhw_buf buf;
15019+ uint32_t bif_ctrl;
15020+
15021+ INIT_LIST_HEAD(&buf.head);
15022+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
15023+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
15024+ PSB_WSGX32(bif_ctrl |
15025+ _PSB_CB_CTRL_CLEAR_FAULT |
15026+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
15027+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
15028+ msleep(1);
15029+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
15030+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
15031+ return psb_xhw_reset_dpm(dev_priv, &buf);
15032+}
15033+
15034+/*
15035+ * Block command submission and reset hardware and schedulers.
15036+ */
15037+
15038+static void psb_reset_wq(struct work_struct *work)
15039+{
15040+ struct drm_psb_private *dev_priv =
15041+ container_of(work, struct drm_psb_private, watchdog_wq);
15042+ int lockup_2d;
15043+ int idle_2d;
15044+ unsigned long irq_flags;
15045+ int ret;
15046+ int reset_count = 0;
15047+ struct psb_xhw_buf buf;
15048+ uint32_t xhw_lockup;
15049+
15050+ /*
15051+ * Block command submission.
15052+ */
15053+
15054+ mutex_lock(&dev_priv->reset_mutex);
15055+
15056+ INIT_LIST_HEAD(&buf.head);
15057+ if (psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup) == 0) {
15058+ if (xhw_lockup == 0 && psb_extend_raster_timeout(dev_priv) == 0) {
15059+ /*
15060+ * no lockup, just re-schedule
15061+ */
15062+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
15063+ dev_priv->timer_available = 1;
15064+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
15065+ irq_flags);
15066+ psb_schedule_watchdog(dev_priv);
15067+ mutex_unlock(&dev_priv->reset_mutex);
15068+ return;
15069+ }
15070+ }
15071+#if 0
15072+ msleep(PSB_2D_TIMEOUT_MSEC);
15073+
15074+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
15075+
15076+ if (lockup_2d) {
15077+ uint32_t seq_2d;
15078+ spin_lock(&dev_priv->sequence_lock);
15079+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
15080+ spin_unlock(&dev_priv->sequence_lock);
15081+ psb_fence_error(dev_priv->scheduler.dev,
15082+ PSB_ENGINE_2D,
15083+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
15084+ DRM_INFO("Resetting 2D engine.\n");
15085+ }
15086+
15087+ psb_reset(dev_priv, lockup_2d);
15088+#else
15089+ (void)lockup_2d;
15090+ (void)idle_2d;
15091+ psb_reset(dev_priv, 0);
15092+#endif
15093+ (void)psb_xhw_mmu_reset(dev_priv);
15094+ DRM_INFO("Resetting scheduler.\n");
15095+ psb_scheduler_pause(dev_priv);
15096+ psb_scheduler_reset(dev_priv, -EBUSY);
15097+ psb_scheduler_ta_mem_check(dev_priv);
15098+
15099+ while (dev_priv->ta_mem &&
15100+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
15101+
15102+ /*
15103+ * TA memory is currently fenced so offsets
15104+ * are valid. Reload offsets into the dpm now.
15105+ */
15106+
15107+ struct psb_xhw_buf buf;
15108+ INIT_LIST_HEAD(&buf.head);
15109+
15110+ msleep(100);
15111+ DRM_INFO("Trying to reload TA memory.\n");
15112+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
15113+ PSB_TA_MEM_FLAG_TA |
15114+ PSB_TA_MEM_FLAG_RASTER |
15115+ PSB_TA_MEM_FLAG_HOSTA |
15116+ PSB_TA_MEM_FLAG_HOSTD |
15117+ PSB_TA_MEM_FLAG_INIT,
15118+ dev_priv->ta_mem->ta_memory->offset,
15119+ dev_priv->ta_mem->hw_data->offset,
15120+ dev_priv->ta_mem->hw_cookie);
15121+ if (!ret)
15122+ break;
15123+
15124+ psb_reset(dev_priv, 0);
15125+ (void)psb_xhw_mmu_reset(dev_priv);
15126+ }
15127+
15128+ psb_scheduler_restart(dev_priv);
15129+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
15130+ dev_priv->timer_available = 1;
15131+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
15132+ mutex_unlock(&dev_priv->reset_mutex);
15133+}
15134+
15135+void psb_watchdog_init(struct drm_psb_private *dev_priv)
15136+{
15137+ struct timer_list *wt = &dev_priv->watchdog_timer;
15138+ unsigned long irq_flags;
15139+
15140+ spin_lock_init(&dev_priv->watchdog_lock);
15141+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
15142+ init_timer(wt);
15143+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
15144+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
15145+ wt->data = (unsigned long)dev_priv;
15146+ wt->function = &psb_watchdog_func;
15147+ dev_priv->timer_available = 1;
15148+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
15149+}
15150+
15151+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
15152+{
15153+ unsigned long irq_flags;
15154+
15155+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
15156+ dev_priv->timer_available = 0;
15157+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
15158+ (void)del_timer_sync(&dev_priv->watchdog_timer);
15159+}
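/*
 * Teardown ordering matters here: timer_available is cleared first so
 * psb_schedule_watchdog() can no longer re-arm the timer, and only then
 * does del_timer_sync() wait out a handler that may already be running
 * on another CPU.
 */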
15160Index: linux-2.6.28/drivers/gpu/drm/psb/psb_scene.c
15161===================================================================
15162--- /dev/null 1970-01-01 00:00:00.000000000 +0000
15163+++ linux-2.6.28/drivers/gpu/drm/psb/psb_scene.c 2009-02-25 15:37:02.000000000 +0000
15164@@ -0,0 +1,531 @@
15165+/**************************************************************************
15166+ * Copyright (c) 2007, Intel Corporation.
15167+ * All Rights Reserved.
15168+ *
15169+ * This program is free software; you can redistribute it and/or modify it
15170+ * under the terms and conditions of the GNU General Public License,
15171+ * version 2, as published by the Free Software Foundation.
15172+ *
15173+ * This program is distributed in the hope it will be useful, but WITHOUT
15174+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15175+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15176+ * more details.
15177+ *
15178+ * You should have received a copy of the GNU General Public License along with
15179+ * this program; if not, write to the Free Software Foundation, Inc.,
15180+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15181+ *
15182+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
15183+ * develop this driver.
15184+ *
15185+ **************************************************************************/
15186+/*
15187+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
15188+ */
15189+
15190+#include "drmP.h"
15191+#include "psb_drv.h"
15192+#include "psb_scene.h"
15193+
15194+void psb_clear_scene_atomic(struct psb_scene *scene)
15195+{
15196+ int i;
15197+ struct page *page;
15198+ void *v;
15199+
15200+ for (i = 0; i < scene->clear_num_pages; ++i) {
15201+ page = drm_ttm_get_page(scene->hw_data->ttm,
15202+ scene->clear_p_start + i);
15203+ if (in_irq())
15204+ v = kmap_atomic(page, KM_IRQ0);
15205+ else
15206+ v = kmap_atomic(page, KM_USER0);
15207+
15208+ memset(v, 0, PAGE_SIZE);
15209+
15210+ if (in_irq())
15211+ kunmap_atomic(v, KM_IRQ0);
15212+ else
15213+ kunmap_atomic(v, KM_USER0);
15214+ }
15215+}
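/*
 * The in_irq() test selects the kmap_atomic slot: KM_IRQ0 when called
 * from the interrupt path and KM_USER0 otherwise -- the standard
 * pre-kmap_local idiom for code shared between process and IRQ context.
 */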
15216+
15217+int psb_clear_scene(struct psb_scene *scene)
15218+{
15219+ struct drm_bo_kmap_obj bmo;
15220+ int is_iomem;
15221+ void *addr;
15222+
15223+ int ret = drm_bo_kmap(scene->hw_data, scene->clear_p_start,
15224+ scene->clear_num_pages, &bmo);
15225+
15226+ PSB_DEBUG_RENDER("Scene clear\n");
15227+ if (ret)
15228+ return ret;
15229+
15230+ addr = drm_bmo_virtual(&bmo, &is_iomem);
15231+ BUG_ON(is_iomem);
15232+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
15233+ drm_bo_kunmap(&bmo);
15234+
15235+ return 0;
15236+}
15237+
15238+static void psb_destroy_scene_devlocked(struct psb_scene *scene)
15239+{
15240+ if (!scene)
15241+ return;
15242+
15243+ PSB_DEBUG_RENDER("Scene destroy\n");
15244+ drm_bo_usage_deref_locked(&scene->hw_data);
15245+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
15246+}
15247+
15248+void psb_scene_unref_devlocked(struct psb_scene **scene)
15249+{
15250+ struct psb_scene *tmp_scene = *scene;
15251+
15252+ PSB_DEBUG_RENDER("Scene unref\n");
15253+ *scene = NULL;
15254+ if (atomic_dec_and_test(&tmp_scene->ref_count)) {
15255+ psb_scheduler_remove_scene_refs(tmp_scene);
15256+ psb_destroy_scene_devlocked(tmp_scene);
15257+ }
15258+}
15259+
15260+struct psb_scene *psb_scene_ref(struct psb_scene *src)
15261+{
15262+ PSB_DEBUG_RENDER("Scene ref\n");
15263+ atomic_inc(&src->ref_count);
15264+ return src;
15265+}
15266+
15267+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
15268+ uint32_t w, uint32_t h)
15269+{
15270+ struct drm_psb_private *dev_priv =
15271+ (struct drm_psb_private *)dev->dev_private;
15272+ int ret = -EINVAL;
15273+ struct psb_scene *scene;
15274+ uint32_t bo_size;
15275+ struct psb_xhw_buf buf;
15276+
15277+ PSB_DEBUG_RENDER("Alloc scene w %u h %u\n", w, h);
15278+
15279+ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
15280+
15281+ if (!scene) {
15282+ DRM_ERROR("Out of memory allocating scene object.\n");
15283+ return NULL;
15284+ }
15285+
15286+ scene->dev = dev;
15287+ scene->w = w;
15288+ scene->h = h;
15289+ scene->hw_scene = NULL;
15290+ atomic_set(&scene->ref_count, 1);
15291+
15292+ INIT_LIST_HEAD(&buf.head);
15293+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
15294+ scene->hw_cookie, &bo_size,
15295+ &scene->clear_p_start,
15296+ &scene->clear_num_pages);
15297+ if (ret)
15298+ goto out_err;
15299+
15300+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
15301+ DRM_PSB_FLAG_MEM_MMU |
15302+ DRM_BO_FLAG_READ |
15303+ DRM_BO_FLAG_CACHED |
15304+ PSB_BO_FLAG_SCENE |
15305+ DRM_BO_FLAG_WRITE,
15306+ DRM_BO_HINT_DONT_FENCE,
15307+ 0, 0, &scene->hw_data);
15308+ if (ret)
15309+ goto out_err;
15310+
15311+ return scene;
15312+ out_err:
15313+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
15314+ return NULL;
15315+}
15316+
15317+int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
15318+ uint64_t mask,
15319+ uint32_t hint,
15320+ uint32_t w,
15321+ uint32_t h,
15322+ int final_pass, struct psb_scene **scene_p)
15323+{
15324+ struct drm_device *dev = pool->dev;
15325+ struct drm_psb_private *dev_priv =
15326+ (struct drm_psb_private *)dev->dev_private;
15327+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
15328+ int ret;
15329+ unsigned long irq_flags;
15330+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
15331+ uint32_t bin_pt_offset;
15332+ uint32_t bin_param_offset;
15333+
15334+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", pool->cur_scene);
15335+
15336+ if (unlikely(!dev_priv->ta_mem)) {
15337+ dev_priv->ta_mem =
15338+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
15339+ if (!dev_priv->ta_mem)
15340+ return -ENOMEM;
15341+
15342+ bin_pt_offset = ~0;
15343+ bin_param_offset = ~0;
15344+ } else {
15345+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
15346+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
15347+ }
15348+
15349+ pool->w = w;
15350+ pool->h = h;
15351+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
15352+ spin_lock_irqsave(&scheduler->lock, irq_flags);
15353+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
15354+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
15355+ DRM_ERROR("Trying to resize a dirty scene.\n");
15356+ return -EINVAL;
15357+ }
15358+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
15359+ mutex_lock(&dev->struct_mutex);
15360+ psb_scene_unref_devlocked(&pool->scenes[pool->cur_scene]);
15361+ mutex_unlock(&dev->struct_mutex);
15362+ scene = NULL;
15363+ }
15364+
15365+ if (!scene) {
15366+ pool->scenes[pool->cur_scene] = scene =
15367+ psb_alloc_scene(pool->dev, pool->w, pool->h);
15368+
15369+ if (!scene)
15370+ return -ENOMEM;
15371+
15372+ scene->flags = PSB_SCENE_FLAG_CLEARED;
15373+ }
15374+
15375+ /*
15376+ * FIXME: We need atomic bit manipulation here for the
15377+ * scheduler. For now use the spinlock.
15378+ */
15379+
15380+ spin_lock_irqsave(&scheduler->lock, irq_flags);
15381+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
15382+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
15383+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
15384+ mutex_lock(&scene->hw_data->mutex);
15385+ ret = drm_bo_wait(scene->hw_data, 0, 0, 0);
15386+ mutex_unlock(&scene->hw_data->mutex);
15387+ if (ret)
15388+ return ret;
15389+
15390+ ret = psb_clear_scene(scene);
15391+
15392+ if (ret)
15393+ return ret;
15394+ spin_lock_irqsave(&scheduler->lock, irq_flags);
15395+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
15396+ }
15397+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
15398+
15399+ ret = drm_bo_do_validate(scene->hw_data, flags, mask, hint,
15400+ PSB_ENGINE_TA, 0, NULL);
15401+ if (ret)
15402+ return ret;
15403+ ret = drm_bo_do_validate(dev_priv->ta_mem->hw_data, 0, 0, 0,
15404+ PSB_ENGINE_TA, 0, NULL);
15405+ if (ret)
15406+ return ret;
15407+ ret = drm_bo_do_validate(dev_priv->ta_mem->ta_memory, 0, 0, 0,
15408+ PSB_ENGINE_TA, 0, NULL);
15409+ if (ret)
15410+ return ret;
15411+
15412+ if (unlikely(bin_param_offset !=
15413+ dev_priv->ta_mem->ta_memory->offset ||
15414+ bin_pt_offset !=
15415+ dev_priv->ta_mem->hw_data->offset ||
15416+ dev_priv->force_ta_mem_load)) {
15417+
15418+ struct psb_xhw_buf buf;
15419+
15420+ INIT_LIST_HEAD(&buf.head);
15421+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
15422+ PSB_TA_MEM_FLAG_TA |
15423+ PSB_TA_MEM_FLAG_RASTER |
15424+ PSB_TA_MEM_FLAG_HOSTA |
15425+ PSB_TA_MEM_FLAG_HOSTD |
15426+ PSB_TA_MEM_FLAG_INIT,
15427+ dev_priv->ta_mem->ta_memory->offset,
15428+ dev_priv->ta_mem->hw_data->offset,
15429+ dev_priv->ta_mem->hw_cookie);
15430+ if (ret)
15431+ return ret;
15432+
15433+ dev_priv->force_ta_mem_load = 0;
15434+ }
15435+
15436+ if (final_pass) {
15437+
15438+ /*
15439+ * Clear the scene on next use. Advance the scene counter.
15440+ */
15441+
15442+ spin_lock_irqsave(&scheduler->lock, irq_flags);
15443+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
15444+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
15445+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
15446+ }
15447+
15448+ *scene_p = psb_scene_ref(scene);
15449+ return 0;
15450+}
15451+
15452+static void psb_scene_pool_destroy_devlocked(struct psb_scene_pool *pool)
15453+{
15454+ int i;
15455+
15456+ if (!pool)
15457+ return;
15458+
15459+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
15460+ for (i = 0; i < pool->num_scenes; ++i) {
15461+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
15462+ (unsigned long)pool->scenes[i]);
15463+ if (pool->scenes[i])
15464+ psb_scene_unref_devlocked(&pool->scenes[i]);
15465+ }
15466+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
15467+}
15468+
15469+void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool)
15470+{
15471+ struct psb_scene_pool *tmp_pool = *pool;
15472+ struct drm_device *dev = tmp_pool->dev;
15473+
15474+ PSB_DEBUG_RENDER("Scene pool unref\n");
15475+ (void)dev;
15476+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
15477+ *pool = NULL;
15478+ if (--tmp_pool->ref_count == 0)
15479+ psb_scene_pool_destroy_devlocked(tmp_pool);
15480+}
15481+
15482+struct psb_scene_pool *psb_scene_pool_ref_devlocked(struct psb_scene_pool *src)
15483+{
15484+ ++src->ref_count;
15485+ return src;
15486+}
15487+
15488+/*
15489+ * Callback for user object manager.
15490+ */
15491+
15492+static void psb_scene_pool_destroy(struct drm_file *priv,
15493+ struct drm_user_object *base)
15494+{
15495+ struct psb_scene_pool *pool =
15496+ drm_user_object_entry(base, struct psb_scene_pool, user);
15497+
15498+ psb_scene_pool_unref_devlocked(&pool);
15499+}
15500+
15501+struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file *priv,
15502+ uint32_t handle,
15503+ int check_owner)
15504+{
15505+ struct drm_user_object *uo;
15506+ struct psb_scene_pool *pool;
15507+
15508+ uo = drm_lookup_user_object(priv, handle);
15509+ if (!uo || (uo->type != PSB_USER_OBJECT_SCENE_POOL)) {
15510+ DRM_ERROR("Could not find scene pool object 0x%08x\n", handle);
15511+ return NULL;
15512+ }
15513+
15514+ if (check_owner && priv != uo->owner) {
15515+ if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
15516+ return NULL;
15517+ }
15518+
15519+ pool = drm_user_object_entry(uo, struct psb_scene_pool, user);
15520+ return psb_scene_pool_ref_devlocked(pool);
15521+}
15522+
15523+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
15524+ int shareable,
15525+ uint32_t num_scenes,
15526+ uint32_t w, uint32_t h)
15527+{
15528+ struct drm_device *dev = priv->minor->dev;
15529+ struct psb_scene_pool *pool;
15530+ int ret;
15531+
15532+ PSB_DEBUG_RENDER("Scene pool alloc\n");
15533+ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
15534+ if (!pool) {
15535+ DRM_ERROR("Out of memory allocating scene pool object.\n");
15536+ return NULL;
15537+ }
15538+ pool->w = w;
15539+ pool->h = h;
15540+ pool->dev = dev;
15541+ pool->num_scenes = num_scenes;
15542+
15543+ mutex_lock(&dev->struct_mutex);
15544+ ret = drm_add_user_object(priv, &pool->user, shareable);
15545+ if (ret)
15546+ goto out_err;
15547+
15548+ pool->user.type = PSB_USER_OBJECT_SCENE_POOL;
15549+ pool->user.remove = &psb_scene_pool_destroy;
15550+ pool->ref_count = 2;
15551+ mutex_unlock(&dev->struct_mutex);
15552+ return pool;
15553+ out_err:
15554+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
15555+ return NULL;
15556+}
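/*
 * The initial ref_count of 2 is deliberate, as far as the code shows:
 * one reference is owned by the user-object entry (dropped through
 * psb_scene_pool_destroy() when the handle goes away) and one is
 * returned to the caller, to be released later with
 * psb_scene_pool_unref_devlocked().
 */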
15557+
15558+/*
15559+ * Code to support multiple ta memory buffers.
15560+ */
15561+
15562+static void psb_destroy_ta_mem_devlocked(struct psb_ta_mem *ta_mem)
15563+{
15564+ if (!ta_mem)
15565+ return;
15566+
15567+ drm_bo_usage_deref_locked(&ta_mem->hw_data);
15568+ drm_bo_usage_deref_locked(&ta_mem->ta_memory);
15569+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
15570+}
15571+
15572+void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem)
15573+{
15574+ struct psb_ta_mem *tmp_ta_mem = *ta_mem;
15575+ struct drm_device *dev = tmp_ta_mem->dev;
15576+
15577+ (void)dev;
15578+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
15579+ *ta_mem = NULL;
15580+ if (--tmp_ta_mem->ref_count == 0)
15581+ psb_destroy_ta_mem_devlocked(tmp_ta_mem);
15582+}
15583+
15584+void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst, struct psb_ta_mem *src)
15585+{
15586+ struct drm_device *dev = src->dev;
15587+
15588+ (void)dev;
15589+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
15590+ *dst = src;
15591+ ++src->ref_count;
15592+}
15593+
15594+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
15595+{
15596+ struct drm_psb_private *dev_priv =
15597+ (struct drm_psb_private *)dev->dev_private;
15598+ int ret = -EINVAL;
15599+ struct psb_ta_mem *ta_mem;
15600+ uint32_t bo_size;
15601+ struct psb_xhw_buf buf;
15602+
15603+ INIT_LIST_HEAD(&buf.head);
15604+
15605+ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
15606+
15607+ if (!ta_mem) {
15608+ DRM_ERROR("Out of memory allocating parameter memory.\n");
15609+ return NULL;
15610+ }
15611+
15612+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
15613+ ta_mem->hw_cookie, &bo_size);
15614+ if (ret == -ENOMEM) {
15615+ DRM_ERROR("Parameter memory size is too small.\n");
15616+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
15617+ (unsigned int)(pages * (PAGE_SIZE / 1024)));
15618+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
15619+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
15620+ (unsigned int)(bo_size / 1024));
15621+ DRM_INFO("\"ta_mem_size\" parameter!\n");
15622+ }
15623+ if (ret)
15624+ goto out_err0;
15625+
15626+ bo_size = pages * PAGE_SIZE;
15627+ ta_mem->dev = dev;
15628+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
15629+ DRM_PSB_FLAG_MEM_MMU | DRM_BO_FLAG_READ |
15630+ DRM_BO_FLAG_WRITE |
15631+ PSB_BO_FLAG_SCENE,
15632+ DRM_BO_HINT_DONT_FENCE, 0, 0,
15633+ &ta_mem->hw_data);
15634+ if (ret)
15635+ goto out_err0;
15636+
15637+ ret =
15638+ drm_buffer_object_create(dev, pages << PAGE_SHIFT,
15639+ drm_bo_type_kernel,
15640+ DRM_PSB_FLAG_MEM_RASTGEOM |
15641+ DRM_BO_FLAG_READ |
15642+ DRM_BO_FLAG_WRITE |
15643+ PSB_BO_FLAG_SCENE,
15644+ DRM_BO_HINT_DONT_FENCE, 0,
15645+ 1024 * 1024 >> PAGE_SHIFT,
15646+ &ta_mem->ta_memory);
15647+ if (ret)
15648+ goto out_err1;
15649+
15650+ ta_mem->ref_count = 1;
15651+ return ta_mem;
15652+ out_err1:
15653+ drm_bo_usage_deref_unlocked(&ta_mem->hw_data);
15654+ out_err0:
15655+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
15656+ return NULL;
15657+}
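/*
 * Two buffer objects back a psb_ta_mem: hw_data in MMU-mapped memory
 * and ta_memory in raster-geometry memory, the latter with what appears
 * to be a 1 MiB placement constraint (1024 * 1024 >> PAGE_SHIFT, in
 * pages, passed to drm_buffer_object_create()). Note that the
 * firmware-suggested bo_size from psb_xhw_ta_mem_info() is overwritten
 * with pages * PAGE_SIZE before use, so it only feeds the diagnostic
 * above. Both offsets are the values later handed to
 * psb_xhw_ta_mem_load().
 */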
15658+
15659+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
15660+ void *data, struct drm_file *file_priv)
15661+{
15662+ struct drm_psb_scene *scene = (struct drm_psb_scene *)data;
15663+ struct drm_user_object *uo;
15664+ struct drm_ref_object *ro;
15665+ int ret = 0;
15666+
15667+ mutex_lock(&dev->struct_mutex);
15668+ if (!scene->handle_valid)
15669+ goto out_unlock;
15670+
15671+ uo = drm_lookup_user_object(file_priv, scene->handle);
15672+ if (!uo) {
15673+ ret = -EINVAL;
15674+ goto out_unlock;
15675+ }
15676+ if (uo->type != PSB_USER_OBJECT_SCENE_POOL) {
15677+ DRM_ERROR("Not a scene pool object.\n");
15678+ ret = -EINVAL;
15679+ goto out_unlock;
15680+ }
15681+ if (uo->owner != file_priv) {
15682+ DRM_ERROR("Not owner of scene pool object.\n");
15683+ ret = -EPERM;
15684+ goto out_unlock;
15685+ }
15686+
15687+ scene->handle_valid = 0;
15688+ ro = drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE);
15689+ BUG_ON(!ro);
15690+ drm_remove_ref_object(file_priv, ro);
15691+
15692+ out_unlock:
15693+ mutex_unlock(&dev->struct_mutex);
15694+ return ret;
15695+}
15696Index: linux-2.6.28/drivers/gpu/drm/psb/psb_scene.h
15697===================================================================
15698--- /dev/null 1970-01-01 00:00:00.000000000 +0000
15699+++ linux-2.6.28/drivers/gpu/drm/psb/psb_scene.h 2009-02-25 15:37:02.000000000 +0000
15700@@ -0,0 +1,112 @@
15701+/**************************************************************************
15702+ * Copyright (c) 2007, Intel Corporation.
15703+ * All Rights Reserved.
15704+ *
15705+ * This program is free software; you can redistribute it and/or modify it
15706+ * under the terms and conditions of the GNU General Public License,
15707+ * version 2, as published by the Free Software Foundation.
15708+ *
15709+ * This program is distributed in the hope it will be useful, but WITHOUT
15710+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15711+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15712+ * more details.
15713+ *
15714+ * You should have received a copy of the GNU General Public License along with
15715+ * this program; if not, write to the Free Software Foundation, Inc.,
15716+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15717+ *
15718+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
15719+ * develop this driver.
15720+ *
15721+ **************************************************************************/
15722+/*
15723+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
15724+ */
15725+
15726+#ifndef _PSB_SCENE_H_
15727+#define _PSB_SCENE_H_
15728+
15729+#define PSB_USER_OBJECT_SCENE_POOL drm_driver_type0
15730+#define PSB_USER_OBJECT_TA_MEM drm_driver_type1
15731+#define PSB_MAX_NUM_SCENES 8
15732+
15733+struct psb_hw_scene;
15734+struct psb_hw_ta_mem;
15735+
15736+struct psb_scene_pool {
15737+ struct drm_device *dev;
15738+ struct drm_user_object user;
15739+ uint32_t ref_count;
15740+ uint32_t w;
15741+ uint32_t h;
15742+ uint32_t cur_scene;
15743+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
15744+ uint32_t num_scenes;
15745+};
15746+
15747+struct psb_scene {
15748+ struct drm_device *dev;
15749+ atomic_t ref_count;
15750+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
15751+ uint32_t bo_size;
15752+ uint32_t w;
15753+ uint32_t h;
15754+ struct psb_ta_mem *ta_mem;
15755+ struct psb_hw_scene *hw_scene;
15756+ struct drm_buffer_object *hw_data;
15757+ uint32_t flags;
15758+ uint32_t clear_p_start;
15759+ uint32_t clear_num_pages;
15760+};
15761+
15762+struct psb_scene_entry {
15763+ struct list_head head;
15764+ struct psb_scene *scene;
15765+};
15766+
15767+struct psb_user_scene {
15768+ struct drm_device *dev;
15769+ struct drm_user_object user;
15770+};
15771+
15772+struct psb_ta_mem {
15773+ struct drm_device *dev;
15774+ struct drm_user_object user;
15775+ uint32_t ref_count;
15776+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
15777+ uint32_t bo_size;
15778+ struct drm_buffer_object *ta_memory;
15779+ struct drm_buffer_object *hw_data;
15780+ int is_deallocating;
15781+ int deallocating_scheduled;
15782+};
15783+
15784+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
15785+ int shareable,
15786+ uint32_t num_scenes,
15787+ uint32_t w, uint32_t h);
15788+extern void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool);
15789+extern struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file
15790+ *priv,
15791+ uint32_t handle,
15792+ int check_owner);
15793+extern int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
15794+ uint64_t mask, uint32_t hint, uint32_t w,
15795+ uint32_t h, int final_pass,
15796+ struct psb_scene **scene_p);
15797+extern void psb_scene_unref_devlocked(struct psb_scene **scene);
15798+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
15799+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
15800+ void *data, struct drm_file *file_priv);
15801+
15802+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
15803+{
15804+ return pool->user.hash.key;
15805+}
15806+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
15807+ uint32_t pages);
15808+extern void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst,
15809+ struct psb_ta_mem *src);
15810+extern void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem);
15811+
15812+#endif
15813Index: linux-2.6.28/drivers/gpu/drm/psb/psb_schedule.c
15814===================================================================
15815--- /dev/null 1970-01-01 00:00:00.000000000 +0000
15816+++ linux-2.6.28/drivers/gpu/drm/psb/psb_schedule.c 2009-02-25 15:37:02.000000000 +0000
15817@@ -0,0 +1,1445 @@
15818+/**************************************************************************
15819+ * Copyright (c) 2007, Intel Corporation.
15820+ * All Rights Reserved.
15821+ *
15822+ * This program is free software; you can redistribute it and/or modify it
15823+ * under the terms and conditions of the GNU General Public License,
15824+ * version 2, as published by the Free Software Foundation.
15825+ *
15826+ * This program is distributed in the hope it will be useful, but WITHOUT
15827+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15828+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15829+ * more details.
15830+ *
15831+ * You should have received a copy of the GNU General Public License along with
15832+ * this program; if not, write to the Free Software Foundation, Inc.,
15833+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15834+ *
15835+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
15836+ * develop this driver.
15837+ *
15838+ **************************************************************************/
15839+/*
15840+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
15841+ */
15842+
15843+#include "drmP.h"
15844+#include "psb_drm.h"
15845+#include "psb_drv.h"
15846+#include "psb_reg.h"
15847+#include "psb_scene.h"
15848+
15849+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
15850+#define PSB_RASTER_TIMEOUT (DRM_HZ / 2)
15851+#define PSB_TA_TIMEOUT (DRM_HZ / 5)
15852+
15853+#undef PSB_SOFTWARE_WORKAHEAD
15854+
15855+#ifdef PSB_STABLE_SETTING
15856+
15857+/*
15858+ * Software blocks completely while the engines are working so there can be no
15859+ * overlap.
15860+ */
15861+
15862+#define PSB_WAIT_FOR_RASTER_COMPLETION
15863+#define PSB_WAIT_FOR_TA_COMPLETION
15864+
15865+#elif defined(PSB_PARANOID_SETTING)
15866+/*
15867+ * Software blocks "almost" completely while the engines are working, so
15868+ * there can be next to no overlap.
15869+ */
15870+
15871+#define PSB_WAIT_FOR_RASTER_COMPLETION
15872+#define PSB_WAIT_FOR_TA_COMPLETION
15873+#define PSB_BE_PARANOID
15874+
15875+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
15876+/*
15877+ * Software leaps ahead while the rasterizer is running and prepares
15878+ * a new ta job that can be scheduled before the rasterizer has
15879+ * finished.
15880+ */
15881+
15882+#define PSB_WAIT_FOR_TA_COMPLETION
15883+
15884+#elif defined(PSB_SOFTWARE_WORKAHEAD)
15885+/*
15886+ * Don't sync; allow software to work ahead and queue a number of jobs,
15887+ * but block overlapping in the scheduler.
15888+ */
15889+
15890+#define PSB_BLOCK_OVERLAP
15891+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
15892+
15893+#endif
15894+
15895+/*
15896+ * Avoid pixelbe pagefaults on C0.
15897+ */
15898+#if 0
15899+#define PSB_BLOCK_OVERLAP
15900+#endif
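/*
 * Net effect of the policy block above: PSB_SOFTWARE_WORKAHEAD is
 * explicitly #undef'd and none of PSB_STABLE_SETTING,
 * PSB_PARANOID_SETTING or PSB_SOME_OVERLAP_BUT_LOCKUP is defined, so in
 * this build no PSB_WAIT_FOR_*_COMPLETION or PSB_BLOCK_OVERLAP macro is
 * set and the TA and rasterizer are allowed to overlap freely.
 */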
15901+
15902+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
15903+ struct psb_scheduler *scheduler,
15904+ uint32_t reply_flag);
15905+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
15906+ struct psb_scheduler *scheduler,
15907+ uint32_t reply_flag);
15908+
15909+#ifdef FIX_TG_16
15910+
15911+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
15912+static int psb_2d_trylock(struct drm_psb_private *dev_priv);
15913+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
15914+
15915+#endif
15916+
15917+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
15918+ int *lockup, int *idle)
15919+{
15920+ unsigned long irq_flags;
15921+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
15922+
15923+ *lockup = 0;
15924+ *idle = 1;
15925+
15926+ spin_lock_irqsave(&scheduler->lock, irq_flags);
15927+
15928+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
15929+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
15930+ *lockup = 1;
15931+ }
15932+ if (!*lockup
15933+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
15934+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
15935+ *lockup = 1;
15936+ }
15937+ if (!*lockup)
15938+ *idle = scheduler->idle;
15939+
15940+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
15941+}
15942+
15943+static inline void psb_set_idle(struct psb_scheduler *scheduler)
15944+{
15945+ scheduler->idle =
15946+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
15947+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
15948+ if (scheduler->idle)
15949+ wake_up(&scheduler->idle_queue);
15950+}
15951+
15952+/*
15953+ * Call with the scheduler spinlock held.
15954+ * Assigns a scene context to either the ta or the rasterizer,
15955+ * flushing out other scenes to memory if necessary.
15956+ */
15957+
15958+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
15959+ struct psb_scene *scene,
15960+ int engine, struct psb_task *task)
15961+{
15962+ uint32_t flags = 0;
15963+ struct psb_hw_scene *hw_scene;
15964+ struct drm_device *dev = scene->dev;
15965+ struct drm_psb_private *dev_priv =
15966+ (struct drm_psb_private *)dev->dev_private;
15967+
15968+ hw_scene = scene->hw_scene;
15969+ if (hw_scene && hw_scene->last_scene == scene) {
15970+
15971+ /*
15972+ * Reuse the last hw scene context and delete it from the
15973+ * free list.
15974+ */
15975+
15976+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
15977+ hw_scene->context_number);
15978+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
15979+
15980+ /*
15981+ * No hw context initialization to be done.
15982+ */
15983+
15984+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
15985+ }
15986+
15987+ list_del_init(&hw_scene->head);
15988+
15989+ } else {
15990+ struct list_head *list;
15991+ hw_scene = NULL;
15992+
15993+ /*
15994+ * Grab a new hw scene context.
15995+ */
15996+
15997+ list_for_each(list, &scheduler->hw_scenes) {
15998+ hw_scene = list_entry(list, struct psb_hw_scene, head);
15999+ break;
16000+ }
16001+ BUG_ON(!hw_scene);
16002+ PSB_DEBUG_RENDER("New hw scene %d.\n",
16003+ hw_scene->context_number);
16004+
16005+ list_del_init(list);
16006+ }
16007+ scene->hw_scene = hw_scene;
16008+ hw_scene->last_scene = scene;
16009+
16010+ flags |= PSB_SCENE_FLAG_SETUP;
16011+
16012+ /*
16013+ * Switch context and setup the engine.
16014+ */
16015+
16016+ return psb_xhw_scene_bind_fire(dev_priv,
16017+ &task->buf,
16018+ task->flags,
16019+ hw_scene->context_number,
16020+ scene->hw_cookie,
16021+ task->oom_cmds,
16022+ task->oom_cmd_size,
16023+ scene->hw_data->offset,
16024+ engine, flags | scene->flags);
16025+}
16026+
16027+static inline void psb_report_fence(struct psb_scheduler *scheduler,
16028+ uint32_t class,
16029+ uint32_t sequence,
16030+ uint32_t type, int call_handler)
16031+{
16032+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
16033+
16034+ seq->sequence = sequence;
16035+ seq->reported = 0;
16036+ if (call_handler)
16037+ psb_fence_handler(scheduler->dev, class);
16038+}
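/*
 * seq[] is indexed by fence type (the _PSB_FENCE_*_SHIFT values used by
 * the callers below), so each completion stage tracks its own latest
 * sequence number; psb_fence_handler() is only kicked when call_handler
 * is set.
 */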
16039+
16040+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
16041+ struct psb_scheduler *scheduler);
16042+
16043+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
16044+ struct psb_scheduler *scheduler)
16045+{
16046+ struct psb_task *task = NULL;
16047+ struct list_head *list, *next;
16048+ int pushed_raster_task = 0;
16049+
16050+ PSB_DEBUG_RENDER("schedule ta\n");
16051+
16052+ if (scheduler->idle_count != 0)
16053+ return;
16054+
16055+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
16056+ return;
16057+
16058+ if (scheduler->ta_state)
16059+ return;
16060+
16061+ /*
16062+ * Skip the ta stage for rasterization-only
16063+ * tasks. They arrive here to make sure we're rasterizing
16064+ * tasks in the correct order.
16065+ */
16066+
16067+ list_for_each_safe(list, next, &scheduler->ta_queue) {
16068+ task = list_entry(list, struct psb_task, head);
16069+ if (task->task_type != psb_raster_task)
16070+ break;
16071+
16072+ list_del_init(list);
16073+ list_add_tail(list, &scheduler->raster_queue);
16074+ psb_report_fence(scheduler, task->engine, task->sequence,
16075+ _PSB_FENCE_TA_DONE_SHIFT, 1);
16076+ task = NULL;
16077+ pushed_raster_task = 1;
16078+ }
16079+
16080+ if (pushed_raster_task)
16081+ psb_schedule_raster(dev_priv, scheduler);
16082+
16083+ if (!task)
16084+ return;
16085+
16086+ /*
16087+ * Still waiting for a vistest?
16088+ */
16089+
16090+ if (scheduler->feedback_task == task)
16091+ return;
16092+
16093+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
16094+
16095+ /*
16096+ * Block ta from trying to use both hardware contexts
16097+ * without the rasterizer starting to render from one of them.
16098+ */
16099+
16100+ if (!list_empty(&scheduler->raster_queue)) {
16101+ return;
16102+ }
16103+#endif
16104+
16105+#ifdef PSB_BLOCK_OVERLAP
16106+ /*
16107+ * Make sure rasterizer isn't doing anything.
16108+ */
16109+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
16110+ return;
16111+#endif
16112+ if (list_empty(&scheduler->hw_scenes))
16113+ return;
16114+
16115+#ifdef FIX_TG_16
16116+ if (psb_check_2d_idle(dev_priv))
16117+ return;
16118+#endif
16119+
16120+ list_del_init(&task->head);
16121+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
16122+ scheduler->ta_state = 1;
16123+
16124+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
16125+ scheduler->idle = 0;
16126+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
16127+
16128+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
16129+ 0x00000000 : PSB_RF_FIRE_TA;
16130+
16131+ (void)psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
16132+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA, task);
16133+ psb_schedule_watchdog(dev_priv);
16134+}
16135+
16136+static int psb_fire_raster(struct psb_scheduler *scheduler,
16137+ struct psb_task *task)
16138+{
16139+ struct drm_device *dev = scheduler->dev;
16140+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
16141+ dev->dev_private;
16142+
16143+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
16144+
16145+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
16146+}
16147+
16148+/*
16149+ * Take the first rasterization task from the hp raster queue or from the
16150+ * raster queue and fire the rasterizer.
16151+ */
16152+
16153+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
16154+ struct psb_scheduler *scheduler)
16155+{
16156+ struct psb_task *task;
16157+ struct list_head *list;
16158+
16159+ if (scheduler->idle_count != 0)
16160+ return;
16161+
16162+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
16163+ PSB_DEBUG_RENDER("Raster busy.\n");
16164+ return;
16165+ }
16166+#ifdef PSB_BLOCK_OVERLAP
16167+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
16168+ PSB_DEBUG_RENDER("TA busy.\n");
16169+ return;
16170+ }
16171+#endif
16172+
16173+ if (!list_empty(&scheduler->hp_raster_queue))
16174+ list = scheduler->hp_raster_queue.next;
16175+ else if (!list_empty(&scheduler->raster_queue))
16176+ list = scheduler->raster_queue.next;
16177+ else {
16178+ PSB_DEBUG_RENDER("Nothing in list\n");
16179+ return;
16180+ }
16181+
16182+ task = list_entry(list, struct psb_task, head);
16183+
16184+ /*
16185+ * Sometimes changing ZLS format requires an ISP reset.
16186+ * Doesn't seem to consume too much time.
16187+ */
16188+
16189+ if (task->scene)
16190+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
16191+
16192+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
16193+
16194+ list_del_init(list);
16195+ scheduler->idle = 0;
16196+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
16197+ scheduler->total_raster_jiffies = 0;
16198+
16199+ if (task->scene)
16200+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
16201+
16202+ (void)psb_reg_submit(dev_priv, task->raster_cmds,
16203+ task->raster_cmd_size);
16204+
16205+ if (task->scene) {
16206+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
16207+ 0x00000000 : PSB_RF_FIRE_RASTER;
16208+ psb_set_scene_fire(scheduler,
16209+ task->scene, PSB_SCENE_ENGINE_RASTER, task);
16210+ } else {
16211+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
16212+ psb_fire_raster(scheduler, task);
16213+ }
16214+ psb_schedule_watchdog(dev_priv);
16215+}
16216+
16217+int psb_extend_raster_timeout(struct drm_psb_private *dev_priv)
16218+{
16219+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16220+ unsigned long irq_flags;
16221+ int ret;
16222+
16223+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16224+ scheduler->total_raster_jiffies +=
16225+ jiffies - scheduler->raster_end_jiffies + PSB_RASTER_TIMEOUT;
16226+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
16227+ ret = (scheduler->total_raster_jiffies > PSB_ALLOWED_RASTER_RUNTIME) ?
16228+ -EBUSY : 0;
16229+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16230+ return ret;
16231+}
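/*
 * Budget arithmetic, worked through with DRM_HZ = 1000 as an example:
 * each watchdog round that finds the rasterizer busy but not locked up
 * adds the elapsed time of that round (jiffies - raster_end_jiffies +
 * PSB_RASTER_TIMEOUT, i.e. roughly half a second) to
 * total_raster_jiffies and pushes the deadline out by another
 * PSB_RASTER_TIMEOUT. Once the accumulated runtime exceeds
 * PSB_ALLOWED_RASTER_RUNTIME (20 * DRM_HZ, about 20 seconds), the
 * function returns -EBUSY and psb_reset_wq() proceeds with a full
 * reset.
 */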
16232+
16233+/*
16234+ * TA done handler.
16235+ */
16236+
16237+static void psb_ta_done(struct drm_psb_private *dev_priv,
16238+ struct psb_scheduler *scheduler)
16239+{
16240+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
16241+ struct psb_scene *scene = task->scene;
16242+
16243+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
16244+
16245+ switch (task->ta_complete_action) {
16246+ case PSB_RASTER_BLOCK:
16247+ scheduler->ta_state = 1;
16248+ scene->flags |=
16249+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
16250+ list_add_tail(&task->head, &scheduler->raster_queue);
16251+ break;
16252+ case PSB_RASTER:
16253+ scene->flags |=
16254+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
16255+ list_add_tail(&task->head, &scheduler->raster_queue);
16256+ break;
16257+ case PSB_RETURN:
16258+ scheduler->ta_state = 0;
16259+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
16260+ list_add_tail(&scene->hw_scene->head, &scheduler->hw_scenes);
16261+
16262+ break;
16263+ }
16264+
16265+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
16266+
16267+#ifdef FIX_TG_16
16268+ psb_2d_atomic_unlock(dev_priv);
16269+#endif
16270+
16271+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
16272+ psb_report_fence(scheduler, task->engine, task->sequence,
16273+ _PSB_FENCE_TA_DONE_SHIFT, 1);
16274+
16275+ psb_schedule_raster(dev_priv, scheduler);
16276+ psb_schedule_ta(dev_priv, scheduler);
16277+ psb_set_idle(scheduler);
16278+
16279+ if (task->ta_complete_action != PSB_RETURN)
16280+ return;
16281+
16282+ list_add_tail(&task->head, &scheduler->task_done_queue);
16283+ schedule_delayed_work(&scheduler->wq, 1);
16284+}
16285+
16286+/*
16287+ * Rasterizer done handler.
16288+ */
16289+
16290+static void psb_raster_done(struct drm_psb_private *dev_priv,
16291+ struct psb_scheduler *scheduler)
16292+{
16293+ struct psb_task *task =
16294+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
16295+ struct psb_scene *scene = task->scene;
16296+ uint32_t complete_action = task->raster_complete_action;
16297+
16298+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
16299+
16300+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
16301+
16302+ if (complete_action != PSB_RASTER)
16303+ psb_schedule_raster(dev_priv, scheduler);
16304+
16305+ if (scene) {
16306+ if (task->feedback.page) {
16307+ if (unlikely(scheduler->feedback_task)) {
16308+ /*
16309+ * This should never happen, since the previous
16310+ * feedback query will return before the next
16311+ * raster task is fired.
16312+ */
16313+ DRM_ERROR("Feedback task busy.\n");
16314+ }
16315+ scheduler->feedback_task = task;
16316+ psb_xhw_vistest(dev_priv, &task->buf);
16317+ }
16318+ switch (complete_action) {
16319+ case PSB_RETURN:
16320+ scene->flags &=
16321+ ~(PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
16322+ list_add_tail(&scene->hw_scene->head,
16323+ &scheduler->hw_scenes);
16324+ psb_report_fence(scheduler, task->engine,
16325+ task->sequence,
16326+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
16327+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM) {
16328+ scheduler->ta_state = 0;
16329+ }
16330+ break;
16331+ case PSB_RASTER:
16332+ list_add(&task->head, &scheduler->raster_queue);
16333+ task->raster_complete_action = PSB_RETURN;
16334+ psb_schedule_raster(dev_priv, scheduler);
16335+ break;
16336+ case PSB_TA:
16337+ list_add(&task->head, &scheduler->ta_queue);
16338+ scheduler->ta_state = 0;
16339+ task->raster_complete_action = PSB_RETURN;
16340+ task->ta_complete_action = PSB_RASTER;
16341+ break;
16342+
16343+ }
16344+ }
16345+ psb_schedule_ta(dev_priv, scheduler);
16346+ psb_set_idle(scheduler);
16347+
16348+ if (complete_action == PSB_RETURN) {
16349+ if (task->scene == NULL) {
16350+ psb_report_fence(scheduler, task->engine,
16351+ task->sequence,
16352+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
16353+ }
16354+ if (!task->feedback.page) {
16355+ list_add_tail(&task->head, &scheduler->task_done_queue);
16356+ schedule_delayed_work(&scheduler->wq, 1);
16357+ }
16358+ }
16359+}
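/*
 * The PSB_TA completion action above looks like the partial-render
 * replay path: the same task is put back on ta_queue with
 * ta_complete_action = PSB_RASTER and raster_complete_action =
 * PSB_RETURN, so it runs the TA stage again and then rasterizes,
 * repeating until the scene finally completes with PSB_RETURN.
 */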
16360+
16361+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
16362+{
16363+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16364+ unsigned long irq_flags;
16365+
16366+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16367+ scheduler->idle_count++;
16368+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16369+}
16370+
16371+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
16372+{
16373+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16374+ unsigned long irq_flags;
16375+
16376+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16377+ if (--scheduler->idle_count == 0) {
16378+ psb_schedule_ta(dev_priv, scheduler);
16379+ psb_schedule_raster(dev_priv, scheduler);
16380+ }
16381+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16382+}
16383+
16384+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
16385+{
16386+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16387+ unsigned long irq_flags;
16388+ int ret;
16389+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16390+ ret = scheduler->idle_count != 0 && scheduler->idle;
16391+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16392+ return ret;
16393+}
16394+
16395+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
16396+{
16397+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16398+ unsigned long irq_flags;
16399+ int ret;
16400+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16401+ ret = (scheduler->idle &&
16402+ list_empty(&scheduler->raster_queue) &&
16403+ list_empty(&scheduler->ta_queue) &&
16404+ list_empty(&scheduler->hp_raster_queue));
16405+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16406+ return ret;
16407+}
16408+
16409+static void psb_ta_oom(struct drm_psb_private *dev_priv,
16410+ struct psb_scheduler *scheduler)
16411+{
16412+
16413+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
16414+ if (!task)
16415+ return;
16416+
16417+ if (task->aborting)
16418+ return;
16419+ task->aborting = 1;
16420+
16421+ DRM_INFO("Info: TA out of parameter memory.\n");
16422+
16423+ (void)psb_xhw_ta_oom(dev_priv, &task->buf, task->scene->hw_cookie);
16424+}
16425+
16426+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
16427+ struct psb_scheduler *scheduler)
16428+{
16429+
16430+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
16431+ uint32_t flags;
16432+ if (!task)
16433+ return;
16434+
16435+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
16436+ task->scene->hw_cookie,
16437+ &task->ta_complete_action,
16438+ &task->raster_complete_action, &flags);
16439+ task->flags |= flags;
16440+ task->aborting = 0;
16441+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
16442+}
16443+
16444+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
16445+ struct psb_scheduler *scheduler)
16446+{
16447+ DRM_ERROR("TA hw scene freed.\n");
16448+}
16449+
16450+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
16451+ struct psb_scheduler *scheduler)
16452+{
16453+ struct psb_task *task = scheduler->feedback_task;
16454+ uint8_t *feedback_map;
16455+ uint32_t add;
16456+ uint32_t cur;
16457+ struct drm_psb_vistest *vistest;
16458+ int i;
16459+
16460+ scheduler->feedback_task = NULL;
16461+ if (!task) {
16462+ DRM_ERROR("No Poulsbo feedback task.\n");
16463+ return;
16464+ }
16465+ if (!task->feedback.page) {
16466+ DRM_ERROR("No Poulsbo feedback page.\n");
16467+ goto out;
16468+ }
16469+
16470+ if (in_irq())
16471+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
16472+ else
16473+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
16474+
16475+ /*
16476+ * Loop over all requested vistest components here.
16477+ * Only one (vistest) currently.
16478+ */
16479+
16480+ vistest = (struct drm_psb_vistest *)
16481+ (feedback_map + task->feedback.offset);
16482+
16483+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
16484+ add = task->buf.arg.arg.feedback[i];
16485+ cur = vistest->vt[i];
16486+
16487+ /*
16488+ * Vistest saturates.
16489+ */
16490+
16491+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
16492+ }
16493+ if (in_irq())
16494+ kunmap_atomic(feedback_map, KM_IRQ0);
16495+ else
16496+ kunmap_atomic(feedback_map, KM_USER0);
16497+ out:
16498+ psb_report_fence(scheduler, task->engine, task->sequence,
16499+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
16500+
16501+ if (list_empty(&task->head)) {
16502+ list_add_tail(&task->head, &scheduler->task_done_queue);
16503+ schedule_delayed_work(&scheduler->wq, 1);
16504+ } else
16505+ psb_schedule_ta(dev_priv, scheduler);
16506+}
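+
+/*
+ * The saturating update above relies on unsigned wrap-around:
+ * (cur + add < cur) is true exactly when the 32-bit sum overflowed,
+ * in which case the counter is clamped to ~0. A minimal stand-alone
+ * sketch of the same idiom (illustrative, not part of the driver):
+ *
+ *	static uint32_t sat_add_u32(uint32_t cur, uint32_t add)
+ *	{
+ *		uint32_t sum = cur + add;
+ *		return (sum < cur) ? 0xffffffffU : sum;
+ *	}
+ */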
16507+
16508+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
16509+ struct psb_scheduler *scheduler)
16510+{
16511+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
16512+
16513+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
16514+
16515+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
16516+}
16517+
16518+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
16519+ struct psb_scheduler *scheduler)
16520+{
16521+ struct psb_task *task =
16522+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
16523+ uint32_t reply_flags;
16524+
16525+ if (!task) {
16526+ DRM_ERROR("Null task.\n");
16527+ return;
16528+ }
16529+
16530+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
16531+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
16532+
16533+ reply_flags = PSB_RF_FIRE_RASTER;
16534+ if (task->raster_complete_action == PSB_RASTER)
16535+ reply_flags |= PSB_RF_DEALLOC;
16536+
16537+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
16538+}
16539+
16540+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
16541+ struct psb_scheduler *scheduler)
16542+{
16543+ uint32_t type;
16544+ int ret;
16545+ unsigned long irq_flags;
16546+
16547+ /*
16548+ * Xhw cannot write directly to the comm page, so
16549+ * do it here. Real firmware would have written it directly.
16550+ */
16551+
16552+ ret = psb_xhw_handler(dev_priv);
16553+ if (unlikely(ret))
16554+ return ret;
16555+
16556+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
16557+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
16558+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
16559+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
16560+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
16561+ DRM_ERROR("Lost Poulsbo hardware event.\n");
16562+ }
16563+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
16564+
16565+ if (type == 0)
16566+ return 0;
16567+
16568+ switch (type) {
16569+ case PSB_UIRQ_VISTEST:
16570+ psb_vistest_reply(dev_priv, scheduler);
16571+ break;
16572+ case PSB_UIRQ_OOM_REPLY:
16573+ psb_ta_oom_reply(dev_priv, scheduler);
16574+ break;
16575+ case PSB_UIRQ_FIRE_TA_REPLY:
16576+ psb_ta_fire_reply(dev_priv, scheduler);
16577+ break;
16578+ case PSB_UIRQ_FIRE_RASTER_REPLY:
16579+ psb_raster_fire_reply(dev_priv, scheduler);
16580+ break;
16581+ default:
16582+ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
16583+ }
16584+ return 0;
16585+}
16586+
16587+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
16588+{
16589+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16590+ unsigned long irq_flags;
16591+ int ret;
16592+
16593+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16594+ ret = psb_user_interrupt(dev_priv, scheduler);
16595+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16596+ return ret;
16597+}
16598+
16599+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
16600+ struct psb_scheduler *scheduler,
16601+ uint32_t reply_flag)
16602+{
16603+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
16604+ uint32_t flags;
16605+ uint32_t mask;
16606+
16607+ task->reply_flags |= reply_flag;
16608+ flags = task->reply_flags;
16609+ mask = PSB_RF_FIRE_TA;
16610+
16611+ if (!(flags & mask))
16612+ return;
16613+
16614+ mask = PSB_RF_TA_DONE;
16615+ if ((flags & mask) == mask) {
16616+ task->reply_flags &= ~mask;
16617+ psb_ta_done(dev_priv, scheduler);
16618+ }
16619+
16620+ mask = PSB_RF_OOM;
16621+ if ((flags & mask) == mask) {
16622+ task->reply_flags &= ~mask;
16623+ psb_ta_oom(dev_priv, scheduler);
16624+ }
16625+
16626+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
16627+ if ((flags & mask) == mask) {
16628+ task->reply_flags &= ~mask;
16629+ psb_ta_done(dev_priv, scheduler);
16630+ }
16631+}
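+
+/*
+ * The reply_flags handling above is a small accumulate-and-match
+ * protocol: each hardware reply ORs one PSB_RF_* bit into the task,
+ * and an action fires only once every bit in its mask has arrived,
+ * in any order. A hedged sketch of the pattern (BIT_A, BIT_B and
+ * take_action() are illustrative names only):
+ *
+ *	flags |= incoming_bit;
+ *	if ((flags & (BIT_A | BIT_B)) == (BIT_A | BIT_B)) {
+ *		flags &= ~(BIT_A | BIT_B);
+ *		take_action();
+ *	}
+ */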
16632+
16633+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
16634+ struct psb_scheduler *scheduler,
16635+ uint32_t reply_flag)
16636+{
16637+ struct psb_task *task =
16638+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
16639+ uint32_t flags;
16640+ uint32_t mask;
16641+
16642+ task->reply_flags |= reply_flag;
16643+ flags = task->reply_flags;
16644+ mask = PSB_RF_FIRE_RASTER;
16645+
16646+ if (!(flags & mask))
16647+ return;
16648+
16649+ /*
16650+ * For rasterizer-only tasks, don't report fence done here,
16651+ * as this is time consuming and the rasterizer wants a new
16652+ * task immediately. For other tasks, the hardware is probably
16653+ * still busy deallocating TA memory, so we can report
16654+ * fence done in parallel.
16655+ */
16656+
16657+ if (task->raster_complete_action == PSB_RETURN &&
16658+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
16659+ psb_report_fence(scheduler, task->engine, task->sequence,
16660+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
16661+ }
16662+
16663+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
16664+ if ((flags & mask) == mask) {
16665+ task->reply_flags &= ~mask;
16666+ psb_raster_done(dev_priv, scheduler);
16667+ }
16668+}
16669+
16670+void psb_scheduler_handler(struct drm_psb_private *dev_priv, uint32_t status)
16671+{
16672+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16673+
16674+ spin_lock(&scheduler->lock);
16675+
16676+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
16677+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_RASTER_DONE);
16678+ }
16679+ if (status & _PSB_CE_DPM_3D_MEM_FREE) {
16680+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
16681+ }
16682+ if (status & _PSB_CE_TA_FINISHED) {
16683+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
16684+ }
16685+ if (status & _PSB_CE_TA_TERMINATE) {
16686+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
16687+ }
16688+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
16689+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
16690+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
16691+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
16692+ }
16693+ if (status & _PSB_CE_DPM_TA_MEM_FREE) {
16694+ psb_ta_hw_scene_freed(dev_priv, scheduler);
16695+ }
16696+ if (status & _PSB_CE_SW_EVENT) {
16697+ psb_user_interrupt(dev_priv, scheduler);
16698+ }
16699+ spin_unlock(&scheduler->lock);
16700+}
16701+
16702+static void psb_free_task_wq(struct work_struct *work)
16703+{
16704+ struct psb_scheduler *scheduler =
16705+ container_of(work, struct psb_scheduler, wq.work);
16706+
16707+ struct drm_device *dev = scheduler->dev;
16708+ struct list_head *list, *next;
16709+ unsigned long irq_flags;
16710+ struct psb_task *task;
16711+
16712+ if (!mutex_trylock(&scheduler->task_wq_mutex))
16713+ return;
16714+
16715+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16716+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
16717+ task = list_entry(list, struct psb_task, head);
16718+ list_del_init(list);
16719+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16720+
16721+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
16722+ "Feedback bo 0x%08lx, done %d\n",
16723+ task->sequence, (unsigned long)task->scene,
16724+ (unsigned long)task->feedback.bo,
16725+ atomic_read(&task->buf.done));
16726+
16727+ if (task->scene) {
16728+ mutex_lock(&dev->struct_mutex);
16729+ PSB_DEBUG_RENDER("Unref scene %d\n", task->sequence);
16730+ psb_scene_unref_devlocked(&task->scene);
16731+ if (task->feedback.bo) {
16732+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
16733+ task->sequence);
16734+ drm_bo_usage_deref_locked(&task->feedback.bo);
16735+ }
16736+ mutex_unlock(&dev->struct_mutex);
16737+ }
16738+
16739+ if (atomic_read(&task->buf.done)) {
16740+ PSB_DEBUG_RENDER("Deleting task %d\n", task->sequence);
16741+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
16742+ task = NULL;
16743+ }
16744+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16745+ if (task != NULL)
16746+ list_add(list, &scheduler->task_done_queue);
16747+ }
16748+ if (!list_empty(&scheduler->task_done_queue)) {
16749+ PSB_DEBUG_RENDER("Rescheduling wq\n");
16750+ schedule_delayed_work(&scheduler->wq, 1);
16751+ }
16752+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16753+
16754+ mutex_unlock(&scheduler->task_wq_mutex);
16755+}
16756+
16757+/*
16758+ * Check if any of the tasks in the queues is using a scene.
16759+ * In that case we know the TA memory buffer objects are
16760+ * fenced and will not be evicted until that fence is signaled.
16761+ */
16762+
16763+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
16764+{
16765+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16766+ unsigned long irq_flags;
16767+ struct psb_task *task;
16768+ struct psb_task *next_task;
16769+
16770+ dev_priv->force_ta_mem_load = 1;
16771+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16772+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue, head) {
16773+ if (task->scene) {
16774+ dev_priv->force_ta_mem_load = 0;
16775+ break;
16776+ }
16777+ }
16778+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
16779+ head) {
16780+ if (task->scene) {
16781+ dev_priv->force_ta_mem_load = 0;
16782+ break;
16783+ }
16784+ }
16785+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16786+}
16787+
16788+void psb_scheduler_reset(struct drm_psb_private *dev_priv, int error_condition)
16789+{
16790+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16791+ unsigned long wait_jiffies;
16792+ unsigned long cur_jiffies;
16793+ struct psb_task *task;
16794+ struct psb_task *next_task;
16795+ unsigned long irq_flags;
16796+
16797+ psb_scheduler_pause(dev_priv);
16798+ if (!psb_scheduler_idle(dev_priv)) {
16799+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16800+
16801+ cur_jiffies = jiffies;
16802+ wait_jiffies = cur_jiffies;
16803+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
16804+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
16805+ wait_jiffies = scheduler->ta_end_jiffies;
16806+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
16807+ time_after_eq(scheduler->raster_end_jiffies, wait_jiffies))
16808+ wait_jiffies = scheduler->raster_end_jiffies;
16809+
16810+ wait_jiffies -= cur_jiffies;
16811+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16812+
16813+ (void)wait_event_timeout(scheduler->idle_queue,
16814+ psb_scheduler_idle(dev_priv),
16815+ wait_jiffies);
16816+ }
16817+
16818+ if (!psb_scheduler_idle(dev_priv)) {
16819+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16820+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
16821+ if (task) {
16822+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
16823+ if (task->engine == PSB_ENGINE_HPRAST) {
16824+ psb_fence_error(scheduler->dev,
16825+ PSB_ENGINE_HPRAST,
16826+ task->sequence,
16827+ _PSB_FENCE_TYPE_RASTER_DONE,
16828+ error_condition);
16829+
16830+ list_del(&task->head);
16831+ psb_xhw_clean_buf(dev_priv, &task->buf);
16832+ list_add_tail(&task->head,
16833+ &scheduler->task_done_queue);
16834+ } else {
16835+ list_add(&task->head, &scheduler->raster_queue);
16836+ }
16837+ }
16838+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
16839+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
16840+ if (task) {
16841+ DRM_ERROR("Detected Poulsbo ta lockup.\n");
16842+ list_add_tail(&task->head, &scheduler->raster_queue);
16843+#ifdef FIX_TG_16
16844+ psb_2d_atomic_unlock(dev_priv);
16845+#endif
16846+ }
16847+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
16848+ scheduler->ta_state = 0;
16849+
16850+#ifdef FIX_TG_16
16851+ atomic_set(&dev_priv->ta_wait_2d, 0);
16852+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
16853+ wake_up(&dev_priv->queue_2d);
16854+#endif
16855+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16856+ }
16857+
16858+ /*
16859+ * Empty raster queue.
16860+ */
16861+
16862+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16863+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
16864+ head) {
16865+ struct psb_scene *scene = task->scene;
16866+
16867+ psb_fence_error(scheduler->dev,
16868+ task->engine,
16869+ task->sequence,
16870+ _PSB_FENCE_TYPE_TA_DONE |
16871+ _PSB_FENCE_TYPE_RASTER_DONE |
16872+ _PSB_FENCE_TYPE_SCENE_DONE |
16873+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
16874+ if (scene) {
16875+ scene->flags = 0;
16876+ if (scene->hw_scene) {
16877+ list_add_tail(&scene->hw_scene->head,
16878+ &scheduler->hw_scenes);
16879+ scene->hw_scene = NULL;
16880+ }
16881+ }
16882+
16883+ psb_xhw_clean_buf(dev_priv, &task->buf);
16884+ list_del(&task->head);
16885+ list_add_tail(&task->head, &scheduler->task_done_queue);
16886+ }
16887+
16888+ schedule_delayed_work(&scheduler->wq, 1);
16889+ scheduler->idle = 1;
16890+ wake_up(&scheduler->idle_queue);
16891+
16892+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16893+ psb_scheduler_restart(dev_priv);
16894+
16895+}
16896+
16897+int psb_scheduler_init(struct drm_device *dev, struct psb_scheduler *scheduler)
16898+{
16899+ struct psb_hw_scene *hw_scene;
16900+ int i;
16901+
16902+ memset(scheduler, 0, sizeof(*scheduler));
16903+ scheduler->dev = dev;
16904+ mutex_init(&scheduler->task_wq_mutex);
16905+ spin_lock_init(&scheduler->lock);
16906+ scheduler->idle = 1;
16907+
16908+ INIT_LIST_HEAD(&scheduler->ta_queue);
16909+ INIT_LIST_HEAD(&scheduler->raster_queue);
16910+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
16911+ INIT_LIST_HEAD(&scheduler->hw_scenes);
16912+ INIT_LIST_HEAD(&scheduler->task_done_queue);
16913+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
16914+ init_waitqueue_head(&scheduler->idle_queue);
16915+
16916+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
16917+ hw_scene = &scheduler->hs[i];
16918+ hw_scene->context_number = i;
16919+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
16920+ }
16921+
16922+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
16923+ scheduler->seq[i].reported = 0;
16924+ }
16925+
16926+ return 0;
16927+}
16928+
16929+/*
16930+ * Scene references maintained by the scheduler are not refcounted.
16931+ * Remove all references to a particular scene here.
16932+ */
16933+
16934+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
16935+{
16936+ struct drm_psb_private *dev_priv =
16937+ (struct drm_psb_private *)scene->dev->dev_private;
16938+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
16939+ struct psb_hw_scene *hw_scene;
16940+ unsigned long irq_flags;
16941+ unsigned int i;
16942+
16943+ spin_lock_irqsave(&scheduler->lock, irq_flags);
16944+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
16945+ hw_scene = &scheduler->hs[i];
16946+ if (hw_scene->last_scene == scene) {
16947+ BUG_ON(list_empty(&hw_scene->head));
16948+ hw_scene->last_scene = NULL;
16949+ }
16950+ }
16951+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
16952+}
16953+
16954+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
16955+{
16956+ flush_scheduled_work();
16957+}
16958+
16959+static int psb_setup_task_devlocked(struct drm_device *dev,
16960+ struct drm_psb_cmdbuf_arg *arg,
16961+ struct drm_buffer_object *raster_cmd_buffer,
16962+ struct drm_buffer_object *ta_cmd_buffer,
16963+ struct drm_buffer_object *oom_cmd_buffer,
16964+ struct psb_scene *scene,
16965+ enum psb_task_type task_type,
16966+ uint32_t engine,
16967+ uint32_t flags, struct psb_task **task_p)
16968+{
16969+ struct psb_task *task;
16970+ int ret;
16971+
16972+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
16973+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
16974+ return -EINVAL;
16975+ }
16976+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
16977+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
16978+ return -EINVAL;
16979+ }
16980+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
16981+ DRM_ERROR("Too many raster cmds %d.\n", arg->oom_size);
16982+ return -EINVAL;
16983+ }
16984+
16985+ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
16986+ if (!task)
16987+ return -ENOMEM;
16988+
16989+ atomic_set(&task->buf.done, 1);
16990+ task->engine = engine;
16991+ INIT_LIST_HEAD(&task->head);
16992+ INIT_LIST_HEAD(&task->buf.head);
16993+ if (ta_cmd_buffer && arg->ta_size != 0) {
16994+ task->ta_cmd_size = arg->ta_size;
16995+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
16996+ arg->ta_offset,
16997+ arg->ta_size,
16998+ PSB_ENGINE_TA, task->ta_cmds);
16999+ if (ret)
17000+ goto out_err;
17001+ }
17002+ if (raster_cmd_buffer) {
17003+ task->raster_cmd_size = arg->cmdbuf_size;
17004+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
17005+ arg->cmdbuf_offset,
17006+ arg->cmdbuf_size,
17007+ PSB_ENGINE_TA, task->raster_cmds);
17008+ if (ret)
17009+ goto out_err;
17010+ }
17011+ if (oom_cmd_buffer && arg->oom_size != 0) {
17012+ task->oom_cmd_size = arg->oom_size;
17013+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
17014+ arg->oom_offset,
17015+ arg->oom_size,
17016+ PSB_ENGINE_TA, task->oom_cmds);
17017+ if (ret)
17018+ goto out_err;
17019+ }
17020+ task->task_type = task_type;
17021+ task->flags = flags;
17022+ if (scene)
17023+ task->scene = psb_scene_ref(scene);
17024+
17025+ *task_p = task;
17026+ return 0;
17027+ out_err:
17028+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
17029+ *task_p = NULL;
17030+ return ret;
17031+}
17032+
17033+int psb_cmdbuf_ta(struct drm_file *priv,
17034+ struct drm_psb_cmdbuf_arg *arg,
17035+ struct drm_buffer_object *cmd_buffer,
17036+ struct drm_buffer_object *ta_buffer,
17037+ struct drm_buffer_object *oom_buffer,
17038+ struct psb_scene *scene,
17039+ struct psb_feedback_info *feedback,
17040+ struct drm_fence_arg *fence_arg)
17041+{
17042+ struct drm_device *dev = priv->minor->dev;
17043+ struct drm_psb_private *dev_priv = dev->dev_private;
17044+ struct drm_fence_object *fence = NULL;
17045+ struct psb_task *task = NULL;
17046+ int ret;
17047+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
17048+ unsigned long irq_flags;
17049+
17050+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
17051+
17052+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
17053+ if (ret)
17054+ return -EAGAIN;
17055+
17056+ mutex_lock(&dev->struct_mutex);
17057+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, ta_buffer,
17058+ oom_buffer, scene,
17059+ psb_ta_task, PSB_ENGINE_TA,
17060+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
17061+ mutex_unlock(&dev->struct_mutex);
17062+
17063+ if (ret)
17064+ goto out_err;
17065+
17066+ task->feedback = *feedback;
17067+
17068+ /*
17069+ * Hand the task over to the scheduler.
17070+ */
17071+
17072+ spin_lock_irqsave(&scheduler->lock, irq_flags);
17073+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
17074+
17075+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
17076+
17077+ task->ta_complete_action = PSB_RASTER;
17078+ task->raster_complete_action = PSB_RETURN;
17079+
17080+ list_add_tail(&task->head, &scheduler->ta_queue);
17081+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
17082+
17083+ psb_schedule_ta(dev_priv, scheduler);
17084+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
17085+
17086+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
17087+ drm_regs_fence(&dev_priv->use_manager, fence);
17088+ if (fence)
17089+ fence_arg->signaled |= 0x1;
17090+
17091+ out_err:
17092+ if (ret && ret != -EAGAIN)
17093+ DRM_ERROR("TA task queue job failed.\n");
17094+
17095+ if (fence) {
17096+#ifdef PSB_WAIT_FOR_TA_COMPLETION
17097+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
17098+ _PSB_FENCE_TYPE_TA_DONE);
17099+#ifdef PSB_BE_PARANOID
17100+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
17101+ _PSB_FENCE_TYPE_SCENE_DONE);
17102+#endif
17103+#endif
17104+ drm_fence_usage_deref_unlocked(&fence);
17105+ }
17106+ mutex_unlock(&dev_priv->reset_mutex);
17107+
17108+ return ret;
17109+}
17110+
17111+int psb_cmdbuf_raster(struct drm_file *priv,
17112+ struct drm_psb_cmdbuf_arg *arg,
17113+ struct drm_buffer_object *cmd_buffer,
17114+ struct drm_fence_arg *fence_arg)
17115+{
17116+ struct drm_device *dev = priv->minor->dev;
17117+ struct drm_psb_private *dev_priv = dev->dev_private;
17118+ struct drm_fence_object *fence = NULL;
17119+ struct psb_task *task = NULL;
17120+ int ret;
17121+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
17122+ unsigned long irq_flags;
17123+
17124+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
17125+
17126+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
17127+ if (ret)
17128+ return -EAGAIN;
17129+
17130+ mutex_lock(&dev->struct_mutex);
17131+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, NULL, NULL,
17132+ NULL, psb_raster_task,
17133+ PSB_ENGINE_TA, 0, &task);
17134+ mutex_unlock(&dev->struct_mutex);
17135+
17136+ if (ret)
17137+ goto out_err;
17138+
17139+ /*
17140+ * Hand the task over to the scheduler.
17141+ */
17142+
17143+ spin_lock_irqsave(&scheduler->lock, irq_flags);
17144+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
17145+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
17146+ task->ta_complete_action = PSB_RASTER;
17147+ task->raster_complete_action = PSB_RETURN;
17148+
17149+ list_add_tail(&task->head, &scheduler->ta_queue);
17150+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
17151+ psb_schedule_ta(dev_priv, scheduler);
17152+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
17153+
17154+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
17155+ drm_regs_fence(&dev_priv->use_manager, fence);
17156+ if (fence)
17157+ fence_arg->signaled |= 0x1;
17158+ out_err:
17159+ if (ret && ret != -EAGAIN)
17160+ DRM_ERROR("Raster task queue job failed.\n");
17161+
17162+ if (fence) {
17163+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
17164+ drm_fence_object_wait(fence, 1, 1, fence->type);
17165+#endif
17166+ drm_fence_usage_deref_unlocked(&fence);
17167+ }
17168+
17169+ mutex_unlock(&dev_priv->reset_mutex);
17170+
17171+ return ret;
17172+}
17173+
17174+#ifdef FIX_TG_16
17175+
17176+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
17177+{
17178+ if (psb_2d_trylock(dev_priv)) {
17179+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
17180+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
17181+ _PSB_C2B_STATUS_BUSY))) {
17182+ return 0;
17183+ }
17184+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
17185+ psb_2D_irq_on(dev_priv);
17186+
17187+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
17188+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
17189+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
17190+
17191+ psb_2d_atomic_unlock(dev_priv);
17192+ }
17193+
17194+ atomic_set(&dev_priv->ta_wait_2d, 1);
17195+ return -EBUSY;
17196+}
17197+
17198+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
17199+{
17200+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
17201+
17202+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
17203+ psb_schedule_ta(dev_priv, scheduler);
17204+ if (atomic_read(&dev_priv->waiters_2d) != 0)
17205+ wake_up(&dev_priv->queue_2d);
17206+ }
17207+}
17208+
17209+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
17210+{
17211+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
17212+ unsigned long irq_flags;
17213+
17214+ spin_lock_irqsave(&scheduler->lock, irq_flags);
17215+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
17216+ atomic_set(&dev_priv->ta_wait_2d, 0);
17217+ psb_2D_irq_off(dev_priv);
17218+ psb_schedule_ta(dev_priv, scheduler);
17219+ if (atomic_read(&dev_priv->waiters_2d) != 0)
17220+ wake_up(&dev_priv->queue_2d);
17221+ }
17222+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
17223+}
17224+
17225+/*
17226+ * 2D locking functions. Can't use a mutex since the trylock() and
17227+ * unlock() methods need to be accessible from interrupt context.
17228+ */
17229+
17230+static int psb_2d_trylock(struct drm_psb_private *dev_priv)
17231+{
17232+ return (atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0);
17233+}
17234+
17235+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
17236+{
17237+ atomic_set(&dev_priv->lock_2d, 0);
17238+ if (atomic_read(&dev_priv->waiters_2d) != 0)
17239+ wake_up(&dev_priv->queue_2d);
17240+}
17241+
17242+void psb_2d_unlock(struct drm_psb_private *dev_priv)
17243+{
17244+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
17245+ unsigned long irq_flags;
17246+
17247+ spin_lock_irqsave(&scheduler->lock, irq_flags);
17248+ psb_2d_atomic_unlock(dev_priv);
17249+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
17250+ psb_atomic_resume_ta_2d_idle(dev_priv);
17251+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
17252+}
17253+
17254+void psb_2d_lock(struct drm_psb_private *dev_priv)
17255+{
17256+ atomic_inc(&dev_priv->waiters_2d);
17257+ wait_event(dev_priv->queue_2d, atomic_read(&dev_priv->ta_wait_2d) == 0);
17258+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
17259+ atomic_dec(&dev_priv->waiters_2d);
17260+}
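+
+/*
+ * Typical process-context use of the pair above (a sketch;
+ * psb_blit_sequence() in psb_sgx.c is a real caller):
+ *
+ *	psb_2d_lock(dev_priv);
+ *	ret = psb_2d_submit(dev_priv, buffer, num_words);
+ *	psb_2d_unlock(dev_priv);
+ *
+ * Interrupt context must use psb_2d_trylock() and back off on
+ * failure, which is why the lock is a cmpxchg on an atomic_t rather
+ * than a mutex.
+ */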
17261+
17262+#endif
17263Index: linux-2.6.28/drivers/gpu/drm/psb/psb_schedule.h
17264===================================================================
17265--- /dev/null 1970-01-01 00:00:00.000000000 +0000
17266+++ linux-2.6.28/drivers/gpu/drm/psb/psb_schedule.h 2009-02-25 15:37:02.000000000 +0000
17267@@ -0,0 +1,170 @@
17268+/**************************************************************************
17269+ * Copyright (c) 2007, Intel Corporation.
17270+ * All Rights Reserved.
17271+ *
17272+ * This program is free software; you can redistribute it and/or modify it
17273+ * under the terms and conditions of the GNU General Public License,
17274+ * version 2, as published by the Free Software Foundation.
17275+ *
17276+ * This program is distributed in the hope it will be useful, but WITHOUT
17277+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17278+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17279+ * more details.
17280+ *
17281+ * You should have received a copy of the GNU General Public License along with
17282+ * this program; if not, write to the Free Software Foundation, Inc.,
17283+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17284+ *
17285+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
17286+ * develop this driver.
17287+ *
17288+ **************************************************************************/
17289+/*
17290+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
17291+ */
17292+
17293+#ifndef _PSB_SCHEDULE_H_
17294+#define _PSB_SCHEDULE_H_
17295+
17296+#include "drmP.h"
17297+
17298+enum psb_task_type {
17299+ psb_ta_midscene_task,
17300+ psb_ta_task,
17301+ psb_raster_task,
17302+ psb_freescene_task
17303+};
17304+
17305+#define PSB_MAX_TA_CMDS 60
17306+#define PSB_MAX_RASTER_CMDS 60
17307+#define PSB_MAX_OOM_CMDS 6
17308+
17309+struct psb_xhw_buf {
17310+ struct list_head head;
17311+ int copy_back;
17312+ atomic_t done;
17313+ struct drm_psb_xhw_arg arg;
17314+
17315+};
17316+
17317+struct psb_feedback_info {
17318+ struct drm_buffer_object *bo;
17319+ struct page *page;
17320+ uint32_t offset;
17321+};
17322+
17323+struct psb_task {
17324+ struct list_head head;
17325+ struct psb_scene *scene;
17326+ struct psb_feedback_info feedback;
17327+ enum psb_task_type task_type;
17328+ uint32_t engine;
17329+ uint32_t sequence;
17330+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
17331+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
17332+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
17333+ uint32_t ta_cmd_size;
17334+ uint32_t raster_cmd_size;
17335+ uint32_t oom_cmd_size;
17336+ uint32_t feedback_offset;
17337+ uint32_t ta_complete_action;
17338+ uint32_t raster_complete_action;
17339+ uint32_t hw_cookie;
17340+ uint32_t flags;
17341+ uint32_t reply_flags;
17342+ uint32_t aborting;
17343+ struct psb_xhw_buf buf;
17344+};
17345+
17346+struct psb_hw_scene {
17347+ struct list_head head;
17348+ uint32_t context_number;
17349+
17350+ /*
17351+ * This pointer does not refcount the scene it points to, so
17352+ * we must make sure it is set to NULL before destroying the
17353+ * corresponding scene (see psb_scheduler_remove_scene_refs()).
17354+ */
17355+
17356+ struct psb_scene *last_scene;
17357+};
17358+
17359+struct psb_scene;
17360+struct drm_psb_private;
17361+
17362+struct psb_scheduler_seq {
17363+ uint32_t sequence;
17364+ int reported;
17365+};
17366+
17367+struct psb_scheduler {
17368+ struct drm_device *dev;
17369+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
17370+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
17371+ struct mutex task_wq_mutex;
17372+ spinlock_t lock;
17373+ struct list_head hw_scenes;
17374+ struct list_head ta_queue;
17375+ struct list_head raster_queue;
17376+ struct list_head hp_raster_queue;
17377+ struct list_head task_done_queue;
17378+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
17379+ struct psb_task *feedback_task;
17380+ int ta_state;
17381+ struct psb_hw_scene *pending_hw_scene;
17382+ uint32_t pending_hw_scene_seq;
17383+ struct delayed_work wq;
17384+ struct psb_scene_pool *pool;
17385+ uint32_t idle_count;
17386+ int idle;
17387+ wait_queue_head_t idle_queue;
17388+ unsigned long ta_end_jiffies;
17389+ unsigned long raster_end_jiffies;
17390+ unsigned long total_raster_jiffies;
17391+};
17392+
17393+#define PSB_RF_FIRE_TA (1 << 0)
17394+#define PSB_RF_OOM (1 << 1)
17395+#define PSB_RF_OOM_REPLY (1 << 2)
17396+#define PSB_RF_TERMINATE (1 << 3)
17397+#define PSB_RF_TA_DONE (1 << 4)
17398+#define PSB_RF_FIRE_RASTER (1 << 5)
17399+#define PSB_RF_RASTER_DONE (1 << 6)
17400+#define PSB_RF_DEALLOC (1 << 7)
17401+
17402+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
17403+ int shareable, uint32_t w,
17404+ uint32_t h);
17405+extern uint32_t psb_scene_handle(struct psb_scene *scene);
17406+extern int psb_scheduler_init(struct drm_device *dev,
17407+ struct psb_scheduler *scheduler);
17408+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
17409+extern int psb_cmdbuf_ta(struct drm_file *priv,
17410+ struct drm_psb_cmdbuf_arg *arg,
17411+ struct drm_buffer_object *cmd_buffer,
17412+ struct drm_buffer_object *ta_buffer,
17413+ struct drm_buffer_object *oom_buffer,
17414+ struct psb_scene *scene,
17415+ struct psb_feedback_info *feedback,
17416+ struct drm_fence_arg *fence_arg);
17417+extern int psb_cmdbuf_raster(struct drm_file *priv,
17418+ struct drm_psb_cmdbuf_arg *arg,
17419+ struct drm_buffer_object *cmd_buffer,
17420+ struct drm_fence_arg *fence_arg);
17421+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
17422+ uint32_t status);
17423+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
17424+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
17425+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
17426+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
17427+
17428+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
17429+ int *lockup, int *idle);
17430+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
17431+ int error_condition);
17432+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
17433+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
17434+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
17435+extern int psb_extend_raster_timeout(struct drm_psb_private *dev_priv);
17436+
17437+#endif
17438Index: linux-2.6.28/drivers/gpu/drm/psb/psb_sgx.c
17439===================================================================
17440--- /dev/null 1970-01-01 00:00:00.000000000 +0000
17441+++ linux-2.6.28/drivers/gpu/drm/psb/psb_sgx.c 2009-02-25 15:37:02.000000000 +0000
17442@@ -0,0 +1,1422 @@
17443+/**************************************************************************
17444+ * Copyright (c) 2007, Intel Corporation.
17445+ * All Rights Reserved.
17446+ *
17447+ * This program is free software; you can redistribute it and/or modify it
17448+ * under the terms and conditions of the GNU General Public License,
17449+ * version 2, as published by the Free Software Foundation.
17450+ *
17451+ * This program is distributed in the hope it will be useful, but WITHOUT
17452+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17453+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17454+ * more details.
17455+ *
17456+ * You should have received a copy of the GNU General Public License along with
17457+ * this program; if not, write to the Free Software Foundation, Inc.,
17458+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17459+ *
17460+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
17461+ * develop this driver.
17462+ *
17463+ **************************************************************************/
17464+/*
17465+ */
17466+
17467+#include "drmP.h"
17468+#include "psb_drv.h"
17469+#include "psb_drm.h"
17470+#include "psb_reg.h"
17471+#include "psb_scene.h"
17472+
17473+#include "psb_msvdx.h"
17474+
17475+int psb_submit_video_cmdbuf(struct drm_device *dev,
17476+ struct drm_buffer_object *cmd_buffer,
17477+ unsigned long cmd_offset, unsigned long cmd_size,
17478+ struct drm_fence_object *fence);
17479+
17480+struct psb_dstbuf_cache {
17481+ unsigned int dst;
17482+ uint32_t *use_page;
17483+ unsigned int use_index;
17484+ uint32_t use_background;
17485+ struct drm_buffer_object *dst_buf;
17486+ unsigned long dst_offset;
17487+ uint32_t *dst_page;
17488+ unsigned int dst_page_offset;
17489+ struct drm_bo_kmap_obj dst_kmap;
17490+ int dst_is_iomem;
17491+};
17492+
17493+struct psb_buflist_item {
17494+ struct drm_buffer_object *bo;
17495+ void __user *data;
17496+ int ret;
17497+ int presumed_offset_correct;
17498+};
17499+
17500+
17501+#define PSB_REG_GRAN_SHIFT 2
17502+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
17503+#define PSB_MAX_REG 0x1000
17504+
17505+static const uint32_t disallowed_ranges[][2] = {
17506+ {0x0000, 0x0200},
17507+ {0x0208, 0x0214},
17508+ {0x021C, 0x0224},
17509+ {0x0230, 0x0234},
17510+ {0x0248, 0x024C},
17511+ {0x0254, 0x0358},
17512+ {0x0428, 0x0428},
17513+ {0x0430, 0x043C},
17514+ {0x0498, 0x04B4},
17515+ {0x04CC, 0x04D8},
17516+ {0x04E0, 0x07FC},
17517+ {0x0804, 0x0A58},
17518+ {0x0A68, 0x0A80},
17519+ {0x0AA0, 0x0B1C},
17520+ {0x0B2C, 0x0CAC},
17521+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
17522+};
17523+
17524+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
17525+ (PSB_REG_GRANULARITY *
17526+ (sizeof(uint32_t) << 3))];
17527+
17528+static inline int psb_disallowed(uint32_t reg)
17529+{
17530+ reg >>= PSB_REG_GRAN_SHIFT;
17531+ return ((psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0);
17532+}
17533+
17534+void psb_init_disallowed(void)
17535+{
17536+ int i;
17537+ uint32_t reg, tmp;
17538+ static int initialized = 0;
17539+
17540+ if (initialized)
17541+ return;
17542+
17543+ initialized = 1;
17544+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
17545+
17546+ for (i = 0; i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
17547+ ++i) {
17548+ for (reg = disallowed_ranges[i][0];
17549+ reg <= disallowed_ranges[i][1]; reg += 4) {
17550+ tmp = reg >> 2;
17551+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
17552+ }
17553+ }
17554+}
17555+
17556+static int psb_memcpy_check(uint32_t *dst, const uint32_t *src, uint32_t size)
17557+{
17558+ size >>= 3; /* size is in bytes; each iteration copies one (reg, value) pair */
17559+ while (size--) {
17560+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
17561+ DRM_ERROR("Forbidden SGX register access: "
17562+ "0x%04x.\n", *src);
17563+ return -EPERM;
17564+ }
17565+ *dst++ = *src++;
17566+ *dst++ = *src++;
17567+ }
17568+ return 0;
17569+}
17570+
17571+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
17572+ unsigned size)
17573+{
17574+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
17575+ int ret = 0;
17576+
17577+ retry:
17578+ if (avail < size) {
17579+#if 0
17580+ /*
17581+ * We'd ideally like to have an IRQ-driven event here.
17582+ */
17583+
17584+ psb_2D_irq_on(dev_priv);
17585+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
17586+ ((avail = PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
17587+ psb_2D_irq_off(dev_priv);
17588+ if (ret == 0)
17589+ return 0;
17590+ if (ret == -EINTR) {
17591+ ret = 0;
17592+ goto retry;
17593+ }
17594+#else
+ cpu_relax(); /* no IRQ here; avoid hammering the bus while polling */
17595+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
17596+ goto retry;
17597+#endif
17598+ }
17599+ return ret;
17600+}
17601+
17602+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
17603+ unsigned size)
17604+{
17605+ int ret = 0;
17606+ int i;
17607+ unsigned submit_size;
17608+
17609+ while (size > 0) {
17610+ submit_size = (size < 0x60) ? size : 0x60;
17611+ size -= submit_size;
17612+ ret = psb_2d_wait_available(dev_priv, submit_size);
17613+ if (ret)
17614+ return ret;
17615+
17616+ submit_size <<= 2;
17617+
17618+ for (i = 0; i < submit_size; i += 4) {
17619+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
17620+ }
17621+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
17622+ }
17623+ return 0;
17624+}
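+
+/*
+ * The loop above feeds the 2D slave port in chunks of at most 0x60
+ * 32-bit words, gating each chunk on FIFO space; the shift by 2
+ * converts the word count into the byte offsets used for the port
+ * writes. A hedged sketch of the chunking pattern (wait_for_space()
+ * and write_words() are illustrative stand-ins):
+ *
+ *	while (size > 0) {
+ *		chunk = min(size, fifo_depth);
+ *		wait_for_space(chunk);
+ *		write_words(chunk);
+ *		size -= chunk;
+ *	}
+ */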
17625+
17626+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
17627+{
17628+ uint32_t buffer[8];
17629+ uint32_t *bufp = buffer;
17630+ int ret;
17631+
17632+ *bufp++ = PSB_2D_FENCE_BH;
17633+
17634+ *bufp++ = PSB_2D_DST_SURF_BH |
17635+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
17636+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
17637+
17638+ *bufp++ = PSB_2D_BLIT_BH |
17639+ PSB_2D_ROT_NONE |
17640+ PSB_2D_COPYORDER_TL2BR |
17641+ PSB_2D_DSTCK_DISABLE |
17642+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
17643+
17644+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
17645+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
17646+ (0 << PSB_2D_DST_YSTART_SHIFT);
17647+ *bufp++ = (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
17648+
17649+ *bufp++ = PSB_2D_FLUSH_BH;
17650+
17651+ psb_2d_lock(dev_priv);
17652+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
17653+ psb_2d_unlock(dev_priv);
17654+
17655+ if (!ret)
17656+ psb_schedule_watchdog(dev_priv);
17657+ return ret;
17658+}
17659+
17660+int psb_emit_2d_copy_blit(struct drm_device *dev,
17661+ uint32_t src_offset,
17662+ uint32_t dst_offset, uint32_t pages, int direction)
17663+{
17664+ uint32_t cur_pages;
17665+ struct drm_psb_private *dev_priv = dev->dev_private;
17666+ uint32_t buf[10];
17667+ uint32_t *bufp;
17668+ uint32_t xstart;
17669+ uint32_t ystart;
17670+ uint32_t blit_cmd;
17671+ uint32_t pg_add;
17672+ int ret = 0;
17673+
17674+ if (!dev_priv)
17675+ return 0;
17676+
17677+ if (direction) {
17678+ pg_add = (pages - 1) << PAGE_SHIFT;
17679+ src_offset += pg_add;
17680+ dst_offset += pg_add;
17681+ }
17682+
17683+ blit_cmd = PSB_2D_BLIT_BH |
17684+ PSB_2D_ROT_NONE |
17685+ PSB_2D_DSTCK_DISABLE |
17686+ PSB_2D_SRCCK_DISABLE |
17687+ PSB_2D_USE_PAT |
17688+ PSB_2D_ROP3_SRCCOPY |
17689+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
17690+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
17691+
17692+ psb_2d_lock(dev_priv);
17693+ while (pages > 0) {
17694+ cur_pages = pages;
17695+ if (cur_pages > 2048)
17696+ cur_pages = 2048;
17697+ pages -= cur_pages;
17698+ ystart = (direction) ? cur_pages - 1 : 0;
17699+
17700+ bufp = buf;
17701+ *bufp++ = PSB_2D_FENCE_BH;
17702+
17703+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
17704+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
17705+ *bufp++ = dst_offset;
17706+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
17707+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
17708+ *bufp++ = src_offset;
17709+ *bufp++ =
17710+ PSB_2D_SRC_OFF_BH | (xstart << PSB_2D_SRCOFF_XSTART_SHIFT) |
17711+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
17712+ *bufp++ = blit_cmd;
17713+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
17714+ (ystart << PSB_2D_DST_YSTART_SHIFT);
17715+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
17716+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
17717+
17718+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
17719+ if (ret)
17720+ goto out;
17721+ pg_add = (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
17722+ src_offset += pg_add;
17723+ dst_offset += pg_add;
17724+ }
17725+ out:
17726+ psb_2d_unlock(dev_priv);
17727+ return ret;
17728+}
17729+
17730+void psb_init_2d(struct drm_psb_private *dev_priv)
17731+{
17732+ spin_lock_init(&dev_priv->sequence_lock);
17733+ psb_reset(dev_priv, 1);
17734+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
17735+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
17736+ (void)PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
17737+}
17738+
17739+int psb_idle_2d(struct drm_device *dev)
17740+{
17741+ struct drm_psb_private *dev_priv = dev->dev_private;
17742+ unsigned long _end = jiffies + DRM_HZ;
17743+ int busy = 0;
17744+
17745+ /*
17746+ * First idle the 2D engine.
17747+ */
17748+
17749+ if (dev_priv->engine_lockup_2d)
17750+ return -EBUSY;
17751+
17752+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
17753+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
17754+ goto out;
17755+
17756+ do {
17757+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
17758+ } while (busy && !time_after_eq(jiffies, _end));
17759+
17760+ if (busy)
17761+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
17762+ if (busy)
17763+ goto out;
17764+
17765+ do {
17766+ busy =
17767+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
17768+ != 0);
17769+ } while (busy && !time_after_eq(jiffies, _end));
17770+ if (busy)
17771+ busy =
17772+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
17773+ != 0);
17774+
17775+ out:
17776+ if (busy)
17777+ dev_priv->engine_lockup_2d = 1;
17778+
17779+ return (busy) ? -EBUSY : 0;
17780+}
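+
+/*
+ * The double test after each poll loop above is the usual
+ * "re-check after deadline" idiom: the loop can exit because
+ * time_after_eq() tripped even though the engine went idle between
+ * the last poll and the deadline, so busy is sampled once more
+ * before a lockup is declared. Schematically (poll_hw() is an
+ * illustrative stand-in):
+ *
+ *	end = jiffies + timeout;
+ *	do {
+ *		busy = poll_hw();
+ *	} while (busy && !time_after_eq(jiffies, end));
+ *	if (busy)
+ *		busy = poll_hw();
+ */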
17781+
17782+int psb_idle_3d(struct drm_device *dev)
17783+{
17784+ struct drm_psb_private *dev_priv = dev->dev_private;
17785+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
17786+ int ret;
17787+
17788+ ret = wait_event_timeout(scheduler->idle_queue,
17789+ psb_scheduler_finished(dev_priv), DRM_HZ * 10);
17790+
17791+ return (ret < 1) ? -EBUSY : 0;
17792+}
17793+
17794+static void psb_dereference_buffers_locked(struct psb_buflist_item *buffers,
17795+ unsigned num_buffers)
17796+{
17797+ while (num_buffers--)
17798+ drm_bo_usage_deref_locked(&((buffers++)->bo));
17799+
17800+}
17801+
17802+static int psb_check_presumed(struct drm_bo_op_arg *arg,
17803+ struct drm_buffer_object *bo,
17804+ uint32_t __user *data, int *presumed_ok)
17805+{
17806+ struct drm_bo_op_req *req = &arg->d.req;
17807+ uint32_t hint_offset;
17808+ uint32_t hint = req->bo_req.hint;
17809+
17810+ *presumed_ok = 0;
17811+
17812+ if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
17813+ return 0;
17814+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
17815+ *presumed_ok = 1;
17816+ return 0;
17817+ }
17818+ if (bo->offset == req->bo_req.presumed_offset) {
17819+ *presumed_ok = 1;
17820+ return 0;
17821+ }
17822+
17823+ /*
17824+ * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
17825+ * the user-space IOCTL argument list: the buffer has moved,
17826+ * we're about to apply relocations, and we might subsequently
17827+ * hit an -EAGAIN. In that case the argument list will be reused by
17828+ * user-space, but the presumed offset is no longer valid.
17829+ *
17830+ * Needless to say, this is a bit ugly.
17831+ */
17832+
17833+ hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
17834+ hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
17835+ return __put_user(hint, data + hint_offset);
17836+}
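+
+/*
+ * The presumed-offset contract implied above: when user-space sets
+ * DRM_BO_HINT_PRESUMED_OFFSET and its guess matches the buffer's
+ * actual offset, all relocations against that buffer can be skipped
+ * (see presumed_offset_correct in psb_fixup_relocs()). A hedged
+ * sketch of the user-space side, following struct drm_bo_op_req:
+ *
+ *	req.bo_req.hint |= DRM_BO_HINT_PRESUMED_OFFSET;
+ *	req.bo_req.presumed_offset = last_known_offset;
+ */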
17837+
17838+static int psb_validate_buffer_list(struct drm_file *file_priv,
17839+ unsigned fence_class,
17840+ unsigned long data,
17841+ struct psb_buflist_item *buffers,
17842+ unsigned *num_buffers)
17843+{
17844+ struct drm_bo_op_arg arg;
17845+ struct drm_bo_op_req *req = &arg.d.req;
17846+ int ret = 0;
17847+ unsigned buf_count = 0;
17848+ struct psb_buflist_item *item = buffers;
17849+
17850+ do {
17851+ if (buf_count >= *num_buffers) {
17852+ DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
17853+ ret = -EINVAL;
17854+ goto out_err;
17855+ }
17856+ item = buffers + buf_count;
17857+ item->bo = NULL;
17858+
17859+ if (copy_from_user(&arg, (void __user *)data, sizeof(arg))) {
17860+ ret = -EFAULT;
17861+ DRM_ERROR("Error copying validate list.\n"
17862+ "\tbuffer %d, user addr 0x%08lx %d\n",
17863+ buf_count, (unsigned long)data, sizeof(arg));
17864+ goto out_err;
17865+ }
17866+
17867+ ret = 0;
17868+ if (req->op != drm_bo_validate) {
17869+ DRM_ERROR
17870+ ("Buffer object operation wasn't \"validate\".\n");
17871+ ret = -EINVAL;
17872+ goto out_err;
17873+ }
17874+
17875+ item->ret = 0;
17876+ item->data = (void __user *)data;
17877+ ret = drm_bo_handle_validate(file_priv,
17878+ req->bo_req.handle,
17879+ fence_class,
17880+ req->bo_req.flags,
17881+ req->bo_req.mask,
17882+ req->bo_req.hint,
17883+ 0, NULL, &item->bo);
17884+ if (ret)
17885+ goto out_err;
17886+
17887+ PSB_DEBUG_GENERAL("Validated buffer at 0x%08lx\n",
17888+ buffers[buf_count].bo->offset);
17889+
17890+ buf_count++;
17891+
17892+
17893+ ret = psb_check_presumed(&arg, item->bo,
17894+ (uint32_t __user *)
17895+ (unsigned long) data,
17896+ &item->presumed_offset_correct);
17897+
17898+ if (ret)
17899+ goto out_err;
17900+
17901+ data = arg.next;
17902+ } while (data);
17903+
17904+ *num_buffers = buf_count;
17905+
17906+ return 0;
17907+ out_err:
17908+
17909+ *num_buffers = buf_count;
17910+ item->ret = (ret != -EAGAIN) ? ret : 0;
17911+ return ret;
17912+}
17913+
17914+int
17915+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t *regs,
17916+ unsigned int cmds)
17917+{
17918+ int i;
17919+
17920+ /*
17921+ * cmds counts 32-bit words; each (reg, value) pair is two words.
17922+ */
17923+
17924+ cmds >>= 1;
17925+ for (i = 0; i < cmds; ++i) {
17926+ PSB_WSGX32(regs[1], regs[0]);
17927+ regs += 2;
17928+ }
17929+ wmb();
17930+ return 0;
17931+}
17932+
17933+/*
17934+ * Security: Block user-space writing to MMU mapping registers.
17935+ * This is important for security and brings Poulsbo DRM
17936+ * up to par with the other DRM drivers. With this in place,
17937+ * user-space cannot map arbitrary memory pages into graphics
17938+ * memory, although all user-space processes still share
17939+ * access to every buffer object mapped to graphics
17940+ * memory.
17941+ */
17942+
17943+int
17944+psb_submit_copy_cmdbuf(struct drm_device *dev,
17945+ struct drm_buffer_object *cmd_buffer,
17946+ unsigned long cmd_offset,
17947+ unsigned long cmd_size,
17948+ int engine, uint32_t *copy_buffer)
17949+{
17950+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
17951+ struct drm_psb_private *dev_priv = dev->dev_private;
17952+ unsigned long cmd_page_offset = cmd_offset - (cmd_offset & PAGE_MASK);
17953+ unsigned long cmd_next;
17954+ struct drm_bo_kmap_obj cmd_kmap;
17955+ uint32_t *cmd_page;
17956+ unsigned cmds;
17957+ int is_iomem;
17958+ int ret = 0;
17959+
17960+ if (cmd_size == 0)
17961+ return 0;
17962+
17963+ if (engine == PSB_ENGINE_2D)
17964+ psb_2d_lock(dev_priv);
17965+
17966+ do {
17967+ cmd_next = drm_bo_offset_end(cmd_offset, cmd_end);
17968+ ret = drm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
17969+ 1, &cmd_kmap);
17970+
17971+ if (ret)
17972+ return ret;
17973+ cmd_page = drm_bmo_virtual(&cmd_kmap, &is_iomem);
17974+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
17975+ cmds = (cmd_next - cmd_offset) >> 2;
17976+
17977+ switch (engine) {
17978+ case PSB_ENGINE_2D:
17979+ ret =
17980+ psb_2d_submit(dev_priv, cmd_page + cmd_page_offset,
17981+ cmds);
17982+ break;
17983+ case PSB_ENGINE_RASTERIZER:
17984+ case PSB_ENGINE_TA:
17985+ case PSB_ENGINE_HPRAST:
17986+ PSB_DEBUG_GENERAL("Reg copy.\n");
17987+ ret = psb_memcpy_check(copy_buffer,
17988+ cmd_page + cmd_page_offset,
17989+ cmds * sizeof(uint32_t));
17990+ copy_buffer += cmds;
17991+ break;
17992+ default:
17993+ ret = -EINVAL;
17994+ }
17995+ drm_bo_kunmap(&cmd_kmap);
17996+ if (ret)
17997+ break;
17998+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
17999+
18000+ if (engine == PSB_ENGINE_2D)
18001+ psb_2d_unlock(dev_priv);
18002+
18003+ return ret;
18004+}
18005+
18006+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
18007+{
18008+ if (dst_cache->dst_page) {
18009+ drm_bo_kunmap(&dst_cache->dst_kmap);
18010+ dst_cache->dst_page = NULL;
18011+ }
18012+ dst_cache->dst_buf = NULL;
18013+ dst_cache->dst = ~0;
18014+ dst_cache->use_page = NULL;
18015+}
18016+
18017+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
18018+ struct psb_buflist_item *buffers,
18019+ unsigned int dst, unsigned long dst_offset)
18020+{
18021+ int ret;
18022+
18023+ PSB_DEBUG_RELOC("Destination buffer is %d.\n", dst);
18024+
18025+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
18026+ psb_clear_dstbuf_cache(dst_cache);
18027+ dst_cache->dst = dst;
18028+ dst_cache->dst_buf = buffers[dst].bo;
18029+ }
18030+
18031+ if (unlikely(dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
18032+ DRM_ERROR("Relocation destination out of bounds.\n");
18033+ return -EINVAL;
18034+ }
18035+
18036+ if (!drm_bo_same_page(dst_cache->dst_offset, dst_offset) ||
18037+ NULL == dst_cache->dst_page) {
18038+ if (NULL != dst_cache->dst_page) {
18039+ drm_bo_kunmap(&dst_cache->dst_kmap);
18040+ dst_cache->dst_page = NULL;
18041+ }
18042+
18043+ ret = drm_bo_kmap(dst_cache->dst_buf, dst_offset >> PAGE_SHIFT,
18044+ 1, &dst_cache->dst_kmap);
18045+ if (ret) {
18046+ DRM_ERROR("Could not map destination buffer for "
18047+ "relocation.\n");
18048+ return ret;
18049+ }
18050+
18051+ dst_cache->dst_page = drm_bmo_virtual(&dst_cache->dst_kmap,
18052+ &dst_cache->dst_is_iomem);
18053+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
18054+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
18055+ }
18056+ return 0;
18057+}
18058+
18059+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
18060+ uint32_t fence_class,
18061+ const struct drm_psb_reloc *reloc,
18062+ struct psb_buflist_item *buffers,
18063+ int num_buffers,
18064+ struct psb_dstbuf_cache *dst_cache,
18065+ int no_wait, int interruptible)
18066+{
18067+ int reg;
18068+ uint32_t val;
18069+ uint32_t background;
18070+ unsigned int index;
18071+ int ret;
18072+ unsigned int shift;
18073+ unsigned int align_shift;
18074+ uint32_t fence_type;
18075+ struct drm_buffer_object *reloc_bo;
18076+
18077+ PSB_DEBUG_RELOC("Reloc type %d\n"
18078+ "\t where 0x%04x\n"
18079+ "\t buffer 0x%04x\n"
18080+ "\t mask 0x%08x\n"
18081+ "\t shift 0x%08x\n"
18082+ "\t pre_add 0x%08x\n"
18083+ "\t background 0x%08x\n"
18084+ "\t dst_buffer 0x%08x\n"
18085+ "\t arg0 0x%08x\n"
18086+ "\t arg1 0x%08x\n",
18087+ reloc->reloc_op,
18088+ reloc->where,
18089+ reloc->buffer,
18090+ reloc->mask,
18091+ reloc->shift,
18092+ reloc->pre_add,
18093+ reloc->background,
18094+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
18095+
18096+ if (unlikely(reloc->buffer >= num_buffers)) {
18097+ DRM_ERROR("Illegal relocation buffer %d.\n", reloc->buffer);
18098+ return -EINVAL;
18099+ }
18100+
18101+ if (buffers[reloc->buffer].presumed_offset_correct)
18102+ return 0;
18103+
18104+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
18105+ DRM_ERROR("Illegal destination buffer for relocation %d.\n",
18106+ reloc->dst_buffer);
18107+ return -EINVAL;
18108+ }
18109+
18110+ ret = psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
18111+ reloc->where << 2);
18112+ if (ret)
18113+ return ret;
18114+
18115+ reloc_bo = buffers[reloc->buffer].bo;
18116+
18117+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
18118+ DRM_ERROR("Illegal relocation offset add.\n");
18119+ return -EINVAL;
18120+ }
18121+
18122+ switch (reloc->reloc_op) {
18123+ case PSB_RELOC_OP_OFFSET:
18124+ val = reloc_bo->offset + reloc->pre_add;
18125+ break;
18126+ case PSB_RELOC_OP_2D_OFFSET:
18127+ val = reloc_bo->offset + reloc->pre_add -
18128+ dev_priv->mmu_2d_offset;
18129+ if (unlikely(val >= PSB_2D_SIZE)) {
18130+ DRM_ERROR("2D relocation out of bounds\n");
18131+ return -EINVAL;
18132+ }
18133+ break;
18134+ case PSB_RELOC_OP_PDS_OFFSET:
18135+ val = reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
18136+ if (unlikely(val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
18137+ DRM_ERROR("PDS relocation out of bounds\n");
18138+ return -EINVAL;
18139+ }
18140+ break;
18141+ case PSB_RELOC_OP_USE_OFFSET:
18142+ case PSB_RELOC_OP_USE_REG:
18143+
18144+ /*
18145+ * Security:
18146+ * Only allow VERTEX or PIXEL data masters, as
18147+ * shaders run under other data masters may in theory
18148+ * alter MMU mappings.
18149+ */
18150+
18151+ if (unlikely(reloc->arg1 != _PSB_CUC_DM_PIXEL &&
18152+ reloc->arg1 != _PSB_CUC_DM_VERTEX)) {
18153+ DRM_ERROR("Invalid data master in relocation. %d\n",
18154+ reloc->arg1);
18155+ return -EPERM;
18156+ }
18157+
18158+ fence_type = reloc_bo->fence_type;
18159+ ret = psb_grab_use_base(dev_priv,
18160+ reloc_bo->offset +
18161+ reloc->pre_add, reloc->arg0,
18162+ reloc->arg1, fence_class,
18163+ fence_type, no_wait,
18164+ interruptible, &reg, &val);
18165+ if (ret)
18166+ return ret;
18167+
18168+ val = (reloc->reloc_op == PSB_RELOC_OP_USE_REG) ? reg : val;
18169+ break;
18170+ default:
18171+ DRM_ERROR("Unimplemented relocation.\n");
18172+ return -EINVAL;
18173+ }
18174+
18175+ shift = (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
18176+ align_shift = (reloc->shift & PSB_RELOC_ALSHIFT_MASK) >>
18177+ PSB_RELOC_ALSHIFT_SHIFT;
18178+
18179+ val = ((val >> align_shift) << shift);
18180+ index = reloc->where - dst_cache->dst_page_offset;
18181+
18182+ background = reloc->background;
18183+
18184+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET) {
18185+ if (dst_cache->use_page == dst_cache->dst_page &&
18186+ dst_cache->use_index == index)
18187+ background = dst_cache->use_background;
18188+ else
18189+ background = dst_cache->dst_page[index];
18190+ }
18191+#if 0
18192+ if (dst_cache->dst_page[index] != PSB_RELOC_MAGIC &&
18193+ reloc->reloc_op != PSB_RELOC_OP_USE_OFFSET)
18194+ DRM_ERROR("Inconsistent relocation 0x%08lx.\n",
18195+ (unsigned long)dst_cache->dst_page[index]);
18196+#endif
18197+
18198+ val = (background & ~reloc->mask) | (val & reloc->mask);
18199+ dst_cache->dst_page[index] = val;
18200+
18201+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET ||
18202+ reloc->reloc_op == PSB_RELOC_OP_USE_REG) {
18203+ dst_cache->use_page = dst_cache->dst_page;
18204+ dst_cache->use_index = index;
18205+ dst_cache->use_background = val;
18206+ }
18207+
18208+ PSB_DEBUG_RELOC("Reloc buffer %d index 0x%08x, value 0x%08x\n",
18209+ reloc->dst_buffer, index, dst_cache->dst_page[index]);
18210+
18211+ return 0;
18212+}
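+
+/*
+ * The relocation arithmetic above in one place: starting from the
+ * buffer offset (plus pre_add and any engine-specific rebase), the
+ * value is aligned down and repositioned with
+ *
+ *	val = (val >> align_shift) << shift;
+ *
+ * and merged into the destination dword under the mask:
+ *
+ *	dst = (background & ~mask) | (val & mask);
+ *
+ * For example, with align_shift == 12, shift == 12 and
+ * mask == 0xfffff000, a page-aligned offset lands in the top 20 bits
+ * while the low 12 bits keep their background value.
+ */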
18213+
18214+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
18215+ unsigned int num_pages)
18216+{
18217+ int ret = 0;
18218+
18219+ spin_lock(&dev_priv->reloc_lock);
18220+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
18221+ dev_priv->rel_mapped_pages += num_pages;
18222+ ret = 1;
18223+ }
18224+ spin_unlock(&dev_priv->reloc_lock);
18225+ return ret;
18226+}
18227+
18228+static int psb_fixup_relocs(struct drm_file *file_priv,
18229+ uint32_t fence_class,
18230+ unsigned int num_relocs,
18231+ unsigned int reloc_offset,
18232+ uint32_t reloc_handle,
18233+ struct psb_buflist_item *buffers,
18234+ unsigned int num_buffers,
18235+ int no_wait, int interruptible)
18236+{
18237+ struct drm_device *dev = file_priv->minor->dev;
18238+ struct drm_psb_private *dev_priv =
18239+ (struct drm_psb_private *)dev->dev_private;
18240+ struct drm_buffer_object *reloc_buffer = NULL;
18241+ unsigned int reloc_num_pages;
18242+ unsigned int reloc_first_page;
18243+ unsigned int reloc_last_page;
18244+ struct psb_dstbuf_cache dst_cache;
18245+ struct drm_psb_reloc *reloc;
18246+ struct drm_bo_kmap_obj reloc_kmap;
18247+ int reloc_is_iomem;
18248+ int count;
18249+ int ret = 0;
18250+ int registered = 0;
18251+ int short_circuit = 1;
18252+ int i;
18253+
18254+ if (num_relocs == 0)
18255+ return 0;
18256+
18257+ for (i = 0; i < num_buffers; ++i) {
18258+ if (!buffers[i].presumed_offset_correct) {
18259+ short_circuit = 0;
18260+ break;
18261+ }
18262+ }
18263+
18264+ if (short_circuit)
18265+ return 0;
18266+
18267+ memset(&dst_cache, 0, sizeof(dst_cache));
18268+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
18269+
18270+ mutex_lock(&dev->struct_mutex);
18271+ reloc_buffer = drm_lookup_buffer_object(file_priv, reloc_handle, 1);
18272+ mutex_unlock(&dev->struct_mutex);
18273+ if (!reloc_buffer)
18274+ goto out;
18275+
18276+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
18277+ reloc_last_page =
18278+ (reloc_offset +
18279+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
18280+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
18281+ reloc_offset &= ~PAGE_MASK;
18282+
18283+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
18284+ DRM_ERROR("Relocation buffer is too large\n");
18285+ ret = -EINVAL;
18286+ goto out;
18287+ }
18288+
18289+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
18290+ (registered =
18291+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
18292+
18293+ if (ret == -EINTR) {
18294+ ret = -EAGAIN;
18295+ goto out;
18296+ }
18297+ if (ret) {
18298+ DRM_ERROR("Error waiting for space to map "
18299+ "relocation buffer.\n");
18300+ goto out;
18301+ }
18302+
18303+ ret = drm_bo_kmap(reloc_buffer, reloc_first_page,
18304+ reloc_num_pages, &reloc_kmap);
18305+
18306+ if (ret) {
18307+ DRM_ERROR("Could not map relocation buffer.\n"
18308+ "\tReloc buffer id 0x%08x.\n"
18309+ "\tReloc first page %d.\n"
18310+ "\tReloc num pages %d.\n",
18311+ reloc_handle, reloc_first_page, reloc_num_pages);
18312+ goto out;
18313+ }
18314+
18315+ reloc = (struct drm_psb_reloc *)
18316+ ((unsigned long)drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem) +
18317+ reloc_offset);
18318+
18319+ for (count = 0; count < num_relocs; ++count) {
18320+ ret = psb_apply_reloc(dev_priv, fence_class,
18321+ reloc, buffers,
18322+ num_buffers, &dst_cache,
18323+ no_wait, interruptible);
18324+ if (ret)
18325+ goto out1;
18326+ reloc++;
18327+ }
18328+
18329+ out1:
18330+ drm_bo_kunmap(&reloc_kmap);
18331+ out:
18332+ if (registered) {
18333+ spin_lock(&dev_priv->reloc_lock);
18334+ dev_priv->rel_mapped_pages -= reloc_num_pages;
18335+ spin_unlock(&dev_priv->reloc_lock);
18336+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
18337+ }
18338+
18339+ psb_clear_dstbuf_cache(&dst_cache);
18340+ if (reloc_buffer)
18341+ drm_bo_usage_deref_unlocked(&reloc_buffer);
18342+ return ret;
18343+}
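
For orientation, one entry of the relocation table that psb_fixup_relocs() maps and walks would be filled in by the submitting client roughly like this (a sketch: only the fields visible above are set, and the constants and the cmd_dword_index variable are placeholders):

	struct drm_psb_reloc r;

	memset(&r, 0, sizeof(r));
	r.reloc_op   = PSB_RELOC_OP_USE_OFFSET;	/* merge against a background */
	r.dst_buffer = 0;			/* index into the validate list */
	r.where      = cmd_dword_index;		/* dword offset in the dst buffer */
	r.mask       = 0x0FFFFFFF;		/* placeholder: bits to patch */
	r.shift      = 0;			/* packed shift / align-shift field */
	r.background = 0;			/* supplies the ~mask bits */
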
18344+
18345+static int psb_cmdbuf_2d(struct drm_file *priv,
18346+ struct drm_psb_cmdbuf_arg *arg,
18347+ struct drm_buffer_object *cmd_buffer,
18348+ struct drm_fence_arg *fence_arg)
18349+{
18350+ struct drm_device *dev = priv->minor->dev;
18351+ struct drm_psb_private *dev_priv =
18352+ (struct drm_psb_private *)dev->dev_private;
18353+ int ret;
18354+
18355+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
18356+ if (ret)
18357+ return -EAGAIN;
18358+
18359+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
18360+ arg->cmdbuf_size, PSB_ENGINE_2D, NULL);
18361+ if (ret)
18362+ goto out_unlock;
18363+
18364+ psb_fence_or_sync(priv, PSB_ENGINE_2D, arg, fence_arg, NULL);
18365+
18366+ mutex_lock(&cmd_buffer->mutex);
18367+ if (cmd_buffer->fence != NULL)
18368+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
18369+ mutex_unlock(&cmd_buffer->mutex);
18370+ out_unlock:
18371+ mutex_unlock(&dev_priv->reset_mutex);
18372+ return ret;
18373+}
18374+
18375+#if 0
18376+static int psb_dump_page(struct drm_buffer_object *bo,
18377+ unsigned int page_offset, unsigned int num)
18378+{
18379+ struct drm_bo_kmap_obj kmobj;
18380+ int is_iomem;
18381+ uint32_t *p;
18382+ int ret;
18383+ unsigned int i;
18384+
18385+ ret = drm_bo_kmap(bo, page_offset, 1, &kmobj);
18386+ if (ret)
18387+ return ret;
18388+
18389+ p = drm_bmo_virtual(&kmobj, &is_iomem);
18390+ for (i = 0; i < num; ++i)
18391+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
18392+
18393+ drm_bo_kunmap(&kmobj);
18394+ return 0;
18395+}
18396+#endif
18397+
18398+static void psb_idle_engine(struct drm_device *dev, int engine)
18399+{
18400+ struct drm_psb_private *dev_priv =
18401+ (struct drm_psb_private *)dev->dev_private;
18402+ uint32_t dummy;
18403+
18404+ switch (engine) {
18405+ case PSB_ENGINE_2D:
18406+
18407+ /*
18408+ * Make sure we flush 2D properly using a dummy
18409+ * fence sequence emit.
18410+ */
18411+
18412+ (void)psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
18413+ &dummy, &dummy);
18414+ psb_2d_lock(dev_priv);
18415+ (void)psb_idle_2d(dev);
18416+ psb_2d_unlock(dev_priv);
18417+ break;
18418+ case PSB_ENGINE_TA:
18419+ case PSB_ENGINE_RASTERIZER:
18420+ case PSB_ENGINE_HPRAST:
18421+ (void)psb_idle_3d(dev);
18422+ break;
18423+ default:
18424+
18425+ /*
18426+ * FIXME: Insert video engine idle command here.
18427+ */
18428+
18429+ break;
18430+ }
18431+}
18432+
18433+void psb_fence_or_sync(struct drm_file *priv,
18434+ int engine,
18435+ struct drm_psb_cmdbuf_arg *arg,
18436+ struct drm_fence_arg *fence_arg,
18437+ struct drm_fence_object **fence_p)
18438+{
18439+ struct drm_device *dev = priv->minor->dev;
18440+ int ret;
18441+ struct drm_fence_object *fence;
18442+
18443+ ret = drm_fence_buffer_objects(dev, NULL, arg->fence_flags,
18444+ NULL, &fence);
18445+
18446+ if (ret) {
18447+
18448+ /*
18449+ * Fence creation failed.
18450+ * Fall back to synchronous operation and idle the engine.
18451+ */
18452+
18453+ psb_idle_engine(dev, engine);
18454+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
18455+
18456+ /*
18457+ * Communicate to user-space that
18458+ * fence creation has failed and that
18459+ * the engine is idle.
18460+ */
18461+
18462+ fence_arg->handle = ~0;
18463+ fence_arg->error = ret;
18464+ }
18465+
18466+ drm_putback_buffer_objects(dev);
18467+ if (fence_p)
18468+ *fence_p = NULL;
18469+ return;
18470+ }
18471+
18472+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
18473+
18474+ ret = drm_fence_add_user_object(priv, fence,
18475+ arg->fence_flags &
18476+ DRM_FENCE_FLAG_SHAREABLE);
18477+ if (!ret)
18478+ drm_fence_fill_arg(fence, fence_arg);
18479+ else {
18480+ /*
18481+ * Fence user object creation failed.
18482+ * We must idle the engine here as well, as user-
18483+ * space expects a fence object to wait on. Since we
18484+ * have a fence object we wait for it to signal
18485+			 * have a fence object, we wait for it to signal,
18486+			 * which indicates the engine is "sufficiently" idle.
18487+
18488+ (void)drm_fence_object_wait(fence, 0, 1, fence->type);
18489+ drm_fence_usage_deref_unlocked(&fence);
18490+ fence_arg->handle = ~0;
18491+ fence_arg->error = ret;
18492+ }
18493+ }
18494+
18495+ if (fence_p)
18496+ *fence_p = fence;
18497+ else if (fence)
18498+ drm_fence_usage_deref_unlocked(&fence);
18499+}
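
Note the contract this gives callers: psb_fence_or_sync() never fails. Either a fence comes back, or the engine has already been idled synchronously and fence_arg carries ~0 plus the error. A call site that wants the fence therefore only needs (sketch):

	struct drm_fence_object *fence;

	psb_fence_or_sync(priv, PSB_ENGINE_2D, arg, fence_arg, &fence);
	if (fence) {
		/* ... attach the fence wherever it is needed ... */
		drm_fence_usage_deref_unlocked(&fence);
	}
	/* fence == NULL: the engine was idled synchronously instead. */
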
18500+
18501+int psb_handle_copyback(struct drm_device *dev,
18502+ struct psb_buflist_item *buffers,
18503+ unsigned int num_buffers, int ret, void *data)
18504+{
18505+ struct drm_psb_private *dev_priv =
18506+ (struct drm_psb_private *)dev->dev_private;
18507+ struct drm_bo_op_arg arg;
18508+ struct psb_buflist_item *item = buffers;
18509+ struct drm_buffer_object *bo;
18510+ int err = ret;
18511+ int i;
18512+
18513+ /*
18514+ * Clear the unfenced use base register lists and buffer lists.
18515+ */
18516+
18517+ if (ret) {
18518+ drm_regs_fence(&dev_priv->use_manager, NULL);
18519+ drm_putback_buffer_objects(dev);
18520+ }
18521+
18522+ if (ret != -EAGAIN) {
18523+ for (i = 0; i < num_buffers; ++i) {
18524+ arg.handled = 1;
18525+ arg.d.rep.ret = item->ret;
18526+ bo = item->bo;
18527+ mutex_lock(&bo->mutex);
18528+ drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
18529+ mutex_unlock(&bo->mutex);
18530+ if (copy_to_user(item->data, &arg, sizeof(arg)))
18531+ err = -EFAULT;
18532+ ++item;
18533+ }
18534+ }
18535+
18536+ return err;
18537+}
18538+
18539+static int psb_cmdbuf_video(struct drm_file *priv,
18540+ struct drm_psb_cmdbuf_arg *arg,
18541+ unsigned int num_buffers,
18542+ struct drm_buffer_object *cmd_buffer,
18543+ struct drm_fence_arg *fence_arg)
18544+{
18545+ struct drm_device *dev = priv->minor->dev;
18546+ struct drm_fence_object *fence;
18547+ int ret;
18548+
18549+ /*
18550+	 * FIXME: Check this; it doesn't seem right. Fencing should be done
18551+	 * AFTER command submission, and drm_psb_idle should idle the MSVDX completely.
18552+ */
18553+
18554+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, arg, fence_arg, &fence);
18555+ ret = psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
18556+ arg->cmdbuf_size, fence);
18557+
18558+ if (ret)
18559+ return ret;
18560+
18561+ drm_fence_usage_deref_unlocked(&fence);
18562+ mutex_lock(&cmd_buffer->mutex);
18563+ if (cmd_buffer->fence != NULL)
18564+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
18565+ mutex_unlock(&cmd_buffer->mutex);
18566+ return 0;
18567+}
18568+
18569+int psb_feedback_buf(struct drm_file *file_priv,
18570+ uint32_t feedback_ops,
18571+ uint32_t handle,
18572+ uint32_t offset,
18573+ uint32_t feedback_breakpoints,
18574+ uint32_t feedback_size, struct psb_feedback_info *feedback)
18575+{
18576+ struct drm_buffer_object *bo;
18577+ struct page *page;
18578+ uint32_t page_no;
18579+ uint32_t page_offset;
18580+ int ret;
18581+
18582+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
18583+ DRM_ERROR("Illegal feedback op.\n");
18584+ return -EINVAL;
18585+ }
18586+
18587+ if (feedback_breakpoints != 0) {
18588+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
18589+ return -EINVAL;
18590+ }
18591+
18592+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
18593+ DRM_ERROR("Feedback buffer size too small.\n");
18594+ return -EINVAL;
18595+ }
18596+
18597+ page_offset = offset & ~PAGE_MASK;
18598+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
18599+ < page_offset) {
18600+ DRM_ERROR("Illegal feedback buffer alignment.\n");
18601+ return -EINVAL;
18602+ }
18603+
18604+ ret = drm_bo_handle_validate(file_priv,
18605+ handle,
18606+ PSB_ENGINE_TA,
18607+ DRM_BO_FLAG_MEM_LOCAL |
18608+ DRM_BO_FLAG_CACHED |
18609+ DRM_BO_FLAG_WRITE |
18610+ PSB_BO_FLAG_FEEDBACK,
18611+ DRM_BO_MASK_MEM |
18612+ DRM_BO_FLAG_CACHED |
18613+ DRM_BO_FLAG_WRITE |
18614+ PSB_BO_FLAG_FEEDBACK, 0, 0, NULL, &bo);
18615+ if (ret)
18616+ return ret;
18617+
18618+ page_no = offset >> PAGE_SHIFT;
18619+ if (page_no >= bo->num_pages) {
18620+ ret = -EINVAL;
18621+ DRM_ERROR("Illegal feedback buffer offset.\n");
18622+ goto out_unref;
18623+ }
18624+
18625+ if (bo->ttm == NULL) {
18626+ ret = -EINVAL;
18627+ DRM_ERROR("Vistest buffer without TTM.\n");
18628+ goto out_unref;
18629+ }
18630+
18631+ page = drm_ttm_get_page(bo->ttm, page_no);
18632+ if (!page) {
18633+ ret = -ENOMEM;
18634+ goto out_unref;
18635+ }
18636+
18637+ feedback->page = page;
18638+ feedback->bo = bo;
18639+ feedback->offset = page_offset;
18640+ return 0;
18641+
18642+ out_unref:
18643+ drm_bo_usage_deref_unlocked(&bo);
18644+ return ret;
18645+}
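
The page_offset test above just guarantees that the whole PSB_HW_FEEDBACK_SIZE-dword record fits inside a single page starting at the requested offset. A worked example (stand-alone C; the 16-dword record size is illustrative — the real PSB_HW_FEEDBACK_SIZE is defined elsewhere in the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096u
	#define FEEDBACK_BYTES	(16u * sizeof(uint32_t))	/* illustrative */

	int main(void)
	{
		uint32_t offset = 4080;		/* near the end of a page */
		uint32_t page_offset = offset & (PAGE_SIZE - 1);

		/* Same predicate as psb_feedback_buf(). */
		if (PAGE_SIZE - FEEDBACK_BYTES < page_offset)
			printf("rejected: record would cross a page boundary\n");
		else
			printf("ok\n");
		return 0;
	}
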
18646+
18647+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
18648+ struct drm_file *file_priv)
18649+{
18650+ drm_psb_cmdbuf_arg_t *arg = data;
18651+ int ret = 0;
18652+ unsigned num_buffers;
18653+ struct drm_buffer_object *cmd_buffer = NULL;
18654+ struct drm_buffer_object *ta_buffer = NULL;
18655+ struct drm_buffer_object *oom_buffer = NULL;
18656+ struct drm_fence_arg fence_arg;
18657+ struct drm_psb_scene user_scene;
18658+ struct psb_scene_pool *pool = NULL;
18659+ struct psb_scene *scene = NULL;
18660+ struct drm_psb_private *dev_priv =
18661+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
18662+ int engine;
18663+ struct psb_feedback_info feedback;
18664+
18665+ if (!dev_priv)
18666+ return -EINVAL;
18667+
18668+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
18669+ if (ret)
18670+ return ret;
18671+
18672+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
18673+
18674+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
18675+ if (ret) {
18676+ drm_bo_read_unlock(&dev->bm.bm_lock);
18677+ return -EAGAIN;
18678+ }
18679+ if (unlikely(dev_priv->buffers == NULL)) {
18680+ dev_priv->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
18681+ sizeof(*dev_priv->buffers));
18682+		if (dev_priv->buffers == NULL) {
18683+			mutex_unlock(&dev_priv->cmdbuf_mutex);
18683+			drm_bo_read_unlock(&dev->bm.bm_lock);
18684+			return -ENOMEM;
18685+		}
18686+ }
18687+
18688+
18689+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
18690+ PSB_ENGINE_TA : arg->engine;
18691+
18692+ ret =
18693+ psb_validate_buffer_list(file_priv, engine,
18694+ (unsigned long)arg->buffer_list,
18695+ dev_priv->buffers, &num_buffers);
18696+ if (ret)
18697+ goto out_err0;
18698+
18699+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
18700+ arg->reloc_offset, arg->reloc_handle,
18701+ dev_priv->buffers, num_buffers, 0, 1);
18702+ if (ret)
18703+ goto out_err0;
18704+
18705+ mutex_lock(&dev->struct_mutex);
18706+ cmd_buffer = drm_lookup_buffer_object(file_priv, arg->cmdbuf_handle, 1);
18707+ mutex_unlock(&dev->struct_mutex);
18708+ if (!cmd_buffer) {
18709+ ret = -EINVAL;
18710+ goto out_err0;
18711+ }
18712+
18713+ switch (arg->engine) {
18714+ case PSB_ENGINE_2D:
18715+ ret = psb_cmdbuf_2d(file_priv, arg, cmd_buffer, &fence_arg);
18716+ if (ret)
18717+ goto out_err0;
18718+ break;
18719+ case PSB_ENGINE_VIDEO:
18720+ ret =
18721+ psb_cmdbuf_video(file_priv, arg, num_buffers, cmd_buffer,
18722+ &fence_arg);
18723+ if (ret)
18724+ goto out_err0;
18725+ break;
18726+ case PSB_ENGINE_RASTERIZER:
18727+ ret = psb_cmdbuf_raster(file_priv, arg, cmd_buffer, &fence_arg);
18728+ if (ret)
18729+ goto out_err0;
18730+ break;
18731+ case PSB_ENGINE_TA:
18732+ if (arg->ta_handle == arg->cmdbuf_handle) {
18733+ mutex_lock(&dev->struct_mutex);
18734+ atomic_inc(&cmd_buffer->usage);
18735+ ta_buffer = cmd_buffer;
18736+ mutex_unlock(&dev->struct_mutex);
18737+ } else {
18738+ mutex_lock(&dev->struct_mutex);
18739+ ta_buffer =
18740+ drm_lookup_buffer_object(file_priv,
18741+ arg->ta_handle, 1);
18742+ mutex_unlock(&dev->struct_mutex);
18743+ if (!ta_buffer) {
18744+ ret = -EINVAL;
18745+ goto out_err0;
18746+ }
18747+ }
18748+ if (arg->oom_size != 0) {
18749+ if (arg->oom_handle == arg->cmdbuf_handle) {
18750+ mutex_lock(&dev->struct_mutex);
18751+ atomic_inc(&cmd_buffer->usage);
18752+ oom_buffer = cmd_buffer;
18753+ mutex_unlock(&dev->struct_mutex);
18754+ } else {
18755+ mutex_lock(&dev->struct_mutex);
18756+ oom_buffer =
18757+ drm_lookup_buffer_object(file_priv,
18758+ arg->oom_handle,
18759+ 1);
18760+ mutex_unlock(&dev->struct_mutex);
18761+ if (!oom_buffer) {
18762+ ret = -EINVAL;
18763+ goto out_err0;
18764+ }
18765+ }
18766+ }
18767+
18768+		ret = copy_from_user(&user_scene, (void __user *)
18769+				     ((unsigned long)arg->scene_arg),
18770+				     sizeof(user_scene)) ? -EFAULT : 0;
18771+		if (ret)
18772+			goto out_err0;
18773+
18774+ if (!user_scene.handle_valid) {
18775+ pool = psb_scene_pool_alloc(file_priv, 0,
18776+ user_scene.num_buffers,
18777+ user_scene.w, user_scene.h);
18778+ if (!pool) {
18779+ ret = -ENOMEM;
18780+ goto out_err0;
18781+ }
18782+
18783+ user_scene.handle = psb_scene_pool_handle(pool);
18784+ user_scene.handle_valid = 1;
18785+			ret = copy_to_user((void __user *)
18786+					   ((unsigned long)arg->scene_arg),
18787+					   &user_scene, sizeof(user_scene)) ? -EFAULT : 0;
18788+
18789+ if (ret)
18790+ goto out_err0;
18791+ } else {
18792+ mutex_lock(&dev->struct_mutex);
18793+ pool = psb_scene_pool_lookup_devlocked(file_priv,
18794+							       user_scene.handle,
18795+							       1);
18796+ mutex_unlock(&dev->struct_mutex);
18797+ if (!pool) {
18798+ ret = -EINVAL;
18799+ goto out_err0;
18800+ }
18801+ }
18802+
18803+ mutex_lock(&dev_priv->reset_mutex);
18804+ ret = psb_validate_scene_pool(pool, 0, 0, 0,
18805+ user_scene.w,
18806+ user_scene.h,
18807+ arg->ta_flags &
18808+ PSB_TA_FLAG_LASTPASS, &scene);
18809+ mutex_unlock(&dev_priv->reset_mutex);
18810+
18811+ if (ret)
18812+ goto out_err0;
18813+
18814+ memset(&feedback, 0, sizeof(feedback));
18815+ if (arg->feedback_ops) {
18816+ ret = psb_feedback_buf(file_priv,
18817+ arg->feedback_ops,
18818+ arg->feedback_handle,
18819+ arg->feedback_offset,
18820+ arg->feedback_breakpoints,
18821+ arg->feedback_size, &feedback);
18822+ if (ret)
18823+ goto out_err0;
18824+ }
18825+ ret = psb_cmdbuf_ta(file_priv, arg, cmd_buffer, ta_buffer,
18826+ oom_buffer, scene, &feedback, &fence_arg);
18827+ if (ret)
18828+ goto out_err0;
18829+ break;
18830+ default:
18831+ DRM_ERROR("Unimplemented command submission mechanism (%x).\n",
18832+ arg->engine);
18833+ ret = -EINVAL;
18834+ goto out_err0;
18835+ }
18836+
18837+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
18838+		ret = copy_to_user((void __user *)
18839+				   ((unsigned long)arg->fence_arg),
18840+				   &fence_arg, sizeof(fence_arg)) ? -EFAULT : 0;
18841+ }
18842+
18843+ out_err0:
18844+ ret =
18845+ psb_handle_copyback(dev, dev_priv->buffers, num_buffers, ret, data);
18846+ mutex_lock(&dev->struct_mutex);
18847+ if (scene)
18848+ psb_scene_unref_devlocked(&scene);
18849+ if (pool)
18850+ psb_scene_pool_unref_devlocked(&pool);
18851+ if (cmd_buffer)
18852+ drm_bo_usage_deref_locked(&cmd_buffer);
18853+ if (ta_buffer)
18854+ drm_bo_usage_deref_locked(&ta_buffer);
18855+ if (oom_buffer)
18856+ drm_bo_usage_deref_locked(&oom_buffer);
18857+
18858+ psb_dereference_buffers_locked(dev_priv->buffers, num_buffers);
18859+ mutex_unlock(&dev->struct_mutex);
18860+ mutex_unlock(&dev_priv->cmdbuf_mutex);
18861+
18862+ drm_bo_read_unlock(&dev->bm.bm_lock);
18863+ return ret;
18864+}
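
Seen from user space, all of the above collapses into filling in one argument block and issuing a single command ioctl per submission. Schematically, for the 2D path (a sketch — the DRM_PSB_CMDBUF request name and the handle variables are placeholders for whatever the libdrm wrapper and buffer-creation code provide):

	drm_psb_cmdbuf_arg_t ca;
	struct drm_fence_arg fa;

	memset(&ca, 0, sizeof(ca));
	ca.engine        = PSB_ENGINE_2D;
	ca.cmdbuf_handle = cmd_bo_handle;	/* 2D command buffer object */
	ca.cmdbuf_offset = 0;
	ca.cmdbuf_size   = cmd_bytes;
	ca.buffer_list   = (uint64_t)(unsigned long)validate_list;
	ca.fence_arg     = (uint64_t)(unsigned long)&fa;

	ret = drmCommandWriteRead(fd, DRM_PSB_CMDBUF, &ca, sizeof(ca));
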
18865Index: linux-2.6.28/drivers/gpu/drm/psb/psb_xhw.c
18866===================================================================
18867--- /dev/null 1970-01-01 00:00:00.000000000 +0000
18868+++ linux-2.6.28/drivers/gpu/drm/psb/psb_xhw.c 2009-02-25 15:37:02.000000000 +0000
18869@@ -0,0 +1,614 @@
18870+/**************************************************************************
18871+ * Copyright (c) 2007, Intel Corporation.
18872+ * All Rights Reserved.
18873+ *
18874+ * This program is free software; you can redistribute it and/or modify it
18875+ * under the terms and conditions of the GNU General Public License,
18876+ * version 2, as published by the Free Software Foundation.
18877+ *
18878+ * This program is distributed in the hope it will be useful, but WITHOUT
18879+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18880+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18881+ * more details.
18882+ *
18883+ * You should have received a copy of the GNU General Public License along with
18884+ * this program; if not, write to the Free Software Foundation, Inc.,
18885+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18886+ *
18887+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
18888+ * develop this driver.
18889+ *
18890+ **************************************************************************/
18891+/*
18892+ * Make calls into closed source X server code.
18893+ */
18894+
18895+#include "drmP.h"
18896+#include "psb_drv.h"
18897+
18898+void
18899+psb_xhw_clean_buf(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
18900+{
18901+ unsigned long irq_flags;
18902+
18903+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
18904+ list_del_init(&buf->head);
18905+ if (dev_priv->xhw_cur_buf == buf)
18906+ dev_priv->xhw_cur_buf = NULL;
18907+ atomic_set(&buf->done, 1);
18908+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
18909+}
18910+
18911+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
18912+ struct psb_xhw_buf *buf)
18913+{
18914+ unsigned long irq_flags;
18915+
18916+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
18917+ atomic_set(&buf->done, 0);
18918+ if (unlikely(!dev_priv->xhw_submit_ok)) {
18919+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
18920+ DRM_ERROR("No Xpsb 3D extension available.\n");
18921+ return -EINVAL;
18922+ }
18923+ if (!list_empty(&buf->head)) {
18924+ DRM_ERROR("Recursive list adding.\n");
18925+ goto out;
18926+ }
18927+ list_add_tail(&buf->head, &dev_priv->xhw_in);
18928+ wake_up_interruptible(&dev_priv->xhw_queue);
18929+ out:
18930+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
18931+ return 0;
18932+}
18933+
18934+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
18935+ struct psb_xhw_buf *buf,
18936+ uint32_t w,
18937+ uint32_t h,
18938+		       uint32_t *hw_cookie,
18939+		       uint32_t *bo_size,
18940+		       uint32_t *clear_p_start, uint32_t *clear_num_pages)
18941+{
18942+ struct drm_psb_xhw_arg *xa = &buf->arg;
18943+ int ret;
18944+
18945+ buf->copy_back = 1;
18946+ xa->op = PSB_XHW_SCENE_INFO;
18947+ xa->irq_op = 0;
18948+ xa->issue_irq = 0;
18949+ xa->arg.si.w = w;
18950+ xa->arg.si.h = h;
18951+
18952+ ret = psb_xhw_add(dev_priv, buf);
18953+ if (ret)
18954+ return ret;
18955+
18956+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
18957+ atomic_read(&buf->done), DRM_HZ);
18958+
18959+ if (!atomic_read(&buf->done)) {
18960+ psb_xhw_clean_buf(dev_priv, buf);
18961+ return -EBUSY;
18962+ }
18963+
18964+ if (!xa->ret) {
18965+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
18966+ *bo_size = xa->arg.si.size;
18967+ *clear_p_start = xa->arg.si.clear_p_start;
18968+ *clear_num_pages = xa->arg.si.clear_num_pages;
18969+ }
18970+ return xa->ret;
18971+}
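
psb_xhw_scene_info() and the synchronous requests that follow (reset_dpm, check_lockup, ta_mem_info, ta_mem_load) all repeat the same queue-then-wait dance. It could be factored into a single helper along these lines (a sketch, not part of the original code):

	static int psb_xhw_sync(struct drm_psb_private *dev_priv,
				struct psb_xhw_buf *buf, unsigned long timeout)
	{
		int ret = psb_xhw_add(dev_priv, buf);

		if (ret)
			return ret;

		(void)wait_event_timeout(dev_priv->xhw_caller_queue,
					 atomic_read(&buf->done), timeout);

		if (!atomic_read(&buf->done)) {
			psb_xhw_clean_buf(dev_priv, buf);
			return -EBUSY;
		}
		return buf->arg.ret;
	}
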
18972+
18973+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
18974+ struct psb_xhw_buf *buf, uint32_t fire_flags)
18975+{
18976+ struct drm_psb_xhw_arg *xa = &buf->arg;
18977+
18978+ buf->copy_back = 0;
18979+ xa->op = PSB_XHW_FIRE_RASTER;
18980+ xa->issue_irq = 0;
18981+	xa->arg.sb.fire_flags = 0;	/* note: the fire_flags argument is not forwarded here */
18982+
18983+ return psb_xhw_add(dev_priv, buf);
18984+}
18985+
18986+int psb_xhw_vistest(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
18987+{
18988+ struct drm_psb_xhw_arg *xa = &buf->arg;
18989+
18990+ buf->copy_back = 1;
18991+ xa->op = PSB_XHW_VISTEST;
18992+ /*
18993+ * Could perhaps decrease latency somewhat by
18994+ * issuing an irq in this case.
18995+ */
18996+ xa->issue_irq = 0;
18997+ xa->irq_op = PSB_UIRQ_VISTEST;
18998+ return psb_xhw_add(dev_priv, buf);
18999+}
19000+
19001+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
19002+ struct psb_xhw_buf *buf,
19003+ uint32_t fire_flags,
19004+ uint32_t hw_context,
19005+			    uint32_t *cookie,
19006+			    uint32_t *oom_cmds,
19007+ uint32_t num_oom_cmds,
19008+ uint32_t offset, uint32_t engine, uint32_t flags)
19009+{
19010+ struct drm_psb_xhw_arg *xa = &buf->arg;
19011+
19012+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
19013+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
19014+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
19015+ if (unlikely(buf->copy_back))
19016+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
19017+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
19018+ else
19019+ xa->irq_op = 0;
19020+ xa->arg.sb.fire_flags = fire_flags;
19021+ xa->arg.sb.hw_context = hw_context;
19022+ xa->arg.sb.offset = offset;
19023+ xa->arg.sb.engine = engine;
19024+ xa->arg.sb.flags = flags;
19025+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
19026+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
19027+ if (num_oom_cmds)
19028+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
19029+ sizeof(uint32_t) * num_oom_cmds);
19030+ return psb_xhw_add(dev_priv, buf);
19031+}
19032+
19033+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
19034+{
19035+ struct drm_psb_xhw_arg *xa = &buf->arg;
19036+ int ret;
19037+
19038+ buf->copy_back = 1;
19039+ xa->op = PSB_XHW_RESET_DPM;
19040+ xa->issue_irq = 0;
19041+ xa->irq_op = 0;
19042+
19043+ ret = psb_xhw_add(dev_priv, buf);
19044+ if (ret)
19045+ return ret;
19046+
19047+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
19048+ atomic_read(&buf->done), 3 * DRM_HZ);
19049+
19050+ if (!atomic_read(&buf->done)) {
19051+ psb_xhw_clean_buf(dev_priv, buf);
19052+ return -EBUSY;
19053+ }
19054+
19055+ return xa->ret;
19056+}
19057+
19058+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
19059+			 struct psb_xhw_buf *buf, uint32_t *value)
19060+{
19061+ struct drm_psb_xhw_arg *xa = &buf->arg;
19062+ int ret;
19063+
19064+ *value = 0;
19065+
19066+ buf->copy_back = 1;
19067+ xa->op = PSB_XHW_CHECK_LOCKUP;
19068+ xa->issue_irq = 0;
19069+ xa->irq_op = 0;
19070+
19071+ ret = psb_xhw_add(dev_priv, buf);
19072+ if (ret)
19073+ return ret;
19074+
19075+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
19076+ atomic_read(&buf->done), DRM_HZ * 3);
19077+
19078+ if (!atomic_read(&buf->done)) {
19079+ psb_xhw_clean_buf(dev_priv, buf);
19080+ return -EBUSY;
19081+ }
19082+
19083+ if (!xa->ret)
19084+ *value = xa->arg.cl.value;
19085+
19086+ return xa->ret;
19087+}
19088+
19089+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
19090+ struct psb_xhw_buf *buf)
19091+{
19092+ struct drm_psb_xhw_arg *xa = &buf->arg;
19093+ unsigned long irq_flags;
19094+
19095+ buf->copy_back = 0;
19096+ xa->op = PSB_XHW_TERMINATE;
19097+ xa->issue_irq = 0;
19098+
19099+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
19100+ dev_priv->xhw_submit_ok = 0;
19101+ atomic_set(&buf->done, 0);
19102+ if (!list_empty(&buf->head)) {
19103+ DRM_ERROR("Recursive list adding.\n");
19104+ goto out;
19105+ }
19106+ list_add_tail(&buf->head, &dev_priv->xhw_in);
19107+ out:
19108+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19109+ wake_up_interruptible(&dev_priv->xhw_queue);
19110+
19111+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
19112+ atomic_read(&buf->done), DRM_HZ / 10);
19113+
19114+ if (!atomic_read(&buf->done)) {
19115+ DRM_ERROR("Xpsb terminate timeout.\n");
19116+ psb_xhw_clean_buf(dev_priv, buf);
19117+ return -EBUSY;
19118+ }
19119+
19120+ return 0;
19121+}
19122+
19123+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
19124+ struct psb_xhw_buf *buf,
19125+			uint32_t pages, uint32_t *hw_cookie, uint32_t *size)
19126+{
19127+ struct drm_psb_xhw_arg *xa = &buf->arg;
19128+ int ret;
19129+
19130+ buf->copy_back = 1;
19131+ xa->op = PSB_XHW_TA_MEM_INFO;
19132+ xa->issue_irq = 0;
19133+ xa->irq_op = 0;
19134+ xa->arg.bi.pages = pages;
19135+
19136+ ret = psb_xhw_add(dev_priv, buf);
19137+ if (ret)
19138+ return ret;
19139+
19140+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
19141+ atomic_read(&buf->done), DRM_HZ);
19142+
19143+ if (!atomic_read(&buf->done)) {
19144+ psb_xhw_clean_buf(dev_priv, buf);
19145+ return -EBUSY;
19146+ }
19147+
19148+ if (!xa->ret)
19149+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
19150+
19151+ *size = xa->arg.bi.size;
19152+ return xa->ret;
19153+}
19154+
19155+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
19156+ struct psb_xhw_buf *buf,
19157+ uint32_t flags,
19158+ uint32_t param_offset,
19159+			uint32_t pt_offset, uint32_t *hw_cookie)
19160+{
19161+ struct drm_psb_xhw_arg *xa = &buf->arg;
19162+ int ret;
19163+
19164+ buf->copy_back = 1;
19165+ xa->op = PSB_XHW_TA_MEM_LOAD;
19166+ xa->issue_irq = 0;
19167+ xa->irq_op = 0;
19168+ xa->arg.bl.flags = flags;
19169+ xa->arg.bl.param_offset = param_offset;
19170+ xa->arg.bl.pt_offset = pt_offset;
19171+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
19172+
19173+ ret = psb_xhw_add(dev_priv, buf);
19174+ if (ret)
19175+ return ret;
19176+
19177+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
19178+ atomic_read(&buf->done), 3 * DRM_HZ);
19179+
19180+ if (!atomic_read(&buf->done)) {
19181+ psb_xhw_clean_buf(dev_priv, buf);
19182+ return -EBUSY;
19183+ }
19184+
19185+ if (!xa->ret)
19186+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
19187+
19188+ return xa->ret;
19189+}
19190+
19191+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
19192+		   struct psb_xhw_buf *buf, uint32_t *cookie)
19193+{
19194+ struct drm_psb_xhw_arg *xa = &buf->arg;
19195+
19196+ /*
19197+	 * This calls the closed-source Xpsb
19198+ * OOM handler, which resolves the condition and
19199+ * sends a reply telling the scheduler what to do
19200+ * with the task.
19201+ */
19202+
19203+ buf->copy_back = 1;
19204+ xa->op = PSB_XHW_OOM;
19205+ xa->issue_irq = 1;
19206+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
19207+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
19208+
19209+ return psb_xhw_add(dev_priv, buf);
19210+}
19211+
19212+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
19213+ struct psb_xhw_buf *buf,
19214+			  uint32_t *cookie,
19215+			  uint32_t *bca, uint32_t *rca, uint32_t *flags)
19216+{
19217+ struct drm_psb_xhw_arg *xa = &buf->arg;
19218+
19219+ /*
19220+ * Get info about how to schedule an OOM task.
19221+ */
19222+
19223+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
19224+ *bca = xa->arg.oom.bca;
19225+ *rca = xa->arg.oom.rca;
19226+ *flags = xa->arg.oom.flags;
19227+}
19228+
19229+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
19230+			struct psb_xhw_buf *buf, uint32_t *cookie)
19231+{
19232+ struct drm_psb_xhw_arg *xa = &buf->arg;
19233+
19234+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
19235+}
19236+
19237+int psb_xhw_resume(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
19238+{
19239+ struct drm_psb_xhw_arg *xa = &buf->arg;
19240+
19241+ buf->copy_back = 0;
19242+ xa->op = PSB_XHW_RESUME;
19243+ xa->issue_irq = 0;
19244+ xa->irq_op = 0;
19245+ return psb_xhw_add(dev_priv, buf);
19246+}
19247+
19248+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
19249+{
19250+}
19251+
19252+int psb_xhw_init(struct drm_device *dev)
19253+{
19254+ struct drm_psb_private *dev_priv =
19255+ (struct drm_psb_private *)dev->dev_private;
19256+ unsigned long irq_flags;
19257+
19258+ INIT_LIST_HEAD(&dev_priv->xhw_in);
19259+	spin_lock_init(&dev_priv->xhw_lock);
19260+ atomic_set(&dev_priv->xhw_client, 0);
19261+ init_waitqueue_head(&dev_priv->xhw_queue);
19262+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
19263+ mutex_init(&dev_priv->xhw_mutex);
19264+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
19265+ dev_priv->xhw_on = 0;
19266+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19267+
19268+ return 0;
19269+}
19270+
19271+static int psb_xhw_init_init(struct drm_device *dev,
19272+ struct drm_file *file_priv,
19273+ struct drm_psb_xhw_init_arg *arg)
19274+{
19275+ struct drm_psb_private *dev_priv =
19276+ (struct drm_psb_private *)dev->dev_private;
19277+ int ret;
19278+ int is_iomem;
19279+
19280+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
19281+ unsigned long irq_flags;
19282+
19283+ mutex_lock(&dev->struct_mutex);
19284+ dev_priv->xhw_bo =
19285+ drm_lookup_buffer_object(file_priv, arg->buffer_handle, 1);
19286+ mutex_unlock(&dev->struct_mutex);
19287+ if (!dev_priv->xhw_bo) {
19288+ ret = -EINVAL;
19289+ goto out_err;
19290+ }
19291+ ret = drm_bo_kmap(dev_priv->xhw_bo, 0,
19292+ dev_priv->xhw_bo->num_pages,
19293+ &dev_priv->xhw_kmap);
19294+ if (ret) {
19295+ DRM_ERROR("Failed mapping X server "
19296+ "communications buffer.\n");
19297+ goto out_err0;
19298+ }
19299+ dev_priv->xhw = drm_bmo_virtual(&dev_priv->xhw_kmap, &is_iomem);
19300+ if (is_iomem) {
19301+			DRM_ERROR("X server communications buffer "
19302+				  "is in device memory.\n");
19303+ ret = -EINVAL;
19304+ goto out_err1;
19305+ }
19306+ dev_priv->xhw_file = file_priv;
19307+
19308+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
19309+ dev_priv->xhw_on = 1;
19310+ dev_priv->xhw_submit_ok = 1;
19311+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19312+ return 0;
19313+ } else {
19314+ DRM_ERROR("Xhw is already initialized.\n");
19315+ return -EBUSY;
19316+ }
19317+ out_err1:
19318+ dev_priv->xhw = NULL;
19319+ drm_bo_kunmap(&dev_priv->xhw_kmap);
19320+ out_err0:
19321+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
19322+ out_err:
19323+ atomic_dec(&dev_priv->xhw_client);
19324+ return ret;
19325+}
19326+
19327+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
19328+{
19329+ struct psb_xhw_buf *cur_buf, *next;
19330+ unsigned long irq_flags;
19331+
19332+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
19333+ dev_priv->xhw_submit_ok = 0;
19334+
19335+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
19336+ list_del_init(&cur_buf->head);
19337+ if (cur_buf->copy_back) {
19338+ cur_buf->arg.ret = -EINVAL;
19339+ }
19340+ atomic_set(&cur_buf->done, 1);
19341+ }
19342+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19343+ wake_up(&dev_priv->xhw_caller_queue);
19344+}
19345+
19346+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
19347+ struct drm_file *file_priv, int closing)
19348+{
19349+
19350+ if (dev_priv->xhw_file == file_priv &&
19351+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
19352+
19353+ if (closing)
19354+ psb_xhw_queue_empty(dev_priv);
19355+ else {
19356+ struct psb_xhw_buf buf;
19357+ INIT_LIST_HEAD(&buf.head);
19358+
19359+ psb_xhw_terminate(dev_priv, &buf);
19360+ psb_xhw_queue_empty(dev_priv);
19361+ }
19362+
19363+ dev_priv->xhw = NULL;
19364+ drm_bo_kunmap(&dev_priv->xhw_kmap);
19365+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
19366+ dev_priv->xhw_file = NULL;
19367+ }
19368+}
19369+
19370+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
19371+ struct drm_file *file_priv)
19372+{
19373+ struct drm_psb_xhw_init_arg *arg = (struct drm_psb_xhw_init_arg *)data;
19374+ struct drm_psb_private *dev_priv =
19375+ (struct drm_psb_private *)dev->dev_private;
19376+
19377+ switch (arg->operation) {
19378+ case PSB_XHW_INIT:
19379+ return psb_xhw_init_init(dev, file_priv, arg);
19380+ case PSB_XHW_TAKEDOWN:
19381+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
19382+ }
19383+ return 0;
19384+}
19385+
19386+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
19387+{
19388+ int empty;
19389+ unsigned long irq_flags;
19390+
19391+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
19392+ empty = list_empty(&dev_priv->xhw_in);
19393+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19394+ return empty;
19395+}
19396+
19397+int psb_xhw_handler(struct drm_psb_private *dev_priv)
19398+{
19399+ unsigned long irq_flags;
19400+ struct drm_psb_xhw_arg *xa;
19401+ struct psb_xhw_buf *buf;
19402+
19403+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
19404+
19405+ if (!dev_priv->xhw_on) {
19406+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19407+ return -EINVAL;
19408+ }
19409+
19410+ buf = dev_priv->xhw_cur_buf;
19411+ if (buf && buf->copy_back) {
19412+ xa = &buf->arg;
19413+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
19414+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
19415+ atomic_set(&buf->done, 1);
19416+ wake_up(&dev_priv->xhw_caller_queue);
19417+	} else {
19418+		dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
19419+	}
19420+	dev_priv->xhw_cur_buf = NULL;
19421+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19422+ return 0;
19423+}
19424+
19425+int psb_xhw_ioctl(struct drm_device *dev, void *data,
19426+ struct drm_file *file_priv)
19427+{
19428+ struct drm_psb_private *dev_priv =
19429+ (struct drm_psb_private *)dev->dev_private;
19430+ unsigned long irq_flags;
19431+ struct drm_psb_xhw_arg *xa;
19432+ int ret;
19433+ struct list_head *list;
19434+ struct psb_xhw_buf *buf;
19435+
19436+ if (!dev_priv)
19437+ return -EINVAL;
19438+
19439+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
19440+ return -EAGAIN;
19441+
19442+ if (psb_forced_user_interrupt(dev_priv)) {
19443+ mutex_unlock(&dev_priv->xhw_mutex);
19444+ return -EINVAL;
19445+ }
19446+
19447+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
19448+ while (list_empty(&dev_priv->xhw_in)) {
19449+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19450+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
19451+						       !psb_xhw_in_empty(dev_priv),
19452+						       DRM_HZ);
19453+ if (ret == -ERESTARTSYS || ret == 0) {
19454+ mutex_unlock(&dev_priv->xhw_mutex);
19455+ return -EAGAIN;
19456+ }
19457+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
19458+ }
19459+
19460+ list = dev_priv->xhw_in.next;
19461+ list_del_init(list);
19462+
19463+ buf = list_entry(list, struct psb_xhw_buf, head);
19464+ xa = &buf->arg;
19465+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
19466+
19467+	if (unlikely(buf->copy_back)) {
19468+		dev_priv->xhw_cur_buf = buf;
19469+	} else {
19470+ atomic_set(&buf->done, 1);
19471+ dev_priv->xhw_cur_buf = NULL;
19472+ }
19473+
19474+ if (xa->op == PSB_XHW_TERMINATE) {
19475+ dev_priv->xhw_on = 0;
19476+ wake_up(&dev_priv->xhw_caller_queue);
19477+ }
19478+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
19479+
19480+ mutex_unlock(&dev_priv->xhw_mutex);
19481+
19482+ return 0;
19483+}
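
This ioctl is the consumer half of the handshake: the X server blocks here until the kernel queues a request, finds the request copied into the shared communications buffer, services it in user space, and later triggers psb_xhw_handler() via the user IRQ path so the reply is copied back and the in-kernel caller is woken. The user-space side is therefore just a loop; schematically (a sketch — the DRM_PSB_XHW request name and service_request() are placeholders):

	for (;;) {
		/* Blocks (up to a second) until a request is available. */
		if (drmCommandNone(fd, DRM_PSB_XHW) != 0)
			continue;	/* -EAGAIN: timeout or signal, retry */

		service_request(shared_buf);	/* closed-source Xpsb work */

		/* For requests with copy_back set, a user IRQ later runs
		 * psb_xhw_handler(), which returns the reply to the caller. */
	}
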
19484Index: linux-2.6.28/drivers/gpu/drm/Kconfig
19485===================================================================
19486--- linux-2.6.28.orig/drivers/gpu/drm/Kconfig 2009-02-25 15:37:02.000000000 +0000
19487+++ linux-2.6.28/drivers/gpu/drm/Kconfig 2009-02-25 15:37:02.000000000 +0000
19488@@ -129,3 +129,10 @@
19489 help
19490 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
19491 chipset. If M is selected the module will be called savage.
19492+
19493+config DRM_PSB
19494+ tristate "Intel Poulsbo"
19495+ depends on DRM && PCI && I2C_ALGOBIT
19496+ select DRM_INTEL_COMMON
19497+ help
19498+ Choose this option if you have an Intel Poulsbo chipset.
19499Index: linux-2.6.28/include/drm/drm_objects.h
19500===================================================================
19501--- /dev/null 1970-01-01 00:00:00.000000000 +0000
19502+++ linux-2.6.28/include/drm/drm_objects.h 2009-02-25 15:37:02.000000000 +0000
19503@@ -0,0 +1,717 @@
19504+/**************************************************************************
19505+ *
19506+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
19507+ * All Rights Reserved.
19508+ *
19509+ * Permission is hereby granted, free of charge, to any person obtaining a
19510+ * copy of this software and associated documentation files (the
19511+ * "Software"), to deal in the Software without restriction, including
19512+ * without limitation the rights to use, copy, modify, merge, publish,
19513+ * distribute, sub license, and/or sell copies of the Software, and to
19514+ * permit persons to whom the Software is furnished to do so, subject to
19515+ * the following conditions:
19516+ *
19517+ * The above copyright notice and this permission notice (including the
19518+ * next paragraph) shall be included in all copies or substantial portions
19519+ * of the Software.
19520+ *
19521+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19522+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19523+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19524+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19525+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19526+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19527+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
19528+ *
19529+ **************************************************************************/
19530+/*
19531+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
19532+ */
19533+
19534+#ifndef _DRM_OBJECTS_H
19535+#define _DRM_OBJECTS_H
19536+
19537+struct drm_device;
19538+struct drm_bo_mem_reg;
19539+
19540+/***************************************************
19541+ * User space objects. (drm_object.c)
19542+ */
19543+
19544+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
19545+
19546+enum drm_object_type {
19547+ drm_fence_type,
19548+ drm_buffer_type,
19549+ drm_lock_type,
19550+ /*
19551+ * Add other user space object types here.
19552+ */
19553+ drm_driver_type0 = 256,
19554+ drm_driver_type1,
19555+ drm_driver_type2,
19556+ drm_driver_type3,
19557+ drm_driver_type4
19558+};
19559+
19560+/*
19561+ * A user object is a structure that helps the drm give out user handles
19562+ * to kernel internal objects and to keep track of these objects so that
19563+ * they can be destroyed, for example when the user space process exits.
19564+ * Designed to be accessible using a user space 32-bit handle.
19565+ */
19566+
19567+struct drm_user_object {
19568+ struct drm_hash_item hash;
19569+ struct list_head list;
19570+ enum drm_object_type type;
19571+ atomic_t refcount;
19572+ int shareable;
19573+ struct drm_file *owner;
19574+ void (*ref_struct_locked) (struct drm_file *priv,
19575+ struct drm_user_object *obj,
19576+ enum drm_ref_type ref_action);
19577+ void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
19578+ enum drm_ref_type unref_action);
19579+ void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
19580+};
19581+
19582+/*
19583+ * A ref object is a structure which is used to
19584+ * keep track of references to user objects and to keep track of these
19585+ * references so that they can be destroyed for example when the user space
19586+ * process exits. Designed to be accessible using a pointer to the _user_ object.
19587+ */
19588+
19589+struct drm_ref_object {
19590+ struct drm_hash_item hash;
19591+ struct list_head list;
19592+ atomic_t refcount;
19593+ enum drm_ref_type unref_action;
19594+};
19595+
19596+/**
19597+ * Must be called with the struct_mutex held.
19598+ */
19599+
19600+extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
19601+ int shareable);
19602+/**
19603+ * Must be called with the struct_mutex held.
19604+ */
19605+
19606+extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
19607+ uint32_t key);
19608+
19609+/*
19610+ * Must be called with the struct_mutex held. May temporarily release it.
19611+ */
19612+
19613+extern int drm_add_ref_object(struct drm_file *priv,
19614+ struct drm_user_object *referenced_object,
19615+ enum drm_ref_type ref_action);
19616+
19617+/*
19618+ * Must be called with the struct_mutex held.
19619+ */
19620+
19621+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
19622+ struct drm_user_object *referenced_object,
19623+ enum drm_ref_type ref_action);
19624+/*
19625+ * Must be called with the struct_mutex held.
19626+ * If "item" has been obtained by a call to drm_lookup_ref_object, you may not
19627+ * release the struct_mutex before calling drm_remove_ref_object.
19628+ * This function may temporarily release the struct_mutex.
19629+ */
19630+
19631+extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
19632+extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
19633+ enum drm_object_type type,
19634+ struct drm_user_object **object);
19635+extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
19636+ enum drm_object_type type);
19637+
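
Taken together, the locking notes above fix the calling convention: creation, lookup, and referencing all happen under dev->struct_mutex. Publishing a freshly created kernel object to user space then looks roughly like this (a sketch; obj and shareable stand in for the caller's object and policy, and _DRM_REF_USE is the ordinary use-reference type):

	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(file_priv, &obj->base, shareable);
	if (!ret)
		ret = drm_add_ref_object(file_priv, &obj->base, _DRM_REF_USE);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;
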
19638+/***************************************************
19639+ * Fence objects. (drm_fence.c)
19640+ */
19641+
19642+struct drm_fence_object {
19643+ struct drm_user_object base;
19644+ struct drm_device *dev;
19645+ atomic_t usage;
19646+
19647+ /*
19648+ * The below three fields are protected by the fence manager spinlock.
19649+ */
19650+
19651+ struct list_head ring;
19652+ int fence_class;
19653+ uint32_t native_types;
19654+ uint32_t type;
19655+ uint32_t signaled_types;
19656+ uint32_t sequence;
19657+ uint32_t waiting_types;
19658+ uint32_t error;
19659+};
19660+
19661+#define _DRM_FENCE_CLASSES 8
19662+
19663+struct drm_fence_class_manager {
19664+ struct list_head ring;
19665+ uint32_t pending_flush;
19666+ uint32_t waiting_types;
19667+ wait_queue_head_t fence_queue;
19668+ uint32_t highest_waiting_sequence;
19669+ uint32_t latest_queued_sequence;
19670+};
19671+
19672+struct drm_fence_manager {
19673+ int initialized;
19674+ rwlock_t lock;
19675+ struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
19676+ uint32_t num_classes;
19677+ atomic_t count;
19678+};
19679+
19680+struct drm_fence_driver {
19681+ unsigned long *waiting_jiffies;
19682+ uint32_t num_classes;
19683+ uint32_t wrap_diff;
19684+ uint32_t flush_diff;
19685+ uint32_t sequence_mask;
19686+
19687+ /*
19688+ * Driver implemented functions:
19689+ * has_irq() : 1 if the hardware can update the indicated type_flags using an
19690+ * irq handler. 0 if polling is required.
19691+ *
19692+ * emit() : Emit a sequence number to the command stream.
19693+ * Return the sequence number.
19694+ *
19695+ * flush() : Make sure the flags indicated in fc->pending_flush will eventually
19696+ * signal for fc->highest_received_sequence and all preceding sequences.
19697+ * Acknowledge by clearing the flags fc->pending_flush.
19698+ *
19699+ * poll() : Call drm_fence_handler with any new information.
19700+ *
19701+	 * needed_flush() : Given the current state of the fence->type flags and previously
19702+ * executed or queued flushes, return the type_flags that need flushing.
19703+ *
19704+ * wait(): Wait for the "mask" flags to signal on a given fence, performing
19705+ * whatever's necessary to make this happen.
19706+ */
19707+
19708+ int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
19709+ uint32_t flags);
19710+ int (*emit) (struct drm_device *dev, uint32_t fence_class,
19711+ uint32_t flags, uint32_t *breadcrumb,
19712+ uint32_t *native_type);
19713+ void (*flush) (struct drm_device *dev, uint32_t fence_class);
19714+ void (*poll) (struct drm_device *dev, uint32_t fence_class,
19715+ uint32_t types);
19716+ uint32_t (*needed_flush) (struct drm_fence_object *fence);
19717+ int (*wait) (struct drm_fence_object *fence, int lazy,
19718+ int interruptible, uint32_t mask);
19719+};
19720+
19721+extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
19722+ int interruptible, uint32_t mask,
19723+ unsigned long end_jiffies);
19724+extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
19725+ uint32_t sequence, uint32_t type,
19726+ uint32_t error);
19727+extern void drm_fence_manager_init(struct drm_device *dev);
19728+extern void drm_fence_manager_takedown(struct drm_device *dev);
19729+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
19730+ uint32_t sequence);
19731+extern int drm_fence_object_flush(struct drm_fence_object *fence,
19732+ uint32_t type);
19733+extern int drm_fence_object_signaled(struct drm_fence_object *fence,
19734+ uint32_t type);
19735+extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
19736+extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
19737+extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
19738+extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
19739+ struct drm_fence_object *src);
19740+extern int drm_fence_object_wait(struct drm_fence_object *fence,
19741+ int lazy, int ignore_signals, uint32_t mask);
19742+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
19743+ uint32_t fence_flags, uint32_t fence_class,
19744+ struct drm_fence_object **c_fence);
19745+extern int drm_fence_object_emit(struct drm_fence_object *fence,
19746+ uint32_t fence_flags, uint32_t class,
19747+ uint32_t type);
19748+extern void drm_fence_fill_arg(struct drm_fence_object *fence,
19749+ struct drm_fence_arg *arg);
19750+
19751+extern int drm_fence_add_user_object(struct drm_file *priv,
19752+ struct drm_fence_object *fence,
19753+ int shareable);
19754+
19755+extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
19756+ struct drm_file *file_priv);
19757+extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
19758+ struct drm_file *file_priv);
19759+extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
19760+ struct drm_file *file_priv);
19761+extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
19762+ struct drm_file *file_priv);
19763+extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
19764+ struct drm_file *file_priv);
19765+extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
19766+ struct drm_file *file_priv);
19767+extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
19768+ struct drm_file *file_priv);
19769+extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
19770+ struct drm_file *file_priv);
19771+extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
19772+ struct drm_file *file_priv);
19773+/**************************************************
19774+ *TTMs
19775+ */
19776+
19777+/*
19778+ * The ttm backend GTT interface. (In our case AGP).
19779+ * Any similar type of device (PCIE?)
19780+ * needs only to implement these functions to be usable with the TTM interface.
19781+ * The AGP backend implementation lives in drm_agpsupport.c and
19782+ * basically maps these calls to the available functions in agpgart.
19783+ * Each drm device driver gets an
19784+ * additional function pointer that creates these types,
19785+ * so that the device can choose the correct aperture.
19786+ * (Multiple AGP apertures, etc.)
19787+ * Most device drivers will let this point to the standard AGP implementation.
19788+ */
19789+
19790+#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
19791+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
19792+
19793+struct drm_ttm_backend;
19794+struct drm_ttm_backend_func {
19795+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
19796+ int (*populate) (struct drm_ttm_backend *backend,
19797+ unsigned long num_pages, struct page **pages);
19798+ void (*clear) (struct drm_ttm_backend *backend);
19799+ int (*bind) (struct drm_ttm_backend *backend,
19800+ struct drm_bo_mem_reg *bo_mem);
19801+ int (*unbind) (struct drm_ttm_backend *backend);
19802+ void (*destroy) (struct drm_ttm_backend *backend);
19803+};
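
A do-nothing backend that satisfies this function table looks as follows (a sketch for illustration only — a real backend must actually stage and bind pages into its aperture, as the AGP implementation does):

	static int null_needs_ub_cache_adjust(struct drm_ttm_backend *backend)
	{
		return 0;		/* cached bindings are acceptable */
	}

	static int null_populate(struct drm_ttm_backend *backend,
				 unsigned long num_pages, struct page **pages)
	{
		return 0;		/* nothing to stage */
	}

	static void null_clear(struct drm_ttm_backend *backend)
	{
	}

	static int null_bind(struct drm_ttm_backend *backend,
			     struct drm_bo_mem_reg *bo_mem)
	{
		return 0;		/* pretend the pages are GPU-visible */
	}

	static int null_unbind(struct drm_ttm_backend *backend)
	{
		return 0;
	}

	static void null_destroy(struct drm_ttm_backend *backend)
	{
		kfree(backend);
	}

	static struct drm_ttm_backend_func null_backend_func = {
		.needs_ub_cache_adjust = null_needs_ub_cache_adjust,
		.populate = null_populate,
		.clear = null_clear,
		.bind = null_bind,
		.unbind = null_unbind,
		.destroy = null_destroy,
	};
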
19804+
19805+
19806+struct drm_ttm_backend {
19807+ struct drm_device *dev;
19808+ uint32_t flags;
19809+ struct drm_ttm_backend_func *func;
19810+};
19811+
19812+struct drm_ttm {
19813+ struct page *dummy_read_page;
19814+ struct page **pages;
19815+ uint32_t page_flags;
19816+ unsigned long num_pages;
19817+ atomic_t vma_count;
19818+ struct drm_device *dev;
19819+ int destroy;
19820+ uint32_t mapping_offset;
19821+ struct drm_ttm_backend *be;
19822+ enum {
19823+ ttm_bound,
19824+ ttm_evicted,
19825+ ttm_unbound,
19826+ ttm_unpopulated,
19827+ } state;
19828+
19829+};
19830+
19831+extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
19832+extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
19833+extern void drm_ttm_unbind(struct drm_ttm *ttm);
19834+extern void drm_ttm_evict(struct drm_ttm *ttm);
19835+extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
19836+extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
19837+extern void drm_ttm_cache_flush(void);
19838+extern int drm_ttm_populate(struct drm_ttm *ttm);
19839+extern int drm_ttm_set_user(struct drm_ttm *ttm,
19840+ struct task_struct *tsk,
19841+ int write,
19842+ unsigned long start,
19843+ unsigned long num_pages,
19844+ struct page *dummy_read_page);
19845+unsigned long drm_ttm_size(struct drm_device *dev,
19846+ unsigned long num_pages,
19847+ int user_bo);
19848+
19849+
19850+/*
19851+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
19852+ * this, which calls this function iff there are no vmas referencing it anymore.
19853+ * Otherwise it is called when the last vma exits.
19854+ */
19855+
19856+extern int drm_destroy_ttm(struct drm_ttm *ttm);
19857+
19858+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
19859+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
19860+}
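
DRM_FLAG_MASKED() copies only the _mask bits of _new into _old and leaves everything else alone. For example:

	uint64_t flags = 0xF0;

	DRM_FLAG_MASKED(flags, 0x0F, 0x03);
	/* flags is now 0xF3: bits outside 0x03 are untouched,
	 * bits inside 0x03 now come from the new value 0x0F. */
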
19861+
19862+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
19863+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
19864+
19865+/*
19866+ * Page flags.
19867+ */
19868+
19869+#define DRM_TTM_PAGE_UNCACHED (1 << 0)
19870+#define DRM_TTM_PAGE_USED (1 << 1)
19871+#define DRM_TTM_PAGE_BOUND (1 << 2)
19872+#define DRM_TTM_PAGE_PRESENT (1 << 3)
19873+#define DRM_TTM_PAGE_VMALLOC (1 << 4)
19874+#define DRM_TTM_PAGE_USER (1 << 5)
19875+#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
19876+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
19877+#define DRM_TTM_PAGE_USER_DMA (1 << 8)
19878+
19879+/***************************************************
19880+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
19881+ */
19882+
19883+struct drm_bo_mem_reg {
19884+ struct drm_mm_node *mm_node;
19885+ unsigned long size;
19886+ unsigned long num_pages;
19887+ uint32_t page_alignment;
19888+ uint32_t mem_type;
19889+ uint64_t flags;
19890+ uint64_t mask;
19891+ uint32_t desired_tile_stride;
19892+ uint32_t hw_tile_stride;
19893+};
19894+
19895+enum drm_bo_type {
19896+ drm_bo_type_dc,
19897+ drm_bo_type_user,
19898+ drm_bo_type_kernel, /* for initial kernel allocations */
19899+};
19900+
19901+struct drm_buffer_object {
19902+ struct drm_device *dev;
19903+ struct drm_user_object base;
19904+
19905+ /*
19906+ * If there is a possibility that the usage variable is zero,
19907+	 * then dev->struct_mutex should be locked before incrementing it.
19908+ */
19909+
19910+ atomic_t usage;
19911+ unsigned long buffer_start;
19912+ enum drm_bo_type type;
19913+ unsigned long offset;
19914+ atomic_t mapped;
19915+ struct drm_bo_mem_reg mem;
19916+
19917+ struct list_head lru;
19918+ struct list_head ddestroy;
19919+
19920+ uint32_t fence_type;
19921+ uint32_t fence_class;
19922+ uint32_t new_fence_type;
19923+ uint32_t new_fence_class;
19924+ struct drm_fence_object *fence;
19925+ uint32_t priv_flags;
19926+ wait_queue_head_t event_queue;
19927+ struct mutex mutex;
19928+ unsigned long num_pages;
19929+ unsigned long reserved_size;
19930+
19931+ /* For pinned buffers */
19932+ struct drm_mm_node *pinned_node;
19933+ uint32_t pinned_mem_type;
19934+ struct list_head pinned_lru;
19935+
19936+ /* For vm */
19937+ struct drm_ttm *ttm;
19938+ struct drm_map_list map_list;
19939+ uint32_t memory_type;
19940+ unsigned long bus_offset;
19941+ uint32_t vm_flags;
19942+ void *iomap;
19943+
19944+#ifdef DRM_ODD_MM_COMPAT
19945+ /* dev->struct_mutex only protected. */
19946+ struct list_head vma_list;
19947+ struct list_head p_mm_list;
19948+#endif
19949+
19950+};
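
The usage rule in the comment above is exactly what the PSB command-submission path follows: a reference on a buffer that is known to be alive is taken under struct_mutex and later dropped through the deref helpers (sketch):

	mutex_lock(&dev->struct_mutex);
	atomic_inc(&bo->usage);		/* bo is known alive: usage > 0 */
	mutex_unlock(&dev->struct_mutex);

	/* ... use the buffer ... */

	drm_bo_usage_deref_unlocked(&bo);
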
19951+
19952+#define _DRM_BO_FLAG_UNFENCED 0x00000001
19953+#define _DRM_BO_FLAG_EVICTED 0x00000002
19954+
19955+struct drm_mem_type_manager {
19956+ int has_type;
19957+ int use_type;
19958+ struct drm_mm manager;
19959+ struct list_head lru;
19960+ struct list_head pinned;
19961+ uint32_t flags;
19962+ uint32_t drm_bus_maptype;
19963+ unsigned long gpu_offset;
19964+ unsigned long io_offset;
19965+ unsigned long io_size;
19966+ void *io_addr;
19967+};
19968+
19969+struct drm_bo_lock {
19970+ struct drm_user_object base;
19971+ wait_queue_head_t queue;
19972+ atomic_t write_lock_pending;
19973+ atomic_t readers;
19974+};
19975+
19976+#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
19977+#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
19978+#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
19979+#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
19980+ before kernel access. */
19981+#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
19982+#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
19983+
19984+struct drm_buffer_manager {
19985+ struct drm_bo_lock bm_lock;
19986+ struct mutex evict_mutex;
19987+ int nice_mode;
19988+ int initialized;
19989+ struct drm_file *last_to_validate;
19990+ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
19991+ struct list_head unfenced;
19992+ struct list_head ddestroy;
19993+ struct delayed_work wq;
19994+ uint32_t fence_type;
19995+ unsigned long cur_pages;
19996+ atomic_t count;
19997+ struct page *dummy_read_page;
19998+};
19999+
20000+struct drm_bo_driver {
20001+ const uint32_t *mem_type_prio;
20002+ const uint32_t *mem_busy_prio;
20003+ uint32_t num_mem_type_prio;
20004+ uint32_t num_mem_busy_prio;
20005+ struct drm_ttm_backend *(*create_ttm_backend_entry)
20006+ (struct drm_device *dev);
20007+ int (*backend_size) (struct drm_device *dev,
20008+ unsigned long num_pages);
20009+ int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
20010+ uint32_t *type);
20011+ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
20012+ int (*init_mem_type) (struct drm_device *dev, uint32_t type,
20013+ struct drm_mem_type_manager *man);
20014+ uint32_t(*evict_mask) (struct drm_buffer_object *bo);
20015+ int (*move) (struct drm_buffer_object *bo,
20016+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
20017+ void (*ttm_cache_flush)(struct drm_ttm *ttm);
20018+
20019+ /*
20020+ * command_stream_barrier
20021+ *
20022+ * @dev: The drm device.
20023+ *
20024+ * @bo: The buffer object to validate.
20025+ *
20026+ * @new_fence_class: The new fence class for the buffer object.
20027+ *
20028+ * @new_fence_type: The new fence type for the buffer object.
20029+ *
20030+ * @no_wait: whether this should give up and return -EBUSY
20031+ * if this operation would require sleeping
20032+ *
20033+ * Insert a command stream barrier that makes sure that the
20034+ * buffer is idle once the commands associated with the
20035+ * current validation are starting to execute. If an error
20036+ * condition is returned, or the function pointer is NULL,
20037+ * the drm core will force buffer idle
20038+ * during validation.
20039+ */
20040+
20041+ int (*command_stream_barrier) (struct drm_buffer_object *bo,
20042+ uint32_t new_fence_class,
20043+ uint32_t new_fence_type,
20044+ int no_wait);
20045+};
20046+
20047+/*
20048+ * buffer objects (drm_bo.c)
20049+ */
20050+extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20051+extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20052+extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20053+extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20054+extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20055+extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
20056+extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20057+extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20058+extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20059+extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20060+extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20061+extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20062+extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20063+extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20064+extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
20065+extern int drm_bo_driver_finish(struct drm_device *dev);
20066+extern int drm_bo_driver_init(struct drm_device *dev);
20067+extern int drm_bo_pci_offset(struct drm_device *dev,
20068+ struct drm_bo_mem_reg *mem,
20069+ unsigned long *bus_base,
20070+ unsigned long *bus_offset,
20071+ unsigned long *bus_size);
20072+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
20073+
20074+extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
20075+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
20076+extern void drm_putback_buffer_objects(struct drm_device *dev);
20077+extern int drm_fence_buffer_objects(struct drm_device *dev,
20078+ struct list_head *list,
20079+ uint32_t fence_flags,
20080+ struct drm_fence_object *fence,
20081+ struct drm_fence_object **used_fence);
20082+extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
20083+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
20084+ enum drm_bo_type type, uint64_t mask,
20085+ uint32_t hint, uint32_t page_alignment,
20086+ unsigned long buffer_start,
20087+ struct drm_buffer_object **bo);
20088+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
20089+ int no_wait);
20090+extern int drm_bo_mem_space(struct drm_buffer_object *bo,
20091+ struct drm_bo_mem_reg *mem, int no_wait);
20092+extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
20093+ uint64_t new_mem_flags,
20094+ int no_wait, int move_unfenced);
20095+extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
20096+extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
20097+ unsigned long p_offset, unsigned long p_size);
20098+extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
20099+ uint32_t fence_class, uint64_t flags,
20100+ uint64_t mask, uint32_t hint,
20101+ int use_old_fence_class,
20102+ struct drm_bo_info_rep *rep,
20103+ struct drm_buffer_object **bo_rep);
20104+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
20105+ uint32_t handle,
20106+ int check_owner);
20107+extern int drm_bo_do_validate(struct drm_buffer_object *bo,
20108+ uint64_t flags, uint64_t mask, uint32_t hint,
20109+ uint32_t fence_class,
20110+ int no_wait,
20111+ struct drm_bo_info_rep *rep);
20112+extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
20113+ struct drm_bo_info_rep *rep);
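+
+/*
+ * Hedged usage sketch (illustrative, error handling trimmed; flag names
+ * as in the drm_bo API): create a one-page kernel buffer in TT memory
+ * and drop the reference again.
+ *
+ *	struct drm_buffer_object *bo;
+ *
+ *	if (!drm_buffer_object_create(dev, PAGE_SIZE, drm_bo_type_kernel,
+ *				      DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ *				      0, 0, 0, &bo))
+ *		drm_bo_usage_deref_unlocked(&bo);
+ */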
20114+/*
20115+ * Buffer object memory move- and map helpers.
20116+ * drm_bo_move.c
20117+ */
20118+
20119+extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
20120+ int evict, int no_wait,
20121+ struct drm_bo_mem_reg *new_mem);
20122+extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
20123+ int evict,
20124+ int no_wait, struct drm_bo_mem_reg *new_mem);
20125+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
20126+ int evict, int no_wait,
20127+ uint32_t fence_class, uint32_t fence_type,
20128+ uint32_t fence_flags,
20129+ struct drm_bo_mem_reg *new_mem);
20130+extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
20131+extern unsigned long drm_bo_offset_end(unsigned long offset,
20132+ unsigned long end);
20133+
20134+struct drm_bo_kmap_obj {
20135+ void *virtual;
20136+ struct page *page;
20137+ enum {
20138+ bo_map_iomap,
20139+ bo_map_vmap,
20140+ bo_map_kmap,
20141+ bo_map_premapped,
20142+ } bo_kmap_type;
20143+};
20144+
20145+static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
20146+{
20147+ *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
20148+ map->bo_kmap_type == bo_map_premapped);
20149+ return map->virtual;
20150+}
20151+extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
20152+extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
20153+ unsigned long num_pages, struct drm_bo_kmap_obj *map);
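+
+/*
+ * Mapping sketch (illustrative): map the first page of a buffer, ask
+ * whether the mapping is I/O memory, then release it.
+ *
+ *	struct drm_bo_kmap_obj kmap;
+ *	int is_iomem;
+ *	void *virt;
+ *
+ *	if (!drm_bo_kmap(bo, 0, 1, &kmap)) {
+ *		virt = drm_bmo_virtual(&kmap, &is_iomem);
+ *		... access *virt, via memcpy_fromio() when is_iomem is set ...
+ *		drm_bo_kunmap(&kmap);
+ *	}
+ */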
20154+
20155+
20156+/*
20157+ * drm_regman.c
20158+ */
20159+
20160+struct drm_reg {
20161+ struct list_head head;
20162+ struct drm_fence_object *fence;
20163+ uint32_t fence_type;
20164+ uint32_t new_fence_type;
20165+};
20166+
20167+struct drm_reg_manager {
20168+ struct list_head free;
20169+ struct list_head lru;
20170+ struct list_head unfenced;
20171+
20172+ int (*reg_reusable)(const struct drm_reg *reg, const void *data);
20173+ void (*reg_destroy)(struct drm_reg *reg);
20174+};
20175+
20176+extern int drm_regs_alloc(struct drm_reg_manager *manager,
20177+ const void *data,
20178+ uint32_t fence_class,
20179+ uint32_t fence_type,
20180+ int interruptible,
20181+ int no_wait,
20182+ struct drm_reg **reg);
20183+
20184+extern void drm_regs_fence(struct drm_reg_manager *regs,
20185+ struct drm_fence_object *fence);
20186+
20187+extern void drm_regs_free(struct drm_reg_manager *manager);
20188+extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
20189+extern void drm_regs_init(struct drm_reg_manager *manager,
20190+ int (*reg_reusable)(const struct drm_reg *,
20191+ const void *),
20192+ void (*reg_destroy)(struct drm_reg *));
20193+
20194+extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
20195+ void **virtual);
20196+extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
20197+ void *virtual);
20198+/*
20199+ * drm_bo_lock.c
20200+ * Simple replacement for the hardware lock on buffer manager init and clean.
20201+ */
20202+
20203+
20204+extern void drm_bo_init_lock(struct drm_bo_lock *lock);
20205+extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
20206+extern int drm_bo_read_lock(struct drm_bo_lock *lock);
20207+extern int drm_bo_write_lock(struct drm_bo_lock *lock,
20208+ struct drm_file *file_priv);
20209+
20210+extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
20211+ struct drm_file *file_priv);
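+
+/*
+ * Locking sketch (illustrative; assumes the device embeds the buffer
+ * manager as dev->bm): init/takedown paths take the write side, while
+ * validation takes the read side:
+ *
+ *	if (!drm_bo_read_lock(&dev->bm.bm_lock)) {
+ *		... validate buffers ...
+ *		drm_bo_read_unlock(&dev->bm.bm_lock);
+ *	}
+ */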
20212+
20213+#ifdef CONFIG_DEBUG_MUTEXES
20214+#define DRM_ASSERT_LOCKED(_mutex) \
20215+ BUG_ON(!mutex_is_locked(_mutex) || \
20216+ ((_mutex)->owner != current_thread_info()))
20217+#else
20218+#define DRM_ASSERT_LOCKED(_mutex)
20219+#endif
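+
+/*
+ * Example (illustrative): a helper that must be entered with
+ * dev->struct_mutex held can enforce that with
+ *
+ *	DRM_ASSERT_LOCKED(&dev->struct_mutex);
+ *
+ * which compiles away unless CONFIG_DEBUG_MUTEXES is set.
+ */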
20220+#endif
20221Index: linux-2.6.28/drivers/gpu/drm/drm_crtc.c
20222===================================================================
20223--- linux-2.6.28.orig/drivers/gpu/drm/drm_crtc.c 2009-02-25 15:36:50.000000000 +0000
20224+++ linux-2.6.28/drivers/gpu/drm/drm_crtc.c 2009-02-25 15:37:02.000000000 +0000
20225@@ -807,6 +807,53 @@
20226 }
20227 EXPORT_SYMBOL(drm_mode_config_init);
20228
20229+/**
20230+ * drm_get_buffer_object - find the buffer object for a given handle
20231+ * @dev: DRM device
20232+ * @bo: pointer to caller's buffer_object pointer
20233+ * @handle: handle to lookup
20234+ *
20235+ * LOCKING:
20236+ * Must take @dev's struct_mutex to protect buffer object lookup.
20237+ *
20238+ * Given @handle, lookup the buffer object in @dev and put it in the caller's
20239+ * @bo pointer.
20240+ *
20241+ * RETURNS:
20242+ * Zero on success, -EINVAL if the handle couldn't be found.
20243+ */
20244+static int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
20245+{
20246+ struct drm_user_object *uo;
20247+ struct drm_hash_item *hash;
20248+ int ret;
20249+
20250+ *bo = NULL;
20251+
20252+ mutex_lock(&dev->struct_mutex);
20253+ ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
20254+ if (ret) {
20255+ DRM_ERROR("Couldn't find handle.\n");
20256+ ret = -EINVAL;
20257+ goto out_err;
20258+ }
20259+
20260+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
20261+ if (uo->type != drm_buffer_type) {
20262+ ret = -EINVAL;
20263+ goto out_err;
20264+ }
20265+
20266+ *bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
20267+ ret = 0;
20268+out_err:
20269+ mutex_unlock(&dev->struct_mutex);
20270+ return ret;
20271+}
20272+
20273+char drm_init_mode[32];
20274+EXPORT_SYMBOL(drm_init_mode);
20275+
20276 int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
20277 {
20278 uint32_t total_objects = 0;
20279@@ -1588,6 +1635,8 @@
20280 struct drm_mode_fb_cmd *r = data;
20281 struct drm_mode_config *config = &dev->mode_config;
20282 struct drm_framebuffer *fb;
20283+ struct drm_buffer_object *bo;
20284+ struct drm_crtc *crtc;
20285 int ret = 0;
20286
20287 if ((config->min_width > r->width) || (r->width > config->max_width)) {
20288@@ -1600,20 +1649,46 @@
20289 }
20290
20291 mutex_lock(&dev->mode_config.mutex);
20292+ /* TODO check limits are okay */
20293+ ret = drm_get_buffer_object(dev, &bo, r->handle);
20294+ if (ret || !bo) {
20295+ ret = -EINVAL;
20296+ goto out;
20297+ }
20298
20299 /* TODO check buffer is sufficiently large */
20300 /* TODO setup destructor callback */
20301
20302- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
20303+ fb = kzalloc(sizeof(struct drm_framebuffer), GFP_KERNEL);
20304+ if (!fb) {
20305+ ret = -ENOMEM;
20306+ goto out;
20307+ }
20308+
20309+ drm_framebuffer_init(dev, fb, NULL);
20310 if (!fb) {
20311 DRM_ERROR("could not create framebuffer\n");
20312 ret = -EINVAL;
20313 goto out;
20314 }
20315
20316+ fb->width = r->width;
20317+ fb->height = r->height;
20318+ fb->pitch = r->pitch;
20319+ fb->bits_per_pixel = r->bpp;
20320+ fb->depth = r->depth;
20321+ fb->offset = bo->offset;
20322+ fb->bo = bo;
20323+
20324 r->fb_id = fb->base.id;
20325 list_add(&fb->filp_head, &file_priv->fbs);
20326
20327+ /* FIXME: bind the fb to the right crtc */
20328+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
20329+ crtc->fb = fb;
20330+ dev->driver->fb_probe(dev, crtc);
20331+ }
20332+
20333 out:
20334 mutex_unlock(&dev->mode_config.mutex);
20335 return ret;
20336@@ -1669,8 +1744,10 @@
20337 /* TODO release all crtc connected to the framebuffer */
20338 /* TODO unhook the destructor from the buffer object */
20339
20340- list_del(&fb->filp_head);
20341- fb->funcs->destroy(fb);
20342+ if (fb->bo->type != drm_bo_type_kernel)
20343+ drm_framebuffer_cleanup(fb);
20344+ else
20345+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
20346
20347 out:
20348 mutex_unlock(&dev->mode_config.mutex);
20349@@ -1716,7 +1793,7 @@
20350 r->depth = fb->depth;
20351 r->bpp = fb->bits_per_pixel;
20352 r->pitch = fb->pitch;
20353- fb->funcs->create_handle(fb, file_priv, &r->handle);
20354+ r->handle = fb->bo->base.hash.key;
20355
20356 out:
20357 mutex_unlock(&dev->mode_config.mutex);
20358@@ -1746,7 +1823,10 @@
20359 mutex_lock(&dev->mode_config.mutex);
20360 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
20361 list_del(&fb->filp_head);
20362- fb->funcs->destroy(fb);
20363+ if (fb->bo->type != drm_bo_type_kernel)
20364+ drm_framebuffer_cleanup(fb);
20365+ else
20366+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
20367 }
20368 mutex_unlock(&dev->mode_config.mutex);
20369 }
20370Index: linux-2.6.28/include/drm/drm_crtc.h
20371===================================================================
20372--- linux-2.6.28.orig/include/drm/drm_crtc.h 2009-02-25 15:37:00.000000000 +0000
20373+++ linux-2.6.28/include/drm/drm_crtc.h 2009-02-25 15:37:02.000000000 +0000
20374@@ -50,6 +50,8 @@
20375 uint32_t type;
20376 };
20377
20378+#include <drm/drm_objects.h>
20379+
20380 /*
20381 * Note on terminology: here, for brevity and convenience, we refer to connector
20382 * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
20383@@ -258,6 +260,9 @@
20384 int flags;
20385 void *fbdev;
20386 u32 pseudo_palette[17];
20387+ unsigned long offset;
20388+ struct drm_buffer_object *bo;
20389+ struct drm_bo_kmap_obj kmap;
20390 struct list_head filp_head;
20391 };
20392
20393Index: linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c
20394===================================================================
20395--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_lvds.c 2009-02-25 15:37:02.000000000 +0000
20396+++ linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c 2009-02-25 15:37:02.000000000 +0000
20397@@ -36,6 +36,259 @@
20398 #include "i915_drm.h"
20399 #include "i915_drv.h"
20400
20401+#include <acpi/acpi_drivers.h>
20402+
20403+#define BLC_I2C_TYPE 0x01
20404+#define BLC_PWM_TYPE 0x02
20405+#define BRIGHTNESS_MASK 0xff
20406+#define BRIGHTNESS_MAX_LEVEL 100
20407+#define BLC_POLARITY_NORMAL 0
20408+#define BLC_POLARITY_INVERSE 1
20409+#define BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xfffe)
20410+#define BACKLIGHT_PWM_CTL_SHIFT (16)
20411+#define BLC_MAX_PWM_REG_FREQ 0xfffe
20412+#define BLC_MIN_PWM_REG_FREQ 0x2
20413+#define BLC_PWM_LEGACY_MODE_ENABLE 0x0001
20414+#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
20415+#define BLC_PWM_FREQ_CALC_CONSTANT 32
20416+#define MHz 1000000
20417+#define OFFSET_OPREGION_VBT 0x400
20418+
20419+typedef struct OpRegion_Header
20420+{
20421+ char sign[16];
20422+ u32 size;
20423+ u32 over;
20424+ char sver[32];
20425+ char vver[16];
20426+ char gver[16];
20427+ u32 mbox;
20428+ char rhd1[164];
20429+} OpRegionRec, *OpRegionPtr;
20430+
20431+struct vbt_header2
20432+{
20433+ char signature[20]; /**< Always starts with 'VBT$' */
20434+ u16 version; /**< decimal */
20435+ u16 header_size; /**< in bytes */
20436+ u16 vbt_size; /**< in bytes */
20437+ u8 vbt_checksum;
20438+ u8 reserved0;
20439+ u32 bdb_offset; /**< from beginning of VBT */
20440+ u32 aim1_offset; /**< from beginning of VBT */
20441+ u32 aim2_offset; /**< from beginning of VBT */
20442+ u32 aim3_offset; /**< from beginning of VBT */
20443+ u32 aim4_offset; /**< from beginning of VBT */
20444+} __attribute__ ((packed));
20445+
20446+struct bdb_header2
20447+{
20448+ char signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
20449+ u16 version; /**< decimal */
20450+ u16 header_size; /**< in bytes */
20451+ u16 bdb_size; /**< in bytes */
20452+} __attribute__ ((packed));
20453+
20454+#define LVDS_CAP_EDID (1 << 6)
20455+#define LVDS_CAP_DITHER (1 << 5)
20456+#define LVDS_CAP_PFIT_AUTO_RATIO (1 << 4)
20457+#define LVDS_CAP_PFIT_GRAPHICS_MODE (1 << 3)
20458+#define LVDS_CAP_PFIT_TEXT_MODE (1 << 2)
20459+#define LVDS_CAP_PFIT_GRAPHICS (1 << 1)
20460+#define LVDS_CAP_PFIT_TEXT (1 << 0)
20461+struct lvds_bdb_1
20462+{
20463+ u8 id; /**< 40 */
20464+ u16 size;
20465+ u8 panel_type;
20466+ u8 reserved0;
20467+ u16 caps;
20468+} __attribute__ ((packed));
20469+
20470+struct lvds_bdb_2_fp_params
20471+{
20472+ u16 x_res;
20473+ u16 y_res;
20474+ u32 lvds_reg;
20475+ u32 lvds_reg_val;
20476+ u32 pp_on_reg;
20477+ u32 pp_on_reg_val;
20478+ u32 pp_off_reg;
20479+ u32 pp_off_reg_val;
20480+ u32 pp_cycle_reg;
20481+ u32 pp_cycle_reg_val;
20482+ u32 pfit_reg;
20483+ u32 pfit_reg_val;
20484+ u16 terminator;
20485+} __attribute__ ((packed));
20486+
20487+struct lvds_bdb_2_fp_edid_dtd
20488+{
20489+ u16 dclk; /**< In 10khz */
20490+ u8 hactive;
20491+ u8 hblank;
20492+ u8 high_h; /**< 7:4 = hactive 11:8, 3:0 = hblank 11:8 */
20493+ u8 vactive;
20494+ u8 vblank;
20495+ u8 high_v; /**< 7:4 = vactive 11:8, 3:0 = vblank 11:8 */
20496+ u8 hsync_off;
20497+ u8 hsync_pulse_width;
20498+ u8 vsync_off;
20499+ u8 high_hsync_off; /**< 7:6 = hsync off 9:8 */
20500+ u8 h_image;
20501+ u8 v_image;
20502+ u8 max_hv;
20503+ u8 h_border;
20504+ u8 v_border;
20505+ u8 flags;
20506+#define FP_EDID_FLAG_VSYNC_POSITIVE (1 << 2)
20507+#define FP_EDID_FLAG_HSYNC_POSITIVE (1 << 1)
20508+} __attribute__ ((packed));
20509+
20510+struct lvds_bdb_2_entry
20511+{
20512+ u16 fp_params_offset; /**< From beginning of BDB */
20513+ u8 fp_params_size;
20514+ u16 fp_edid_dtd_offset;
20515+ u8 fp_edid_dtd_size;
20516+ u16 fp_edid_pid_offset;
20517+ u8 fp_edid_pid_size;
20518+} __attribute__ ((packed));
20519+
20520+struct lvds_bdb_2
20521+{
20522+ u8 id; /**< 41 */
20523+ u16 size;
20524+ u8 table_size; /* not sure on this one */
20525+ struct lvds_bdb_2_entry panels[16];
20526+} __attribute__ ((packed));
20527+
20528+
20529+struct lvds_bdb_blc
20530+{
20531+ u8 id; /**< 43 */
20532+ u16 size;
20533+ u8 table_size;
20534+} __attribute__ ((packed));
20535+
20536+struct lvds_blc
20537+{
20538+ u8 type:2;
20539+ u8 pol:1;
20540+ u8 gpio:3;
20541+ u8 gmbus:2;
20542+ u16 freq;
20543+ u8 minbrightness;
20544+ u8 i2caddr;
20545+ u8 brightnesscmd;
20546+ /* more... */
20547+} __attribute__ ((packed));
20548+
20549+int drm_intel_ignore_acpi = 0;
20550+MODULE_PARM_DESC(ignore_acpi, "Ignore ACPI");
20551+module_param_named(ignore_acpi, drm_intel_ignore_acpi, int, 0600);
20552+
20553+uint8_t blc_type;
20554+uint8_t blc_pol;
20555+uint8_t blc_freq;
20556+uint8_t blc_minbrightness;
20557+uint8_t blc_i2caddr;
20558+uint8_t blc_brightnesscmd;
20559+int lvds_backlight; /* restore backlight to this value */
20560+
20561+struct intel_i2c_chan *lvds_i2c_bus;
20562+u32 CoreClock;
20563+u32 PWMControlRegFreq;
20564+
20565+unsigned char * dev_OpRegion = NULL;
20566+unsigned int dev_OpRegionSize;
20567+
20568+#define PCI_PORT5_REG80_FFUSE 0xD0058000
20569+#define PCI_PORT5_REG80_MAXRES_INT_EN 0x0040
20570+#define MAX_HDISPLAY 800
20571+#define MAX_VDISPLAY 480
20572+bool sku_bMaxResEnableInt = false;
20573+
20574+/** Set BLC through I2C*/
20575+static int
20576+LVDSI2CSetBacklight(struct drm_device *dev, unsigned char ch)
20577+{
20578+ u8 out_buf[2];
20579+ struct i2c_msg msgs[] = {
20580+ {
20581+ .addr = lvds_i2c_bus->slave_addr,
20582+ .flags = 0,
20583+ .len = 2,
20584+ .buf = out_buf,
20585+ }
20586+ };
20587+
20588+ DRM_INFO("LVDSI2CSetBacklight: the slave_addr is 0x%x, the backlight value is %d\n", lvds_i2c_bus->slave_addr, ch);
20589+
20590+ out_buf[0] = blc_brightnesscmd;
20591+ out_buf[1] = ch;
20592+
20593+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
20594+ {
20595+ DRM_INFO("LVDSI2CSetBacklight: i2c_transfer done\n");
20596+ return true;
20597+ }
20598+
20599+ DRM_ERROR("msg: i2c_transfer error\n");
20600+ return false;
20601+}
20602+
20603+/**
20604+ * Calculate PWM control register value.
20605+ */
20606+static int
20607+LVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
20608+{
20609+ unsigned long value = 0;
20610+
20611+ DRM_INFO("Enter LVDSCalculatePWMCtrlRegFreq.\n");
20612+ if (blc_freq == 0) {
20613+ DRM_ERROR("LVDSCalculatePWMCtrlRegFreq: Frequency Requested is 0.\n");
20614+ return FALSE;
20615+ }
20616+ value = (CoreClock * MHz);
20617+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
20618+ value = (value * BLC_PWM_PRECISION_FACTOR);
20619+ value = (value / blc_freq);
20620+ value = (value / BLC_PWM_PRECISION_FACTOR);
20621+
20622+ if (value > (unsigned long)BLC_MAX_PWM_REG_FREQ ||
20623+ value < (unsigned long)BLC_MIN_PWM_REG_FREQ) {
20624+ return FALSE;
20625+ } else {
20626+ PWMControlRegFreq = ((u32)value & ~BLC_PWM_LEGACY_MODE_ENABLE);
20627+ return TRUE;
20628+ }
20629+}
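+
+/*
+ * Worked example with assumed values: for CoreClock = 200 (MHz) and
+ * blc_freq = 200 (Hz), value = 200000000 / 32 * 10 / 200 / 10 = 31250
+ * (0x7a12), which lies inside [BLC_MIN_PWM_REG_FREQ,
+ * BLC_MAX_PWM_REG_FREQ], so PWMControlRegFreq becomes 31250.
+ */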
20630+
20631+/**
20632+ * Returns the maximum level of the backlight duty cycle field.
20633+ */
20634+static u32
20635+LVDSGetPWMMaxBacklight(struct drm_device *dev)
20636+{
20637+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
20638+ u32 max_pwm_blc = 0;
20639+
20640+ max_pwm_blc = ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> \
20641+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
20642+
20643+ if (!(max_pwm_blc & BLC_MAX_PWM_REG_FREQ)) {
20644+ if (LVDSCalculatePWMCtrlRegFreq(dev)) {
20645+ max_pwm_blc = PWMControlRegFreq;
20646+ }
20647+ }
20648+
20649+ DRM_INFO("LVDSGetPWMMaxBacklight: the max_pwm_blc is %d.\n", max_pwm_blc);
20650+ return max_pwm_blc;
20651+}
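+
+/*
+ * Worked example with assumed values: with max_pwm_blc = 31250 and a
+ * requested level of 50 (out of BRIGHTNESS_MAX_LEVEL = 100), the PWM
+ * branch of intel_lvds_set_backlight() below programs a duty cycle of
+ * 50 * 31250 / 100 = 15625, masked to 15624 by
+ * BACKLIGHT_PWM_POLARITY_BIT_CLEAR.
+ */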
20652+
20653+
20654 /**
20655 * Sets the backlight level.
20656 *
20657@@ -43,12 +296,48 @@
20658 */
20659 static void intel_lvds_set_backlight(struct drm_device *dev, int level)
20660 {
20661- struct drm_i915_private *dev_priv = dev->dev_private;
20662+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
20663+ /*
20664 u32 blc_pwm_ctl;
20665
20666 blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
20667 I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
20668 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
20669+ */
20670+ u32 newbacklight = 0;
20671+
20672+ DRM_INFO("intel_lvds_set_backlight: the level is %d\n", level);
20673+
20674+ if(blc_type == BLC_I2C_TYPE){
20675+ newbacklight = BRIGHTNESS_MASK & ((unsigned long)level * \
20676+ BRIGHTNESS_MASK /BRIGHTNESS_MAX_LEVEL);
20677+
20678+ if (blc_pol == BLC_POLARITY_INVERSE) {
20679+ newbacklight = BRIGHTNESS_MASK - newbacklight;
20680+ }
20681+
20682+ LVDSI2CSetBacklight(dev, newbacklight);
20683+
20684+ } else if (blc_type == BLC_PWM_TYPE) {
20685+ u32 max_pwm_blc = LVDSGetPWMMaxBacklight(dev);
20686+
20687+ u32 blc_pwm_duty_cycle;
20688+
20689+ /* Prevent the LVDS from going completely black */
20690+ if (level < 20) {
20691+ level = 20;
20692+ }
20693+ blc_pwm_duty_cycle = level * max_pwm_blc/BRIGHTNESS_MAX_LEVEL;
20694+
20695+ if (blc_pol == BLC_POLARITY_INVERSE) {
20696+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
20697+ }
20698+
20699+ blc_pwm_duty_cycle &= BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
20700+
20701+ I915_WRITE(BLC_PWM_CTL,
20702+ (max_pwm_blc << BACKLIGHT_PWM_CTL_SHIFT)| (blc_pwm_duty_cycle));
20703+ }
20704 }
20705
20706 /**
20707@@ -56,10 +345,13 @@
20708 */
20709 static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
20710 {
20711- struct drm_i915_private *dev_priv = dev->dev_private;
20712+ return BRIGHTNESS_MAX_LEVEL;
20713+ /*
20714+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
20715
20716 return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
20717 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
20718+ */
20719 }
20720
20721 /**
20722@@ -77,7 +369,7 @@
20723 pp_status = I915_READ(PP_STATUS);
20724 } while ((pp_status & PP_ON) == 0);
20725
20726- intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
20727+ intel_lvds_set_backlight(dev, lvds_backlight);
20728 } else {
20729 intel_lvds_set_backlight(dev, 0);
20730
20731@@ -93,6 +385,7 @@
20732 {
20733 struct drm_device *dev = encoder->dev;
20734
20735+ DRM_INFO("intel_lvds_dpms: the mode is %d\n", mode);
20736 if (mode == DRM_MODE_DPMS_ON)
20737 intel_lvds_set_power(dev, true);
20738 else
20739@@ -152,6 +445,13 @@
20740 return MODE_PANEL;
20741 }
20742
20743+ if (IS_POULSBO(dev) && sku_bMaxResEnableInt) {
20744+ if (mode->hdisplay > MAX_HDISPLAY)
20745+ return MODE_PANEL;
20746+ if (mode->vdisplay > MAX_VDISPLAY)
20747+ return MODE_PANEL;
20748+ }
20749+
20750 return MODE_OK;
20751 }
20752
20753@@ -185,20 +485,20 @@
20754 * with the panel scaling set up to source from the H/VDisplay
20755 * of the original mode.
20756 */
20757- if (dev_priv->panel_fixed_mode != NULL) {
20758- adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
20759+ if (dev_priv_common->panel_fixed_mode != NULL) {
20760+ adjusted_mode->hdisplay = dev_priv_common->panel_fixed_mode->hdisplay;
20761 adjusted_mode->hsync_start =
20762- dev_priv->panel_fixed_mode->hsync_start;
20763+ dev_priv_common->panel_fixed_mode->hsync_start;
20764 adjusted_mode->hsync_end =
20765- dev_priv->panel_fixed_mode->hsync_end;
20766- adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
20767- adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
20768+ dev_priv_common->panel_fixed_mode->hsync_end;
20769+ adjusted_mode->htotal = dev_priv_common->panel_fixed_mode->htotal;
20770+ adjusted_mode->vdisplay = dev_priv_common->panel_fixed_mode->vdisplay;
20771 adjusted_mode->vsync_start =
20772- dev_priv->panel_fixed_mode->vsync_start;
20773+ dev_priv_common->panel_fixed_mode->vsync_start;
20774 adjusted_mode->vsync_end =
20775- dev_priv->panel_fixed_mode->vsync_end;
20776- adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
20777- adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
20778+ dev_priv_common->panel_fixed_mode->vsync_end;
20779+ adjusted_mode->vtotal = dev_priv_common->panel_fixed_mode->vtotal;
20780+ adjusted_mode->clock = dev_priv_common->panel_fixed_mode->clock;
20781 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
20782 }
20783
20784@@ -214,10 +514,10 @@
20785 static void intel_lvds_prepare(struct drm_encoder *encoder)
20786 {
20787 struct drm_device *dev = encoder->dev;
20788- struct drm_i915_private *dev_priv = dev->dev_private;
20789+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
20790
20791- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
20792- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
20793+ dev_priv_common->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
20794+ dev_priv_common->backlight_duty_cycle = (dev_priv_common->saveBLC_PWM_CTL &
20795 BACKLIGHT_DUTY_CYCLE_MASK);
20796
20797 intel_lvds_set_power(dev, false);
20798@@ -226,10 +526,11 @@
20799 static void intel_lvds_commit( struct drm_encoder *encoder)
20800 {
20801 struct drm_device *dev = encoder->dev;
20802- struct drm_i915_private *dev_priv = dev->dev_private;
20803+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
20804
20805- if (dev_priv->backlight_duty_cycle == 0)
20806- dev_priv->backlight_duty_cycle =
20807+ if (dev_priv_common->backlight_duty_cycle == 0)
20808+ //dev_priv_common->backlight_duty_cycle =
20809+ lvds_backlight =
20810 intel_lvds_get_max_backlight(dev);
20811
20812 intel_lvds_set_power(dev, true);
20813@@ -291,10 +592,12 @@
20814 {
20815 struct drm_device *dev = connector->dev;
20816 struct intel_output *intel_output = to_intel_output(connector);
20817- struct drm_i915_private *dev_priv = dev->dev_private;
20818+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
20819 int ret = 0;
20820
20821+ mutex_lock(&dev->mode_config.mutex);
20822 ret = intel_ddc_get_modes(intel_output);
20823+ mutex_unlock(&dev->mode_config.mutex);
20824
20825 if (ret)
20826 return ret;
20827@@ -308,11 +611,11 @@
20828 connector->display_info.min_hfreq = 0;
20829 connector->display_info.max_hfreq = 200;
20830
20831- if (dev_priv->panel_fixed_mode != NULL) {
20832+ if (dev_priv_common->panel_fixed_mode != NULL) {
20833 struct drm_display_mode *mode;
20834
20835 mutex_unlock(&dev->mode_config.mutex);
20836- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
20837+ mode = drm_mode_duplicate(dev, dev_priv_common->panel_fixed_mode);
20838 drm_mode_probed_add(connector, mode);
20839 mutex_unlock(&dev->mode_config.mutex);
20840
20841@@ -333,8 +636,11 @@
20842 {
20843 struct intel_output *intel_output = to_intel_output(connector);
20844
20845+ if(dev_OpRegion != NULL)
20846+ iounmap(dev_OpRegion);
20847 if (intel_output->ddc_bus)
20848 intel_i2c_destroy(intel_output->ddc_bus);
20849+ intel_i2c_destroy(lvds_i2c_bus);
20850 drm_sysfs_connector_remove(connector);
20851 drm_connector_cleanup(connector);
20852 kfree(connector);
20853@@ -373,7 +679,45 @@
20854 };
20855
20856
20857-
20858+int intel_get_acpi_dod(char *method)
20859+{
20860+ int status;
20861+ int found = 0;
20862+ int i;
20863+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
20864+ union acpi_object *dod = NULL;
20865+ union acpi_object *obj;
20866+
20867+ status = acpi_evaluate_object(NULL, method, NULL, &buffer);
20868+ if (ACPI_FAILURE(status))
20869+ return -ENODEV;
20870+
20871+ dod = buffer.pointer;
20872+ if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
20873+ status = -EFAULT;
20874+ goto out;
20875+ }
20876+
20877+ DRM_DEBUG("Found %d video heads in _DOD\n", dod->package.count);
20878+
20879+ for (i = 0; i < dod->package.count; i++) {
20880+ obj = &dod->package.elements[i];
20881+
20882+ if (obj->type != ACPI_TYPE_INTEGER) {
20883+ DRM_DEBUG("Invalid _DOD data\n");
20884+ } else {
20885+ DRM_DEBUG("dod element[%d] = 0x%x\n", i,
20886+ (int)obj->integer.value);
20887+
20888+ /* look for an LVDS type */
20889+ if (obj->integer.value & 0x00000400)
20890+ found = 1;
20891+ }
20892+ }
20893+ out:
20894+ kfree(buffer.pointer);
20895+ return found;
20896+}
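+
+/*
+ * Usage sketch, mirroring the (currently disabled) call site in
+ * intel_lvds_init() below:
+ *
+ *	if (!drm_intel_ignore_acpi && !intel_get_acpi_dod(ACPI_DOD))
+ *		return;	/* ACPI reports no LVDS output */
+ */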
20897 /**
20898 * intel_lvds_init - setup LVDS connectors on this device
20899 * @dev: drm device
20900@@ -383,7 +727,7 @@
20901 */
20902 void intel_lvds_init(struct drm_device *dev)
20903 {
20904- struct drm_i915_private *dev_priv = dev->dev_private;
20905+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
20906 struct intel_output *intel_output;
20907 struct drm_connector *connector;
20908 struct drm_encoder *encoder;
20909@@ -391,12 +735,38 @@
20910 struct drm_crtc *crtc;
20911 u32 lvds;
20912 int pipe;
20913+ u32 OpRegion_Phys;
20914+ unsigned int OpRegion_Size = 0x100;
20915+ OpRegionPtr OpRegion;
20916+ char *OpRegion_String = "IntelGraphicsMem";
20917+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
20918+ u32 clock;
20919+ u32 sku_value = 0;
20920+ unsigned int CoreClocks[] = {
20921+ 100,
20922+ 133,
20923+ 150,
20924+ 178,
20925+ 200,
20926+ 266,
20927+ 266,
20928+ 266
20929+ };
20930+ struct vbt_header *vbt;
20931+ struct bdb_header *bdb;
20932+ int vbt_off, bdb_off, bdb_block_off, block_size;
20933+ int panel_type = -1;
20934+ unsigned char *bios;
20935+ unsigned char *vbt_buf;
20936
20937 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
20938 if (!intel_output) {
20939 return;
20940 }
20941
20942+ //if (!drm_intel_ignore_acpi && !intel_get_acpi_dod(ACPI_DOD))
20943+ // return;
20944+
20945 connector = &intel_output->base;
20946 encoder = &intel_output->enc;
20947 drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
20948@@ -414,16 +784,139 @@
20949 connector->interlace_allowed = false;
20950 connector->doublescan_allowed = false;
20951
20952+ //initialize the I2C bus and BLC data
20953+ lvds_i2c_bus = intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
20954+ if (!lvds_i2c_bus) {
20955+ dev_printk(KERN_ERR, &dev->pdev->dev, "i2c bus registration "
20956+ "failed.\n");
20957+ return;
20958+ }
20959+ lvds_i2c_bus->slave_addr = 0x2c;//0x58;
20960+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
20961+ blc_type = 0;
20962+ blc_pol = 0;
20963
20964- /*
20965- * LVDS discovery:
20966- * 1) check for EDID on DDC
20967- * 2) check for VBT data
20968- * 3) check to see if LVDS is already on
20969- * if none of the above, no panel
20970- * 4) make sure lid is open
20971- * if closed, act like it's not there for now
20972- */
20973+ /* get the BLC init data from VBT */
20974+
20978+ pci_read_config_dword(dev->pdev, 0xFC, &OpRegion_Phys);
20979+
20980+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_Size);
20981+ dev_OpRegionSize = OpRegion_Size;
20982+
20983+ OpRegion = (OpRegionPtr) dev_OpRegion;
20984+
20985+ if (!memcmp(OpRegion->sign, OpRegion_String, 16)) {
20986+ unsigned int OpRegion_NewSize;
20987+
20988+ OpRegion_NewSize = OpRegion->size * 1024;
20989+
20990+ dev_OpRegionSize = OpRegion_NewSize;
20991+
20992+ iounmap(dev_OpRegion);
20993+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_NewSize);
20994+ } else {
20995+ iounmap(dev_OpRegion);
20996+ dev_OpRegion = NULL;
20997+ }
20998+
20999+ if((dev_OpRegion != NULL)&&(dev_OpRegionSize >= OFFSET_OPREGION_VBT)) {
21000+ DRM_INFO("intel_lvds_init: OpRegion has the VBT address\n");
21001+ vbt_buf = dev_OpRegion + OFFSET_OPREGION_VBT;
21002+ vbt = (struct vbt_header *)(dev_OpRegion + OFFSET_OPREGION_VBT);
21003+ } else {
21004+ DRM_INFO("intel_lvds_init: No OpRegion, use the bios at fixed address 0xc0000\n");
21005+ bios = phys_to_virt(0xC0000);
21006+ if(*((u16 *)bios) != 0xAA55){
21007+ bios = NULL;
21008+ DRM_ERROR("the bios is incorrect\n");
21009+ goto blc_out;
21010+ }
21011+ vbt_off = bios[0x1a] | (bios[0x1a + 1] << 8);
21012+ DRM_INFO("intel_lvds_init: the vbt off is %x\n", vbt_off);
21013+ vbt_buf = bios + vbt_off;
21014+ vbt = (struct vbt_header *)(bios + vbt_off);
21015+ }
21016+
21017+ bdb_off = vbt->bdb_offset;
21018+ bdb = (struct bdb_header *)(vbt_buf + bdb_off);
21019+
21020+ DRM_INFO("intel_lvds_init: The bdb->signature is %s, the bdb_off is %d\n",bdb->signature, bdb_off);
21021+
21022+ if (memcmp(bdb->signature, "BIOS_DATA_BLOCK ", 16) != 0) {
21023+ DRM_ERROR("the vbt is error\n");
21024+ goto blc_out;
21025+ }
21026+
21027+ for (bdb_block_off = bdb->header_size; bdb_block_off < bdb->bdb_size;
21028+ bdb_block_off += block_size) {
21029+ int start = bdb_off + bdb_block_off;
21030+ int id, num_entries;
21031+ struct lvds_bdb_1 *lvds1;
21032+ struct lvds_blc *lvdsblc;
21033+ struct lvds_bdb_blc *bdbblc;
21034+
21035+ id = vbt_buf[start];
21036+ block_size = (vbt_buf[start + 1] | (vbt_buf[start + 2] << 8)) + 3;
21037+ switch (id) {
21038+ case 40:
21039+ lvds1 = (struct lvds_bdb_1 *)(vbt_buf+ start);
21040+ panel_type = lvds1->panel_type;
21041+ //if (lvds1->caps & LVDS_CAP_DITHER)
21042+ // *panelWantsDither = TRUE;
21043+ break;
21044+
21045+ case 43:
21046+ bdbblc = (struct lvds_bdb_blc *)(vbt_buf + start);
21047+ num_entries = bdbblc->table_size? (bdbblc->size - \
21048+ sizeof(bdbblc->table_size))/bdbblc->table_size : 0;
21049+ if (num_entries > 0 && bdbblc->table_size == sizeof(struct lvds_blc)) {
21050+ lvdsblc = (struct lvds_blc *)(vbt_buf + start + sizeof(struct lvds_bdb_blc));
21051+ lvdsblc += panel_type;
21052+ blc_type = lvdsblc->type;
21053+ blc_pol = lvdsblc->pol;
21054+ blc_freq = lvdsblc->freq;
21055+ blc_minbrightness = lvdsblc->minbrightness;
21056+ blc_i2caddr = lvdsblc->i2caddr;
21057+ blc_brightnesscmd = lvdsblc->brightnesscmd;
21058+ DRM_INFO("intel_lvds_init: BLC Data in BIOS VBT tables: datasize=%d paneltype=%d \
21059+ type=0x%02x pol=0x%02x freq=0x%04x minlevel=0x%02x \
21060+ i2caddr=0x%02x cmd=0x%02x \n",
21061+ 0,
21062+ panel_type,
21063+ lvdsblc->type,
21064+ lvdsblc->pol,
21065+ lvdsblc->freq,
21066+ lvdsblc->minbrightness,
21067+ lvdsblc->i2caddr,
21068+ lvdsblc->brightnesscmd);
21069+ }
21070+ break;
21071+ }
21072+ }
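+ /*
+  * BDB block layout walked above (sketch): each block starts with a
+  * one-byte id followed by a two-byte little-endian size, so the
+  * bytes 28 0a 00 would denote block id 40 (the LVDS options block)
+  * with a 10-byte payload, advancing the loop by 10 + 3 bytes.
+  */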
21073+
21074+ /* get the Core Clock for calculating the max PWM value */
21075+ /* check whether MaxResEnableInt is set in the SKU fuse register */
21076+
21077+ if(pci_root)
21078+ {
21079+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
21080+ pci_read_config_dword(pci_root, 0xD4, &clock);
21081+ CoreClock = CoreClocks[clock & 0x07];
21082+ DRM_INFO("intel_lvds_init: the CoreClock is %d\n", CoreClock);
21083+
21084+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
21085+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
21086+ sku_bMaxResEnableInt = (sku_value & PCI_PORT5_REG80_MAXRES_INT_EN)? true : false;
21087+ DRM_INFO("intel_lvds_init: sku_value is 0x%08x\n", sku_value);
21088+ DRM_INFO("intel_lvds_init: sku_bMaxResEnableInt is %d\n", sku_bMaxResEnableInt);
21089+ }
21090+
21093+blc_out:
21094
21095 /* Set up the DDC bus. */
21096 intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
21097@@ -437,12 +930,14 @@
21098 * Attempt to get the fixed panel mode from DDC. Assume that the
21099 * preferred mode is the right one.
21100 */
21101+ mutex_lock(&dev->mode_config.mutex);
21102 intel_ddc_get_modes(intel_output);
21103+ mutex_unlock(&dev->mode_config.mutex);
21104
21105 list_for_each_entry(scan, &connector->probed_modes, head) {
21106 mutex_lock(&dev->mode_config.mutex);
21107 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
21108- dev_priv->panel_fixed_mode =
21109+ dev_priv_common->panel_fixed_mode =
21110 drm_mode_duplicate(dev, scan);
21111 mutex_unlock(&dev->mode_config.mutex);
21112 goto out; /* FIXME: check for quirks */
21113@@ -450,21 +945,6 @@
21114 mutex_unlock(&dev->mode_config.mutex);
21115 }
21116
21117- /* Failed to get EDID, what about VBT? */
21118- if (dev_priv->vbt_mode) {
21119- mutex_lock(&dev->mode_config.mutex);
21120- dev_priv->panel_fixed_mode =
21121- drm_mode_duplicate(dev, dev_priv->vbt_mode);
21122- mutex_unlock(&dev->mode_config.mutex);
21123- if (dev_priv->panel_fixed_mode) {
21124- dev_priv->panel_fixed_mode->type |=
21125- DRM_MODE_TYPE_PREFERRED;
21126- drm_mode_probed_add(connector,
21127- dev_priv->panel_fixed_mode);
21128- goto out;
21129- }
21130- }
21131-
21132 /*
21133 * If we didn't get EDID, try checking if the panel is already turned
21134 * on. If so, assume that whatever is currently programmed is the
21135Index: linux-2.6.28/drivers/gpu/drm/i915/intel_sdvo.c
21136===================================================================
21137--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_sdvo.c 2009-02-25 15:37:02.000000000 +0000
21138+++ linux-2.6.28/drivers/gpu/drm/i915/intel_sdvo.c 2009-02-25 15:37:02.000000000 +0000
21139@@ -37,6 +37,9 @@
21140
21141 #undef SDVO_DEBUG
21142
21143+#define PCI_PORT5_REG80_FFUSE 0xD0058000
21144+#define PCI_PORT5_REG80_SDVO_DISABLE 0x0020
21145+
21146 struct intel_sdvo_priv {
21147 struct intel_i2c_chan *i2c_bus;
21148 int slaveaddr;
21149@@ -989,6 +992,21 @@
21150 int i;
21151 int encoder_type, output_id;
21152
21153+ if (IS_POULSBO(dev)) {
21154+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
21155+ u32 sku_value = 0;
21156+ bool sku_bSDVOEnable = true;
21157+ if(pci_root) {
21158+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
21159+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
21160+ sku_bSDVOEnable = (sku_value & PCI_PORT5_REG80_SDVO_DISABLE)?false : true;
21161+ DRM_INFO("intel_sdvo_init: sku_value is 0x%08x\n", sku_value);
21162+ DRM_INFO("intel_sdvo_init: sku_bSDVOEnable is %d\n", sku_bSDVOEnable);
21163+ if (sku_bSDVOEnable == false)
21164+ return false;
21165+ }
21166+ }
21167+
21168 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
21169 if (!intel_output) {
21170 return false;
21171Index: linux-2.6.28/drivers/gpu/drm/psb/psb_priv.h
21172===================================================================
21173--- /dev/null 1970-01-01 00:00:00.000000000 +0000
21174+++ linux-2.6.28/drivers/gpu/drm/psb/psb_priv.h 2009-02-25 15:37:02.000000000 +0000
21175@@ -0,0 +1,181 @@
21176+#include "psb_drm.h"
21177+#include "psb_reg.h"
21178+#include "psb_schedule.h"
21179+#include "../i915/i915_common.h"
21180+
21181+#define DRM_DRIVER_PRIVATE_T struct drm_i915_common_private
21182+
21183+struct drm_psb_uopt {
21184+ int clock_gating;
21185+};
21186+
21187+struct drm_psb_private {
21188+ /* common is assumed to be the first item in this structure */
21189+ struct drm_i915_common_private common;
21190+
21191+ unsigned long chipset;
21192+ uint8_t psb_rev_id;
21193+
21194+ struct psb_xhw_buf resume_buf;
21195+ struct drm_psb_dev_info_arg dev_info;
21196+ struct drm_psb_uopt uopt;
21197+
21198+ struct psb_gtt *pg;
21199+
21200+ struct page *scratch_page;
21201+ struct page *comm_page;
21202+
21203+ volatile uint32_t *comm;
21204+ uint32_t comm_mmu_offset;
21205+ uint32_t mmu_2d_offset;
21206+ uint32_t sequence[PSB_NUM_ENGINES];
21207+ uint32_t last_sequence[PSB_NUM_ENGINES];
21208+ int idle[PSB_NUM_ENGINES];
21209+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
21210+ int engine_lockup_2d;
21211+
21212+ /** Protects user_irq_refcount and irq_mask_reg */
21213+ spinlock_t user_irq_lock;
21214+ u32 pipestat[2];
21215+
21216+ struct psb_mmu_driver *mmu;
21217+ struct psb_mmu_pd *pf_pd;
21218+
21219+ uint8_t *sgx_reg;
21220+ //uint8_t *vdc_reg;
21221+ uint8_t *msvdx_reg;
21222+
21223+ /*
21224+ * MSVDX
21225+ */
21226+ int msvdx_needs_reset;
21227+ int has_msvdx;
21228+ uint32_t gatt_free_offset;
21229+ atomic_t msvdx_mmu_invaldc;
21230+
21231+ /*
21232+ * Fencing / irq.
21233+ */
21234+
21235+ uint32_t sgx_irq_mask;
21236+ uint32_t sgx2_irq_mask;
21237+ uint32_t vdc_irq_mask;
21238+
21239+ spinlock_t irqmask_lock;
21240+ spinlock_t sequence_lock;
21241+ int fence0_irq_on;
21242+ int irq_enabled;
21243+ unsigned int irqen_count_2d;
21244+ wait_queue_head_t event_2d_queue;
21245+
21246+ wait_queue_head_t queue_2d;
21247+ atomic_t lock_2d;
21248+ atomic_t ta_wait_2d;
21249+ atomic_t ta_wait_2d_irq;
21250+ atomic_t waiters_2d;
21251+
21252+ uint32_t msvdx_current_sequence;
21253+ uint32_t msvdx_last_sequence;
21254+#define MSVDX_MAX_IDELTIME HZ*30
21255+ uint32_t msvdx_finished_sequence;
21256+ uint32_t msvdx_start_idle;
21257+ unsigned long msvdx_idle_start_jiffies;
21258+
21259+ int fence2_irq_on;
21260+
21261+ /*
21262+ * MSVDX Rendec Memory
21263+ */
21264+ struct drm_buffer_object *ccb0;
21265+ uint32_t base_addr0;
21266+ struct drm_buffer_object *ccb1;
21267+ uint32_t base_addr1;
21268+
21269+ /*
21270+ * Memory managers
21271+ */
21272+
21273+ int have_vram;
21274+ int have_tt;
21275+ int have_mem_mmu;
21276+ int have_mem_aper;
21277+ int have_mem_kernel;
21278+ int have_mem_pds;
21279+ int have_mem_rastgeom;
21280+ struct mutex temp_mem;
21281+
21282+ /*
21283+ * Relocation buffer mapping.
21284+ */
21285+
21286+ spinlock_t reloc_lock;
21287+ unsigned int rel_mapped_pages;
21288+ wait_queue_head_t rel_mapped_queue;
21289+
21290+ /*
21291+ * Register state
21292+ */
21293+ uint32_t saveCLOCKGATING;
21294+
21295+ /*
21296+ * USE code base register management.
21297+ */
21298+
21299+ struct drm_reg_manager use_manager;
21300+
21301+ /*
21302+ * Xhw
21303+ */
21304+
21305+ uint32_t *xhw;
21306+ struct drm_buffer_object *xhw_bo;
21307+ struct drm_bo_kmap_obj xhw_kmap;
21308+ struct list_head xhw_in;
21309+ spinlock_t xhw_lock;
21310+ atomic_t xhw_client;
21311+ struct drm_file *xhw_file;
21312+ wait_queue_head_t xhw_queue;
21313+ wait_queue_head_t xhw_caller_queue;
21314+ struct mutex xhw_mutex;
21315+ struct psb_xhw_buf *xhw_cur_buf;
21316+ int xhw_submit_ok;
21317+ int xhw_on;
21318+
21319+ /*
21320+ * Scheduling.
21321+ */
21322+
21323+ struct mutex reset_mutex;
21324+ struct mutex cmdbuf_mutex;
21325+ struct psb_scheduler scheduler;
21326+ struct psb_buflist_item *buffers;
21327+ uint32_t ta_mem_pages;
21328+ struct psb_ta_mem *ta_mem;
21329+ int force_ta_mem_load;
21330+
21331+ /*
21332+ * Watchdog
21333+ */
21334+
21335+ spinlock_t watchdog_lock;
21336+ struct timer_list watchdog_timer;
21337+ struct work_struct watchdog_wq;
21338+ struct work_struct msvdx_watchdog_wq;
21339+ int timer_available;
21340+
21341+ /*
21342+ * msvdx command queue
21343+ */
21344+ spinlock_t msvdx_lock;
21345+ struct mutex msvdx_mutex;
21346+ struct list_head msvdx_queue;
21347+ int msvdx_busy;
21348+
21349+};
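+
+/*
+ * Since "common" is the first member, dev->dev_private can be viewed as
+ * either private struct; an illustrative helper (not part of this
+ * patch):
+ *
+ *	static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+ *	{
+ *		return (struct drm_psb_private *)dev->dev_private;
+ *	}
+ */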
21350+
21351+
21352+extern void intel_modeset_init(struct drm_device *dev);
21353+extern void intel_modeset_cleanup(struct drm_device *dev);
21354+
21355+extern void intel_crtc_mode_restore(struct drm_crtc *crtc);
21356+extern void intel_crtc_mode_save(struct drm_crtc *crtc);
21357Index: linux-2.6.28/drivers/gpu/drm/i915/intel_display.c
21358===================================================================
21359--- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_display.c 2009-02-25 15:37:02.000000000 +0000
21360+++ linux-2.6.28/drivers/gpu/drm/i915/intel_display.c 2009-02-25 15:37:02.000000000 +0000
21361@@ -342,60 +342,25 @@
21362 /* Wait for 20ms, i.e. one cycle at 50hz. */
21363 udelay(20000);
21364 }
21365+EXPORT_SYMBOL(intel_wait_for_vblank);
21366
21367 static void
21368 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
21369 struct drm_framebuffer *old_fb)
21370 {
21371 struct drm_device *dev = crtc->dev;
21372- struct drm_i915_private *dev_priv = dev->dev_private;
21373 struct drm_i915_master_private *master_priv;
21374+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
21375 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
21376- struct intel_framebuffer *intel_fb;
21377- struct drm_i915_gem_object *obj_priv;
21378- struct drm_gem_object *obj;
21379 int pipe = intel_crtc->pipe;
21380 unsigned long Start, Offset;
21381 int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
21382 int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
21383 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
21384 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
21385- u32 dspcntr, alignment;
21386-
21387- /* no fb bound */
21388- if (!crtc->fb) {
21389- DRM_DEBUG("No FB bound\n");
21390- return;
21391- }
21392-
21393- intel_fb = to_intel_framebuffer(crtc->fb);
21394- obj = intel_fb->obj;
21395- obj_priv = obj->driver_private;
21396-
21397- switch (obj_priv->tiling_mode) {
21398- case I915_TILING_NONE:
21399- alignment = 64 * 1024;
21400- break;
21401- case I915_TILING_X:
21402- if (IS_I9XX(dev))
21403- alignment = 1024 * 1024;
21404- else
21405- alignment = 512 * 1024;
21406- break;
21407- case I915_TILING_Y:
21408- /* FIXME: Is this true? */
21409- DRM_ERROR("Y tiled not allowed for scan out buffers\n");
21410- return;
21411- default:
21412- BUG();
21413- }
21414+ u32 dspcntr;
21415
21416- if (i915_gem_object_pin(intel_fb->obj, alignment))
21417- return;
21418-
21419- i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
21420-
21421- Start = obj_priv->gtt_offset;
21422+ Start = crtc->fb->offset;
21423 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
21424
21425 I915_WRITE(dspstride, crtc->fb->pitch);
21426@@ -434,13 +399,6 @@
21427 I915_READ(dspbase);
21428 }
21429
21430- intel_wait_for_vblank(dev);
21431-
21432- if (old_fb) {
21433- intel_fb = to_intel_framebuffer(old_fb);
21434- i915_gem_object_unpin(intel_fb->obj);
21435- }
21436-
21437 if (!dev->primary->master)
21438 return;
21439
21440@@ -642,7 +600,7 @@
21441 return 400000;
21442 else if (IS_I915G(dev))
21443 return 333000;
21444- else if (IS_I945GM(dev) || IS_845G(dev))
21445+ else if (IS_I945GM(dev) || IS_POULSBO(dev) || IS_845G(dev))
21446 return 200000;
21447 else if (IS_I915GM(dev)) {
21448 u16 gcfgc = 0;
21449@@ -786,13 +744,15 @@
21450
21451 dpll = DPLL_VGA_MODE_DIS;
21452 if (IS_I9XX(dev)) {
21453- if (is_lvds)
21454+ if (is_lvds) {
21455 dpll |= DPLLB_MODE_LVDS;
21456- else
21457+ if (IS_POULSBO(dev))
21458+ dpll |= DPLL_DVO_HIGH_SPEED;
21459+ } else
21460 dpll |= DPLLB_MODE_DAC_SERIAL;
21461 if (is_sdvo) {
21462 dpll |= DPLL_DVO_HIGH_SPEED;
21463- if (IS_I945G(dev) || IS_I945GM(dev)) {
21464+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) {
21465 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
21466 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
21467 }
21468@@ -959,7 +919,7 @@
21469 void intel_crtc_load_lut(struct drm_crtc *crtc)
21470 {
21471 struct drm_device *dev = crtc->dev;
21472- struct drm_i915_private *dev_priv = dev->dev_private;
21473+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
21474 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
21475 int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
21476 int i;
21477@@ -1021,7 +981,7 @@
21478 ret = -ENOMEM;
21479 goto fail;
21480 }
21481-
21482+#if 0
21483 /* we only need to pin inside GTT if cursor is non-phy */
21484 if (!dev_priv->cursor_needs_physical) {
21485 ret = i915_gem_object_pin(bo, PAGE_SIZE);
21486@@ -1038,7 +998,7 @@
21487 }
21488 addr = obj_priv->phys_obj->handle->busaddr;
21489 }
21490-
21491+#endif
21492 temp = 0;
21493 /* set the pipe for the cursor */
21494 temp |= (pipe << 28);
21495@@ -1049,6 +1009,7 @@
21496 I915_WRITE(base, addr);
21497
21498 if (intel_crtc->cursor_bo) {
21499+#if 0
21500 if (dev_priv->cursor_needs_physical) {
21501 if (intel_crtc->cursor_bo != bo)
21502 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
21503@@ -1057,6 +1018,7 @@
21504 mutex_lock(&dev->struct_mutex);
21505 drm_gem_object_unreference(intel_crtc->cursor_bo);
21506 mutex_unlock(&dev->struct_mutex);
21507+#endif
21508 }
21509
21510 intel_crtc->cursor_addr = addr;
21511@@ -1456,7 +1418,8 @@
21512 {
21513 struct drm_connector *connector;
21514
21515- intel_crt_init(dev);
21516+ if (!IS_POULSBO(dev))
21517+ intel_crt_init(dev);
21518
21519 /* Set up integrated LVDS */
21520 if (IS_MOBILE(dev) && !IS_I830(dev))
21521@@ -1472,12 +1435,9 @@
21522 found = intel_sdvo_init(dev, SDVOC);
21523 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
21524 intel_hdmi_init(dev, SDVOC);
21525- } else
21526+ } else
21527 intel_dvo_init(dev);
21528
21529- if (IS_I9XX(dev) && IS_MOBILE(dev))
21530- intel_tv_init(dev);
21531-
21532 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
21533 struct intel_output *intel_output = to_intel_output(connector);
21534 struct drm_encoder *encoder = &intel_output->enc;
21535@@ -1525,8 +1485,8 @@
21536 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
21537 struct drm_device *dev = fb->dev;
21538
21539- if (fb->fbdev)
21540- intelfb_remove(dev, fb);
21541+ //if (fb->fbdev)
21542+ // intelfb_remove(dev, fb);
21543
21544 drm_framebuffer_cleanup(fb);
21545 mutex_lock(&dev->struct_mutex);
21546@@ -1603,7 +1563,7 @@
21547
21548 static const struct drm_mode_config_funcs intel_mode_funcs = {
21549 .fb_create = intel_user_framebuffer_create,
21550- .fb_changed = intelfb_probe,
21551+// .fb_changed = intelfb_probe,
21552 };
21553
21554 void intel_modeset_init(struct drm_device *dev)
21555Index: linux-2.6.28/drivers/gpu/drm/i915/i915_irq.c
21556===================================================================
21557--- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_irq.c 2009-02-25 15:37:02.000000000 +0000
21558+++ linux-2.6.28/drivers/gpu/drm/i915/i915_irq.c 2009-02-25 15:37:02.000000000 +0000
21559@@ -536,6 +536,7 @@
21560
21561 int i915_driver_irq_postinstall(struct drm_device *dev)
21562 {
21563+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
21564 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
21565
21566 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch
new file mode 100644
index 0000000000..2655acfaa5
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch
@@ -0,0 +1,486 @@
1From 84e7ccff650b8f124585ba7d5b9a1544f53457e7 Mon Sep 17 00:00:00 2001
2From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
3Date: Fri, 27 Feb 2009 16:53:11 +0100
4Subject: [PATCH 1/8] drm: Split out the mm declarations in a separate header. Add atomic operations.
5
6Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
7---
8 drivers/gpu/drm/drm_mm.c | 173 ++++++++++++++++++++++++++++++++++++++--------
9 include/drm/drmP.h | 37 +----------
10 include/drm/drm_mm.h | 90 ++++++++++++++++++++++++
11 3 files changed, 235 insertions(+), 65 deletions(-)
12 create mode 100644 include/drm/drm_mm.h
13
14Index: linux-2.6.28/drivers/gpu/drm/drm_mm.c
15===================================================================
16--- linux-2.6.28.orig/drivers/gpu/drm/drm_mm.c 2009-03-09 19:19:52.000000000 +0000
17+++ linux-2.6.28/drivers/gpu/drm/drm_mm.c 2009-03-12 13:15:05.000000000 +0000
18@@ -42,8 +43,11 @@
19 */
20
21 #include "drmP.h"
22+#include "drm_mm.h"
23 #include <linux/slab.h>
24
25+#define MM_UNUSED_TARGET 4
26+
27 unsigned long drm_mm_tail_space(struct drm_mm *mm)
28 {
29 struct list_head *tail_node;
30@@ -74,16 +78,66 @@
31 return 0;
32 }
33
34+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
35+{
36+ struct drm_mm_node *child;
37+
38+ if (atomic) {
39+ child =
40+ (struct drm_mm_node *)kmalloc(sizeof(*child), GFP_ATOMIC);
41+ } else {
42+ child =
43+ (struct drm_mm_node *)kmalloc(sizeof(*child), GFP_KERNEL);
44+ }
45+
46+ if (unlikely(child == NULL)) {
47+ spin_lock(&mm->unused_lock);
48+ if (list_empty(&mm->unused_nodes))
49+ child = NULL;
50+ else {
51+ child =
52+ list_entry(mm->unused_nodes.next,
53+ struct drm_mm_node, fl_entry);
54+ list_del(&child->fl_entry);
55+ --mm->num_unused;
56+ }
57+ spin_unlock(&mm->unused_lock);
58+ }
59+ return child;
60+}
61+
62+int drm_mm_pre_get(struct drm_mm *mm)
63+{
64+ struct drm_mm_node *node;
65+
66+ spin_lock(&mm->unused_lock);
67+ while (mm->num_unused < MM_UNUSED_TARGET) {
68+ spin_unlock(&mm->unused_lock);
69+ node = kmalloc(sizeof(*node), GFP_KERNEL);
70+ spin_lock(&mm->unused_lock);
71+
72+ if (unlikely(node == NULL)) {
73+ int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
74+ spin_unlock(&mm->unused_lock);
75+ return ret;
76+ }
77+ ++mm->num_unused;
78+ list_add_tail(&node->fl_entry, &mm->unused_nodes);
79+ }
80+ spin_unlock(&mm->unused_lock);
81+ return 0;
82+}
83+
84+EXPORT_SYMBOL(drm_mm_pre_get);
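+
+/*
+ * Usage pattern (illustrative): a caller that must carve out a block
+ * while holding a spinlock pre-fills the node cache in sleepable
+ * context, then allocates atomically under the lock:
+ *
+ *	drm_mm_pre_get(mm);
+ *	spin_lock(&lock);
+ *	node = drm_mm_search_free(mm, size, alignment, 0);
+ *	if (node)
+ *		node = drm_mm_get_block_atomic(node, size, alignment);
+ *	spin_unlock(&lock);
+ */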
85
86 static int drm_mm_create_tail_node(struct drm_mm *mm,
87- unsigned long start,
88- unsigned long size)
89+ unsigned long start,
90+ unsigned long size, int atomic)
91 {
92 struct drm_mm_node *child;
93
94- child = (struct drm_mm_node *)
95- drm_alloc(sizeof(*child), DRM_MEM_MM);
96- if (!child)
97+ child = drm_mm_kmalloc(mm, atomic);
98+ if (unlikely(child == NULL))
99 return -ENOMEM;
100
101 child->free = 1;
102@@ -97,8 +151,7 @@
103 return 0;
104 }
105
106-
107-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
108+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
109 {
110 struct list_head *tail_node;
111 struct drm_mm_node *entry;
112@@ -106,20 +159,21 @@
113 tail_node = mm->ml_entry.prev;
114 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
115 if (!entry->free) {
116- return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
117+ return drm_mm_create_tail_node(mm, entry->start + entry->size,
118+ size, atomic);
119 }
120 entry->size += size;
121 return 0;
122 }
123
124 static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
125- unsigned long size)
126+ unsigned long size,
127+ int atomic)
128 {
129 struct drm_mm_node *child;
130
131- child = (struct drm_mm_node *)
132- drm_alloc(sizeof(*child), DRM_MEM_MM);
133- if (!child)
134+ child = drm_mm_kmalloc(parent->mm, atomic);
135+ if (unlikely(child == NULL))
136 return NULL;
137
138 INIT_LIST_HEAD(&child->fl_entry);
139@@ -151,8 +205,9 @@
140 tmp = parent->start % alignment;
141
142 if (tmp) {
143- align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
144- if (!align_splitoff)
145+ align_splitoff =
146+ drm_mm_split_at_start(parent, alignment - tmp, 0);
147+ if (unlikely(align_splitoff == NULL))
148 return NULL;
149 }
150
151@@ -161,7 +216,7 @@
152 parent->free = 0;
153 return parent;
154 } else {
155- child = drm_mm_split_at_start(parent, size);
156+ child = drm_mm_split_at_start(parent, size, 0);
157 }
158
159 if (align_splitoff)
160@@ -169,14 +224,50 @@
161
162 return child;
163 }
164+
165 EXPORT_SYMBOL(drm_mm_get_block);
166
167+struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
168+ unsigned long size,
169+ unsigned alignment)
170+{
171+
172+ struct drm_mm_node *align_splitoff = NULL;
173+ struct drm_mm_node *child;
174+ unsigned tmp = 0;
175+
176+ if (alignment)
177+ tmp = parent->start % alignment;
178+
179+ if (tmp) {
180+ align_splitoff =
181+ drm_mm_split_at_start(parent, alignment - tmp, 1);
182+ if (unlikely(align_splitoff == NULL))
183+ return NULL;
184+ }
185+
186+ if (parent->size == size) {
187+ list_del_init(&parent->fl_entry);
188+ parent->free = 0;
189+ return parent;
190+ } else {
191+ child = drm_mm_split_at_start(parent, size, 1);
192+ }
193+
194+ if (align_splitoff)
195+ drm_mm_put_block(align_splitoff);
196+
197+ return child;
198+}
199+
200+EXPORT_SYMBOL(drm_mm_get_block_atomic);
201+
202 /*
203 * Put a block. Merge with the previous and / or next block if they are free.
204 * Otherwise add to the free stack.
205 */
206
207-void drm_mm_put_block(struct drm_mm_node * cur)
208+void drm_mm_put_block(struct drm_mm_node *cur)
209 {
210
211 struct drm_mm *mm = cur->mm;
212@@ -188,21 +279,27 @@
213 int merged = 0;
214
215 if (cur_head->prev != root_head) {
216- prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
217+ prev_node =
218+ list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
219 if (prev_node->free) {
220 prev_node->size += cur->size;
221 merged = 1;
222 }
223 }
224 if (cur_head->next != root_head) {
225- next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
226+ next_node =
227+ list_entry(cur_head->next, struct drm_mm_node, ml_entry);
228 if (next_node->free) {
229 if (merged) {
230 prev_node->size += next_node->size;
231 list_del(&next_node->ml_entry);
232 list_del(&next_node->fl_entry);
233- drm_free(next_node, sizeof(*next_node),
234- DRM_MEM_MM);
235+ if (mm->num_unused < MM_UNUSED_TARGET) {
236+ list_add(&next_node->fl_entry,
237+ &mm->unused_nodes);
238+ ++mm->num_unused;
239+ } else
240+ kfree(next_node);
241 } else {
242 next_node->size += cur->size;
243 next_node->start = cur->start;
244@@ -215,14 +312,19 @@
245 list_add(&cur->fl_entry, &mm->fl_entry);
246 } else {
247 list_del(&cur->ml_entry);
248- drm_free(cur, sizeof(*cur), DRM_MEM_MM);
249+ if (mm->num_unused < MM_UNUSED_TARGET) {
250+ list_add(&cur->fl_entry, &mm->unused_nodes);
251+ ++mm->num_unused;
252+ } else
253+ kfree(cur);
254 }
255 }
256+
257 EXPORT_SYMBOL(drm_mm_put_block);
258
259-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
260- unsigned long size,
261- unsigned alignment, int best_match)
262+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
263+ unsigned long size,
264+ unsigned alignment, int best_match)
265 {
266 struct list_head *list;
267 const struct list_head *free_stack = &mm->fl_entry;
268@@ -247,7 +349,6 @@
269 wasted += alignment - tmp;
270 }
271
272-
273 if (entry->size >= size + wasted) {
274 if (!best_match)
275 return entry;
276@@ -260,6 +361,7 @@
277
278 return best;
279 }
280+EXPORT_SYMBOL(drm_mm_search_free);
281
282 int drm_mm_clean(struct drm_mm * mm)
283 {
284@@ -267,14 +369,17 @@
285
286 return (head->next->next == head);
287 }
288-EXPORT_SYMBOL(drm_mm_search_free);
289+EXPORT_SYMBOL(drm_mm_clean);
290
291 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
292 {
293 INIT_LIST_HEAD(&mm->ml_entry);
294 INIT_LIST_HEAD(&mm->fl_entry);
295+ INIT_LIST_HEAD(&mm->unused_nodes);
296+ mm->num_unused = 0;
297+ spin_lock_init(&mm->unused_lock);
298
299- return drm_mm_create_tail_node(mm, start, size);
300+ return drm_mm_create_tail_node(mm, start, size, 0);
301 }
302 EXPORT_SYMBOL(drm_mm_init);
303
304@@ -282,6 +387,7 @@
305 {
306 struct list_head *bnode = mm->fl_entry.next;
307 struct drm_mm_node *entry;
308+ struct drm_mm_node *next;
309
310 entry = list_entry(bnode, struct drm_mm_node, fl_entry);
311
312@@ -293,7 +399,16 @@
313
314 list_del(&entry->fl_entry);
315 list_del(&entry->ml_entry);
316+ kfree(entry);
317+
318+ spin_lock(&mm->unused_lock);
319+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
320+ list_del(&entry->fl_entry);
321+ kfree(entry);
322+ --mm->num_unused;
323+ }
324+ spin_unlock(&mm->unused_lock);
325
326- drm_free(entry, sizeof(*entry), DRM_MEM_MM);
327+ BUG_ON(mm->num_unused != 0);
328 }
329 EXPORT_SYMBOL(drm_mm_takedown);
330Index: linux-2.6.28/include/drm/drmP.h
331===================================================================
332--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:13:54.000000000 +0000
333+++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:37:59.000000000 +0000
334@@ -86,6 +86,7 @@
335
336 #include "drm_os_linux.h"
337 #include "drm_hashtab.h"
338+#include "drm_mm.h"
339
340 /***********************************************************************/
341 /** \name DRM template customization defaults */
342@@ -502,26 +503,6 @@
343 };
344
345
346-/*
347- * Generic memory manager structs
348- */
349-
350-struct drm_mm_node {
351- struct list_head fl_entry;
352- struct list_head ml_entry;
353- int free;
354- unsigned long start;
355- unsigned long size;
356- struct drm_mm *mm;
357- void *private;
358-};
359-
360-struct drm_mm {
361- struct list_head fl_entry;
362- struct list_head ml_entry;
363-};
364-
365-
366 /**
367 * Mappings list
368 */
369@@ -1307,22 +1288,6 @@
370 extern int drm_sysfs_connector_add(struct drm_connector *connector);
371 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
372
373-/*
374- * Basic memory manager support (drm_mm.c)
375- */
376-extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
377- unsigned long size,
378- unsigned alignment);
379-extern void drm_mm_put_block(struct drm_mm_node * cur);
380-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
381- unsigned alignment, int best_match);
382-extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
383-extern void drm_mm_takedown(struct drm_mm *mm);
384-extern int drm_mm_clean(struct drm_mm *mm);
385-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
386-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
387-extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
388-
389 /* Graphics Execution Manager library functions (drm_gem.c) */
390 int drm_gem_init(struct drm_device *dev);
391 void drm_gem_destroy(struct drm_device *dev);
392Index: linux-2.6.28/include/drm/drm_mm.h
393===================================================================
394--- /dev/null 1970-01-01 00:00:00.000000000 +0000
395+++ linux-2.6.28/include/drm/drm_mm.h 2009-03-12 13:15:05.000000000 +0000
396@@ -0,0 +1,90 @@
397+/**************************************************************************
398+ *
399+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
400+ * All Rights Reserved.
401+ *
402+ * Permission is hereby granted, free of charge, to any person obtaining a
403+ * copy of this software and associated documentation files (the
404+ * "Software"), to deal in the Software without restriction, including
405+ * without limitation the rights to use, copy, modify, merge, publish,
406+ * distribute, sub license, and/or sell copies of the Software, and to
407+ * permit persons to whom the Software is furnished to do so, subject to
408+ * the following conditions:
409+ *
410+ * The above copyright notice and this permission notice (including the
411+ * next paragraph) shall be included in all copies or substantial portions
412+ * of the Software.
413+ *
414+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
415+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
416+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
417+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
418+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
419+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
420+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
421+ *
422+ *
423+ **************************************************************************/
424+/*
425+ * Authors:
426+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
427+ */
428+
429+#ifndef _DRM_MM_H_
430+#define _DRM_MM_H_
431+
432+/*
433+ * Generic range manager structs
434+ */
435+#include <linux/list.h>
436+
437+struct drm_mm_node {
438+ struct list_head fl_entry;
439+ struct list_head ml_entry;
440+ int free;
441+ unsigned long start;
442+ unsigned long size;
443+ struct drm_mm *mm;
444+ void *private;
445+};
446+
447+struct drm_mm {
448+ struct list_head fl_entry;
449+ struct list_head ml_entry;
450+ struct list_head unused_nodes;
451+ int num_unused;
452+ spinlock_t unused_lock;
453+};
454+
455+/*
456+ * Basic range manager support (drm_mm.c)
457+ */
458+
459+extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
460+ unsigned long size,
461+ unsigned alignment);
462+extern struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
463+ unsigned long size,
464+ unsigned alignment);
465+extern void drm_mm_put_block(struct drm_mm_node *cur);
466+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
467+ unsigned long size,
468+ unsigned alignment,
469+ int best_match);
470+extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
471+ unsigned long size);
472+extern void drm_mm_takedown(struct drm_mm *mm);
473+extern int drm_mm_clean(struct drm_mm *mm);
474+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
475+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
476+ unsigned long size);
477+extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
478+ unsigned long size, int atomic);
479+extern int drm_mm_pre_get(struct drm_mm *mm);
480+
481+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
482+{
483+ return block->mm;
484+}
485+
486+#endif
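
A note on the allocation paths added above: drm_mm_get_block() still allocates
its node with GFP_KERNEL and may therefore sleep, while
drm_mm_get_block_atomic() draws nodes from the mm->unused_nodes cache (with an
atomic allocation as fallback), so it is safe under a spinlock once the cache
has been topped up with drm_mm_pre_get(). A minimal sketch of the intended
call pattern, assuming drm_mm_pre_get() returns 0 on success; the dev_priv
fields, the lock and the size/alignment values are hypothetical, not part of
the patch:

        struct drm_mm_node *hole, *node = NULL;
        int ret;

        /* May sleep: refills mm->unused_nodes up to MM_UNUSED_TARGET. */
        ret = drm_mm_pre_get(&dev_priv->mm);
        if (ret)
                return ret;

        spin_lock(&dev_priv->mm_lock);
        hole = drm_mm_search_free(&dev_priv->mm, size, alignment, 0);
        if (hole) {
                /* Atomic-safe: the new node comes from the pre-filled cache. */
                node = drm_mm_get_block_atomic(hole, size, alignment);
        }
        spin_unlock(&dev_priv->mm_lock);

        if (!node)
                return -ENOMEM;

drm_mm_put_block() similarly recycles freed nodes back into the cache instead
of calling kfree() while num_unused is below MM_UNUSED_TARGET, which keeps the
atomic path from exhausting the cache under steady-state use.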
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch
new file mode 100644
index 0000000000..3f07b91f2e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch
@@ -0,0 +1,191 @@
1From cd04a0500d70ea012089ec38183f20c0c30f8ba5 Mon Sep 17 00:00:00 2001
2From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
3Date: Fri, 27 Feb 2009 12:31:58 +0100
4Subject: [PATCH 2/8] drm: Add a tracker for global objects.
5
6Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
7---
8 drivers/gpu/drm/Makefile | 3 +-
9 drivers/gpu/drm/drm_drv.c | 3 +
10 drivers/gpu/drm/drm_global.c | 107 ++++++++++++++++++++++++++++++++++++++++++
11 include/drm/drmP.h | 20 ++++++++
12 4 files changed, 132 insertions(+), 1 deletions(-)
13 create mode 100644 drivers/gpu/drm/drm_global.c
14
15Index: linux-2.6.28/drivers/gpu/drm/Makefile
16===================================================================
17--- linux-2.6.28.orig/drivers/gpu/drm/Makefile 2009-03-12 13:13:54.000000000 +0000
18+++ linux-2.6.28/drivers/gpu/drm/Makefile 2009-03-12 13:15:18.000000000 +0000
19@@ -10,7 +10,8 @@
20 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
21 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
22 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
23- drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
24+ drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
25+ drm_global.o
26
27 drm-$(CONFIG_COMPAT) += drm_ioc32.o
28
29Index: linux-2.6.28/drivers/gpu/drm/drm_drv.c
30===================================================================
31--- linux-2.6.28.orig/drivers/gpu/drm/drm_drv.c 2009-03-12 13:13:54.000000000 +0000
32+++ linux-2.6.28/drivers/gpu/drm/drm_drv.c 2009-03-12 13:37:56.000000000 +0000
33@@ -382,6 +382,8 @@
34
35 DRM_INFO("Initialized %s %d.%d.%d %s\n",
36 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
37+ drm_global_init();
38+
39 return 0;
40 err_p3:
41 drm_sysfs_destroy();
42@@ -395,6 +397,7 @@
43
44 static void __exit drm_core_exit(void)
45 {
46+ drm_global_release();
47 remove_proc_entry("dri", NULL);
48 drm_sysfs_destroy();
49
50Index: linux-2.6.28/drivers/gpu/drm/drm_global.c
51===================================================================
52--- /dev/null 1970-01-01 00:00:00.000000000 +0000
53+++ linux-2.6.28/drivers/gpu/drm/drm_global.c 2009-03-12 13:15:18.000000000 +0000
54@@ -0,0 +1,107 @@
55+/**************************************************************************
56+ *
57+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
58+ * All Rights Reserved.
59+ *
60+ * Permission is hereby granted, free of charge, to any person obtaining a
61+ * copy of this software and associated documentation files (the
62+ * "Software"), to deal in the Software without restriction, including
63+ * without limitation the rights to use, copy, modify, merge, publish,
64+ * distribute, sub license, and/or sell copies of the Software, and to
65+ * permit persons to whom the Software is furnished to do so, subject to
66+ * the following conditions:
67+ *
68+ * The above copyright notice and this permission notice (including the
69+ * next paragraph) shall be included in all copies or substantial portions
70+ * of the Software.
71+ *
72+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
73+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
74+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
75+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
76+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
77+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
78+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
79+ *
80+ **************************************************************************/
81+#include <drmP.h>
82+struct drm_global_item {
83+ struct mutex mutex;
84+ void *object;
85+ int refcount;
86+};
87+
88+static struct drm_global_item glob[DRM_GLOBAL_NUM];
89+
90+void drm_global_init(void)
91+{
92+ int i;
93+
94+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
95+ struct drm_global_item *item = &glob[i];
96+ mutex_init(&item->mutex);
97+ item->object = NULL;
98+ item->refcount = 0;
99+ }
100+}
101+
102+void drm_global_release(void)
103+{
104+ int i;
105+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
106+ struct drm_global_item *item = &glob[i];
107+ BUG_ON(item->object != NULL);
108+ BUG_ON(item->refcount != 0);
109+ }
110+}
111+
112+int drm_global_item_ref(struct drm_global_reference *ref)
113+{
114+ int ret;
115+ struct drm_global_item *item = &glob[ref->global_type];
116+ void *object;
117+
118+ mutex_lock(&item->mutex);
119+ if (item->refcount == 0) {
120+ item->object = kmalloc(ref->size, GFP_KERNEL);
121+ if (unlikely(item->object == NULL)) {
122+ ret = -ENOMEM;
123+ goto out_err;
124+ }
125+
126+ ref->object = item->object;
127+ ret = ref->init(ref);
128+ if (unlikely(ret != 0))
129+ goto out_err;
130+
131+ ++item->refcount;
132+ }
133+ ref->object = item->object;
134+ object = item->object;
135+ mutex_unlock(&item->mutex);
136+ return 0;
137+ out_err:
138+ kfree(item->object);
139+ mutex_unlock(&item->mutex);
140+ item->object = NULL;
141+ return ret;
142+}
143+
144+EXPORT_SYMBOL(drm_global_item_ref);
145+
146+void drm_global_item_unref(struct drm_global_reference *ref)
147+{
148+ struct drm_global_item *item = &glob[ref->global_type];
149+
150+ mutex_lock(&item->mutex);
151+ BUG_ON(item->refcount == 0);
152+ BUG_ON(ref->object != item->object);
153+ if (--item->refcount == 0) {
154+ ref->release(ref);
155+ kfree(item->object);
156+ item->object = NULL;
157+ }
158+ mutex_unlock(&item->mutex);
159+}
160+
161+EXPORT_SYMBOL(drm_global_item_unref);
162Index: linux-2.6.28/include/drm/drmP.h
163===================================================================
164--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:15:05.000000000 +0000
165+++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:37:56.000000000 +0000
166@@ -1412,5 +1412,25 @@
167
168 /*@}*/
169
170+enum drm_global_types {
171+ DRM_GLOBAL_TTM_MEM = 0,
172+ DRM_GLOBAL_TTM_BO,
173+ DRM_GLOBAL_TTM_OBJECT,
174+ DRM_GLOBAL_NUM
175+};
176+
177+struct drm_global_reference {
178+ enum drm_global_types global_type;
179+ size_t size;
180+ void *object;
181+ int (*init) (struct drm_global_reference *);
182+ void (*release) (struct drm_global_reference *);
183+};
184+
185+extern void drm_global_init(void);
186+extern void drm_global_release(void);
187+extern int drm_global_item_ref(struct drm_global_reference *ref);
188+extern void drm_global_item_unref(struct drm_global_reference *ref);
189+
190 #endif /* __KERNEL__ */
191 #endif
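
For reference, the usage model of the tracker added by this patch: a driver
fills in a struct drm_global_reference describing the shared object; the first
drm_global_item_ref() allocates the object and calls init(), and the last
drm_global_item_unref() calls release() and frees it. A short sketch (the
struct and function names here are illustrative only, not part of the patch):

        struct my_mem_global {
                int dummy;              /* stand-in for the real shared state */
        };

        static int my_mem_global_init(struct drm_global_reference *ref)
        {
                struct my_mem_global *glob = ref->object;

                glob->dummy = 0;        /* runs once, on the first reference */
                return 0;
        }

        static void my_mem_global_release(struct drm_global_reference *ref)
        {
                /* runs once, when the last reference is dropped */
        }

        static struct drm_global_reference mem_ref = {
                .global_type = DRM_GLOBAL_TTM_MEM,
                .size = sizeof(struct my_mem_global),
                .init = my_mem_global_init,
                .release = my_mem_global_release,
        };

        int my_driver_load(void)
        {
                int ret = drm_global_item_ref(&mem_ref);

                if (ret)
                        return ret;
                /* mem_ref.object now points at the shared my_mem_global */
                return 0;
        }

        void my_driver_unload(void)
        {
                drm_global_item_unref(&mem_ref);
        }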
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch
new file mode 100644
index 0000000000..a54a3cf281
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch
@@ -0,0 +1,58 @@
1From 723cc597790fb648506a44e811415eb88b9dcdfa Mon Sep 17 00:00:00 2001
2From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
3Date: Fri, 27 Feb 2009 17:18:37 +0100
4Subject: [PATCH 3/8] drm: Export hash table functionality.
5
6Also fix include file.
7
8Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
9---
10 drivers/gpu/drm/drm_hashtab.c | 4 ++++
11 include/drm/drm_hashtab.h | 1 +
12 2 files changed, 5 insertions(+), 0 deletions(-)
13
14Index: linux-2.6.28/drivers/gpu/drm/drm_hashtab.c
15===================================================================
16--- linux-2.6.28.orig/drivers/gpu/drm/drm_hashtab.c 2009-03-09 19:19:52.000000000 +0000
17+++ linux-2.6.28/drivers/gpu/drm/drm_hashtab.c 2009-03-12 13:15:25.000000000 +0000
18@@ -62,6 +62,7 @@
19 }
20 return 0;
21 }
22+EXPORT_SYMBOL(drm_ht_create);
23
24 void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
25 {
26@@ -156,6 +157,7 @@
27 }
28 return 0;
29 }
30+EXPORT_SYMBOL(drm_ht_just_insert_please);
31
32 int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
33 struct drm_hash_item **item)
34@@ -169,6 +171,7 @@
35 *item = hlist_entry(list, struct drm_hash_item, head);
36 return 0;
37 }
38+EXPORT_SYMBOL(drm_ht_find_item);
39
40 int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
41 {
42@@ -202,3 +205,4 @@
43 ht->table = NULL;
44 }
45 }
46+EXPORT_SYMBOL(drm_ht_remove);
47Index: linux-2.6.28/include/drm/drm_hashtab.h
48===================================================================
49--- linux-2.6.28.orig/include/drm/drm_hashtab.h 2008-12-24 23:26:37.000000000 +0000
50+++ linux-2.6.28/include/drm/drm_hashtab.h 2009-03-12 13:15:25.000000000 +0000
51@@ -34,6 +34,7 @@
52
53 #ifndef DRM_HASHTAB_H
54 #define DRM_HASHTAB_H
55+#include <linux/list.h>
56
57 #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
58
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch
new file mode 100644
index 0000000000..a475cc1b7b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch
@@ -0,0 +1,53 @@
1From a5fef5986c407d56f4e4cf618d6099e122a096ef Mon Sep 17 00:00:00 2001
2From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
3Date: Fri, 27 Feb 2009 13:04:46 +0100
4Subject: [PATCH 7/8] drm: Add unlocked IOCTL functionality from the drm repo.
5
6---
7 drivers/gpu/drm/drm_drv.c | 11 ++++++++++-
8 include/drm/drmP.h | 2 ++
9 2 files changed, 12 insertions(+), 1 deletions(-)
10
11Index: linux-2.6.28/drivers/gpu/drm/drm_drv.c
12===================================================================
13--- linux-2.6.28.orig/drivers/gpu/drm/drm_drv.c 2009-03-12 13:15:18.000000000 +0000
14+++ linux-2.6.28/drivers/gpu/drm/drm_drv.c 2009-03-12 13:15:41.000000000 +0000
15@@ -448,9 +450,16 @@
16 * Looks up the ioctl function in the ::ioctls table, checking for root
17 * previleges if so required, and dispatches to the respective function.
18 */
19+
20 int drm_ioctl(struct inode *inode, struct file *filp,
21 unsigned int cmd, unsigned long arg)
22 {
23+ return drm_unlocked_ioctl(filp, cmd, arg);
24+}
25+EXPORT_SYMBOL(drm_ioctl);
26+
27+long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
28+{
29 struct drm_file *file_priv = filp->private_data;
30 struct drm_device *dev = file_priv->minor->dev;
31 struct drm_ioctl_desc *ioctl;
32@@ -527,7 +536,7 @@
33 return retcode;
34 }
35
36-EXPORT_SYMBOL(drm_ioctl);
37+EXPORT_SYMBOL(drm_unlocked_ioctl);
38
39 drm_local_map_t *drm_getsarea(struct drm_device *dev)
40 {
41Index: linux-2.6.28/include/drm/drmP.h
42===================================================================
43--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:15:18.000000000 +0000
44+++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:15:41.000000000 +0000
45@@ -1025,6 +1025,8 @@
46 extern void drm_exit(struct drm_driver *driver);
47 extern int drm_ioctl(struct inode *inode, struct file *filp,
48 unsigned int cmd, unsigned long arg);
49+extern long drm_unlocked_ioctl(struct file *filp,
50+ unsigned int cmd, unsigned long arg);
51 extern long drm_compat_ioctl(struct file *filp,
52 unsigned int cmd, unsigned long arg);
53 extern int drm_lastclose(struct drm_device *dev);
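
The practical effect of this patch for drivers: file_operations can point
straight at the BKL-free entry point, while the old drm_ioctl() is kept as a
thin wrapper for unconverted callers. A sketch of how a driver might opt in
(the handler set shown is generic and abbreviated; a real driver's fops will
differ):

        static const struct file_operations my_drm_fops = {
                .owner          = THIS_MODULE,
                .open           = drm_open,
                .release        = drm_release,
                .unlocked_ioctl = drm_unlocked_ioctl,   /* dispatch without the BKL */
                .mmap           = drm_mmap,
                .poll           = drm_poll,
        };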
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic
index b520435082..edf61c21ad 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-netbook
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic
@@ -1,14 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27 3# Linux kernel version: 2.6.28.rc7-4.netbook
4# Wed Nov 5 17:17:12 2008 4# Mon Dec 8 01:05:27 2008
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set 8# CONFIG_X86_64 is not set
9CONFIG_X86=y 9CONFIG_X86=y
10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" 10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11# CONFIG_GENERIC_LOCKBREAK is not set
12CONFIG_GENERIC_TIME=y 11CONFIG_GENERIC_TIME=y
13CONFIG_GENERIC_CMOS_UPDATE=y 12CONFIG_GENERIC_CMOS_UPDATE=y
14CONFIG_CLOCKSOURCE_WATCHDOG=y 13CONFIG_CLOCKSOURCE_WATCHDOG=y
@@ -24,16 +23,14 @@ CONFIG_GENERIC_ISA_DMA=y
24CONFIG_GENERIC_IOMAP=y 23CONFIG_GENERIC_IOMAP=y
25CONFIG_GENERIC_BUG=y 24CONFIG_GENERIC_BUG=y
26CONFIG_GENERIC_HWEIGHT=y 25CONFIG_GENERIC_HWEIGHT=y
27# CONFIG_GENERIC_GPIO is not set
28CONFIG_ARCH_MAY_HAVE_PC_FDC=y 26CONFIG_ARCH_MAY_HAVE_PC_FDC=y
29# CONFIG_RWSEM_GENERIC_SPINLOCK is not set 27# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
30CONFIG_RWSEM_XCHGADD_ALGORITHM=y 28CONFIG_RWSEM_XCHGADD_ALGORITHM=y
31# CONFIG_ARCH_HAS_ILOG2_U32 is not set
32# CONFIG_ARCH_HAS_ILOG2_U64 is not set
33CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y 29CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
34CONFIG_GENERIC_CALIBRATE_DELAY=y 30CONFIG_GENERIC_CALIBRATE_DELAY=y
35# CONFIG_GENERIC_TIME_VSYSCALL is not set 31# CONFIG_GENERIC_TIME_VSYSCALL is not set
36CONFIG_ARCH_HAS_CPU_RELAX=y 32CONFIG_ARCH_HAS_CPU_RELAX=y
33CONFIG_ARCH_HAS_DEFAULT_IDLE=y
37CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y 34CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
38CONFIG_HAVE_SETUP_PER_CPU_AREA=y 35CONFIG_HAVE_SETUP_PER_CPU_AREA=y
39# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set 36# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
@@ -42,12 +39,12 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
42# CONFIG_ZONE_DMA32 is not set 39# CONFIG_ZONE_DMA32 is not set
43CONFIG_ARCH_POPULATES_NODE_MAP=y 40CONFIG_ARCH_POPULATES_NODE_MAP=y
44# CONFIG_AUDIT_ARCH is not set 41# CONFIG_AUDIT_ARCH is not set
45CONFIG_ARCH_SUPPORTS_AOUT=y
46CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y 42CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
47CONFIG_GENERIC_HARDIRQS=y 43CONFIG_GENERIC_HARDIRQS=y
48CONFIG_GENERIC_IRQ_PROBE=y 44CONFIG_GENERIC_IRQ_PROBE=y
49CONFIG_GENERIC_PENDING_IRQ=y 45CONFIG_GENERIC_PENDING_IRQ=y
50CONFIG_X86_SMP=y 46CONFIG_X86_SMP=y
47CONFIG_USE_GENERIC_SMP_HELPERS=y
51CONFIG_X86_32_SMP=y 48CONFIG_X86_32_SMP=y
52CONFIG_X86_HT=y 49CONFIG_X86_HT=y
53CONFIG_X86_BIOS_REBOOT=y 50CONFIG_X86_BIOS_REBOOT=y
@@ -61,7 +58,7 @@ CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
61CONFIG_EXPERIMENTAL=y 58CONFIG_EXPERIMENTAL=y
62CONFIG_LOCK_KERNEL=y 59CONFIG_LOCK_KERNEL=y
63CONFIG_INIT_ENV_ARG_LIMIT=32 60CONFIG_INIT_ENV_ARG_LIMIT=32
64CONFIG_LOCALVERSION="-netbook" 61CONFIG_LOCALVERSION=""
65# CONFIG_LOCALVERSION_AUTO is not set 62# CONFIG_LOCALVERSION_AUTO is not set
66CONFIG_SWAP=y 63CONFIG_SWAP=y
67CONFIG_SYSVIPC=y 64CONFIG_SYSVIPC=y
@@ -69,15 +66,9 @@ CONFIG_SYSVIPC_SYSCTL=y
69CONFIG_POSIX_MQUEUE=y 66CONFIG_POSIX_MQUEUE=y
70CONFIG_BSD_PROCESS_ACCT=y 67CONFIG_BSD_PROCESS_ACCT=y
71CONFIG_BSD_PROCESS_ACCT_V3=y 68CONFIG_BSD_PROCESS_ACCT_V3=y
72CONFIG_TASKSTATS=y 69# CONFIG_TASKSTATS is not set
73CONFIG_TASK_DELAY_ACCT=y 70# CONFIG_AUDIT is not set
74CONFIG_TASK_XACCT=y 71# CONFIG_IKCONFIG is not set
75CONFIG_TASK_IO_ACCOUNTING=y
76CONFIG_AUDIT=y
77CONFIG_AUDITSYSCALL=y
78CONFIG_AUDIT_TREE=y
79CONFIG_IKCONFIG=y
80CONFIG_IKCONFIG_PROC=y
81CONFIG_LOG_BUF_SHIFT=17 72CONFIG_LOG_BUF_SHIFT=17
82# CONFIG_CGROUPS is not set 73# CONFIG_CGROUPS is not set
83CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y 74CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
@@ -87,7 +78,7 @@ CONFIG_RELAY=y
87CONFIG_NAMESPACES=y 78CONFIG_NAMESPACES=y
88# CONFIG_UTS_NS is not set 79# CONFIG_UTS_NS is not set
89# CONFIG_IPC_NS is not set 80# CONFIG_IPC_NS is not set
90CONFIG_USER_NS=y 81# CONFIG_USER_NS is not set
91# CONFIG_PID_NS is not set 82# CONFIG_PID_NS is not set
92CONFIG_BLK_DEV_INITRD=y 83CONFIG_BLK_DEV_INITRD=y
93CONFIG_INITRAMFS_SOURCE="" 84CONFIG_INITRAMFS_SOURCE=""
@@ -100,6 +91,7 @@ CONFIG_SYSCTL_SYSCALL=y
100CONFIG_KALLSYMS=y 91CONFIG_KALLSYMS=y
101CONFIG_KALLSYMS_ALL=y 92CONFIG_KALLSYMS_ALL=y
102CONFIG_KALLSYMS_EXTRA_PASS=y 93CONFIG_KALLSYMS_EXTRA_PASS=y
94CONFIG_KALLSYMS_STRIP_GENERATED=y
103CONFIG_HOTPLUG=y 95CONFIG_HOTPLUG=y
104CONFIG_PRINTK=y 96CONFIG_PRINTK=y
105CONFIG_BUG=y 97CONFIG_BUG=y
@@ -114,7 +106,9 @@ CONFIG_SIGNALFD=y
114CONFIG_TIMERFD=y 106CONFIG_TIMERFD=y
115CONFIG_EVENTFD=y 107CONFIG_EVENTFD=y
116CONFIG_SHMEM=y 108CONFIG_SHMEM=y
109CONFIG_AIO=y
117CONFIG_VM_EVENT_COUNTERS=y 110CONFIG_VM_EVENT_COUNTERS=y
111CONFIG_PCI_QUIRKS=y
118CONFIG_SLAB=y 112CONFIG_SLAB=y
119# CONFIG_SLUB is not set 113# CONFIG_SLUB is not set
120# CONFIG_SLOB is not set 114# CONFIG_SLOB is not set
@@ -127,11 +121,7 @@ CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
127CONFIG_HAVE_IOREMAP_PROT=y 121CONFIG_HAVE_IOREMAP_PROT=y
128CONFIG_HAVE_KPROBES=y 122CONFIG_HAVE_KPROBES=y
129CONFIG_HAVE_KRETPROBES=y 123CONFIG_HAVE_KRETPROBES=y
130# CONFIG_HAVE_ARCH_TRACEHOOK is not set 124CONFIG_HAVE_ARCH_TRACEHOOK=y
131# CONFIG_HAVE_DMA_ATTRS is not set
132CONFIG_USE_GENERIC_SMP_HELPERS=y
133# CONFIG_HAVE_CLK is not set
134CONFIG_PROC_PAGE_MONITOR=y
135CONFIG_HAVE_GENERIC_DMA_COHERENT=y 125CONFIG_HAVE_GENERIC_DMA_COHERENT=y
136CONFIG_SLABINFO=y 126CONFIG_SLABINFO=y
137CONFIG_RT_MUTEXES=y 127CONFIG_RT_MUTEXES=y
@@ -146,7 +136,7 @@ CONFIG_MODULE_UNLOAD=y
146CONFIG_KMOD=y 136CONFIG_KMOD=y
147CONFIG_STOP_MACHINE=y 137CONFIG_STOP_MACHINE=y
148CONFIG_BLOCK=y 138CONFIG_BLOCK=y
149# CONFIG_LBD is not set 139CONFIG_LBD=y
150CONFIG_BLK_DEV_IO_TRACE=y 140CONFIG_BLK_DEV_IO_TRACE=y
151# CONFIG_LSF is not set 141# CONFIG_LSF is not set
152CONFIG_BLK_DEV_BSG=y 142CONFIG_BLK_DEV_BSG=y
@@ -165,6 +155,7 @@ CONFIG_DEFAULT_CFQ=y
165# CONFIG_DEFAULT_NOOP is not set 155# CONFIG_DEFAULT_NOOP is not set
166CONFIG_DEFAULT_IOSCHED="cfq" 156CONFIG_DEFAULT_IOSCHED="cfq"
167CONFIG_CLASSIC_RCU=y 157CONFIG_CLASSIC_RCU=y
158CONFIG_FREEZER=y
168 159
169# 160#
170# Processor type and features 161# Processor type and features
@@ -174,26 +165,33 @@ CONFIG_NO_HZ=y
174CONFIG_HIGH_RES_TIMERS=y 165CONFIG_HIGH_RES_TIMERS=y
175CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 166CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
176CONFIG_SMP=y 167CONFIG_SMP=y
168# CONFIG_SPARSE_IRQ is not set
177CONFIG_X86_FIND_SMP_CONFIG=y 169CONFIG_X86_FIND_SMP_CONFIG=y
178CONFIG_X86_MPPARSE=y 170CONFIG_X86_MPPARSE=y
179CONFIG_X86_PC=y 171# CONFIG_X86_PC is not set
180# CONFIG_X86_ELAN is not set 172# CONFIG_X86_ELAN is not set
181# CONFIG_X86_VOYAGER is not set 173# CONFIG_X86_VOYAGER is not set
182# CONFIG_X86_GENERICARCH is not set 174CONFIG_X86_GENERICARCH=y
175# CONFIG_X86_NUMAQ is not set
176# CONFIG_X86_SUMMIT is not set
177# CONFIG_X86_ES7000 is not set
178# CONFIG_X86_BIGSMP is not set
183# CONFIG_X86_VSMP is not set 179# CONFIG_X86_VSMP is not set
184# CONFIG_X86_RDC321X is not set 180# CONFIG_X86_RDC321X is not set
185CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 181CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
182# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
186# CONFIG_PARAVIRT_GUEST is not set 183# CONFIG_PARAVIRT_GUEST is not set
187# CONFIG_MEMTEST is not set 184# CONFIG_MEMTEST is not set
185CONFIG_X86_CYCLONE_TIMER=y
188# CONFIG_M386 is not set 186# CONFIG_M386 is not set
189# CONFIG_M486 is not set 187# CONFIG_M486 is not set
190# CONFIG_M586 is not set 188# CONFIG_M586 is not set
191# CONFIG_M586TSC is not set 189# CONFIG_M586TSC is not set
192# CONFIG_M586MMX is not set 190# CONFIG_M586MMX is not set
193CONFIG_M686=y 191# CONFIG_M686 is not set
194# CONFIG_MPENTIUMII is not set 192# CONFIG_MPENTIUMII is not set
195# CONFIG_MPENTIUMIII is not set 193# CONFIG_MPENTIUMIII is not set
196# CONFIG_MPENTIUMM is not set 194CONFIG_MPENTIUMM=y
197# CONFIG_MPENTIUM4 is not set 195# CONFIG_MPENTIUM4 is not set
198# CONFIG_MK6 is not set 196# CONFIG_MK6 is not set
199# CONFIG_MK7 is not set 197# CONFIG_MK7 is not set
@@ -201,7 +199,6 @@ CONFIG_M686=y
201# CONFIG_MCRUSOE is not set 199# CONFIG_MCRUSOE is not set
202# CONFIG_MEFFICEON is not set 200# CONFIG_MEFFICEON is not set
203# CONFIG_MWINCHIPC6 is not set 201# CONFIG_MWINCHIPC6 is not set
204# CONFIG_MWINCHIP2 is not set
205# CONFIG_MWINCHIP3D is not set 202# CONFIG_MWINCHIP3D is not set
206# CONFIG_MGEODEGX1 is not set 203# CONFIG_MGEODEGX1 is not set
207# CONFIG_MGEODE_LX is not set 204# CONFIG_MGEODE_LX is not set
@@ -211,78 +208,93 @@ CONFIG_M686=y
211# CONFIG_MPSC is not set 208# CONFIG_MPSC is not set
212# CONFIG_MCORE2 is not set 209# CONFIG_MCORE2 is not set
213# CONFIG_GENERIC_CPU is not set 210# CONFIG_GENERIC_CPU is not set
214# CONFIG_X86_GENERIC is not set 211CONFIG_X86_GENERIC=y
215CONFIG_X86_CPU=y 212CONFIG_X86_CPU=y
216CONFIG_X86_CMPXCHG=y 213CONFIG_X86_CMPXCHG=y
217CONFIG_X86_L1_CACHE_SHIFT=5 214CONFIG_X86_L1_CACHE_SHIFT=7
218CONFIG_X86_XADD=y 215CONFIG_X86_XADD=y
219# CONFIG_X86_PPRO_FENCE is not set
220CONFIG_X86_WP_WORKS_OK=y 216CONFIG_X86_WP_WORKS_OK=y
221CONFIG_X86_INVLPG=y 217CONFIG_X86_INVLPG=y
222CONFIG_X86_BSWAP=y 218CONFIG_X86_BSWAP=y
223CONFIG_X86_POPAD_OK=y 219CONFIG_X86_POPAD_OK=y
220CONFIG_X86_INTEL_USERCOPY=y
224CONFIG_X86_USE_PPRO_CHECKSUM=y 221CONFIG_X86_USE_PPRO_CHECKSUM=y
225CONFIG_X86_TSC=y 222CONFIG_X86_TSC=y
223CONFIG_X86_CMPXCHG64=y
226CONFIG_X86_CMOV=y 224CONFIG_X86_CMOV=y
227CONFIG_X86_MINIMUM_CPU_FAMILY=4 225CONFIG_X86_MINIMUM_CPU_FAMILY=4
228CONFIG_X86_DEBUGCTLMSR=y 226CONFIG_X86_DEBUGCTLMSR=y
227CONFIG_CPU_SUP_INTEL=y
228CONFIG_CPU_SUP_CYRIX_32=y
229CONFIG_CPU_SUP_AMD=y
230CONFIG_CPU_SUP_CENTAUR_32=y
231CONFIG_CPU_SUP_TRANSMETA_32=y
232CONFIG_CPU_SUP_UMC_32=y
233# CONFIG_X86_DS is not set
234# CONFIG_X86_PTRACE_BTS is not set
229CONFIG_HPET_TIMER=y 235CONFIG_HPET_TIMER=y
230CONFIG_HPET_EMULATE_RTC=y 236CONFIG_HPET_EMULATE_RTC=y
231CONFIG_DMI=y 237CONFIG_DMI=y
232# CONFIG_IOMMU_HELPER is not set 238# CONFIG_IOMMU_HELPER is not set
233CONFIG_NR_CPUS=2 239CONFIG_NR_CPUS=8
234CONFIG_SCHED_SMT=y 240CONFIG_SCHED_SMT=y
235CONFIG_SCHED_MC=y 241CONFIG_SCHED_MC=y
236# CONFIG_PREEMPT_NONE is not set 242# CONFIG_PREEMPT_NONE is not set
237CONFIG_PREEMPT_VOLUNTARY=y 243# CONFIG_PREEMPT_VOLUNTARY is not set
238# CONFIG_PREEMPT is not set 244CONFIG_PREEMPT=y
245# CONFIG_DEBUG_PREEMPT is not set
246# CONFIG_PREEMPT_TRACER is not set
239CONFIG_X86_LOCAL_APIC=y 247CONFIG_X86_LOCAL_APIC=y
240CONFIG_X86_IO_APIC=y 248CONFIG_X86_IO_APIC=y
249# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
241CONFIG_X86_MCE=y 250CONFIG_X86_MCE=y
242# CONFIG_X86_MCE_NONFATAL is not set 251# CONFIG_X86_MCE_NONFATAL is not set
243# CONFIG_X86_MCE_P4THERMAL is not set 252CONFIG_X86_MCE_P4THERMAL=y
244CONFIG_VM86=y 253CONFIG_VM86=y
245# CONFIG_TOSHIBA is not set 254CONFIG_TOSHIBA=m
246# CONFIG_I8K is not set 255CONFIG_I8K=m
247# CONFIG_X86_REBOOTFIXUPS is not set 256CONFIG_X86_REBOOTFIXUPS=y
248CONFIG_MICROCODE=y 257CONFIG_MICROCODE=y
258CONFIG_MICROCODE_INTEL=y
259# CONFIG_MICROCODE_AMD is not set
249CONFIG_MICROCODE_OLD_INTERFACE=y 260CONFIG_MICROCODE_OLD_INTERFACE=y
250CONFIG_X86_MSR=y 261CONFIG_X86_MSR=y
251CONFIG_X86_CPUID=y 262CONFIG_X86_CPUID=y
252# CONFIG_NOHIGHMEM is not set 263# CONFIG_NOHIGHMEM is not set
253CONFIG_HIGHMEM4G=y 264# CONFIG_HIGHMEM4G is not set
254# CONFIG_HIGHMEM64G is not set 265CONFIG_HIGHMEM64G=y
255CONFIG_PAGE_OFFSET=0xC0000000 266CONFIG_PAGE_OFFSET=0xC0000000
256CONFIG_HIGHMEM=y 267CONFIG_HIGHMEM=y
257CONFIG_NEED_NODE_MEMMAP_SIZE=y 268CONFIG_X86_PAE=y
269CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
258CONFIG_ARCH_FLATMEM_ENABLE=y 270CONFIG_ARCH_FLATMEM_ENABLE=y
259CONFIG_ARCH_SPARSEMEM_ENABLE=y 271CONFIG_ARCH_SPARSEMEM_ENABLE=y
260CONFIG_ARCH_SELECT_MEMORY_MODEL=y 272CONFIG_ARCH_SELECT_MEMORY_MODEL=y
261CONFIG_SELECT_MEMORY_MODEL=y 273CONFIG_SELECT_MEMORY_MODEL=y
262# CONFIG_FLATMEM_MANUAL is not set 274CONFIG_FLATMEM_MANUAL=y
263# CONFIG_DISCONTIGMEM_MANUAL is not set 275# CONFIG_DISCONTIGMEM_MANUAL is not set
264CONFIG_SPARSEMEM_MANUAL=y 276# CONFIG_SPARSEMEM_MANUAL is not set
265CONFIG_SPARSEMEM=y 277CONFIG_FLATMEM=y
266CONFIG_HAVE_MEMORY_PRESENT=y 278CONFIG_FLAT_NODE_MEM_MAP=y
267CONFIG_SPARSEMEM_STATIC=y 279CONFIG_SPARSEMEM_STATIC=y
268# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
269
270#
271# Memory hotplug is currently incompatible with Software Suspend
272#
273CONFIG_PAGEFLAGS_EXTENDED=y 280CONFIG_PAGEFLAGS_EXTENDED=y
274CONFIG_SPLIT_PTLOCK_CPUS=4 281CONFIG_SPLIT_PTLOCK_CPUS=4
275CONFIG_RESOURCES_64BIT=y 282CONFIG_RESOURCES_64BIT=y
283CONFIG_PHYS_ADDR_T_64BIT=y
276CONFIG_ZONE_DMA_FLAG=1 284CONFIG_ZONE_DMA_FLAG=1
277CONFIG_BOUNCE=y 285CONFIG_BOUNCE=y
278CONFIG_VIRT_TO_BUS=y 286CONFIG_VIRT_TO_BUS=y
279# CONFIG_HIGHPTE is not set 287CONFIG_UNEVICTABLE_LRU=y
288CONFIG_HIGHPTE=y
289# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
290CONFIG_X86_RESERVE_LOW_64K=y
280# CONFIG_MATH_EMULATION is not set 291# CONFIG_MATH_EMULATION is not set
281CONFIG_MTRR=y 292CONFIG_MTRR=y
282# CONFIG_MTRR_SANITIZER is not set 293CONFIG_MTRR_SANITIZER=y
283# CONFIG_X86_PAT is not set 294CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
284# CONFIG_EFI is not set 295CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
285# CONFIG_IRQBALANCE is not set 296CONFIG_X86_PAT=y
297CONFIG_EFI=y
286# CONFIG_SECCOMP is not set 298# CONFIG_SECCOMP is not set
287# CONFIG_HZ_100 is not set 299# CONFIG_HZ_100 is not set
288# CONFIG_HZ_250 is not set 300# CONFIG_HZ_250 is not set
@@ -290,18 +302,20 @@ CONFIG_MTRR=y
290CONFIG_HZ_1000=y 302CONFIG_HZ_1000=y
291CONFIG_HZ=1000 303CONFIG_HZ=1000
292CONFIG_SCHED_HRTICK=y 304CONFIG_SCHED_HRTICK=y
293CONFIG_KEXEC=y 305# CONFIG_KEXEC is not set
294CONFIG_CRASH_DUMP=y 306# CONFIG_CRASH_DUMP is not set
295# CONFIG_KEXEC_JUMP is not set 307CONFIG_PHYSICAL_START=0x100000
296CONFIG_PHYSICAL_START=0x400000 308# CONFIG_RELOCATABLE is not set
297CONFIG_RELOCATABLE=y 309CONFIG_PHYSICAL_ALIGN=0x400000
298CONFIG_PHYSICAL_ALIGN=0x200000
299CONFIG_HOTPLUG_CPU=y 310CONFIG_HOTPLUG_CPU=y
300CONFIG_COMPAT_VDSO=y 311# CONFIG_COMPAT_VDSO is not set
312# CONFIG_CMDLINE_BOOL is not set
313# CONFIG_CMDLINE is not set
314# CONFIG_CMDLINE_OVERRIDE is not set
301CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 315CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
302 316
303# 317#
304# Power management options 318# Power management and ACPI options
305# 319#
306CONFIG_PM=y 320CONFIG_PM=y
307CONFIG_PM_DEBUG=y 321CONFIG_PM_DEBUG=y
@@ -328,19 +342,16 @@ CONFIG_ACPI_BUTTON=y
328CONFIG_ACPI_VIDEO=y 342CONFIG_ACPI_VIDEO=y
329CONFIG_ACPI_FAN=y 343CONFIG_ACPI_FAN=y
330CONFIG_ACPI_DOCK=y 344CONFIG_ACPI_DOCK=y
331# CONFIG_ACPI_BAY is not set
332CONFIG_ACPI_PROCESSOR=y 345CONFIG_ACPI_PROCESSOR=y
333CONFIG_ACPI_HOTPLUG_CPU=y 346CONFIG_ACPI_HOTPLUG_CPU=y
334CONFIG_ACPI_THERMAL=y 347CONFIG_ACPI_THERMAL=y
335CONFIG_ACPI_WMI=m 348CONFIG_ACPI_WMI=y
336CONFIG_ACPI_ASUS=y 349CONFIG_ACPI_ASUS=m
337# CONFIG_ACPI_TOSHIBA is not set 350CONFIG_ACPI_TOSHIBA=m
338# CONFIG_ACPI_CUSTOM_DSDT is not set 351# CONFIG_ACPI_CUSTOM_DSDT is not set
339CONFIG_ACPI_BLACKLIST_YEAR=0 352CONFIG_ACPI_BLACKLIST_YEAR=1999
340# CONFIG_ACPI_DEBUG is not set 353# CONFIG_ACPI_DEBUG is not set
341CONFIG_ACPI_EC=y
342# CONFIG_ACPI_PCI_SLOT is not set 354# CONFIG_ACPI_PCI_SLOT is not set
343CONFIG_ACPI_POWER=y
344CONFIG_ACPI_SYSTEM=y 355CONFIG_ACPI_SYSTEM=y
345CONFIG_X86_PM_TIMER=y 356CONFIG_X86_PM_TIMER=y
346CONFIG_ACPI_CONTAINER=y 357CONFIG_ACPI_CONTAINER=y
@@ -353,12 +364,12 @@ CONFIG_ACPI_SBS=m
353CONFIG_CPU_FREQ=y 364CONFIG_CPU_FREQ=y
354CONFIG_CPU_FREQ_TABLE=y 365CONFIG_CPU_FREQ_TABLE=y
355CONFIG_CPU_FREQ_DEBUG=y 366CONFIG_CPU_FREQ_DEBUG=y
356CONFIG_CPU_FREQ_STAT=m 367CONFIG_CPU_FREQ_STAT=y
357CONFIG_CPU_FREQ_STAT_DETAILS=y 368CONFIG_CPU_FREQ_STAT_DETAILS=y
358CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y 369# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
359# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set 370# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
360# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set 371# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
361# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set 372CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
362# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set 373# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
363CONFIG_CPU_FREQ_GOV_PERFORMANCE=y 374CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
364# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set 375# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
@@ -406,34 +417,54 @@ CONFIG_PCI_DIRECT=y
406CONFIG_PCI_MMCONFIG=y 417CONFIG_PCI_MMCONFIG=y
407CONFIG_PCI_DOMAINS=y 418CONFIG_PCI_DOMAINS=y
408CONFIG_PCIEPORTBUS=y 419CONFIG_PCIEPORTBUS=y
409CONFIG_PCIEAER=y 420# CONFIG_PCIEAER is not set
410CONFIG_PCIEASPM=y 421# CONFIG_PCIEASPM is not set
411# CONFIG_PCIEASPM_DEBUG is not set 422# CONFIG_PCIEASPM_DEBUG is not set
412CONFIG_ARCH_SUPPORTS_MSI=y 423CONFIG_ARCH_SUPPORTS_MSI=y
413CONFIG_PCI_MSI=y 424CONFIG_PCI_MSI=y
414CONFIG_PCI_LEGACY=y 425# CONFIG_PCI_LEGACY is not set
415# CONFIG_PCI_DEBUG is not set 426# CONFIG_PCI_DEBUG is not set
416CONFIG_HT_IRQ=y 427# CONFIG_PCI_STUB is not set
428# CONFIG_HT_IRQ is not set
417CONFIG_ISA_DMA_API=y 429CONFIG_ISA_DMA_API=y
418# CONFIG_ISA is not set 430CONFIG_ISA=y
431# CONFIG_EISA is not set
419# CONFIG_MCA is not set 432# CONFIG_MCA is not set
420# CONFIG_SCx200 is not set 433# CONFIG_SCx200 is not set
421# CONFIG_OLPC is not set 434# CONFIG_OLPC is not set
422CONFIG_K8_NB=y 435CONFIG_PCCARD=y
423# CONFIG_PCCARD is not set 436# CONFIG_PCMCIA_DEBUG is not set
437# CONFIG_PCMCIA is not set
438CONFIG_CARDBUS=y
439
440#
441# PC-card bridges
442#
443CONFIG_YENTA=y
444CONFIG_YENTA_O2=y
445CONFIG_YENTA_RICOH=y
446CONFIG_YENTA_TI=y
447CONFIG_YENTA_ENE_TUNE=y
448CONFIG_YENTA_TOSHIBA=y
449CONFIG_PCMCIA_PROBE=y
450CONFIG_PCCARD_NONSTATIC=y
424# CONFIG_HOTPLUG_PCI is not set 451# CONFIG_HOTPLUG_PCI is not set
425 452
426# 453#
427# Executable file formats / Emulations 454# Executable file formats / Emulations
428# 455#
429CONFIG_BINFMT_ELF=y 456CONFIG_BINFMT_ELF=y
457# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
458CONFIG_HAVE_AOUT=y
430# CONFIG_BINFMT_AOUT is not set 459# CONFIG_BINFMT_AOUT is not set
431CONFIG_BINFMT_MISC=y 460CONFIG_BINFMT_MISC=y
461CONFIG_HAVE_ATOMIC_IOMAP=y
432CONFIG_NET=y 462CONFIG_NET=y
433 463
434# 464#
435# Networking options 465# Networking options
436# 466#
467# CONFIG_NET_NS is not set
437CONFIG_PACKET=y 468CONFIG_PACKET=y
438CONFIG_PACKET_MMAP=y 469CONFIG_PACKET_MMAP=y
439CONFIG_UNIX=y 470CONFIG_UNIX=y
@@ -489,7 +520,6 @@ CONFIG_DEFAULT_CUBIC=y
489# CONFIG_DEFAULT_RENO is not set 520# CONFIG_DEFAULT_RENO is not set
490CONFIG_DEFAULT_TCP_CONG="cubic" 521CONFIG_DEFAULT_TCP_CONG="cubic"
491CONFIG_TCP_MD5SIG=y 522CONFIG_TCP_MD5SIG=y
492# CONFIG_IP_VS is not set
493CONFIG_IPV6=y 523CONFIG_IPV6=y
494CONFIG_IPV6_PRIVACY=y 524CONFIG_IPV6_PRIVACY=y
495CONFIG_IPV6_ROUTER_PREF=y 525CONFIG_IPV6_ROUTER_PREF=y
@@ -511,7 +541,6 @@ CONFIG_IPV6_TUNNEL=m
511CONFIG_IPV6_MULTIPLE_TABLES=y 541CONFIG_IPV6_MULTIPLE_TABLES=y
512CONFIG_IPV6_SUBTREES=y 542CONFIG_IPV6_SUBTREES=y
513# CONFIG_IPV6_MROUTE is not set 543# CONFIG_IPV6_MROUTE is not set
514CONFIG_NETLABEL=y
515CONFIG_NETWORK_SECMARK=y 544CONFIG_NETWORK_SECMARK=y
516CONFIG_NETFILTER=y 545CONFIG_NETFILTER=y
517# CONFIG_NETFILTER_DEBUG is not set 546# CONFIG_NETFILTER_DEBUG is not set
@@ -542,18 +571,19 @@ CONFIG_NF_CONNTRACK_SANE=m
542CONFIG_NF_CONNTRACK_SIP=m 571CONFIG_NF_CONNTRACK_SIP=m
543CONFIG_NF_CONNTRACK_TFTP=m 572CONFIG_NF_CONNTRACK_TFTP=m
544CONFIG_NF_CT_NETLINK=m 573CONFIG_NF_CT_NETLINK=m
574# CONFIG_NETFILTER_TPROXY is not set
545CONFIG_NETFILTER_XTABLES=y 575CONFIG_NETFILTER_XTABLES=y
546CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 576CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
547CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 577CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
578CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
548CONFIG_NETFILTER_XT_TARGET_DSCP=m 579CONFIG_NETFILTER_XT_TARGET_DSCP=m
549CONFIG_NETFILTER_XT_TARGET_MARK=m 580CONFIG_NETFILTER_XT_TARGET_MARK=m
550CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
551CONFIG_NETFILTER_XT_TARGET_NFLOG=m 581CONFIG_NETFILTER_XT_TARGET_NFLOG=m
582CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
552CONFIG_NETFILTER_XT_TARGET_NOTRACK=m 583CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
553CONFIG_NETFILTER_XT_TARGET_RATEEST=m 584CONFIG_NETFILTER_XT_TARGET_RATEEST=m
554CONFIG_NETFILTER_XT_TARGET_TRACE=m 585CONFIG_NETFILTER_XT_TARGET_TRACE=m
555CONFIG_NETFILTER_XT_TARGET_SECMARK=m 586CONFIG_NETFILTER_XT_TARGET_SECMARK=m
556CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
557CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 587CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
558CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 588CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
559CONFIG_NETFILTER_XT_MATCH_COMMENT=m 589CONFIG_NETFILTER_XT_MATCH_COMMENT=m
@@ -564,19 +594,21 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
564# CONFIG_NETFILTER_XT_MATCH_DCCP is not set 594# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
565CONFIG_NETFILTER_XT_MATCH_DSCP=m 595CONFIG_NETFILTER_XT_MATCH_DSCP=m
566CONFIG_NETFILTER_XT_MATCH_ESP=m 596CONFIG_NETFILTER_XT_MATCH_ESP=m
597CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
567CONFIG_NETFILTER_XT_MATCH_HELPER=m 598CONFIG_NETFILTER_XT_MATCH_HELPER=m
568CONFIG_NETFILTER_XT_MATCH_IPRANGE=m 599CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
569CONFIG_NETFILTER_XT_MATCH_LENGTH=m 600CONFIG_NETFILTER_XT_MATCH_LENGTH=m
570CONFIG_NETFILTER_XT_MATCH_LIMIT=m 601CONFIG_NETFILTER_XT_MATCH_LIMIT=m
571CONFIG_NETFILTER_XT_MATCH_MAC=m 602CONFIG_NETFILTER_XT_MATCH_MAC=m
572CONFIG_NETFILTER_XT_MATCH_MARK=m 603CONFIG_NETFILTER_XT_MATCH_MARK=m
604CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
573CONFIG_NETFILTER_XT_MATCH_OWNER=m 605CONFIG_NETFILTER_XT_MATCH_OWNER=m
574CONFIG_NETFILTER_XT_MATCH_POLICY=m 606CONFIG_NETFILTER_XT_MATCH_POLICY=m
575CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
576CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 607CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
577CONFIG_NETFILTER_XT_MATCH_QUOTA=m 608CONFIG_NETFILTER_XT_MATCH_QUOTA=m
578CONFIG_NETFILTER_XT_MATCH_RATEEST=m 609CONFIG_NETFILTER_XT_MATCH_RATEEST=m
579CONFIG_NETFILTER_XT_MATCH_REALM=m 610CONFIG_NETFILTER_XT_MATCH_REALM=m
611# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
580CONFIG_NETFILTER_XT_MATCH_SCTP=m 612CONFIG_NETFILTER_XT_MATCH_SCTP=m
581CONFIG_NETFILTER_XT_MATCH_STATE=y 613CONFIG_NETFILTER_XT_MATCH_STATE=y
582CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 614CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -584,20 +616,20 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
584CONFIG_NETFILTER_XT_MATCH_TCPMSS=m 616CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
585CONFIG_NETFILTER_XT_MATCH_TIME=m 617CONFIG_NETFILTER_XT_MATCH_TIME=m
586CONFIG_NETFILTER_XT_MATCH_U32=m 618CONFIG_NETFILTER_XT_MATCH_U32=m
587CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m 619# CONFIG_IP_VS is not set
588 620
589# 621#
590# IP: Netfilter Configuration 622# IP: Netfilter Configuration
591# 623#
624CONFIG_NF_DEFRAG_IPV4=y
592CONFIG_NF_CONNTRACK_IPV4=y 625CONFIG_NF_CONNTRACK_IPV4=y
593# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set 626# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
594CONFIG_IP_NF_QUEUE=m 627CONFIG_IP_NF_QUEUE=m
595CONFIG_IP_NF_IPTABLES=y 628CONFIG_IP_NF_IPTABLES=y
596CONFIG_IP_NF_MATCH_RECENT=m 629CONFIG_IP_NF_MATCH_ADDRTYPE=m
597CONFIG_IP_NF_MATCH_ECN=m
598CONFIG_IP_NF_MATCH_AH=m 630CONFIG_IP_NF_MATCH_AH=m
631CONFIG_IP_NF_MATCH_ECN=m
599CONFIG_IP_NF_MATCH_TTL=m 632CONFIG_IP_NF_MATCH_TTL=m
600CONFIG_IP_NF_MATCH_ADDRTYPE=m
601CONFIG_IP_NF_FILTER=y 633CONFIG_IP_NF_FILTER=y
602CONFIG_IP_NF_TARGET_REJECT=y 634CONFIG_IP_NF_TARGET_REJECT=y
603CONFIG_IP_NF_TARGET_LOG=m 635CONFIG_IP_NF_TARGET_LOG=m
@@ -605,8 +637,8 @@ CONFIG_IP_NF_TARGET_ULOG=m
605CONFIG_NF_NAT=m 637CONFIG_NF_NAT=m
606CONFIG_NF_NAT_NEEDED=y 638CONFIG_NF_NAT_NEEDED=y
607CONFIG_IP_NF_TARGET_MASQUERADE=m 639CONFIG_IP_NF_TARGET_MASQUERADE=m
608CONFIG_IP_NF_TARGET_REDIRECT=m
609CONFIG_IP_NF_TARGET_NETMAP=m 640CONFIG_IP_NF_TARGET_NETMAP=m
641CONFIG_IP_NF_TARGET_REDIRECT=m
610CONFIG_NF_NAT_SNMP_BASIC=m 642CONFIG_NF_NAT_SNMP_BASIC=m
611CONFIG_NF_NAT_PROTO_GRE=m 643CONFIG_NF_NAT_PROTO_GRE=m
612CONFIG_NF_NAT_PROTO_UDPLITE=m 644CONFIG_NF_NAT_PROTO_UDPLITE=m
@@ -619,11 +651,10 @@ CONFIG_NF_NAT_PPTP=m
619CONFIG_NF_NAT_H323=m 651CONFIG_NF_NAT_H323=m
620CONFIG_NF_NAT_SIP=m 652CONFIG_NF_NAT_SIP=m
621CONFIG_IP_NF_MANGLE=m 653CONFIG_IP_NF_MANGLE=m
654CONFIG_IP_NF_TARGET_CLUSTERIP=m
622CONFIG_IP_NF_TARGET_ECN=m 655CONFIG_IP_NF_TARGET_ECN=m
623CONFIG_IP_NF_TARGET_TTL=m 656CONFIG_IP_NF_TARGET_TTL=m
624CONFIG_IP_NF_TARGET_CLUSTERIP=m
625CONFIG_IP_NF_RAW=m 657CONFIG_IP_NF_RAW=m
626# CONFIG_IP_NF_SECURITY is not set
627CONFIG_IP_NF_ARPTABLES=m 658CONFIG_IP_NF_ARPTABLES=m
628CONFIG_IP_NF_ARPFILTER=m 659CONFIG_IP_NF_ARPFILTER=m
629CONFIG_IP_NF_ARP_MANGLE=m 660CONFIG_IP_NF_ARP_MANGLE=m
@@ -634,26 +665,26 @@ CONFIG_IP_NF_ARP_MANGLE=m
634CONFIG_NF_CONNTRACK_IPV6=y 665CONFIG_NF_CONNTRACK_IPV6=y
635CONFIG_IP6_NF_QUEUE=m 666CONFIG_IP6_NF_QUEUE=m
636CONFIG_IP6_NF_IPTABLES=y 667CONFIG_IP6_NF_IPTABLES=y
637CONFIG_IP6_NF_MATCH_RT=m 668CONFIG_IP6_NF_MATCH_AH=m
638CONFIG_IP6_NF_MATCH_OPTS=m 669CONFIG_IP6_NF_MATCH_EUI64=m
639CONFIG_IP6_NF_MATCH_FRAG=m 670CONFIG_IP6_NF_MATCH_FRAG=m
671CONFIG_IP6_NF_MATCH_OPTS=m
640CONFIG_IP6_NF_MATCH_HL=m 672CONFIG_IP6_NF_MATCH_HL=m
641CONFIG_IP6_NF_MATCH_IPV6HEADER=m 673CONFIG_IP6_NF_MATCH_IPV6HEADER=m
642CONFIG_IP6_NF_MATCH_AH=m
643CONFIG_IP6_NF_MATCH_MH=m 674CONFIG_IP6_NF_MATCH_MH=m
644CONFIG_IP6_NF_MATCH_EUI64=m 675CONFIG_IP6_NF_MATCH_RT=m
645CONFIG_IP6_NF_FILTER=y
646CONFIG_IP6_NF_TARGET_LOG=m 676CONFIG_IP6_NF_TARGET_LOG=m
677CONFIG_IP6_NF_FILTER=y
647CONFIG_IP6_NF_TARGET_REJECT=y 678CONFIG_IP6_NF_TARGET_REJECT=y
648CONFIG_IP6_NF_MANGLE=m 679CONFIG_IP6_NF_MANGLE=m
649CONFIG_IP6_NF_TARGET_HL=m 680CONFIG_IP6_NF_TARGET_HL=m
650CONFIG_IP6_NF_RAW=m 681CONFIG_IP6_NF_RAW=m
651# CONFIG_IP6_NF_SECURITY is not set
652# CONFIG_IP_DCCP is not set 682# CONFIG_IP_DCCP is not set
653# CONFIG_IP_SCTP is not set 683# CONFIG_IP_SCTP is not set
654# CONFIG_TIPC is not set 684# CONFIG_TIPC is not set
655# CONFIG_ATM is not set 685# CONFIG_ATM is not set
656# CONFIG_BRIDGE is not set 686# CONFIG_BRIDGE is not set
687# CONFIG_NET_DSA is not set
657# CONFIG_VLAN_8021Q is not set 688# CONFIG_VLAN_8021Q is not set
658# CONFIG_DECNET is not set 689# CONFIG_DECNET is not set
659# CONFIG_LLC2 is not set 690# CONFIG_LLC2 is not set
@@ -665,6 +696,7 @@ CONFIG_IP6_NF_RAW=m
665# CONFIG_WAN_ROUTER is not set 696# CONFIG_WAN_ROUTER is not set
666# CONFIG_NET_SCHED is not set 697# CONFIG_NET_SCHED is not set
667CONFIG_NET_CLS_ROUTE=y 698CONFIG_NET_CLS_ROUTE=y
699# CONFIG_DCB is not set
668 700
669# 701#
670# Network testing 702# Network testing
@@ -673,22 +705,20 @@ CONFIG_NET_CLS_ROUTE=y
673# CONFIG_HAMRADIO is not set 705# CONFIG_HAMRADIO is not set
674# CONFIG_CAN is not set 706# CONFIG_CAN is not set
675# CONFIG_IRDA is not set 707# CONFIG_IRDA is not set
676CONFIG_BT=m 708CONFIG_BT=y
677CONFIG_BT_L2CAP=m 709CONFIG_BT_L2CAP=y
678CONFIG_BT_SCO=m 710CONFIG_BT_SCO=y
679CONFIG_BT_RFCOMM=m 711CONFIG_BT_RFCOMM=y
680CONFIG_BT_RFCOMM_TTY=y 712CONFIG_BT_RFCOMM_TTY=y
681CONFIG_BT_BNEP=m 713CONFIG_BT_BNEP=y
682# CONFIG_BT_BNEP_MC_FILTER is not set 714CONFIG_BT_BNEP_MC_FILTER=y
683# CONFIG_BT_BNEP_PROTO_FILTER is not set 715CONFIG_BT_BNEP_PROTO_FILTER=y
684# CONFIG_BT_HIDP is not set 716CONFIG_BT_HIDP=y
685 717
686# 718#
687# Bluetooth device drivers 719# Bluetooth device drivers
688# 720#
689CONFIG_BT_HCIUSB=m 721CONFIG_BT_HCIBTUSB=y
690CONFIG_BT_HCIUSB_SCO=y
691# CONFIG_BT_HCIBTUSB is not set
692CONFIG_BT_HCIBTSDIO=m 722CONFIG_BT_HCIBTSDIO=m
693CONFIG_BT_HCIUART=m 723CONFIG_BT_HCIUART=m
694CONFIG_BT_HCIUART_H4=y 724CONFIG_BT_HCIUART_H4=y
@@ -699,22 +729,28 @@ CONFIG_BT_HCIBPA10X=m
699CONFIG_BT_HCIBFUSB=m 729CONFIG_BT_HCIBFUSB=m
700CONFIG_BT_HCIVHCI=m 730CONFIG_BT_HCIVHCI=m
701# CONFIG_AF_RXRPC is not set 731# CONFIG_AF_RXRPC is not set
732# CONFIG_PHONET is not set
702CONFIG_FIB_RULES=y 733CONFIG_FIB_RULES=y
703 734CONFIG_WIRELESS=y
704# 735CONFIG_CFG80211=y
705# Wireless 736# CONFIG_CFG80211_REG_DEBUG is not set
706#
707CONFIG_CFG80211=m
708CONFIG_NL80211=y 737CONFIG_NL80211=y
738CONFIG_WIRELESS_OLD_REGULATORY=y
709CONFIG_WIRELESS_EXT=y 739CONFIG_WIRELESS_EXT=y
710# CONFIG_WIRELESS_EXT_SYSFS is not set 740CONFIG_WIRELESS_EXT_SYSFS=y
711CONFIG_MAC80211=m 741CONFIG_LIB80211=m
742CONFIG_LIB80211_CRYPT_WEP=m
743CONFIG_LIB80211_CRYPT_CCMP=m
744CONFIG_LIB80211_CRYPT_TKIP=m
745CONFIG_MAC80211=y
712 746
713# 747#
714# Rate control algorithm selection 748# Rate control algorithm selection
715# 749#
716CONFIG_MAC80211_RC_PID=y 750CONFIG_MAC80211_RC_PID=y
751# CONFIG_MAC80211_RC_MINSTREL is not set
717CONFIG_MAC80211_RC_DEFAULT_PID=y 752CONFIG_MAC80211_RC_DEFAULT_PID=y
753# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
718CONFIG_MAC80211_RC_DEFAULT="pid" 754CONFIG_MAC80211_RC_DEFAULT="pid"
719CONFIG_MAC80211_MESH=y 755CONFIG_MAC80211_MESH=y
720CONFIG_MAC80211_LEDS=y 756CONFIG_MAC80211_LEDS=y
@@ -725,8 +761,10 @@ CONFIG_IEEE80211=m
725CONFIG_IEEE80211_CRYPT_WEP=m 761CONFIG_IEEE80211_CRYPT_WEP=m
726CONFIG_IEEE80211_CRYPT_CCMP=m 762CONFIG_IEEE80211_CRYPT_CCMP=m
727CONFIG_IEEE80211_CRYPT_TKIP=m 763CONFIG_IEEE80211_CRYPT_TKIP=m
728CONFIG_RFKILL=m 764CONFIG_WIMAX=m
729CONFIG_RFKILL_INPUT=m 765CONFIG_WIMAX_DEBUG_LEVEL=8
766CONFIG_RFKILL=y
767CONFIG_RFKILL_INPUT=y
730CONFIG_RFKILL_LEDS=y 768CONFIG_RFKILL_LEDS=y
731# CONFIG_NET_9P is not set 769# CONFIG_NET_9P is not set
732 770
@@ -737,7 +775,7 @@ CONFIG_RFKILL_LEDS=y
737# 775#
738# Generic Driver Options 776# Generic Driver Options
739# 777#
740CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 778CONFIG_UEVENT_HELPER_PATH=""
741CONFIG_STANDALONE=y 779CONFIG_STANDALONE=y
742CONFIG_PREVENT_FIRMWARE_BUILD=y 780CONFIG_PREVENT_FIRMWARE_BUILD=y
743CONFIG_FW_LOADER=y 781CONFIG_FW_LOADER=y
@@ -746,19 +784,21 @@ CONFIG_EXTRA_FIRMWARE=""
746# CONFIG_DEBUG_DRIVER is not set 784# CONFIG_DEBUG_DRIVER is not set
747CONFIG_DEBUG_DEVRES=y 785CONFIG_DEBUG_DEVRES=y
748# CONFIG_SYS_HYPERVISOR is not set 786# CONFIG_SYS_HYPERVISOR is not set
749CONFIG_CONNECTOR=y 787# CONFIG_CONNECTOR is not set
750CONFIG_PROC_EVENTS=y
751# CONFIG_MTD is not set 788# CONFIG_MTD is not set
752# CONFIG_PARPORT is not set 789# CONFIG_PARPORT is not set
753CONFIG_PNP=y 790CONFIG_PNP=y
754# CONFIG_PNP_DEBUG is not set 791# CONFIG_PNP_DEBUG_MESSAGES is not set
755 792
756# 793#
757# Protocols 794# Protocols
758# 795#
796# CONFIG_ISAPNP is not set
797# CONFIG_PNPBIOS is not set
759CONFIG_PNPACPI=y 798CONFIG_PNPACPI=y
760CONFIG_BLK_DEV=y 799CONFIG_BLK_DEV=y
761# CONFIG_BLK_DEV_FD is not set 800# CONFIG_BLK_DEV_FD is not set
801# CONFIG_BLK_DEV_XD is not set
762# CONFIG_BLK_CPQ_DA is not set 802# CONFIG_BLK_CPQ_DA is not set
763# CONFIG_BLK_CPQ_CISS_DA is not set 803# CONFIG_BLK_CPQ_CISS_DA is not set
764# CONFIG_BLK_DEV_DAC960 is not set 804# CONFIG_BLK_DEV_DAC960 is not set
@@ -769,7 +809,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
769# CONFIG_BLK_DEV_NBD is not set 809# CONFIG_BLK_DEV_NBD is not set
770# CONFIG_BLK_DEV_SX8 is not set 810# CONFIG_BLK_DEV_SX8 is not set
771# CONFIG_BLK_DEV_UB is not set 811# CONFIG_BLK_DEV_UB is not set
772# CONFIG_BLK_DEV_RAM is not set 812CONFIG_BLK_DEV_RAM=m
813CONFIG_BLK_DEV_RAM_COUNT=16
814CONFIG_BLK_DEV_RAM_SIZE=16384
815# CONFIG_BLK_DEV_XIP is not set
773CONFIG_CDROM_PKTCDVD=m 816CONFIG_CDROM_PKTCDVD=m
774CONFIG_CDROM_PKTCDVD_BUFFERS=8 817CONFIG_CDROM_PKTCDVD_BUFFERS=8
775# CONFIG_CDROM_PKTCDVD_WCACHE is not set 818# CONFIG_CDROM_PKTCDVD_WCACHE is not set
@@ -781,19 +824,29 @@ CONFIG_MISC_DEVICES=y
781CONFIG_EEPROM_93CX6=m 824CONFIG_EEPROM_93CX6=m
782# CONFIG_SGI_IOC4 is not set 825# CONFIG_SGI_IOC4 is not set
783CONFIG_TIFM_CORE=m 826CONFIG_TIFM_CORE=m
784CONFIG_TIFM_7XX1=m 827# CONFIG_TIFM_7XX1 is not set
785# CONFIG_ACER_WMI is not set 828# CONFIG_ACER_WMI is not set
786# CONFIG_FUJITSU_LAPTOP is not set 829CONFIG_ASUS_LAPTOP=m
787# CONFIG_TC1100_WMI is not set 830CONFIG_FUJITSU_LAPTOP=m
788# CONFIG_HP_WMI is not set 831# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
789# CONFIG_MSI_LAPTOP is not set 832CONFIG_TC1100_WMI=m
790# CONFIG_COMPAL_LAPTOP is not set 833CONFIG_HP_WMI=m
791# CONFIG_SONY_LAPTOP is not set 834# CONFIG_ICS932S401 is not set
792# CONFIG_THINKPAD_ACPI is not set 835CONFIG_MSI_LAPTOP=m
836CONFIG_PANASONIC_LAPTOP=m
837CONFIG_COMPAL_LAPTOP=m
838CONFIG_SONY_LAPTOP=m
839# CONFIG_SONYPI_COMPAT is not set
840CONFIG_THINKPAD_ACPI=m
841# CONFIG_THINKPAD_ACPI_DEBUG is not set
842CONFIG_THINKPAD_ACPI_BAY=y
843CONFIG_THINKPAD_ACPI_VIDEO=y
844CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
793# CONFIG_INTEL_MENLOW is not set 845# CONFIG_INTEL_MENLOW is not set
794CONFIG_EEEPC_LAPTOP=y 846# CONFIG_EEEPC_LAPTOP is not set
795# CONFIG_ENCLOSURE_SERVICES is not set 847# CONFIG_ENCLOSURE_SERVICES is not set
796# CONFIG_HP_ILO is not set 848# CONFIG_HP_ILO is not set
849# CONFIG_C2PORT is not set
797CONFIG_HAVE_IDE=y 850CONFIG_HAVE_IDE=y
798# CONFIG_IDE is not set 851# CONFIG_IDE is not set
799 852
@@ -811,12 +864,12 @@ CONFIG_SCSI_PROC_FS=y
811# SCSI support type (disk, tape, CD-ROM) 864# SCSI support type (disk, tape, CD-ROM)
812# 865#
813CONFIG_BLK_DEV_SD=y 866CONFIG_BLK_DEV_SD=y
814CONFIG_CHR_DEV_ST=m 867# CONFIG_CHR_DEV_ST is not set
815# CONFIG_CHR_DEV_OSST is not set 868# CONFIG_CHR_DEV_OSST is not set
816CONFIG_BLK_DEV_SR=y 869CONFIG_BLK_DEV_SR=y
817CONFIG_BLK_DEV_SR_VENDOR=y 870CONFIG_BLK_DEV_SR_VENDOR=y
818# CONFIG_CHR_DEV_SG is not set 871CONFIG_CHR_DEV_SG=y
819CONFIG_CHR_DEV_SCH=m 872# CONFIG_CHR_DEV_SCH is not set
820 873
821# 874#
822# Some SCSI devices (e.g. CD jukebox) support multiple LUNs 875# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
@@ -838,9 +891,13 @@ CONFIG_SCSI_WAIT_SCAN=m
838# CONFIG_SCSI_SRP_ATTRS is not set 891# CONFIG_SCSI_SRP_ATTRS is not set
839CONFIG_SCSI_LOWLEVEL=y 892CONFIG_SCSI_LOWLEVEL=y
840# CONFIG_ISCSI_TCP is not set 893# CONFIG_ISCSI_TCP is not set
894# CONFIG_SCSI_CXGB3_ISCSI is not set
841# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 895# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
842# CONFIG_SCSI_3W_9XXX is not set 896# CONFIG_SCSI_3W_9XXX is not set
897# CONFIG_SCSI_7000FASST is not set
843# CONFIG_SCSI_ACARD is not set 898# CONFIG_SCSI_ACARD is not set
899# CONFIG_SCSI_AHA152X is not set
900# CONFIG_SCSI_AHA1542 is not set
844# CONFIG_SCSI_AACRAID is not set 901# CONFIG_SCSI_AACRAID is not set
845# CONFIG_SCSI_AIC7XXX is not set 902# CONFIG_SCSI_AIC7XXX is not set
846# CONFIG_SCSI_AIC7XXX_OLD is not set 903# CONFIG_SCSI_AIC7XXX_OLD is not set
@@ -848,29 +905,42 @@ CONFIG_SCSI_LOWLEVEL=y
848# CONFIG_SCSI_AIC94XX is not set 905# CONFIG_SCSI_AIC94XX is not set
849# CONFIG_SCSI_DPT_I2O is not set 906# CONFIG_SCSI_DPT_I2O is not set
850# CONFIG_SCSI_ADVANSYS is not set 907# CONFIG_SCSI_ADVANSYS is not set
908# CONFIG_SCSI_IN2000 is not set
851# CONFIG_SCSI_ARCMSR is not set 909# CONFIG_SCSI_ARCMSR is not set
852# CONFIG_MEGARAID_NEWGEN is not set 910# CONFIG_MEGARAID_NEWGEN is not set
853# CONFIG_MEGARAID_LEGACY is not set 911# CONFIG_MEGARAID_LEGACY is not set
854# CONFIG_MEGARAID_SAS is not set 912# CONFIG_MEGARAID_SAS is not set
855# CONFIG_SCSI_HPTIOP is not set 913# CONFIG_SCSI_HPTIOP is not set
856# CONFIG_SCSI_BUSLOGIC is not set 914# CONFIG_SCSI_BUSLOGIC is not set
915# CONFIG_LIBFC is not set
916# CONFIG_FCOE is not set
857# CONFIG_SCSI_DMX3191D is not set 917# CONFIG_SCSI_DMX3191D is not set
918# CONFIG_SCSI_DTC3280 is not set
858# CONFIG_SCSI_EATA is not set 919# CONFIG_SCSI_EATA is not set
859# CONFIG_SCSI_FUTURE_DOMAIN is not set 920# CONFIG_SCSI_FUTURE_DOMAIN is not set
860# CONFIG_SCSI_GDTH is not set 921# CONFIG_SCSI_GDTH is not set
922# CONFIG_SCSI_GENERIC_NCR5380 is not set
923# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
861# CONFIG_SCSI_IPS is not set 924# CONFIG_SCSI_IPS is not set
862# CONFIG_SCSI_INITIO is not set 925# CONFIG_SCSI_INITIO is not set
863# CONFIG_SCSI_INIA100 is not set 926# CONFIG_SCSI_INIA100 is not set
864# CONFIG_SCSI_MVSAS is not set 927# CONFIG_SCSI_MVSAS is not set
928# CONFIG_SCSI_NCR53C406A is not set
865# CONFIG_SCSI_STEX is not set 929# CONFIG_SCSI_STEX is not set
866# CONFIG_SCSI_SYM53C8XX_2 is not set 930# CONFIG_SCSI_SYM53C8XX_2 is not set
867# CONFIG_SCSI_IPR is not set 931# CONFIG_SCSI_IPR is not set
932# CONFIG_SCSI_PAS16 is not set
933# CONFIG_SCSI_QLOGIC_FAS is not set
868# CONFIG_SCSI_QLOGIC_1280 is not set 934# CONFIG_SCSI_QLOGIC_1280 is not set
869# CONFIG_SCSI_QLA_FC is not set 935# CONFIG_SCSI_QLA_FC is not set
870# CONFIG_SCSI_QLA_ISCSI is not set 936# CONFIG_SCSI_QLA_ISCSI is not set
871# CONFIG_SCSI_LPFC is not set 937# CONFIG_SCSI_LPFC is not set
938# CONFIG_SCSI_SYM53C416 is not set
872# CONFIG_SCSI_DC395x is not set 939# CONFIG_SCSI_DC395x is not set
873# CONFIG_SCSI_DC390T is not set 940# CONFIG_SCSI_DC390T is not set
941# CONFIG_SCSI_T128 is not set
942# CONFIG_SCSI_U14_34F is not set
943# CONFIG_SCSI_ULTRASTOR is not set
874# CONFIG_SCSI_NSP32 is not set 944# CONFIG_SCSI_NSP32 is not set
875# CONFIG_SCSI_DEBUG is not set 945# CONFIG_SCSI_DEBUG is not set
876# CONFIG_SCSI_SRP is not set 946# CONFIG_SCSI_SRP is not set
@@ -909,17 +979,19 @@ CONFIG_ATA_PIIX=y
909# CONFIG_PATA_CS5536 is not set 979# CONFIG_PATA_CS5536 is not set
910# CONFIG_PATA_CYPRESS is not set 980# CONFIG_PATA_CYPRESS is not set
911# CONFIG_PATA_EFAR is not set 981# CONFIG_PATA_EFAR is not set
912# CONFIG_ATA_GENERIC is not set 982CONFIG_ATA_GENERIC=y
913# CONFIG_PATA_HPT366 is not set 983# CONFIG_PATA_HPT366 is not set
914# CONFIG_PATA_HPT37X is not set 984# CONFIG_PATA_HPT37X is not set
915# CONFIG_PATA_HPT3X2N is not set 985# CONFIG_PATA_HPT3X2N is not set
916# CONFIG_PATA_HPT3X3 is not set 986# CONFIG_PATA_HPT3X3 is not set
987# CONFIG_PATA_ISAPNP is not set
917# CONFIG_PATA_IT821X is not set 988# CONFIG_PATA_IT821X is not set
918# CONFIG_PATA_IT8213 is not set 989# CONFIG_PATA_IT8213 is not set
919# CONFIG_PATA_JMICRON is not set 990# CONFIG_PATA_JMICRON is not set
991# CONFIG_PATA_LEGACY is not set
920# CONFIG_PATA_TRIFLEX is not set 992# CONFIG_PATA_TRIFLEX is not set
921# CONFIG_PATA_MARVELL is not set 993# CONFIG_PATA_MARVELL is not set
922# CONFIG_PATA_MPIIX is not set 994CONFIG_PATA_MPIIX=y
923# CONFIG_PATA_OLDPIIX is not set 995# CONFIG_PATA_OLDPIIX is not set
924# CONFIG_PATA_NETCELL is not set 996# CONFIG_PATA_NETCELL is not set
925# CONFIG_PATA_NINJA32 is not set 997# CONFIG_PATA_NINJA32 is not set
@@ -928,6 +1000,7 @@ CONFIG_ATA_PIIX=y
928# CONFIG_PATA_OPTI is not set 1000# CONFIG_PATA_OPTI is not set
929# CONFIG_PATA_OPTIDMA is not set 1001# CONFIG_PATA_OPTIDMA is not set
930# CONFIG_PATA_PDC_OLD is not set 1002# CONFIG_PATA_PDC_OLD is not set
1003# CONFIG_PATA_QDI is not set
931# CONFIG_PATA_RADISYS is not set 1004# CONFIG_PATA_RADISYS is not set
932# CONFIG_PATA_RZ1000 is not set 1005# CONFIG_PATA_RZ1000 is not set
933# CONFIG_PATA_SC1200 is not set 1006# CONFIG_PATA_SC1200 is not set
@@ -937,9 +1010,27 @@ CONFIG_ATA_PIIX=y
937# CONFIG_PATA_SIS is not set 1010# CONFIG_PATA_SIS is not set
938# CONFIG_PATA_VIA is not set 1011# CONFIG_PATA_VIA is not set
939# CONFIG_PATA_WINBOND is not set 1012# CONFIG_PATA_WINBOND is not set
1013# CONFIG_PATA_WINBOND_VLB is not set
940CONFIG_PATA_SCH=y 1014CONFIG_PATA_SCH=y
941# CONFIG_MD is not set 1015CONFIG_MD=y
942# CONFIG_FUSION is not set 1016# CONFIG_BLK_DEV_MD is not set
1017CONFIG_BLK_DEV_DM=m
1018CONFIG_DM_DEBUG=y
1019# CONFIG_DM_CRYPT is not set
1020CONFIG_DM_SNAPSHOT=m
1021CONFIG_DM_MIRROR=m
1022CONFIG_DM_ZERO=m
1023CONFIG_DM_MULTIPATH=m
1024CONFIG_DM_DELAY=m
1025# CONFIG_DM_UEVENT is not set
1026CONFIG_FUSION=y
1027CONFIG_FUSION_SPI=m
1028CONFIG_FUSION_FC=m
1029CONFIG_FUSION_SAS=m
1030CONFIG_FUSION_MAX_SGE=40
1031CONFIG_FUSION_CTL=m
1032CONFIG_FUSION_LAN=m
1033CONFIG_FUSION_LOGGING=y
943 1034
944# 1035#
945# IEEE 1394 (FireWire) support 1036# IEEE 1394 (FireWire) support
@@ -957,7 +1048,7 @@ CONFIG_NETDEVICES=y
957# CONFIG_BONDING is not set 1048# CONFIG_BONDING is not set
958CONFIG_MACVLAN=m 1049CONFIG_MACVLAN=m
959# CONFIG_EQUALIZER is not set 1050# CONFIG_EQUALIZER is not set
960# CONFIG_TUN is not set 1051CONFIG_TUN=y
961# CONFIG_VETH is not set 1052# CONFIG_VETH is not set
962# CONFIG_NET_SB1000 is not set 1053# CONFIG_NET_SB1000 is not set
963# CONFIG_ARCNET is not set 1054# CONFIG_ARCNET is not set
@@ -978,42 +1069,97 @@ CONFIG_ICPLUS_PHY=m
978CONFIG_REALTEK_PHY=m 1069CONFIG_REALTEK_PHY=m
979CONFIG_MDIO_BITBANG=m 1070CONFIG_MDIO_BITBANG=m
980CONFIG_NET_ETHERNET=y 1071CONFIG_NET_ETHERNET=y
981CONFIG_MII=m 1072CONFIG_MII=y
982CONFIG_HAPPYMEAL=m 1073# CONFIG_NATIONAL_PHY is not set
983CONFIG_SUNGEM=m 1074# CONFIG_STE10XP is not set
984CONFIG_CASSINI=m 1075# CONFIG_LSI_ET1011C_PHY is not set
985CONFIG_NET_VENDOR_3COM=y 1076# CONFIG_HAPPYMEAL is not set
986# CONFIG_VORTEX is not set 1077# CONFIG_SUNGEM is not set
987# CONFIG_TYPHOON is not set 1078# CONFIG_CASSINI is not set
1079# CONFIG_NET_VENDOR_3COM is not set
1080# CONFIG_LANCE is not set
1081# CONFIG_NET_VENDOR_SMC is not set
1082# CONFIG_NET_VENDOR_RACAL is not set
988# CONFIG_NET_TULIP is not set 1083# CONFIG_NET_TULIP is not set
1084# CONFIG_AT1700 is not set
1085# CONFIG_DEPCA is not set
989# CONFIG_HP100 is not set 1086# CONFIG_HP100 is not set
1087# CONFIG_NET_ISA is not set
990# CONFIG_IBM_NEW_EMAC_ZMII is not set 1088# CONFIG_IBM_NEW_EMAC_ZMII is not set
991# CONFIG_IBM_NEW_EMAC_RGMII is not set 1089# CONFIG_IBM_NEW_EMAC_RGMII is not set
992# CONFIG_IBM_NEW_EMAC_TAH is not set 1090# CONFIG_IBM_NEW_EMAC_TAH is not set
993# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 1091# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
994# CONFIG_NET_PCI is not set 1092# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
1093# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
1094# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
1095CONFIG_NET_PCI=y
1096CONFIG_PCNET32=m
1097# CONFIG_AMD8111_ETH is not set
1098# CONFIG_ADAPTEC_STARFIRE is not set
1099# CONFIG_AC3200 is not set
1100# CONFIG_APRICOT is not set
995# CONFIG_B44 is not set 1101# CONFIG_B44 is not set
1102# CONFIG_FORCEDETH is not set
1103# CONFIG_CS89x0 is not set
1104# CONFIG_EEPRO100 is not set
1105CONFIG_E100=y
1106# CONFIG_FEALNX is not set
1107# CONFIG_NATSEMI is not set
1108# CONFIG_NE2K_PCI is not set
1109CONFIG_8139CP=m
1110CONFIG_8139TOO=m
1111CONFIG_8139TOO_PIO=y
1112# CONFIG_8139TOO_TUNE_TWISTER is not set
1113# CONFIG_8139TOO_8129 is not set
1114# CONFIG_8139_OLD_RX_RESET is not set
1115# CONFIG_R6040 is not set
1116CONFIG_SIS900=m
1117# CONFIG_EPIC100 is not set
1118# CONFIG_SMSC9420 is not set
1119# CONFIG_SUNDANCE is not set
1120# CONFIG_TLAN is not set
1121# CONFIG_VIA_RHINE is not set
1122# CONFIG_SC92031 is not set
1123CONFIG_ATL2=m
996CONFIG_NETDEV_1000=y 1124CONFIG_NETDEV_1000=y
997# CONFIG_ACENIC is not set 1125# CONFIG_ACENIC is not set
998# CONFIG_DL2K is not set 1126# CONFIG_DL2K is not set
999# CONFIG_E1000 is not set 1127CONFIG_E1000=y
1000# CONFIG_E1000E is not set 1128CONFIG_E1000E=y
1001# CONFIG_IP1000 is not set 1129# CONFIG_IP1000 is not set
1002# CONFIG_IGB is not set 1130CONFIG_IGB=y
1131# CONFIG_IGB_LRO is not set
1003# CONFIG_NS83820 is not set 1132# CONFIG_NS83820 is not set
1004# CONFIG_HAMACHI is not set 1133# CONFIG_HAMACHI is not set
1005# CONFIG_YELLOWFIN is not set 1134# CONFIG_YELLOWFIN is not set
1006# CONFIG_R8169 is not set 1135# CONFIG_R8169 is not set
1007# CONFIG_SIS190 is not set 1136CONFIG_SIS190=m
1008# CONFIG_SKGE is not set 1137# CONFIG_SKGE is not set
1009# CONFIG_SKY2 is not set 1138CONFIG_SKY2=m
1139# CONFIG_SKY2_DEBUG is not set
1010# CONFIG_VIA_VELOCITY is not set 1140# CONFIG_VIA_VELOCITY is not set
1011# CONFIG_TIGON3 is not set 1141CONFIG_TIGON3=m
1012# CONFIG_BNX2 is not set 1142CONFIG_BNX2=m
1013# CONFIG_QLA3XXX is not set 1143# CONFIG_QLA3XXX is not set
1014CONFIG_ATL1=m 1144CONFIG_ATL1=m
1015CONFIG_ATL1E=m 1145# CONFIG_ATL1E is not set
1016# CONFIG_NETDEV_10000 is not set 1146# CONFIG_JME is not set
1147CONFIG_NETDEV_10000=y
1148# CONFIG_CHELSIO_T1 is not set
1149# CONFIG_CHELSIO_T3 is not set
1150# CONFIG_ENIC is not set
1151CONFIG_IXGBE=m
1152CONFIG_IXGB=m
1153# CONFIG_S2IO is not set
1154# CONFIG_MYRI10GE is not set
1155# CONFIG_NETXEN_NIC is not set
1156# CONFIG_NIU is not set
1157# CONFIG_MLX4_EN is not set
1158# CONFIG_MLX4_CORE is not set
1159# CONFIG_TEHUTI is not set
1160CONFIG_BNX2X=m
1161# CONFIG_QLGE is not set
1162# CONFIG_SFC is not set
1017# CONFIG_TR is not set 1163# CONFIG_TR is not set
1018 1164
1019# 1165#
@@ -1021,10 +1167,21 @@ CONFIG_ATL1E=m
1021# 1167#
1022CONFIG_WLAN_PRE80211=y 1168CONFIG_WLAN_PRE80211=y
1023# CONFIG_STRIP is not set 1169# CONFIG_STRIP is not set
1170# CONFIG_ARLAN is not set
1171# CONFIG_WAVELAN is not set
1024CONFIG_WLAN_80211=y 1172CONFIG_WLAN_80211=y
1025# CONFIG_IPW2100 is not set 1173CONFIG_IPW2100=m
1026# CONFIG_IPW2200 is not set 1174# CONFIG_IPW2100_MONITOR is not set
1175# CONFIG_IPW2100_DEBUG is not set
1176CONFIG_IPW2200=m
1177# CONFIG_IPW2200_MONITOR is not set
1178CONFIG_IPW2200_QOS=y
1179# CONFIG_IPW2200_DEBUG is not set
1180# CONFIG_LIBIPW_DEBUG is not set
1027# CONFIG_LIBERTAS is not set 1181# CONFIG_LIBERTAS is not set
1182# CONFIG_LIBERTAS_THINFIRM is not set
1183# CONFIG_LIBERTAS_USB is not set
1184# CONFIG_LIBERTAS_DEBUG is not set
1028# CONFIG_AIRO is not set 1185# CONFIG_AIRO is not set
1029# CONFIG_HERMES is not set 1186# CONFIG_HERMES is not set
1030# CONFIG_ATMEL is not set 1187# CONFIG_ATMEL is not set
@@ -1035,49 +1192,62 @@ CONFIG_RTL8180=m
1035CONFIG_RTL8187=m 1192CONFIG_RTL8187=m
1036# CONFIG_ADM8211 is not set 1193# CONFIG_ADM8211 is not set
1037# CONFIG_MAC80211_HWSIM is not set 1194# CONFIG_MAC80211_HWSIM is not set
1038# CONFIG_P54_COMMON is not set 1195CONFIG_P54_COMMON=m
1039CONFIG_ATH5K=m 1196CONFIG_P54_USB=m
1040# CONFIG_ATH5K_DEBUG is not set 1197CONFIG_P54_PCI=m
1041# CONFIG_ATH9K is not set 1198# CONFIG_ATH5K is not set
1199CONFIG_ATH9K=m
1200# CONFIG_ATH9K_DEBUG is not set
1042CONFIG_IWLWIFI=m 1201CONFIG_IWLWIFI=m
1043CONFIG_IWLCORE=m 1202CONFIG_IWLCORE=m
1044# CONFIG_IWLWIFI_LEDS is not set 1203# CONFIG_IWLWIFI_LEDS is not set
1045CONFIG_IWLWIFI_RFKILL=y 1204CONFIG_IWLWIFI_RFKILL=y
1046# CONFIG_IWLWIFI_DEBUG is not set 1205# CONFIG_IWLWIFI_DEBUG is not set
1047# CONFIG_IWLAGN is not set 1206CONFIG_IWLAGN=m
1207# CONFIG_IWLAGN_SPECTRUM_MEASUREMENT is not set
1208# CONFIG_IWLAGN_LEDS is not set
1209CONFIG_IWL4965=y
1210CONFIG_IWL5000=y
1048CONFIG_IWL3945=m 1211CONFIG_IWL3945=m
1049CONFIG_IWL3945_RFKILL=y 1212CONFIG_IWL3945_RFKILL=y
1050# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set 1213# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set
1051# CONFIG_IWL3945_LEDS is not set 1214# CONFIG_IWL3945_LEDS is not set
1052# CONFIG_IWL3945_DEBUG is not set 1215# CONFIG_IWL3945_DEBUG is not set
1053# CONFIG_HOSTAP is not set 1216# CONFIG_HOSTAP is not set
1054# CONFIG_B43 is not set 1217CONFIG_B43=m
1218CONFIG_B43_PCI_AUTOSELECT=y
1219CONFIG_B43_PCICORE_AUTOSELECT=y
1220CONFIG_B43_LEDS=y
1221CONFIG_B43_RFKILL=y
1222# CONFIG_B43_DEBUG is not set
1055# CONFIG_B43LEGACY is not set 1223# CONFIG_B43LEGACY is not set
1056# CONFIG_ZD1211RW is not set 1224# CONFIG_ZD1211RW is not set
1057CONFIG_RT2X00=m 1225CONFIG_RT2X00=m
1058CONFIG_RT2X00_LIB=m
1059CONFIG_RT2X00_LIB_PCI=m
1060CONFIG_RT2X00_LIB_USB=m
1061CONFIG_RT2X00_LIB_FIRMWARE=y
1062CONFIG_RT2X00_LIB_RFKILL=y
1063CONFIG_RT2X00_LIB_LEDS=y
1064CONFIG_RT2400PCI=m 1226CONFIG_RT2400PCI=m
1065CONFIG_RT2400PCI_RFKILL=y
1066CONFIG_RT2400PCI_LEDS=y
1067CONFIG_RT2500PCI=m 1227CONFIG_RT2500PCI=m
1068CONFIG_RT2500PCI_RFKILL=y
1069CONFIG_RT2500PCI_LEDS=y
1070CONFIG_RT61PCI=m 1228CONFIG_RT61PCI=m
1071CONFIG_RT61PCI_RFKILL=y
1072CONFIG_RT61PCI_LEDS=y
1073CONFIG_RT2500USB=m 1229CONFIG_RT2500USB=m
1074CONFIG_RT2500USB_LEDS=y
1075CONFIG_RT73USB=m 1230CONFIG_RT73USB=m
1076CONFIG_RT73USB_LEDS=y 1231CONFIG_RT2X00_LIB_PCI=m
1077CONFIG_RT2X00_LIB_DEBUGFS=y 1232CONFIG_RT2X00_LIB_USB=m
1233CONFIG_RT2X00_LIB=m
1234CONFIG_RT2X00_LIB_FIRMWARE=y
1235CONFIG_RT2X00_LIB_CRYPTO=y
1236CONFIG_RT2X00_LIB_RFKILL=y
1237CONFIG_RT2X00_LIB_LEDS=y
1238# CONFIG_RT2X00_LIB_DEBUGFS is not set
1078# CONFIG_RT2X00_DEBUG is not set 1239# CONFIG_RT2X00_DEBUG is not set
1079 1240
1080# 1241#
1242# WiMAX Wireless Broadband devices
1243#
1244CONFIG_WIMAX_I2400M_USB=m
1245CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
1246# CONFIG_WIMAX_I2400M_SDIO is not set
1247#
1248#
1249
1250#
1081# USB Network Adapters 1251# USB Network Adapters
1082# 1252#
1083CONFIG_USB_CATC=m 1253CONFIG_USB_CATC=m
@@ -1088,6 +1258,7 @@ CONFIG_USB_USBNET=m
1088CONFIG_USB_NET_AX8817X=m 1258CONFIG_USB_NET_AX8817X=m
1089CONFIG_USB_NET_CDCETHER=m 1259CONFIG_USB_NET_CDCETHER=m
1090CONFIG_USB_NET_DM9601=m 1260CONFIG_USB_NET_DM9601=m
1261CONFIG_USB_NET_SMSC95XX=m
1091CONFIG_USB_NET_GL620A=m 1262CONFIG_USB_NET_GL620A=m
1092CONFIG_USB_NET_NET1080=m 1263CONFIG_USB_NET_NET1080=m
1093CONFIG_USB_NET_PLUSB=m 1264CONFIG_USB_NET_PLUSB=m
@@ -1101,7 +1272,7 @@ CONFIG_USB_ARMLINUX=y
1101CONFIG_USB_EPSON2888=y 1272CONFIG_USB_EPSON2888=y
1102CONFIG_USB_KC2190=y 1273CONFIG_USB_KC2190=y
1103CONFIG_USB_NET_ZAURUS=m 1274CONFIG_USB_NET_ZAURUS=m
1104# CONFIG_USB_HSO is not set 1275CONFIG_USB_HSO=m
1105# CONFIG_WAN is not set 1276# CONFIG_WAN is not set
1106# CONFIG_FDDI is not set 1277# CONFIG_FDDI is not set
1107# CONFIG_HIPPI is not set 1278# CONFIG_HIPPI is not set
@@ -1111,18 +1282,16 @@ CONFIG_PPP_FILTER=y
1111CONFIG_PPP_ASYNC=m 1282CONFIG_PPP_ASYNC=m
1112CONFIG_PPP_SYNC_TTY=m 1283CONFIG_PPP_SYNC_TTY=m
1113CONFIG_PPP_DEFLATE=m 1284CONFIG_PPP_DEFLATE=m
1114# CONFIG_PPP_BSDCOMP is not set 1285CONFIG_PPP_BSDCOMP=m
1115CONFIG_PPP_MPPE=m 1286CONFIG_PPP_MPPE=m
1116CONFIG_PPPOE=m 1287CONFIG_PPPOE=m
1117CONFIG_PPPOL2TP=m 1288CONFIG_PPPOL2TP=m
1118# CONFIG_SLIP is not set 1289# CONFIG_SLIP is not set
1119CONFIG_SLHC=m 1290CONFIG_SLHC=m
1120CONFIG_NET_FC=y 1291# CONFIG_NET_FC is not set
1121CONFIG_NETCONSOLE=m 1292# CONFIG_NETCONSOLE is not set
1122CONFIG_NETCONSOLE_DYNAMIC=y 1293# CONFIG_NETPOLL is not set
1123CONFIG_NETPOLL=y 1294# CONFIG_NET_POLL_CONTROLLER is not set
1124CONFIG_NETPOLL_TRAP=y
1125CONFIG_NET_POLL_CONTROLLER=y
1126# CONFIG_ISDN is not set 1295# CONFIG_ISDN is not set
1127# CONFIG_PHONE is not set 1296# CONFIG_PHONE is not set
1128 1297
@@ -1161,10 +1330,14 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
1161CONFIG_MOUSE_PS2_SYNAPTICS=y 1330CONFIG_MOUSE_PS2_SYNAPTICS=y
1162CONFIG_MOUSE_PS2_LIFEBOOK=y 1331CONFIG_MOUSE_PS2_LIFEBOOK=y
1163CONFIG_MOUSE_PS2_TRACKPOINT=y 1332CONFIG_MOUSE_PS2_TRACKPOINT=y
1164# CONFIG_MOUSE_PS2_TOUCHKIT is not set 1333# CONFIG_MOUSE_PS2_ELANTECH is not set
1334CONFIG_MOUSE_PS2_TOUCHKIT=y
1165CONFIG_MOUSE_SERIAL=m 1335CONFIG_MOUSE_SERIAL=m
1166# CONFIG_MOUSE_APPLETOUCH is not set 1336# CONFIG_MOUSE_APPLETOUCH is not set
1167# CONFIG_MOUSE_BCM5974 is not set 1337# CONFIG_MOUSE_BCM5974 is not set
1338# CONFIG_MOUSE_INPORT is not set
1339# CONFIG_MOUSE_LOGIBM is not set
1340# CONFIG_MOUSE_PC110PAD is not set
1168CONFIG_MOUSE_VSXXXAA=m 1341CONFIG_MOUSE_VSXXXAA=m
1169CONFIG_INPUT_JOYSTICK=y 1342CONFIG_INPUT_JOYSTICK=y
1170# CONFIG_JOYSTICK_ANALOG is not set 1343# CONFIG_JOYSTICK_ANALOG is not set
@@ -1193,14 +1366,18 @@ CONFIG_INPUT_TOUCHSCREEN=y
1193CONFIG_TOUCHSCREEN_FUJITSU=m 1366CONFIG_TOUCHSCREEN_FUJITSU=m
1194CONFIG_TOUCHSCREEN_GUNZE=m 1367CONFIG_TOUCHSCREEN_GUNZE=m
1195CONFIG_TOUCHSCREEN_ELO=m 1368CONFIG_TOUCHSCREEN_ELO=m
1369# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
1196CONFIG_TOUCHSCREEN_MTOUCH=m 1370CONFIG_TOUCHSCREEN_MTOUCH=m
1197CONFIG_TOUCHSCREEN_INEXIO=m 1371CONFIG_TOUCHSCREEN_INEXIO=m
1198CONFIG_TOUCHSCREEN_MK712=m 1372CONFIG_TOUCHSCREEN_MK712=m
1373CONFIG_TOUCHSCREEN_HTCPEN=m
1199CONFIG_TOUCHSCREEN_PENMOUNT=m 1374CONFIG_TOUCHSCREEN_PENMOUNT=m
1200CONFIG_TOUCHSCREEN_TOUCHRIGHT=m 1375CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
1201CONFIG_TOUCHSCREEN_TOUCHWIN=m 1376CONFIG_TOUCHSCREEN_TOUCHWIN=m
1202CONFIG_TOUCHSCREEN_UCB1400=m 1377CONFIG_TOUCHSCREEN_WM97XX=m
1203# CONFIG_TOUCHSCREEN_WM97XX is not set 1378CONFIG_TOUCHSCREEN_WM9705=y
1379CONFIG_TOUCHSCREEN_WM9712=y
1380CONFIG_TOUCHSCREEN_WM9713=y
1204CONFIG_TOUCHSCREEN_USB_COMPOSITE=m 1381CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
1205CONFIG_TOUCHSCREEN_USB_EGALAX=y 1382CONFIG_TOUCHSCREEN_USB_EGALAX=y
1206CONFIG_TOUCHSCREEN_USB_PANJIT=y 1383CONFIG_TOUCHSCREEN_USB_PANJIT=y
@@ -1214,17 +1391,19 @@ CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
1214CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y 1391CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
1215CONFIG_TOUCHSCREEN_USB_GOTOP=y 1392CONFIG_TOUCHSCREEN_USB_GOTOP=y
1216CONFIG_TOUCHSCREEN_TOUCHIT213=m 1393CONFIG_TOUCHSCREEN_TOUCHIT213=m
1394# CONFIG_TOUCHSCREEN_TSC2007 is not set
1217CONFIG_INPUT_MISC=y 1395CONFIG_INPUT_MISC=y
1218# CONFIG_INPUT_PCSPKR is not set 1396# CONFIG_INPUT_PCSPKR is not set
1219# CONFIG_INPUT_APANEL is not set 1397# CONFIG_INPUT_APANEL is not set
1220# CONFIG_INPUT_WISTRON_BTNS is not set 1398CONFIG_INPUT_WISTRON_BTNS=m
1221CONFIG_INPUT_ATLAS_BTNS=m 1399# CONFIG_INPUT_ATLAS_BTNS is not set
1222CONFIG_INPUT_ATI_REMOTE=m 1400# CONFIG_INPUT_ATI_REMOTE is not set
1223CONFIG_INPUT_ATI_REMOTE2=m 1401# CONFIG_INPUT_ATI_REMOTE2 is not set
1224CONFIG_INPUT_KEYSPAN_REMOTE=m 1402CONFIG_INPUT_KEYSPAN_REMOTE=m
1225CONFIG_INPUT_POWERMATE=m 1403CONFIG_INPUT_POWERMATE=m
1226CONFIG_INPUT_YEALINK=m 1404CONFIG_INPUT_YEALINK=m
1227CONFIG_INPUT_UINPUT=m 1405# CONFIG_INPUT_CM109 is not set
1406# CONFIG_INPUT_UINPUT is not set
1228 1407
1229# 1408#
1230# Hardware I/O ports 1409# Hardware I/O ports
@@ -1253,18 +1432,31 @@ CONFIG_VT_HW_CONSOLE_BINDING=y
1253# 1432#
1254# Serial drivers 1433# Serial drivers
1255# 1434#
1256# CONFIG_SERIAL_8250 is not set 1435CONFIG_SERIAL_8250=y
1436# CONFIG_SERIAL_8250_CONSOLE is not set
1257CONFIG_FIX_EARLYCON_MEM=y 1437CONFIG_FIX_EARLYCON_MEM=y
1438CONFIG_SERIAL_8250_PCI=y
1439CONFIG_SERIAL_8250_PNP=y
1440CONFIG_SERIAL_8250_NR_UARTS=4
1441CONFIG_SERIAL_8250_RUNTIME_UARTS=4
1442# CONFIG_SERIAL_8250_EXTENDED is not set
1258 1443
1259# 1444#
1260# Non-8250 serial port support 1445# Non-8250 serial port support
1261# 1446#
1447CONFIG_SERIAL_CORE=y
1262# CONFIG_SERIAL_JSM is not set 1448# CONFIG_SERIAL_JSM is not set
1263CONFIG_UNIX98_PTYS=y 1449CONFIG_UNIX98_PTYS=y
1450# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1264# CONFIG_LEGACY_PTYS is not set 1451# CONFIG_LEGACY_PTYS is not set
1265# CONFIG_IPMI_HANDLER is not set 1452# CONFIG_IPMI_HANDLER is not set
1266# CONFIG_HW_RANDOM is not set 1453CONFIG_HW_RANDOM=m
1267# CONFIG_NVRAM is not set 1454# CONFIG_HW_RANDOM_INTEL is not set
1455# CONFIG_HW_RANDOM_AMD is not set
1456# CONFIG_HW_RANDOM_GEODE is not set
1457# CONFIG_HW_RANDOM_VIA is not set
1458CONFIG_NVRAM=m
1459# CONFIG_DTLK is not set
1268# CONFIG_R3964 is not set 1460# CONFIG_R3964 is not set
1269# CONFIG_APPLICOM is not set 1461# CONFIG_APPLICOM is not set
1270# CONFIG_SONYPI is not set 1462# CONFIG_SONYPI is not set
@@ -1282,8 +1474,14 @@ CONFIG_DEVPORT=y
1282CONFIG_I2C=y 1474CONFIG_I2C=y
1283CONFIG_I2C_BOARDINFO=y 1475CONFIG_I2C_BOARDINFO=y
1284# CONFIG_I2C_CHARDEV is not set 1476# CONFIG_I2C_CHARDEV is not set
1285CONFIG_I2C_HELPER_AUTO=y 1477# CONFIG_I2C_HELPER_AUTO is not set
1286CONFIG_I2C_ALGOBIT=y 1478
1479#
1480# I2C Algorithms
1481#
1482# CONFIG_I2C_ALGOBIT is not set
1483# CONFIG_I2C_ALGOPCF is not set
1484# CONFIG_I2C_ALGOPCA is not set
1287 1485
1288# 1486#
1289# I2C Hardware Bus support 1487# I2C Hardware Bus support
@@ -1328,6 +1526,7 @@ CONFIG_I2C_ALGOBIT=y
1328# 1526#
1329# Other I2C/SMBus bus drivers 1527# Other I2C/SMBus bus drivers
1330# 1528#
1529# CONFIG_I2C_PCA_ISA is not set
1331# CONFIG_I2C_PCA_PLATFORM is not set 1530# CONFIG_I2C_PCA_PLATFORM is not set
1332# CONFIG_I2C_STUB is not set 1531# CONFIG_I2C_STUB is not set
1333# CONFIG_SCx200_ACB is not set 1532# CONFIG_SCx200_ACB is not set
@@ -1338,6 +1537,8 @@ CONFIG_I2C_ALGOBIT=y
1338# CONFIG_DS1682 is not set 1537# CONFIG_DS1682 is not set
1339# CONFIG_AT24 is not set 1538# CONFIG_AT24 is not set
1340# CONFIG_SENSORS_EEPROM is not set 1539# CONFIG_SENSORS_EEPROM is not set
1540# CONFIG_EEPROM_AT24 is not set
1541# CONFIG_EEPROM_LEGACY is not set
1341# CONFIG_SENSORS_PCF8574 is not set 1542# CONFIG_SENSORS_PCF8574 is not set
1342# CONFIG_PCF8575 is not set 1543# CONFIG_PCF8575 is not set
1343# CONFIG_SENSORS_PCA9539 is not set 1544# CONFIG_SENSORS_PCA9539 is not set
@@ -1356,6 +1557,7 @@ CONFIG_POWER_SUPPLY=y
1356# CONFIG_POWER_SUPPLY_DEBUG is not set 1557# CONFIG_POWER_SUPPLY_DEBUG is not set
1357# CONFIG_PDA_POWER is not set 1558# CONFIG_PDA_POWER is not set
1358# CONFIG_BATTERY_DS2760 is not set 1559# CONFIG_BATTERY_DS2760 is not set
1560# CONFIG_BATTERY_BQ27x00 is not set
1359CONFIG_HWMON=y 1561CONFIG_HWMON=y
1360# CONFIG_HWMON_VID is not set 1562# CONFIG_HWMON_VID is not set
1361# CONFIG_SENSORS_ABITUGURU is not set 1563# CONFIG_SENSORS_ABITUGURU is not set
@@ -1368,6 +1570,7 @@ CONFIG_HWMON=y
1368# CONFIG_SENSORS_ADM1029 is not set 1570# CONFIG_SENSORS_ADM1029 is not set
1369# CONFIG_SENSORS_ADM1031 is not set 1571# CONFIG_SENSORS_ADM1031 is not set
1370# CONFIG_SENSORS_ADM9240 is not set 1572# CONFIG_SENSORS_ADM9240 is not set
1573# CONFIG_SENSORS_ADT7462 is not set
1371# CONFIG_SENSORS_ADT7470 is not set 1574# CONFIG_SENSORS_ADT7470 is not set
1372# CONFIG_SENSORS_ADT7473 is not set 1575# CONFIG_SENSORS_ADT7473 is not set
1373# CONFIG_SENSORS_K8TEMP is not set 1576# CONFIG_SENSORS_K8TEMP is not set
@@ -1396,6 +1599,7 @@ CONFIG_HWMON=y
1396# CONFIG_SENSORS_LM90 is not set 1599# CONFIG_SENSORS_LM90 is not set
1397# CONFIG_SENSORS_LM92 is not set 1600# CONFIG_SENSORS_LM92 is not set
1398# CONFIG_SENSORS_LM93 is not set 1601# CONFIG_SENSORS_LM93 is not set
1602# CONFIG_SENSORS_LTC4245 is not set
1399# CONFIG_SENSORS_MAX1619 is not set 1603# CONFIG_SENSORS_MAX1619 is not set
1400# CONFIG_SENSORS_MAX6650 is not set 1604# CONFIG_SENSORS_MAX6650 is not set
1401# CONFIG_SENSORS_PC87360 is not set 1605# CONFIG_SENSORS_PC87360 is not set
@@ -1419,17 +1623,25 @@ CONFIG_HWMON=y
1419# CONFIG_SENSORS_W83627HF is not set 1623# CONFIG_SENSORS_W83627HF is not set
1420# CONFIG_SENSORS_W83627EHF is not set 1624# CONFIG_SENSORS_W83627EHF is not set
1421# CONFIG_SENSORS_HDAPS is not set 1625# CONFIG_SENSORS_HDAPS is not set
1626# CONFIG_SENSORS_LIS3LV02D is not set
1422# CONFIG_SENSORS_APPLESMC is not set 1627# CONFIG_SENSORS_APPLESMC is not set
1423# CONFIG_HWMON_DEBUG_CHIP is not set 1628# CONFIG_HWMON_DEBUG_CHIP is not set
1424CONFIG_THERMAL=y 1629CONFIG_THERMAL=y
1425# CONFIG_THERMAL_HWMON is not set 1630CONFIG_THERMAL_HWMON=y
1426# CONFIG_WATCHDOG is not set 1631# CONFIG_WATCHDOG is not set
1632CONFIG_SSB_POSSIBLE=y
1427 1633
1428# 1634#
1429# Sonics Silicon Backplane 1635# Sonics Silicon Backplane
1430# 1636#
1431CONFIG_SSB_POSSIBLE=y 1637CONFIG_SSB=m
1432# CONFIG_SSB is not set 1638CONFIG_SSB_SPROM=y
1639CONFIG_SSB_PCIHOST_POSSIBLE=y
1640CONFIG_SSB_PCIHOST=y
1641CONFIG_SSB_B43_PCI_BRIDGE=y
1642# CONFIG_SSB_DEBUG is not set
1643CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
1644CONFIG_SSB_DRIVER_PCICORE=y
1433 1645
1434# 1646#
1435# Multifunction device drivers 1647# Multifunction device drivers
@@ -1437,7 +1649,12 @@ CONFIG_SSB_POSSIBLE=y
1437# CONFIG_MFD_CORE is not set 1649# CONFIG_MFD_CORE is not set
1438# CONFIG_MFD_SM501 is not set 1650# CONFIG_MFD_SM501 is not set
1439# CONFIG_HTC_PASIC3 is not set 1651# CONFIG_HTC_PASIC3 is not set
1652# CONFIG_TWL4030_CORE is not set
1440# CONFIG_MFD_TMIO is not set 1653# CONFIG_MFD_TMIO is not set
1654# CONFIG_PMIC_DA903X is not set
1655# CONFIG_MFD_WM8400 is not set
1656# CONFIG_MFD_WM8350_I2C is not set
1657# CONFIG_REGULATOR is not set
1441 1658
1442# 1659#
1443# Multimedia devices 1660# Multimedia devices
@@ -1450,26 +1667,29 @@ CONFIG_VIDEO_DEV=y
1450CONFIG_VIDEO_V4L2_COMMON=y 1667CONFIG_VIDEO_V4L2_COMMON=y
1451# CONFIG_VIDEO_ALLOW_V4L1 is not set 1668# CONFIG_VIDEO_ALLOW_V4L1 is not set
1452CONFIG_VIDEO_V4L1_COMPAT=y 1669CONFIG_VIDEO_V4L1_COMPAT=y
1453CONFIG_DVB_CORE=y 1670CONFIG_DVB_CORE=m
1454CONFIG_VIDEO_MEDIA=y 1671CONFIG_VIDEO_MEDIA=m
1455 1672
1456# 1673#
1457# Multimedia drivers 1674# Multimedia drivers
1458# 1675#
1459# CONFIG_MEDIA_ATTACH is not set 1676CONFIG_MEDIA_ATTACH=y
1460CONFIG_MEDIA_TUNER=y 1677CONFIG_MEDIA_TUNER=m
1461# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set 1678# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
1462CONFIG_MEDIA_TUNER_SIMPLE=y 1679CONFIG_MEDIA_TUNER_SIMPLE=m
1463CONFIG_MEDIA_TUNER_TDA8290=y 1680CONFIG_MEDIA_TUNER_TDA8290=m
1464CONFIG_MEDIA_TUNER_TDA9887=y 1681CONFIG_MEDIA_TUNER_TDA9887=m
1465CONFIG_MEDIA_TUNER_TEA5761=y 1682CONFIG_MEDIA_TUNER_TEA5761=m
1466CONFIG_MEDIA_TUNER_TEA5767=y 1683CONFIG_MEDIA_TUNER_TEA5767=m
1467CONFIG_MEDIA_TUNER_MT20XX=y 1684CONFIG_MEDIA_TUNER_MT20XX=m
1468CONFIG_MEDIA_TUNER_XC2028=y 1685CONFIG_MEDIA_TUNER_XC2028=m
1469CONFIG_MEDIA_TUNER_XC5000=y 1686CONFIG_MEDIA_TUNER_XC5000=m
1470CONFIG_VIDEO_V4L2=y 1687CONFIG_VIDEO_V4L2=y
1688CONFIG_VIDEOBUF_GEN=m
1689CONFIG_VIDEOBUF_VMALLOC=m
1471CONFIG_VIDEO_CAPTURE_DRIVERS=y 1690CONFIG_VIDEO_CAPTURE_DRIVERS=y
1472# CONFIG_VIDEO_ADV_DEBUG is not set 1691# CONFIG_VIDEO_ADV_DEBUG is not set
1692# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1473CONFIG_VIDEO_HELPER_CHIPS_AUTO=y 1693CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1474# CONFIG_VIDEO_VIVI is not set 1694# CONFIG_VIDEO_VIVI is not set
1475# CONFIG_VIDEO_BT848 is not set 1695# CONFIG_VIDEO_BT848 is not set
@@ -1481,24 +1701,49 @@ CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1481# CONFIG_VIDEO_CX88 is not set 1701# CONFIG_VIDEO_CX88 is not set
1482# CONFIG_VIDEO_CX23885 is not set 1702# CONFIG_VIDEO_CX23885 is not set
1483# CONFIG_VIDEO_AU0828 is not set 1703# CONFIG_VIDEO_AU0828 is not set
1704# CONFIG_VIDEO_IVTV is not set
1484# CONFIG_VIDEO_CX18 is not set 1705# CONFIG_VIDEO_CX18 is not set
1485# CONFIG_VIDEO_CAFE_CCIC is not set 1706# CONFIG_VIDEO_CAFE_CCIC is not set
1707# CONFIG_SOC_CAMERA is not set
1486CONFIG_V4L_USB_DRIVERS=y 1708CONFIG_V4L_USB_DRIVERS=y
1487CONFIG_USB_VIDEO_CLASS=m 1709CONFIG_USB_VIDEO_CLASS=m
1488CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y 1710CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
1489# CONFIG_USB_GSPCA is not set 1711CONFIG_USB_GSPCA=m
1712# CONFIG_USB_M5602 is not set
1713# CONFIG_USB_STV06XX is not set
1714# CONFIG_USB_GSPCA_CONEX is not set
1715# CONFIG_USB_GSPCA_ETOMS is not set
1716# CONFIG_USB_GSPCA_FINEPIX is not set
1717# CONFIG_USB_GSPCA_MARS is not set
1718# CONFIG_USB_GSPCA_OV519 is not set
1719# CONFIG_USB_GSPCA_OV534 is not set
1720# CONFIG_USB_GSPCA_PAC207 is not set
1721# CONFIG_USB_GSPCA_PAC7311 is not set
1722# CONFIG_USB_GSPCA_SONIXB is not set
1723# CONFIG_USB_GSPCA_SONIXJ is not set
1724# CONFIG_USB_GSPCA_SPCA500 is not set
1725# CONFIG_USB_GSPCA_SPCA501 is not set
1726# CONFIG_USB_GSPCA_SPCA505 is not set
1727# CONFIG_USB_GSPCA_SPCA506 is not set
1728# CONFIG_USB_GSPCA_SPCA508 is not set
1729# CONFIG_USB_GSPCA_SPCA561 is not set
1730# CONFIG_USB_GSPCA_STK014 is not set
1731# CONFIG_USB_GSPCA_SUNPLUS is not set
1732# CONFIG_USB_GSPCA_T613 is not set
1733# CONFIG_USB_GSPCA_TV8532 is not set
1734# CONFIG_USB_GSPCA_VC032X is not set
1735# CONFIG_USB_GSPCA_ZC3XX is not set
1490# CONFIG_VIDEO_PVRUSB2 is not set 1736# CONFIG_VIDEO_PVRUSB2 is not set
1491# CONFIG_VIDEO_EM28XX is not set 1737# CONFIG_VIDEO_EM28XX is not set
1492# CONFIG_VIDEO_USBVISION is not set 1738# CONFIG_VIDEO_USBVISION is not set
1493# CONFIG_USB_ET61X251 is not set 1739CONFIG_USB_ET61X251=m
1494# CONFIG_USB_SN9C102 is not set 1740CONFIG_USB_SN9C102=m
1495# CONFIG_USB_ZC0301 is not set 1741CONFIG_USB_ZC0301=m
1496# CONFIG_USB_ZR364XX is not set 1742CONFIG_USB_ZR364XX=m
1497# CONFIG_USB_STKWEBCAM is not set 1743CONFIG_USB_STKWEBCAM=m
1498# CONFIG_USB_S2255 is not set 1744CONFIG_USB_S2255=m
1499# CONFIG_SOC_CAMERA is not set
1500# CONFIG_VIDEO_SH_MOBILE_CEU is not set
1501# CONFIG_RADIO_ADAPTERS is not set 1745# CONFIG_RADIO_ADAPTERS is not set
1746# CONFIG_DVB_DYNAMIC_MINORS is not set
1502# CONFIG_DVB_CAPTURE_DRIVERS is not set 1747# CONFIG_DVB_CAPTURE_DRIVERS is not set
1503# CONFIG_DAB is not set 1748# CONFIG_DAB is not set
1504 1749
@@ -1509,7 +1754,7 @@ CONFIG_AGP=y
1509# CONFIG_AGP_ALI is not set 1754# CONFIG_AGP_ALI is not set
1510# CONFIG_AGP_ATI is not set 1755# CONFIG_AGP_ATI is not set
1511# CONFIG_AGP_AMD is not set 1756# CONFIG_AGP_AMD is not set
1512CONFIG_AGP_AMD64=y 1757# CONFIG_AGP_AMD64 is not set
1513CONFIG_AGP_INTEL=y 1758CONFIG_AGP_INTEL=y
1514# CONFIG_AGP_NVIDIA is not set 1759# CONFIG_AGP_NVIDIA is not set
1515# CONFIG_AGP_SIS is not set 1760# CONFIG_AGP_SIS is not set
@@ -1523,33 +1768,43 @@ CONFIG_DRM=y
1523CONFIG_DRM_I810=y 1768CONFIG_DRM_I810=y
1524# CONFIG_DRM_I830 is not set 1769# CONFIG_DRM_I830 is not set
1525CONFIG_DRM_I915=y 1770CONFIG_DRM_I915=y
1771# CONFIG_DRM_I915_KMS is not set
1526# CONFIG_DRM_MGA is not set 1772# CONFIG_DRM_MGA is not set
1527# CONFIG_DRM_SIS is not set 1773# CONFIG_DRM_SIS is not set
1528# CONFIG_DRM_VIA is not set 1774# CONFIG_DRM_VIA is not set
1529# CONFIG_DRM_SAVAGE is not set 1775# CONFIG_DRM_SAVAGE is not set
1530# CONFIG_VGASTATE is not set 1776# CONFIG_VGASTATE is not set
1777CONFIG_DRM_PSB=m
1531CONFIG_VIDEO_OUTPUT_CONTROL=y 1778CONFIG_VIDEO_OUTPUT_CONTROL=y
1532CONFIG_FB=y 1779CONFIG_FB=y
1533CONFIG_FIRMWARE_EDID=y 1780# CONFIG_FIRMWARE_EDID is not set
1534CONFIG_FB_DDC=y 1781# CONFIG_FB_TRIDENT_ACCEL is not set
1535CONFIG_FB_CFB_FILLRECT=y 1782# CONFIG_FB_ARK is not set
1536CONFIG_FB_CFB_COPYAREA=y 1783# CONFIG_FB_PM3 is not set
1537CONFIG_FB_CFB_IMAGEBLIT=y 1784# CONFIG_FB_CARMINE is not set
1538# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set 1785# CONFIG_FB_GEODE is not set
1539# CONFIG_FB_SYS_FILLRECT is not set 1786# CONFIG_FB_VIRTUAL is not set
1540# CONFIG_FB_SYS_COPYAREA is not set 1787# CONFIG_FB_METRONOME is not set
1541# CONFIG_FB_SYS_IMAGEBLIT is not set 1788# CONFIG_FB_MB862XX is not set
1542# CONFIG_FB_FOREIGN_ENDIAN is not set 1789
1543# CONFIG_FB_SYS_FOPS is not set 1790
1544# CONFIG_FB_SVGALIB is not set 1791CONFIG_BACKLIGHT_LCD_SUPPORT=y
1545# CONFIG_FB_MACMODES is not set 1792# CONFIG_LCD_CLASS_DEVICE is not set
1546# CONFIG_FB_BACKLIGHT is not set 1793CONFIG_BACKLIGHT_CLASS_DEVICE=y
1547CONFIG_FB_MODE_HELPERS=y 1794CONFIG_BACKLIGHT_GENERIC=y
1548# CONFIG_FB_TILEBLITTING is not set 1795# CONFIG_BACKLIGHT_CORGI is not set
1796# CONFIG_BACKLIGHT_PROGEAR is not set
1797# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1798# CONFIG_BACKLIGHT_SAHARA is not set
1799
1549 1800
1550# 1801#
1551# Frame buffer hardware drivers 1802# Frame buffer hardware drivers
1552# 1803#
1804# CONFIG_FB_TILEBLITTING is not set
1805# CONFIG_FB_FOREIGN_ENDIAN is not set
1806# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
1807# CONFIG_FB_3DFX_ACCEL is not set
1553# CONFIG_FB_CIRRUS is not set 1808# CONFIG_FB_CIRRUS is not set
1554# CONFIG_FB_PM2 is not set 1809# CONFIG_FB_PM2 is not set
1555# CONFIG_FB_CYBER2000 is not set 1810# CONFIG_FB_CYBER2000 is not set
@@ -1557,7 +1812,6 @@ CONFIG_FB_MODE_HELPERS=y
1557# CONFIG_FB_ASILIANT is not set 1812# CONFIG_FB_ASILIANT is not set
1558# CONFIG_FB_IMSTT is not set 1813# CONFIG_FB_IMSTT is not set
1559# CONFIG_FB_VGA16 is not set 1814# CONFIG_FB_VGA16 is not set
1560# CONFIG_FB_UVESA is not set
1561# CONFIG_FB_VESA is not set 1815# CONFIG_FB_VESA is not set
1562# CONFIG_FB_EFI is not set 1816# CONFIG_FB_EFI is not set
1563# CONFIG_FB_N411 is not set 1817# CONFIG_FB_N411 is not set
@@ -1565,18 +1819,26 @@ CONFIG_FB_MODE_HELPERS=y
1565# CONFIG_FB_S1D13XXX is not set 1819# CONFIG_FB_S1D13XXX is not set
1566# CONFIG_FB_NVIDIA is not set 1820# CONFIG_FB_NVIDIA is not set
1567# CONFIG_FB_RIVA is not set 1821# CONFIG_FB_RIVA is not set
1568# CONFIG_FB_I810 is not set 1822CONFIG_FB_I810=m
1823# CONFIG_FB_I810_GTF is not set
1569# CONFIG_FB_LE80578 is not set 1824# CONFIG_FB_LE80578 is not set
1570CONFIG_FB_INTEL=y 1825# CONFIG_FB_CARILLO_RANCH is not set
1571CONFIG_FB_INTEL_DEBUG=y 1826# CONFIG_FB_INTEL is not set
1572CONFIG_FB_INTEL_I2C=y 1827# CONFIG_FB_INTEL_DEBUG is not set
1828# CONFIG_FB_INTEL_I2C is not set
1573# CONFIG_FB_MATROX is not set 1829# CONFIG_FB_MATROX is not set
1574# CONFIG_FB_RADEON is not set 1830# CONFIG_FB_RADEON is not set
1831CONFIG_FB_RADEON_I2C=y
1832# CONFIG_FB_RADEON_BACKLIGHT is not set
1833# CONFIG_FB_RADEON_DEBUG is not set
1575# CONFIG_FB_ATY128 is not set 1834# CONFIG_FB_ATY128 is not set
1576# CONFIG_FB_ATY is not set 1835# CONFIG_FB_ATY is not set
1577# CONFIG_FB_S3 is not set 1836# CONFIG_FB_S3 is not set
1578# CONFIG_FB_SAVAGE is not set 1837# CONFIG_FB_SAVAGE is not set
1579# CONFIG_FB_SIS is not set 1838# CONFIG_FB_SIS is not set
1839# CONFIG_FB_SIS_300 is not set
1840# CONFIG_FB_SIS_315 is not set
1841# CONFIG_FB_VIA is not set
1580# CONFIG_FB_NEOMAGIC is not set 1842# CONFIG_FB_NEOMAGIC is not set
1581# CONFIG_FB_KYRO is not set 1843# CONFIG_FB_KYRO is not set
1582# CONFIG_FB_3DFX is not set 1844# CONFIG_FB_3DFX is not set
@@ -1589,23 +1851,13 @@ CONFIG_FB_INTEL_I2C=y
1589# CONFIG_FB_CARMINE is not set 1851# CONFIG_FB_CARMINE is not set
1590# CONFIG_FB_GEODE is not set 1852# CONFIG_FB_GEODE is not set
1591# CONFIG_FB_VIRTUAL is not set 1853# CONFIG_FB_VIRTUAL is not set
1592CONFIG_BACKLIGHT_LCD_SUPPORT=y 1854# CONFIG_FB_METRONOME is not set
1593CONFIG_LCD_CLASS_DEVICE=y 1855# CONFIG_FB_MB862XX is not set
1594# CONFIG_LCD_ILI9320 is not set
1595CONFIG_LCD_PLATFORM=y
1596CONFIG_BACKLIGHT_CLASS_DEVICE=y
1597# CONFIG_BACKLIGHT_CORGI is not set
1598# CONFIG_BACKLIGHT_PROGEAR is not set
1599CONFIG_BACKLIGHT_MBP_NVIDIA=y
1600 1856
1601# 1857#
1602# Display device support 1858# Display device support
1603# 1859#
1604CONFIG_DISPLAY_SUPPORT=y 1860# CONFIG_DISPLAY_SUPPORT is not set
1605
1606#
1607# Display hardware drivers
1608#
1609 1861
1610# 1862#
1611# Console display driver support 1863# Console display driver support
@@ -1613,16 +1865,13 @@ CONFIG_DISPLAY_SUPPORT=y
1613CONFIG_VGA_CONSOLE=y 1865CONFIG_VGA_CONSOLE=y
1614CONFIG_VGACON_SOFT_SCROLLBACK=y 1866CONFIG_VGACON_SOFT_SCROLLBACK=y
1615CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 1867CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
1616CONFIG_VIDEO_SELECT=y 1868# CONFIG_MDA_CONSOLE is not set
1617CONFIG_DUMMY_CONSOLE=y 1869CONFIG_DUMMY_CONSOLE=y
1618CONFIG_FRAMEBUFFER_CONSOLE=y 1870# CONFIG_FRAMEBUFFER_CONSOLE is not set
1619# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
1620# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
1621# CONFIG_FONTS is not set
1622CONFIG_FONT_8x8=y
1623CONFIG_FONT_8x16=y 1871CONFIG_FONT_8x16=y
1624# CONFIG_LOGO is not set 1872# CONFIG_LOGO is not set
1625CONFIG_SOUND=y 1873CONFIG_SOUND=y
1874# CONFIG_SOUND_OSS_CORE is not set
1626CONFIG_SND=y 1875CONFIG_SND=y
1627CONFIG_SND_TIMER=y 1876CONFIG_SND_TIMER=y
1628CONFIG_SND_PCM=y 1877CONFIG_SND_PCM=y
@@ -1630,9 +1879,11 @@ CONFIG_SND_HWDEP=y
1630CONFIG_SND_RAWMIDI=m 1879CONFIG_SND_RAWMIDI=m
1631CONFIG_SND_SEQUENCER=y 1880CONFIG_SND_SEQUENCER=y
1632CONFIG_SND_SEQ_DUMMY=y 1881CONFIG_SND_SEQ_DUMMY=y
1882# CONFIG_SND_OSSEMUL is not set
1633# CONFIG_SND_MIXER_OSS is not set 1883# CONFIG_SND_MIXER_OSS is not set
1634# CONFIG_SND_PCM_OSS is not set 1884# CONFIG_SND_PCM_OSS is not set
1635# CONFIG_SND_SEQUENCER_OSS is not set 1885# CONFIG_SND_SEQUENCER_OSS is not set
1886# CONFIG_SND_HRTIMER is not set
1636CONFIG_SND_DYNAMIC_MINORS=y 1887CONFIG_SND_DYNAMIC_MINORS=y
1637# CONFIG_SND_SUPPORT_OLD_API is not set 1888# CONFIG_SND_SUPPORT_OLD_API is not set
1638CONFIG_SND_VERBOSE_PROCFS=y 1889CONFIG_SND_VERBOSE_PROCFS=y
@@ -1643,7 +1894,6 @@ CONFIG_SND_PCM_XRUN_DEBUG=y
1643CONFIG_SND_VMASTER=y 1894CONFIG_SND_VMASTER=y
1644CONFIG_SND_AC97_CODEC=y 1895CONFIG_SND_AC97_CODEC=y
1645CONFIG_SND_DRIVERS=y 1896CONFIG_SND_DRIVERS=y
1646# CONFIG_SND_PCSP is not set
1647# CONFIG_SND_DUMMY is not set 1897# CONFIG_SND_DUMMY is not set
1648# CONFIG_SND_VIRMIDI is not set 1898# CONFIG_SND_VIRMIDI is not set
1649# CONFIG_SND_MTPAV is not set 1899# CONFIG_SND_MTPAV is not set
@@ -1651,6 +1901,7 @@ CONFIG_SND_DRIVERS=y
1651# CONFIG_SND_MPU401 is not set 1901# CONFIG_SND_MPU401 is not set
1652CONFIG_SND_AC97_POWER_SAVE=y 1902CONFIG_SND_AC97_POWER_SAVE=y
1653CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 1903CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
1904# CONFIG_SND_ISA is not set
1654CONFIG_SND_PCI=y 1905CONFIG_SND_PCI=y
1655# CONFIG_SND_AD1889 is not set 1906# CONFIG_SND_AD1889 is not set
1656# CONFIG_SND_ALS300 is not set 1907# CONFIG_SND_ALS300 is not set
@@ -1692,17 +1943,21 @@ CONFIG_SND_PCI=y
1692# CONFIG_SND_FM801 is not set 1943# CONFIG_SND_FM801 is not set
1693CONFIG_SND_HDA_INTEL=y 1944CONFIG_SND_HDA_INTEL=y
1694CONFIG_SND_HDA_HWDEP=y 1945CONFIG_SND_HDA_HWDEP=y
1946# CONFIG_SND_HDA_RECONFIG is not set
1947# CONFIG_SND_HDA_INPUT_BEEP is not set
1695CONFIG_SND_HDA_CODEC_REALTEK=y 1948CONFIG_SND_HDA_CODEC_REALTEK=y
1696CONFIG_SND_HDA_CODEC_ANALOG=y 1949CONFIG_SND_HDA_CODEC_ANALOG=y
1697CONFIG_SND_HDA_CODEC_SIGMATEL=y 1950CONFIG_SND_HDA_CODEC_SIGMATEL=y
1698CONFIG_SND_HDA_CODEC_VIA=y 1951CONFIG_SND_HDA_CODEC_VIA=y
1699CONFIG_SND_HDA_CODEC_ATIHDMI=y 1952CONFIG_SND_HDA_CODEC_ATIHDMI=y
1953CONFIG_SND_HDA_CODEC_NVHDMI=y
1954CONFIG_SND_HDA_CODEC_INTELHDMI=y
1700CONFIG_SND_HDA_CODEC_CONEXANT=y 1955CONFIG_SND_HDA_CODEC_CONEXANT=y
1701CONFIG_SND_HDA_CODEC_CMEDIA=y 1956CONFIG_SND_HDA_CODEC_CMEDIA=y
1702CONFIG_SND_HDA_CODEC_SI3054=y 1957CONFIG_SND_HDA_CODEC_SI3054=y
1703CONFIG_SND_HDA_GENERIC=y 1958CONFIG_SND_HDA_GENERIC=y
1704CONFIG_SND_HDA_POWER_SAVE=y 1959CONFIG_SND_HDA_POWER_SAVE=y
1705CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 1960CONFIG_SND_HDA_POWER_SAVE_DEFAULT=5
1706# CONFIG_SND_HDSP is not set 1961# CONFIG_SND_HDSP is not set
1707# CONFIG_SND_HDSPM is not set 1962# CONFIG_SND_HDSPM is not set
1708# CONFIG_SND_HIFIER is not set 1963# CONFIG_SND_HIFIER is not set
@@ -1732,6 +1987,7 @@ CONFIG_SND_USB_AUDIO=m
1732CONFIG_SND_USB_USX2Y=m 1987CONFIG_SND_USB_USX2Y=m
1733CONFIG_SND_USB_CAIAQ=m 1988CONFIG_SND_USB_CAIAQ=m
1734CONFIG_SND_USB_CAIAQ_INPUT=y 1989CONFIG_SND_USB_CAIAQ_INPUT=y
1990# CONFIG_SND_USB_US122L is not set
1735# CONFIG_SND_SOC is not set 1991# CONFIG_SND_SOC is not set
1736# CONFIG_SOUND_PRIME is not set 1992# CONFIG_SOUND_PRIME is not set
1737CONFIG_AC97_BUS=y 1993CONFIG_AC97_BUS=y
@@ -1744,15 +2000,37 @@ CONFIG_HIDRAW=y
1744# USB Input Devices 2000# USB Input Devices
1745# 2001#
1746CONFIG_USB_HID=y 2002CONFIG_USB_HID=y
1747CONFIG_USB_HIDINPUT_POWERBOOK=y
1748CONFIG_HID_FF=y
1749CONFIG_HID_PID=y 2003CONFIG_HID_PID=y
1750CONFIG_LOGITECH_FF=y
1751# CONFIG_LOGIRUMBLEPAD2_FF is not set
1752CONFIG_PANTHERLORD_FF=y
1753CONFIG_THRUSTMASTER_FF=y
1754CONFIG_ZEROPLUS_FF=y
1755CONFIG_USB_HIDDEV=y 2004CONFIG_USB_HIDDEV=y
2005
2006#
2007# Special HID drivers
2008#
2009CONFIG_HID_COMPAT=y
2010CONFIG_HID_A4TECH=y
2011CONFIG_HID_APPLE=y
2012CONFIG_HID_BELKIN=y
2013CONFIG_HID_BRIGHT=y
2014CONFIG_HID_CHERRY=y
2015CONFIG_HID_CHICONY=y
2016CONFIG_HID_CYPRESS=y
2017CONFIG_HID_DELL=y
2018CONFIG_HID_EZKEY=y
2019CONFIG_HID_GYRATION=y
2020CONFIG_HID_LOGITECH=y
2021# CONFIG_LOGITECH_FF is not set
2022# CONFIG_LOGIRUMBLEPAD2_FF is not set
2023CONFIG_HID_MICROSOFT=y
2024CONFIG_HID_MONTEREY=y
2025CONFIG_HID_PANTHERLORD=y
2026# CONFIG_PANTHERLORD_FF is not set
2027CONFIG_HID_PETALYNX=y
2028CONFIG_HID_SAMSUNG=y
2029CONFIG_HID_SONY=y
2030CONFIG_HID_SUNPLUS=y
2031# CONFIG_GREENASIA_FF is not set
2032# CONFIG_THRUSTMASTER_FF is not set
2033# CONFIG_ZEROPLUS_FF is not set
1756CONFIG_USB_SUPPORT=y 2034CONFIG_USB_SUPPORT=y
1757CONFIG_USB_ARCH_HAS_HCD=y 2035CONFIG_USB_ARCH_HAS_HCD=y
1758CONFIG_USB_ARCH_HAS_OHCI=y 2036CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1770,6 +2048,8 @@ CONFIG_USB_DEVICEFS=y
1770CONFIG_USB_SUSPEND=y 2048CONFIG_USB_SUSPEND=y
1771# CONFIG_USB_OTG is not set 2049# CONFIG_USB_OTG is not set
1772CONFIG_USB_MON=y 2050CONFIG_USB_MON=y
2051CONFIG_USB_WUSB=m
2052# CONFIG_USB_WUSB_CBAF is not set
1773 2053
1774# 2054#
1775# USB Host Controller Drivers 2055# USB Host Controller Drivers
@@ -1778,30 +2058,34 @@ CONFIG_USB_MON=y
1778CONFIG_USB_EHCI_HCD=y 2058CONFIG_USB_EHCI_HCD=y
1779CONFIG_USB_EHCI_ROOT_HUB_TT=y 2059CONFIG_USB_EHCI_ROOT_HUB_TT=y
1780CONFIG_USB_EHCI_TT_NEWSCHED=y 2060CONFIG_USB_EHCI_TT_NEWSCHED=y
1781# CONFIG_USB_ISP116X_HCD is not set 2061# CONFIG_USB_OXU210HP_HCD is not set
2062CONFIG_USB_ISP116X_HCD=m
1782# CONFIG_USB_ISP1760_HCD is not set 2063# CONFIG_USB_ISP1760_HCD is not set
1783CONFIG_USB_OHCI_HCD=m 2064CONFIG_USB_OHCI_HCD=y
1784# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 2065# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
1785# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set 2066# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
1786CONFIG_USB_OHCI_LITTLE_ENDIAN=y 2067CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1787CONFIG_USB_UHCI_HCD=m 2068CONFIG_USB_UHCI_HCD=y
1788CONFIG_USB_U132_HCD=m 2069CONFIG_USB_U132_HCD=m
1789CONFIG_USB_SL811_HCD=m 2070CONFIG_USB_SL811_HCD=m
1790# CONFIG_USB_R8A66597_HCD is not set 2071# CONFIG_USB_R8A66597_HCD is not set
2072CONFIG_USB_WHCI_HCD=m
2073CONFIG_USB_HWA_HCD=m
1791 2074
1792# 2075#
1793# USB Device Class drivers 2076# USB Device Class drivers
1794# 2077#
1795CONFIG_USB_ACM=m 2078CONFIG_USB_ACM=m
1796CONFIG_USB_PRINTER=m 2079CONFIG_USB_PRINTER=m
1797# CONFIG_USB_WDM is not set 2080CONFIG_USB_WDM=m
2081# CONFIG_USB_TMC is not set
1798 2082
1799# 2083#
1800# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 2084# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
1801# 2085#
1802 2086
1803# 2087#
1804# may also be needed; see USB_STORAGE Help for more information 2088# see USB_STORAGE Help for more information
1805# 2089#
1806CONFIG_USB_STORAGE=y 2090CONFIG_USB_STORAGE=y
1807# CONFIG_USB_STORAGE_DEBUG is not set 2091# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1817,7 +2101,7 @@ CONFIG_USB_STORAGE_ALAUDA=y
1817# CONFIG_USB_STORAGE_ONETOUCH is not set 2101# CONFIG_USB_STORAGE_ONETOUCH is not set
1818CONFIG_USB_STORAGE_KARMA=y 2102CONFIG_USB_STORAGE_KARMA=y
1819# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set 2103# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
1820# CONFIG_USB_LIBUSUAL is not set 2104CONFIG_USB_LIBUSUAL=y
1821 2105
1822# 2106#
1823# USB Imaging devices 2107# USB Imaging devices
@@ -1877,12 +2161,14 @@ CONFIG_USB_SERIAL_OTI6858=m
1877CONFIG_USB_SERIAL_HP4X=m 2161CONFIG_USB_SERIAL_HP4X=m
1878CONFIG_USB_SERIAL_SAFE=m 2162CONFIG_USB_SERIAL_SAFE=m
1879CONFIG_USB_SERIAL_SAFE_PADDED=y 2163CONFIG_USB_SERIAL_SAFE_PADDED=y
2164# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
1880CONFIG_USB_SERIAL_SIERRAWIRELESS=m 2165CONFIG_USB_SERIAL_SIERRAWIRELESS=m
1881CONFIG_USB_SERIAL_TI=m 2166CONFIG_USB_SERIAL_TI=m
1882CONFIG_USB_SERIAL_CYBERJACK=m 2167CONFIG_USB_SERIAL_CYBERJACK=m
1883CONFIG_USB_SERIAL_XIRCOM=m 2168CONFIG_USB_SERIAL_XIRCOM=m
1884CONFIG_USB_SERIAL_OPTION=m 2169CONFIG_USB_SERIAL_OPTION=m
1885CONFIG_USB_SERIAL_OMNINET=m 2170CONFIG_USB_SERIAL_OMNINET=m
2171# CONFIG_USB_SERIAL_OPTICON is not set
1886CONFIG_USB_SERIAL_DEBUG=m 2172CONFIG_USB_SERIAL_DEBUG=m
1887 2173
1888# 2174#
@@ -1891,6 +2177,7 @@ CONFIG_USB_SERIAL_DEBUG=m
1891CONFIG_USB_EMI62=m 2177CONFIG_USB_EMI62=m
1892CONFIG_USB_EMI26=m 2178CONFIG_USB_EMI26=m
1893CONFIG_USB_ADUTUX=m 2179CONFIG_USB_ADUTUX=m
2180# CONFIG_USB_SEVSEG is not set
1894# CONFIG_USB_RIO500 is not set 2181# CONFIG_USB_RIO500 is not set
1895CONFIG_USB_LEGOTOWER=m 2182CONFIG_USB_LEGOTOWER=m
1896CONFIG_USB_LCD=m 2183CONFIG_USB_LCD=m
@@ -1912,47 +2199,45 @@ CONFIG_USB_TRANCEVIBRATOR=m
1912CONFIG_USB_IOWARRIOR=m 2199CONFIG_USB_IOWARRIOR=m
1913# CONFIG_USB_TEST is not set 2200# CONFIG_USB_TEST is not set
1914# CONFIG_USB_ISIGHTFW is not set 2201# CONFIG_USB_ISIGHTFW is not set
2202# CONFIG_USB_VST is not set
1915# CONFIG_USB_GADGET is not set 2203# CONFIG_USB_GADGET is not set
1916CONFIG_MMC=m 2204CONFIG_UWB=m
2205CONFIG_UWB_HWA=m
2206CONFIG_UWB_WHCI=m
2207# CONFIG_UWB_WLP is not set
2208# CONFIG_UWB_I1480U is not set
2209CONFIG_MMC=y
1917# CONFIG_MMC_DEBUG is not set 2210# CONFIG_MMC_DEBUG is not set
1918# CONFIG_MMC_UNSAFE_RESUME is not set 2211# CONFIG_MMC_UNSAFE_RESUME is not set
1919 2212
1920# 2213#
1921# MMC/SD Card Drivers 2214# MMC/SD/SDIO Card Drivers
1922# 2215#
1923CONFIG_MMC_BLOCK=m 2216CONFIG_MMC_BLOCK=y
1924CONFIG_MMC_BLOCK_BOUNCE=y 2217CONFIG_MMC_BLOCK_BOUNCE=y
1925CONFIG_SDIO_UART=m 2218CONFIG_SDIO_UART=m
1926# CONFIG_MMC_TEST is not set 2219# CONFIG_MMC_TEST is not set
1927 2220
1928# 2221#
1929# MMC/SD Host Controller Drivers 2222# MMC/SD/SDIO Host Controller Drivers
1930# 2223#
1931CONFIG_MMC_SDHCI=m 2224CONFIG_MMC_SDHCI=y
1932# CONFIG_MMC_SDHCI_PCI is not set 2225CONFIG_MMC_SDHCI_PCI=y
2226# CONFIG_MMC_RICOH_MMC is not set
1933CONFIG_MMC_WBSD=m 2227CONFIG_MMC_WBSD=m
1934CONFIG_MMC_TIFM_SD=m 2228CONFIG_MMC_TIFM_SD=m
1935CONFIG_MEMSTICK=m 2229# CONFIG_MEMSTICK is not set
1936CONFIG_MEMSTICK_DEBUG=y
1937
1938#
1939# MemoryStick drivers
1940#
1941# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
1942CONFIG_MSPRO_BLOCK=m
1943
1944#
1945# MemoryStick Host Controller Drivers
1946#
1947# CONFIG_MEMSTICK_TIFM_MS is not set
1948# CONFIG_MEMSTICK_JMICRON_38X is not set
1949CONFIG_NEW_LEDS=y 2230CONFIG_NEW_LEDS=y
1950CONFIG_LEDS_CLASS=m 2231CONFIG_LEDS_CLASS=y
2232# CONFIG_MMC_CEATA_WR is not set
2233# CONFIG_MMC_SPI is not set
1951 2234
1952# 2235#
1953# LED drivers 2236# LED drivers
1954# 2237#
2238# CONFIG_LEDS_ALIX2 is not set
1955# CONFIG_LEDS_PCA9532 is not set 2239# CONFIG_LEDS_PCA9532 is not set
2240# CONFIG_LEDS_HP_DISK is not set
1956# CONFIG_LEDS_CLEVO_MAIL is not set 2241# CONFIG_LEDS_CLEVO_MAIL is not set
1957# CONFIG_LEDS_PCA955X is not set 2242# CONFIG_LEDS_PCA955X is not set
1958 2243
@@ -1962,6 +2247,7 @@ CONFIG_LEDS_CLASS=m
1962CONFIG_LEDS_TRIGGERS=y 2247CONFIG_LEDS_TRIGGERS=y
1963# CONFIG_LEDS_TRIGGER_TIMER is not set 2248# CONFIG_LEDS_TRIGGER_TIMER is not set
1964# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set 2249# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
2250# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1965# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set 2251# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1966# CONFIG_ACCESSIBILITY is not set 2252# CONFIG_ACCESSIBILITY is not set
1967# CONFIG_INFINIBAND is not set 2253# CONFIG_INFINIBAND is not set
@@ -1995,6 +2281,7 @@ CONFIG_RTC_INTF_DEV=y
1995# CONFIG_RTC_DRV_M41T80 is not set 2281# CONFIG_RTC_DRV_M41T80 is not set
1996# CONFIG_RTC_DRV_S35390A is not set 2282# CONFIG_RTC_DRV_S35390A is not set
1997# CONFIG_RTC_DRV_FM3130 is not set 2283# CONFIG_RTC_DRV_FM3130 is not set
2284# CONFIG_RTC_DRV_RX8581 is not set
1998 2285
1999# 2286#
2000# SPI RTC drivers 2287# SPI RTC drivers
@@ -2004,56 +2291,96 @@ CONFIG_RTC_INTF_DEV=y
2004# Platform RTC drivers 2291# Platform RTC drivers
2005# 2292#
2006CONFIG_RTC_DRV_CMOS=y 2293CONFIG_RTC_DRV_CMOS=y
2294# CONFIG_RTC_DRV_DS1286 is not set
2007# CONFIG_RTC_DRV_DS1511 is not set 2295# CONFIG_RTC_DRV_DS1511 is not set
2008# CONFIG_RTC_DRV_DS1553 is not set 2296# CONFIG_RTC_DRV_DS1553 is not set
2009# CONFIG_RTC_DRV_DS1742 is not set 2297# CONFIG_RTC_DRV_DS1742 is not set
2010# CONFIG_RTC_DRV_STK17TA8 is not set 2298# CONFIG_RTC_DRV_STK17TA8 is not set
2011# CONFIG_RTC_DRV_M48T86 is not set 2299# CONFIG_RTC_DRV_M48T86 is not set
2300# CONFIG_RTC_DRV_M48T35 is not set
2012# CONFIG_RTC_DRV_M48T59 is not set 2301# CONFIG_RTC_DRV_M48T59 is not set
2302# CONFIG_RTC_DRV_BQ4802 is not set
2013# CONFIG_RTC_DRV_V3020 is not set 2303# CONFIG_RTC_DRV_V3020 is not set
2014 2304
2015# 2305#
2016# on-CPU RTC drivers 2306# on-CPU RTC drivers
2017# 2307#
2018# CONFIG_DMADEVICES is not set
2019# CONFIG_UIO is not set 2308# CONFIG_UIO is not set
2309CONFIG_STAGING=y
2310# CONFIG_STAGING_EXCLUDE_BUILD is not set
2311# CONFIG_ET131X is not set
2312# CONFIG_SLICOSS is not set
2313# CONFIG_SXG is not set
2314# CONFIG_ME4000 is not set
2315# CONFIG_MEILHAUS is not set
2316# CONFIG_VIDEO_GO7007 is not set
2317CONFIG_USB_IP_COMMON=m
2318CONFIG_USB_IP_VHCI_HCD=m
2319CONFIG_USB_IP_HOST=m
2320# CONFIG_W35UND is not set
2321CONFIG_PRISM2_USB=m
2322# CONFIG_ECHO is not set
2323CONFIG_RT2860=m
2324CONFIG_RT2870=m
2325# CONFIG_BENET is not set
2326# CONFIG_COMEDI is not set
2327# CONFIG_ASUS_OLED is not set
2328# CONFIG_USB_ATMEL is not set
2329# CONFIG_AGNX is not set
2330# CONFIG_OTUS is not set
2331# CONFIG_ALTERA_PCIE_CHDMA is not set
2332# CONFIG_RTL8187SE is not set
2333# CONFIG_INPUT_MIMIO is not set
2334# CONFIG_TRANZPORT is not set
2335# CONFIG_EPL is not set
2336
2337#
2338# Android
2339#
2340# CONFIG_ANDROID is not set
2341# CONFIG_ANDROID_BINDER_IPC is not set
2342# CONFIG_ANDROID_LOGGER is not set
2343# CONFIG_ANDROID_RAM_CONSOLE is not set
2344# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set
2345CONFIG_X86_PLATFORM_DEVICES=y
2020 2346
2021# 2347#
2022# Firmware Drivers 2348# Firmware Drivers
2023# 2349#
2024# CONFIG_EDD is not set 2350# CONFIG_EDD is not set
2025CONFIG_FIRMWARE_MEMMAP=y 2351CONFIG_FIRMWARE_MEMMAP=y
2352CONFIG_EFI_VARS=m
2026# CONFIG_DELL_RBU is not set 2353# CONFIG_DELL_RBU is not set
2027# CONFIG_DCDBAS is not set 2354# CONFIG_DCDBAS is not set
2028# CONFIG_DMIID is not set 2355CONFIG_DMIID=y
2029# CONFIG_ISCSI_IBFT_FIND is not set 2356# CONFIG_ISCSI_IBFT_FIND is not set
2030 2357
2031# 2358#
2032# File systems 2359# File systems
2033# 2360#
2034# CONFIG_EXT2_FS is not set 2361CONFIG_EXT2_FS=y
2362# CONFIG_EXT2_FS_XATTR is not set
2363# CONFIG_EXT2_FS_XIP is not set
2035CONFIG_EXT3_FS=y 2364CONFIG_EXT3_FS=y
2036CONFIG_EXT3_FS_XATTR=y 2365CONFIG_EXT3_FS_XATTR=y
2037CONFIG_EXT3_FS_POSIX_ACL=y 2366CONFIG_EXT3_FS_POSIX_ACL=y
2038CONFIG_EXT3_FS_SECURITY=y 2367CONFIG_EXT3_FS_SECURITY=y
2039# CONFIG_EXT4DEV_FS is not set 2368# CONFIG_EXT4_FS is not set
2040CONFIG_JBD=y 2369CONFIG_JBD=y
2041# CONFIG_JBD_DEBUG is not set 2370# CONFIG_JBD_DEBUG is not set
2042CONFIG_FS_MBCACHE=y 2371CONFIG_FS_MBCACHE=y
2043# CONFIG_REISERFS_FS is not set 2372# CONFIG_REISERFS_FS is not set
2044# CONFIG_JFS_FS is not set 2373# CONFIG_JFS_FS is not set
2045CONFIG_FS_POSIX_ACL=y 2374CONFIG_FS_POSIX_ACL=y
2375CONFIG_FILE_LOCKING=y
2046# CONFIG_XFS_FS is not set 2376# CONFIG_XFS_FS is not set
2377# CONFIG_GFS2_FS is not set
2047# CONFIG_OCFS2_FS is not set 2378# CONFIG_OCFS2_FS is not set
2379# CONFIG_BTRFS_FS is not set
2048CONFIG_DNOTIFY=y 2380CONFIG_DNOTIFY=y
2049CONFIG_INOTIFY=y 2381CONFIG_INOTIFY=y
2050CONFIG_INOTIFY_USER=y 2382CONFIG_INOTIFY_USER=y
2051CONFIG_QUOTA=y 2383# CONFIG_QUOTA is not set
2052CONFIG_QUOTA_NETLINK_INTERFACE=y
2053# CONFIG_PRINT_QUOTA_WARNING is not set
2054# CONFIG_QFMT_V1 is not set
2055CONFIG_QFMT_V2=y
2056CONFIG_QUOTACTL=y
2057# CONFIG_AUTOFS_FS is not set 2384# CONFIG_AUTOFS_FS is not set
2058# CONFIG_AUTOFS4_FS is not set 2385# CONFIG_AUTOFS4_FS is not set
2059CONFIG_FUSE_FS=m 2386CONFIG_FUSE_FS=m
@@ -2082,28 +2409,31 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
2082# Pseudo filesystems 2409# Pseudo filesystems
2083# 2410#
2084CONFIG_PROC_FS=y 2411CONFIG_PROC_FS=y
2085CONFIG_PROC_KCORE=y 2412# CONFIG_PROC_KCORE is not set
2086CONFIG_PROC_VMCORE=y
2087CONFIG_PROC_SYSCTL=y 2413CONFIG_PROC_SYSCTL=y
2414CONFIG_PROC_PAGE_MONITOR=y
2088CONFIG_SYSFS=y 2415CONFIG_SYSFS=y
2089CONFIG_TMPFS=y 2416CONFIG_TMPFS=y
2090CONFIG_TMPFS_POSIX_ACL=y 2417CONFIG_TMPFS_POSIX_ACL=y
2091CONFIG_HUGETLBFS=y 2418# CONFIG_HUGETLBFS is not set
2092CONFIG_HUGETLB_PAGE=y 2419# CONFIG_HUGETLB_PAGE is not set
2093CONFIG_CONFIGFS_FS=m 2420CONFIG_CONFIGFS_FS=m
2094 2421
2095# 2422#
2096# Miscellaneous filesystems 2423# Miscellaneous filesystems
2097# 2424#
2425CONFIG_MISC_FILESYSTEMS=y
2098# CONFIG_ADFS_FS is not set 2426# CONFIG_ADFS_FS is not set
2099# CONFIG_AFFS_FS is not set 2427# CONFIG_AFFS_FS is not set
2100# CONFIG_ECRYPT_FS is not set
2101# CONFIG_HFS_FS is not set 2428# CONFIG_HFS_FS is not set
2102# CONFIG_HFSPLUS_FS is not set 2429# CONFIG_HFSPLUS_FS is not set
2103# CONFIG_BEFS_FS is not set 2430# CONFIG_BEFS_FS is not set
2104# CONFIG_BFS_FS is not set 2431# CONFIG_BFS_FS is not set
2105# CONFIG_EFS_FS is not set 2432# CONFIG_EFS_FS is not set
2106# CONFIG_CRAMFS is not set 2433# CONFIG_CRAMFS is not set
2434CONFIG_SQUASHFS=y
2435# CONFIG_SQUASHFS_EMBEDDED is not set
2436CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
2107# CONFIG_VXFS_FS is not set 2437# CONFIG_VXFS_FS is not set
2108# CONFIG_MINIX_FS is not set 2438# CONFIG_MINIX_FS is not set
2109# CONFIG_OMFS_FS is not set 2439# CONFIG_OMFS_FS is not set
@@ -2116,7 +2446,12 @@ CONFIG_NETWORK_FILESYSTEMS=y
2116# CONFIG_NFS_FS is not set 2446# CONFIG_NFS_FS is not set
2117# CONFIG_NFSD is not set 2447# CONFIG_NFSD is not set
2118# CONFIG_SMB_FS is not set 2448# CONFIG_SMB_FS is not set
2119# CONFIG_CIFS is not set 2449CONFIG_CIFS=m
2450# CONFIG_CIFS_STATS is not set
2451CONFIG_CIFS_WEAK_PW_HASH=y
2452# CONFIG_CIFS_XATTR is not set
2453# CONFIG_CIFS_DEBUG2 is not set
2454# CONFIG_CIFS_EXPERIMENTAL is not set
2120# CONFIG_NCP_FS is not set 2455# CONFIG_NCP_FS is not set
2121# CONFIG_CODA_FS is not set 2456# CONFIG_CODA_FS is not set
2122# CONFIG_AFS_FS is not set 2457# CONFIG_AFS_FS is not set
@@ -2126,20 +2461,21 @@ CONFIG_NETWORK_FILESYSTEMS=y
2126# 2461#
2127CONFIG_PARTITION_ADVANCED=y 2462CONFIG_PARTITION_ADVANCED=y
2128# CONFIG_ACORN_PARTITION is not set 2463# CONFIG_ACORN_PARTITION is not set
2129CONFIG_OSF_PARTITION=y 2464# CONFIG_OSF_PARTITION is not set
2130CONFIG_AMIGA_PARTITION=y 2465# CONFIG_AMIGA_PARTITION is not set
2131# CONFIG_ATARI_PARTITION is not set 2466# CONFIG_ATARI_PARTITION is not set
2132CONFIG_MAC_PARTITION=y 2467# CONFIG_MAC_PARTITION is not set
2133CONFIG_MSDOS_PARTITION=y 2468CONFIG_MSDOS_PARTITION=y
2134CONFIG_BSD_DISKLABEL=y 2469CONFIG_BSD_DISKLABEL=y
2135CONFIG_MINIX_SUBPARTITION=y 2470# CONFIG_MINIX_SUBPARTITION is not set
2136CONFIG_SOLARIS_X86_PARTITION=y 2471# CONFIG_SOLARIS_X86_PARTITION is not set
2137CONFIG_UNIXWARE_DISKLABEL=y 2472# CONFIG_UNIXWARE_DISKLABEL is not set
2138# CONFIG_LDM_PARTITION is not set 2473CONFIG_LDM_PARTITION=y
2139CONFIG_SGI_PARTITION=y 2474# CONFIG_LDM_DEBUG is not set
2475# CONFIG_SGI_PARTITION is not set
2140# CONFIG_ULTRIX_PARTITION is not set 2476# CONFIG_ULTRIX_PARTITION is not set
2141CONFIG_SUN_PARTITION=y 2477# CONFIG_SUN_PARTITION is not set
2142CONFIG_KARMA_PARTITION=y 2478# CONFIG_KARMA_PARTITION is not set
2143CONFIG_EFI_PARTITION=y 2479CONFIG_EFI_PARTITION=y
2144# CONFIG_SYSV68_PARTITION is not set 2480# CONFIG_SYSV68_PARTITION is not set
2145CONFIG_NLS=y 2481CONFIG_NLS=y
@@ -2193,7 +2529,7 @@ CONFIG_PRINTK_TIME=y
2193CONFIG_ENABLE_MUST_CHECK=y 2529CONFIG_ENABLE_MUST_CHECK=y
2194CONFIG_FRAME_WARN=1024 2530CONFIG_FRAME_WARN=1024
2195CONFIG_MAGIC_SYSRQ=y 2531CONFIG_MAGIC_SYSRQ=y
2196CONFIG_UNUSED_SYMBOLS=y 2532# CONFIG_UNUSED_SYMBOLS is not set
2197CONFIG_DEBUG_FS=y 2533CONFIG_DEBUG_FS=y
2198# CONFIG_HEADERS_CHECK is not set 2534# CONFIG_HEADERS_CHECK is not set
2199CONFIG_DEBUG_KERNEL=y 2535CONFIG_DEBUG_KERNEL=y
@@ -2201,58 +2537,75 @@ CONFIG_DEBUG_SHIRQ=y
2201CONFIG_DETECT_SOFTLOCKUP=y 2537CONFIG_DETECT_SOFTLOCKUP=y
2202# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 2538# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2203CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 2539CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2204CONFIG_SCHED_DEBUG=y 2540# CONFIG_SCHED_DEBUG is not set
2205CONFIG_SCHEDSTATS=y 2541CONFIG_SCHEDSTATS=y
2206CONFIG_TIMER_STATS=y 2542CONFIG_TIMER_STATS=y
2207# CONFIG_DEBUG_OBJECTS is not set 2543# CONFIG_DEBUG_OBJECTS is not set
2208# CONFIG_DEBUG_SLAB is not set 2544# CONFIG_DEBUG_SLAB is not set
2209# CONFIG_DEBUG_RT_MUTEXES is not set 2545# CONFIG_DEBUG_RT_MUTEXES is not set
2210# CONFIG_RT_MUTEX_TESTER is not set 2546# CONFIG_RT_MUTEX_TESTER is not set
2211# CONFIG_DEBUG_SPINLOCK is not set 2547CONFIG_DEBUG_SPINLOCK=y
2212# CONFIG_DEBUG_MUTEXES is not set 2548# CONFIG_DEBUG_MUTEXES is not set
2213# CONFIG_DEBUG_LOCK_ALLOC is not set 2549# CONFIG_DEBUG_LOCK_ALLOC is not set
2214# CONFIG_PROVE_LOCKING is not set 2550# CONFIG_PROVE_LOCKING is not set
2551# CONFIG_DEBUG_LOCKDEP is not set
2215# CONFIG_LOCK_STAT is not set 2552# CONFIG_LOCK_STAT is not set
2216CONFIG_DEBUG_SPINLOCK_SLEEP=y 2553CONFIG_DEBUG_SPINLOCK_SLEEP=y
2217# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 2554# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2218CONFIG_STACKTRACE=y 2555CONFIG_STACKTRACE=y
2219# CONFIG_DEBUG_KOBJECT is not set 2556# CONFIG_DEBUG_KOBJECT is not set
2220# CONFIG_DEBUG_HIGHMEM is not set 2557CONFIG_DEBUG_HIGHMEM=y
2221CONFIG_DEBUG_BUGVERBOSE=y 2558CONFIG_DEBUG_BUGVERBOSE=y
2222# CONFIG_DEBUG_INFO is not set 2559CONFIG_DEBUG_INFO=y
2223# CONFIG_DEBUG_VM is not set 2560# CONFIG_DEBUG_VM is not set
2561# CONFIG_DEBUG_VIRTUAL is not set
2224# CONFIG_DEBUG_WRITECOUNT is not set 2562# CONFIG_DEBUG_WRITECOUNT is not set
2225CONFIG_DEBUG_MEMORY_INIT=y 2563# CONFIG_DEBUG_MEMORY_INIT is not set
2226CONFIG_DEBUG_LIST=y 2564CONFIG_DEBUG_LIST=y
2227# CONFIG_DEBUG_SG is not set 2565# CONFIG_DEBUG_SG is not set
2566CONFIG_DEBUG_NOTIFIERS=y
2228CONFIG_FRAME_POINTER=y 2567CONFIG_FRAME_POINTER=y
2229CONFIG_BOOT_PRINTK_DELAY=y 2568CONFIG_BOOT_PRINTK_DELAY=y
2230# CONFIG_RCU_TORTURE_TEST is not set 2569# CONFIG_RCU_TORTURE_TEST is not set
2570# CONFIG_RCU_CPU_STALL_DETECTOR is not set
2231# CONFIG_BACKTRACE_SELF_TEST is not set 2571# CONFIG_BACKTRACE_SELF_TEST is not set
2572# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
2232# CONFIG_FAULT_INJECTION is not set 2573# CONFIG_FAULT_INJECTION is not set
2233CONFIG_LATENCYTOP=y 2574CONFIG_LATENCYTOP=y
2234CONFIG_SYSCTL_SYSCALL_CHECK=y 2575CONFIG_SYSCTL_SYSCALL_CHECK=y
2235CONFIG_HAVE_FTRACE=y 2576CONFIG_HAVE_FUNCTION_TRACER=y
2236CONFIG_HAVE_DYNAMIC_FTRACE=y 2577CONFIG_HAVE_DYNAMIC_FTRACE=y
2237CONFIG_TRACING=y 2578CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2238# CONFIG_FTRACE is not set 2579
2580# CONFIG_X86_VISWS is not set
2581# CONFIG_FTRACE_STARTUP_TEST is not set
2582#
2583# Tracers
2584#
2585# CONFIG_FUNCTION_TRACER is not set
2239# CONFIG_IRQSOFF_TRACER is not set 2586# CONFIG_IRQSOFF_TRACER is not set
2240CONFIG_SYSPROF_TRACER=y 2587CONFIG_SYSPROF_TRACER=y
2241# CONFIG_SCHED_TRACER is not set 2588# CONFIG_SCHED_TRACER is not set
2242# CONFIG_CONTEXT_SWITCH_TRACER is not set 2589# CONFIG_CONTEXT_SWITCH_TRACER is not set
2243# CONFIG_FTRACE_STARTUP_TEST is not set 2590CONFIG_OPEN_CLOSE_TRACER=y
2591# CONFIG_BOOT_TRACER is not set
2592CONFIG_POWER_TRACER=y
2593# CONFIG_TRACE_BRANCH_PROFILING is not set
2594# CONFIG_STACK_TRACER is not set
2244# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set 2595# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
2596# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
2245# CONFIG_SAMPLES is not set 2597# CONFIG_SAMPLES is not set
2246CONFIG_HAVE_ARCH_KGDB=y 2598CONFIG_HAVE_ARCH_KGDB=y
2247# CONFIG_KGDB is not set 2599# CONFIG_KGDB is not set
2248# CONFIG_STRICT_DEVMEM is not set 2600CONFIG_STRICT_DEVMEM=y
2249CONFIG_X86_VERBOSE_BOOTUP=y 2601CONFIG_X86_VERBOSE_BOOTUP=y
2250CONFIG_EARLY_PRINTK=y 2602CONFIG_EARLY_PRINTK=y
2603# CONFIG_EARLY_PRINTK_DBGP is not set
2251# CONFIG_DEBUG_STACKOVERFLOW is not set 2604# CONFIG_DEBUG_STACKOVERFLOW is not set
2252# CONFIG_DEBUG_STACK_USAGE is not set 2605# CONFIG_DEBUG_STACK_USAGE is not set
2253# CONFIG_DEBUG_PAGEALLOC is not set 2606# CONFIG_DEBUG_PAGEALLOC is not set
2254# CONFIG_DEBUG_PER_CPU_MAPS is not set 2607# CONFIG_DEBUG_PER_CPU_MAPS is not set
2255CONFIG_X86_PTDUMP=y 2608# CONFIG_X86_PTDUMP is not set
2256CONFIG_DEBUG_RODATA=y 2609CONFIG_DEBUG_RODATA=y
2257# CONFIG_DEBUG_RODATA_TEST is not set 2610# CONFIG_DEBUG_RODATA_TEST is not set
2258# CONFIG_DEBUG_NX_TEST is not set 2611# CONFIG_DEBUG_NX_TEST is not set
@@ -2275,25 +2628,21 @@ CONFIG_DEBUG_BOOT_PARAMS=y
2275# 2628#
2276# Security options 2629# Security options
2277# 2630#
2278CONFIG_KEYS=y 2631# CONFIG_KEYS is not set
2279CONFIG_KEYS_DEBUG_PROC_KEYS=y 2632# CONFIG_SECURITY is not set
2280CONFIG_SECURITY=y 2633# CONFIG_SECURITYFS is not set
2281CONFIG_SECURITY_NETWORK=y 2634# CONFIG_SECURITY_FILE_CAPABILITIES is not set
2282CONFIG_SECURITY_NETWORK_XFRM=y
2283CONFIG_SECURITY_FILE_CAPABILITIES=y
2284# CONFIG_SECURITY_ROOTPLUG is not set
2285CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
2286# CONFIG_SECURITY_SELINUX is not set
2287# CONFIG_SECURITY_SMACK is not set
2288CONFIG_CRYPTO=y 2635CONFIG_CRYPTO=y
2289 2636
2290# 2637#
2291# Crypto core or helper 2638# Crypto core or helper
2292# 2639#
2640# CONFIG_CRYPTO_FIPS is not set
2293CONFIG_CRYPTO_ALGAPI=y 2641CONFIG_CRYPTO_ALGAPI=y
2294CONFIG_CRYPTO_AEAD=m 2642CONFIG_CRYPTO_AEAD=y
2295CONFIG_CRYPTO_BLKCIPHER=m 2643CONFIG_CRYPTO_BLKCIPHER=y
2296CONFIG_CRYPTO_HASH=y 2644CONFIG_CRYPTO_HASH=y
2645CONFIG_CRYPTO_RNG=y
2297CONFIG_CRYPTO_MANAGER=y 2646CONFIG_CRYPTO_MANAGER=y
2298CONFIG_CRYPTO_GF128MUL=m 2647CONFIG_CRYPTO_GF128MUL=m
2299CONFIG_CRYPTO_NULL=m 2648CONFIG_CRYPTO_NULL=m
@@ -2314,7 +2663,7 @@ CONFIG_CRYPTO_SEQIV=m
2314CONFIG_CRYPTO_CBC=m 2663CONFIG_CRYPTO_CBC=m
2315CONFIG_CRYPTO_CTR=m 2664CONFIG_CRYPTO_CTR=m
2316# CONFIG_CRYPTO_CTS is not set 2665# CONFIG_CRYPTO_CTS is not set
2317CONFIG_CRYPTO_ECB=m 2666CONFIG_CRYPTO_ECB=y
2318CONFIG_CRYPTO_LRW=m 2667CONFIG_CRYPTO_LRW=m
2319CONFIG_CRYPTO_PCBC=m 2668CONFIG_CRYPTO_PCBC=m
2320CONFIG_CRYPTO_XTS=m 2669CONFIG_CRYPTO_XTS=m
@@ -2329,6 +2678,7 @@ CONFIG_CRYPTO_XCBC=m
2329# Digest 2678# Digest
2330# 2679#
2331CONFIG_CRYPTO_CRC32C=m 2680CONFIG_CRYPTO_CRC32C=m
2681# CONFIG_CRYPTO_CRC32C_INTEL is not set
2332CONFIG_CRYPTO_MD4=m 2682CONFIG_CRYPTO_MD4=m
2333CONFIG_CRYPTO_MD5=y 2683CONFIG_CRYPTO_MD5=y
2334CONFIG_CRYPTO_MICHAEL_MIC=m 2684CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -2345,10 +2695,10 @@ CONFIG_CRYPTO_WP512=m
2345# 2695#
2346# Ciphers 2696# Ciphers
2347# 2697#
2348CONFIG_CRYPTO_AES=m 2698CONFIG_CRYPTO_AES=y
2349# CONFIG_CRYPTO_AES_586 is not set 2699CONFIG_CRYPTO_AES_586=m
2350CONFIG_CRYPTO_ANUBIS=m 2700CONFIG_CRYPTO_ANUBIS=m
2351CONFIG_CRYPTO_ARC4=m 2701CONFIG_CRYPTO_ARC4=y
2352CONFIG_CRYPTO_BLOWFISH=m 2702CONFIG_CRYPTO_BLOWFISH=m
2353CONFIG_CRYPTO_CAMELLIA=m 2703CONFIG_CRYPTO_CAMELLIA=m
2354CONFIG_CRYPTO_CAST5=m 2704CONFIG_CRYPTO_CAST5=m
@@ -2357,19 +2707,24 @@ CONFIG_CRYPTO_DES=m
2357CONFIG_CRYPTO_FCRYPT=m 2707CONFIG_CRYPTO_FCRYPT=m
2358CONFIG_CRYPTO_KHAZAD=m 2708CONFIG_CRYPTO_KHAZAD=m
2359CONFIG_CRYPTO_SALSA20=m 2709CONFIG_CRYPTO_SALSA20=m
2360# CONFIG_CRYPTO_SALSA20_586 is not set 2710CONFIG_CRYPTO_SALSA20_586=m
2361CONFIG_CRYPTO_SEED=m 2711CONFIG_CRYPTO_SEED=m
2362CONFIG_CRYPTO_SERPENT=m 2712CONFIG_CRYPTO_SERPENT=m
2363CONFIG_CRYPTO_TEA=m 2713CONFIG_CRYPTO_TEA=m
2364CONFIG_CRYPTO_TWOFISH=m 2714CONFIG_CRYPTO_TWOFISH=m
2365CONFIG_CRYPTO_TWOFISH_COMMON=m 2715CONFIG_CRYPTO_TWOFISH_COMMON=m
2366# CONFIG_CRYPTO_TWOFISH_586 is not set 2716CONFIG_CRYPTO_TWOFISH_586=m
2367 2717
2368# 2718#
2369# Compression 2719# Compression
2370# 2720#
2371CONFIG_CRYPTO_DEFLATE=m 2721CONFIG_CRYPTO_DEFLATE=m
2372# CONFIG_CRYPTO_LZO is not set 2722# CONFIG_CRYPTO_LZO is not set
2723
2724#
2725# Random Number Generation
2726#
2727# CONFIG_CRYPTO_ANSI_CPRNG is not set
2373CONFIG_CRYPTO_HW=y 2728CONFIG_CRYPTO_HW=y
2374# CONFIG_CRYPTO_DEV_PADLOCK is not set 2729# CONFIG_CRYPTO_DEV_PADLOCK is not set
2375# CONFIG_CRYPTO_DEV_GEODE is not set 2730# CONFIG_CRYPTO_DEV_GEODE is not set
@@ -2385,12 +2740,11 @@ CONFIG_GENERIC_FIND_FIRST_BIT=y
2385CONFIG_GENERIC_FIND_NEXT_BIT=y 2740CONFIG_GENERIC_FIND_NEXT_BIT=y
2386CONFIG_CRC_CCITT=m 2741CONFIG_CRC_CCITT=m
2387CONFIG_CRC16=m 2742CONFIG_CRC16=m
2388CONFIG_CRC_T10DIF=y 2743# CONFIG_CRC_T10DIF is not set
2389CONFIG_CRC_ITU_T=m 2744CONFIG_CRC_ITU_T=m
2390CONFIG_CRC32=y 2745CONFIG_CRC32=y
2391# CONFIG_CRC7 is not set 2746# CONFIG_CRC7 is not set
2392CONFIG_LIBCRC32C=m 2747CONFIG_LIBCRC32C=m
2393CONFIG_AUDIT_GENERIC=y
2394CONFIG_ZLIB_INFLATE=y 2748CONFIG_ZLIB_INFLATE=y
2395CONFIG_ZLIB_DEFLATE=m 2749CONFIG_ZLIB_DEFLATE=m
2396CONFIG_TEXTSEARCH=y 2750CONFIG_TEXTSEARCH=y
@@ -2401,3 +2755,90 @@ CONFIG_PLIST=y
2401CONFIG_HAS_IOMEM=y 2755CONFIG_HAS_IOMEM=y
2402CONFIG_HAS_IOPORT=y 2756CONFIG_HAS_IOPORT=y
2403CONFIG_HAS_DMA=y 2757CONFIG_HAS_DMA=y
2758CONFIG_CHECK_SIGNATURE=y
2759
2760
2761# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
2762# CONFIG_MFD_PCF50633 is not set
2763# CONFIG_SENSORS_ADT7475 is not set
2764# CONFIG_LIB80211_DEBUG is not set
2765# CONFIG_DNET is not set
2766# CONFIG_BE2NET is not set
2767
2768
2769
2770# CONFIG_LNW_IPC is not set
2771# CONFIG_MRST is not set
2772# CONFIG_SFI is not set
2773# CONFIG_MDIO_GPIO is not set
2774# CONFIG_KEYBOARD_GPIO is not set
2775# CONFIG_MOUSE_GPIO is not set
2776# CONFIG_I2C_GPIO is not set
2777# CONFIG_DEBUG_GPIO is not set
2778# CONFIG_GPIO_SYSFS is not set
2779# CONFIG_GPIO_LANGWELL is not set
2780# CONFIG_GPIO_MAX732X is not set
2781# CONFIG_GPIO_PCA953X is not set
2782# CONFIG_GPIO_PCF857X is not set
2783# CONFIG_GPIO_BT8XX is not set
2784# CONFIG_UCB1400_CORE is not set
2785# CONFIG_TPS65010 is not set
2786# CONFIG_USB_GPIO_VBUS is not set
2787# CONFIG_LEDS_GPIO is not set
2788# CONFIG_ANDROID_TIMED_GPIO is not set
2789# CONFIG_X86_MRST_EARLY_PRINTK is not set
2790
2791# CONFIG_APB_TIMER is not set
2792# CONFIG_MRST_SPI_UART_BOOT_MSG is not set
2793# CONFIG_SFI_DEBUG is not set
2794# CONFIG_SFI_PROCFS is not set
2795# CONFIG_TOUCHSCREEN_UCB1400 is not set
2796# CONFIG_GPIO_LNWPMIC is not set
2797# CONFIG_RTC_DRV_VRTC is not set
2798# CONFIG_MRST_NAND is not set
2799# CONFIG_USB_LANGWELL_OTG is not set
2800# CONFIG_KEYBOARD_MRST is not set
2801# CONFIG_I2C_MRST is not set
2802# CONFIG_USB_OTG_WHITELIST is not set
2803# CONFIG_USB_OTG_BLACKLIST_HUB is not set
2804# CONFIG_SND_PCM_OSS_PLUGINS is not set
2805# CONFIG_SND_INTEL_LPE is not set
2806# CONFIG_LPE_IPC_NOT_INCLUDED is not set
2807# CONFIG_SND_INTELMID is not set
2808# CONFIG_TOUCHSCREEN_INTEL_MRST is not set
2809# CONFIG_ATL1C is not set
2810# CONFIG_MRST_MMC_WR is not set
2811
2812
2813# CONFIG_VIDEO_MRSTCI is not set
2814# CONFIG_VIDEO_MRST_ISP is not set
2815# CONFIG_VIDEO_MRST_SENSOR is not set
2816# CONFIG_VIDEO_MRST_OV2650 is not set
2817# CONFIG_VIDEO_MRST_OV5630 is not set
2818# CONFIG_SENSORS_MRST_THERMAL is not set
2819# CONFIG_SPI2_MRST is not set
2820
2821# CONFIG_SFI_PM is not set
2822# CONFIG_SFI_CPUIDLE is not set
2823# CONFIG_SFI_PROCESSOR_PM is not set
2824# CONFIG_X86_SFI_CPUFREQ is not set
2825# CONFIG_MSTWN_POWER_MGMT is not set
2826# CONFIG_USB_NET_MBM is not set
2827
2828# CONFIG_USB_GADGET_LANGWELL is not set
2829# CONFIG_USB_LANGWELL is not set
2830
2831# CONFIG_INTEL_LNW_DMAC1 is not set
2832# CONFIG_INTEL_LNW_DMAC2 is not set
2833# CONFIG_LNW_DMA_DEBUG is not set
2834# CONFIG_NET_DMA is not set
2835# CONFIG_DMATEST is not set
2836# CONFIG_8688_RC is not set
2837# CONFIG_SSB_SILENT is not set
2838
2839# CONFIG_TOUCHSCREEN_TSC2003 is not set
2840# CONFIG_MFD_TIMBERDALE is not set
2841# CONFIG_MMC_SDHCI_PLTFM is not set
2842# CONFIG_SPI_XILINX is not set
2843# CONFIG_SPI_MRST is not set
2844# CONFIG_GPE is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi
new file mode 100644
index 0000000000..0f61bd77ec
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi
@@ -0,0 +1,127 @@
1CONFIG_LOCALVERSION="-ivi"
2CONFIG_INTEL_MENLOW=y
3CONFIG_DRM_PSB=y
4
5#
6# Cgroups
7#
8CONFIG_CGROUPS=y
9# CONFIG_CGROUP_DEBUG is not set
10CONFIG_CGROUP_NS=y
11CONFIG_CGROUP_FREEZER=y
12CONFIG_CGROUP_DEVICE=y
13# CONFIG_CPUSETS is not set
14# CONFIG_CGROUP_CPUACCT is not set
15# CONFIG_RESOURCE_COUNTERS is not set
16
17CONFIG_4KSTACKS=y
18CONFIG_ACER_WMI=y
19CONFIG_ARCH_WANT_FRAME_POINTERS=y
20# CONFIG_ATH5K_DEBUG is not set
21CONFIG_ATH5K=y
22CONFIG_ATL1E=y
23# CONFIG_BNX2X is not set
24CONFIG_CHELSIO_T3_DEPENDS=y
25CONFIG_COMPAT_NET_DEV_OPS=y
26CONFIG_CRYPTO_AEAD2=y
27CONFIG_CRYPTO_AEAD=m
28CONFIG_CRYPTO_ALGAPI2=y
29CONFIG_CRYPTO_BLKCIPHER2=y
30CONFIG_CRYPTO_HASH2=y
31CONFIG_CRYPTO_MANAGER2=y
32CONFIG_CRYPTO_RNG2=y
33CONFIG_CRYPTO_RNG=m
34# CONFIG_DEBUG_NOTIFIERS is not set
35# CONFIG_DEBUG_SPINLOCK is not set
36CONFIG_EEEPC_LAPTOP=y
37# CONFIG_EEPROM_AT25 is not set
38# CONFIG_ENC28J60 is not set
39# CONFIG_FB_BACKLIGHT is not set
40# CONFIG_FB_BOOT_VESA_SUPPORT is not set
41CONFIG_FB_CFB_COPYAREA=y
42CONFIG_FB_CFB_FILLRECT=y
43CONFIG_FB_CFB_IMAGEBLIT=y
44# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
45# CONFIG_FB_DDC is not set
46# CONFIG_FB_MACMODES is not set
47CONFIG_FB_MODE_HELPERS=y
48# CONFIG_FB_SVGALIB is not set
49# CONFIG_FB_SYS_COPYAREA is not set
50# CONFIG_FB_SYS_FILLRECT is not set
51# CONFIG_FB_SYS_FOPS is not set
52# CONFIG_FB_SYS_IMAGEBLIT is not set
53# CONFIG_FB_TMIO is not set
54CONFIG_GENERIC_FIND_LAST_BIT=y
55CONFIG_GENERIC_GPIO=y
56CONFIG_GPIOLIB=y
57# CONFIG_GPIO_MAX7301 is not set
58# CONFIG_GPIO_MCP23S08 is not set
59CONFIG_GPIO_SYSFS=y
60CONFIG_GPIO_TIMBERDALE=y
61CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
62CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
63CONFIG_HAVE_MMIOTRACE_SUPPORT=y
64CONFIG_HID_NTRIG=y
65CONFIG_HID_TOPSEED=y
66CONFIG_I2C_ALGOBIT=y
67CONFIG_I2C_CHARDEV=m
68CONFIG_I2C_OCORES=m
69# CONFIG_IOMMU_API is not set
70# CONFIG_KS8842 is not set
71CONFIG_LIBIPW=m
72CONFIG_MAC80211_RC_DEFAULT="minstrel"
73CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
74# CONFIG_MAC80211_RC_DEFAULT_PID is not set
75CONFIG_MAC80211_RC_MINSTREL=y
76CONFIG_MFD_CORE=y
77CONFIG_MFD_TIMBERDALE_DMA=m
78CONFIG_MFD_TIMBERDALE_I2S=m
79CONFIG_MFD_TIMBERDALE=y
80CONFIG_MMC_SDHCI_PLTFM=m
81# CONFIG_MOUSE_PS2_TOUCHKIT is not set
82# CONFIG_PREEMPT is not set
83# CONFIG_PREEMPT_RCU is not set
84# CONFIG_PREEMPT_RCU_TRACE is not set
85CONFIG_PREEMPT_VOLUNTARY=y
86CONFIG_R8169=y
87# CONFIG_RT2860 is not set
88# CONFIG_RT2870 is not set
89# CONFIG_RTC_DRV_DS1305 is not set
90# CONFIG_RTC_DRV_DS1390 is not set
91# CONFIG_RTC_DRV_DS3234 is not set
92# CONFIG_RTC_DRV_M41T94 is not set
93# CONFIG_RTC_DRV_MAX6902 is not set
94# CONFIG_RTC_DRV_R9701 is not set
95# CONFIG_RTC_DRV_RS5C348 is not set
96CONFIG_SCSI_FC_ATTRS=m
97CONFIG_SCSI_NETLINK=y
98CONFIG_SCSI_SAS_ATTRS=m
99CONFIG_SCSI_SPI_ATTRS=m
100# CONFIG_SENSORS_ADCXX is not set
101# CONFIG_SENSORS_LM70 is not set
102# CONFIG_SENSORS_MAX1111 is not set
103CONFIG_SERIAL_TIMBERDALE=m
104CONFIG_SND_HDA_ELD=y
105CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
106CONFIG_SND_JACK=y
107CONFIG_SND_SPI=y
108CONFIG_SPI_BITBANG=m
109# CONFIG_SPI_DEBUG is not set
110# CONFIG_SPI_GPIO is not set
111CONFIG_SPI_MASTER=y
112# CONFIG_SPI_SPIDEV is not set
113# CONFIG_SPI_TLE62X0 is not set
114CONFIG_SPI_XILINX=m
115CONFIG_SPI_XILINX_PLTFM=m
116CONFIG_SPI=y
117# CONFIG_TOUCHSCREEN_ADS7846 is not set
118CONFIG_TOUCHSCREEN_TSC2003=m
119CONFIG_TOUCHSCREEN_TSC2007=m
120CONFIG_TRACEPOINTS=y
121# CONFIG_TREE_RCU is not set
122# CONFIG_TREE_RCU_TRACE is not set
123CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
124CONFIG_USER_STACKTRACE_SUPPORT=y
125CONFIG_VGASTATE=m
126CONFIG_VIDEO_TIMBERDALE=m
127CONFIG_WIMAX_I2400M=m
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow
new file mode 100644
index 0000000000..3f66175e16
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow
@@ -0,0 +1,8 @@
1CONFIG_LOCALVERSION="-menlow"
2
3CONFIG_INTEL_MENLOW=y
4CONFIG_DRM_PSB=y
5
6# LIBERTAS works with Menlow sd8686
7CONFIG_LIBERTAS=m
8CONFIG_LIBERTAS_SDIO=m
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst
new file mode 100644
index 0000000000..8b067c47c8
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst
@@ -0,0 +1,2316 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc8
4# Wed Mar 25 08:57:27 2009
5#
6
7#
8# MRST DRIVERS
9#
10
11# Option GTM501L SPI 3G driver
12CONFIG_SPI_MRST_GTM501=y
13
14# Marvell 8688 WiFi and BT
15CONFIG_8688_RC=y
16
17# Ericsson MBM 3G Driver
18CONFIG_USB_NET_MBM=y
19
20# MRST Poulsbo gfx driver
21CONFIG_DRM_PSB=y
22
23# MRST NAND DRIVER
24CONFIG_MRST_NAND=y
25CONFIG_MRST_NAND_POLL=y
26# CONFIG_MRST_NAND_CDMA is not set
27# CONFIG_MRST_NAND_ESL is not set
28# CONFIG_MRST_NAND_EMU is not set
29
30# MRST SFI C and P states
31CONFIG_SFI=y
32CONFIG_SFI_CPUIDLE=y
33CONFIG_SFI_PM=y
34CONFIG_SFI_PROCESSOR_PM=y
35CONFIG_X86_SFI_CPUFREQ=y
36
37# MRST MMC
38CONFIG_MRST_MMC_WR=y
39CONFIG_MMC_CEATA_WR=n
40
41# MRST THERMAL
42CONFIG_SENSORS_MRST_THERMAL=y
43
44# MRST SPI2
45CONFIG_SPI2_MRST=y
46
47# MRST I2C
48CONFIG_I2C_MRST=y
49
50# MRST KEYPAD
51CONFIG_KEYBOARD_MRST=y
52
53# MRST RESISTIVE TOUCHSCREEN
54CONFIG_TOUCHSCREEN_INTEL_MRST=y
55
56# USB OTG CLIENT
57CONFIG_USB_GADGET_LANGWELL=y
58CONFIG_USB_LANGWELL=m
59
60# MRST CAMERA
61CONFIG_VIDEO_V4L2=y
62CONFIG_VIDEO_MRSTCI=y
63CONFIG_I2C=y
64CONFIG_VIDEO_MRST_ISP=y
65CONFIG_VIDEO_MRST_SENSOR=y
66CONFIG_VIDEO_MRST_OV2650=y
67CONFIG_VIDEO_MRST_OV5630=y
68
69# MRST AUDIO
70CONFIG_SND_INTEL_LPE=y
71CONFIG_LPE_OSPM_SUPPORT=y
72CONFIG_LPE_DBG_PRINT=y
73# CONFIG_LPE_IPC_NOT_INCLUDED is not set
74CONFIG_SND_INTELMID=y
75CONFIG_MID_DBG_PRINT=y
76
77# MRST OSPM
78CONFIG_MSTWN_POWER_MGMT=y
79
80# CONFIG_64BIT is not set
81CONFIG_X86_32=y
82# CONFIG_X86_64 is not set
83CONFIG_X86=y
84CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
85CONFIG_GENERIC_TIME=y
86CONFIG_GENERIC_CMOS_UPDATE=y
87CONFIG_CLOCKSOURCE_WATCHDOG=y
88CONFIG_GENERIC_CLOCKEVENTS=y
89CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
90CONFIG_LOCKDEP_SUPPORT=y
91CONFIG_STACKTRACE_SUPPORT=y
92CONFIG_HAVE_LATENCYTOP_SUPPORT=y
93CONFIG_FAST_CMPXCHG_LOCAL=y
94CONFIG_MMU=y
95CONFIG_ZONE_DMA=y
96CONFIG_GENERIC_ISA_DMA=y
97CONFIG_GENERIC_IOMAP=y
98CONFIG_GENERIC_BUG=y
99CONFIG_GENERIC_HWEIGHT=y
100CONFIG_GENERIC_GPIO=y
101CONFIG_ARCH_MAY_HAVE_PC_FDC=y
102# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
103CONFIG_RWSEM_XCHGADD_ALGORITHM=y
104CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
105CONFIG_GENERIC_CALIBRATE_DELAY=y
106# CONFIG_GENERIC_TIME_VSYSCALL is not set
107CONFIG_ARCH_HAS_CPU_RELAX=y
108CONFIG_ARCH_HAS_DEFAULT_IDLE=y
109CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
110CONFIG_HAVE_SETUP_PER_CPU_AREA=y
111# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
112CONFIG_ARCH_HIBERNATION_POSSIBLE=y
113CONFIG_ARCH_SUSPEND_POSSIBLE=y
114# CONFIG_ZONE_DMA32 is not set
115CONFIG_ARCH_POPULATES_NODE_MAP=y
116# CONFIG_AUDIT_ARCH is not set
117CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
118CONFIG_GENERIC_HARDIRQS=y
119CONFIG_GENERIC_IRQ_PROBE=y
120CONFIG_GENERIC_PENDING_IRQ=y
121CONFIG_X86_SMP=y
122CONFIG_USE_GENERIC_SMP_HELPERS=y
123CONFIG_X86_32_SMP=y
124CONFIG_X86_HT=y
125CONFIG_X86_BIOS_REBOOT=y
126CONFIG_X86_TRAMPOLINE=y
127CONFIG_KTIME_SCALAR=y
128CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
129
130#
131# General setup
132#
133CONFIG_EXPERIMENTAL=y
134CONFIG_LOCK_KERNEL=y
135CONFIG_INIT_ENV_ARG_LIMIT=32
136CONFIG_LOCALVERSION="-mrst"
137# CONFIG_LOCALVERSION_AUTO is not set
138CONFIG_SWAP=y
139CONFIG_SYSVIPC=y
140CONFIG_SYSVIPC_SYSCTL=y
141CONFIG_POSIX_MQUEUE=y
142CONFIG_BSD_PROCESS_ACCT=y
143# CONFIG_BSD_PROCESS_ACCT_V3 is not set
144CONFIG_TASKSTATS=y
145CONFIG_TASK_DELAY_ACCT=y
146CONFIG_TASK_XACCT=y
147CONFIG_TASK_IO_ACCOUNTING=y
148CONFIG_AUDIT=y
149CONFIG_AUDITSYSCALL=y
150CONFIG_AUDIT_TREE=y
151
152#
153# RCU Subsystem
154#
155CONFIG_CLASSIC_RCU=y
156# CONFIG_TREE_RCU is not set
157# CONFIG_PREEMPT_RCU is not set
158# CONFIG_TREE_RCU_TRACE is not set
159# CONFIG_PREEMPT_RCU_TRACE is not set
160# CONFIG_IKCONFIG is not set
161CONFIG_LOG_BUF_SHIFT=18
162CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
163CONFIG_GROUP_SCHED=y
164CONFIG_FAIR_GROUP_SCHED=y
165# CONFIG_RT_GROUP_SCHED is not set
166# CONFIG_USER_SCHED is not set
167CONFIG_CGROUP_SCHED=y
168CONFIG_CGROUPS=y
169# CONFIG_CGROUP_DEBUG is not set
170CONFIG_CGROUP_NS=y
171# CONFIG_CGROUP_FREEZER is not set
172# CONFIG_CGROUP_DEVICE is not set
173CONFIG_CPUSETS=y
174CONFIG_PROC_PID_CPUSET=y
175CONFIG_CGROUP_CPUACCT=y
176CONFIG_RESOURCE_COUNTERS=y
177# CONFIG_CGROUP_MEM_RES_CTLR is not set
178# CONFIG_SYSFS_DEPRECATED_V2 is not set
179CONFIG_RELAY=y
180# CONFIG_NAMESPACES is not set
181CONFIG_BLK_DEV_INITRD=y
182CONFIG_INITRAMFS_SOURCE=""
183CONFIG_CC_OPTIMIZE_FOR_SIZE=y
184CONFIG_SYSCTL=y
185CONFIG_ANON_INODES=y
186CONFIG_EMBEDDED=y
187CONFIG_UID16=y
188CONFIG_SYSCTL_SYSCALL=y
189CONFIG_KALLSYMS=y
190CONFIG_KALLSYMS_ALL=y
191CONFIG_KALLSYMS_EXTRA_PASS=y
192CONFIG_HOTPLUG=y
193CONFIG_PRINTK=y
194CONFIG_BUG=y
195CONFIG_ELF_CORE=y
196CONFIG_PCSPKR_PLATFORM=y
197CONFIG_BASE_FULL=y
198CONFIG_FUTEX=y
199CONFIG_EPOLL=y
200CONFIG_SIGNALFD=y
201CONFIG_TIMERFD=y
202CONFIG_EVENTFD=y
203CONFIG_SHMEM=y
204CONFIG_AIO=y
205CONFIG_VM_EVENT_COUNTERS=y
206CONFIG_PCI_QUIRKS=y
207CONFIG_SLUB_DEBUG=y
208# CONFIG_COMPAT_BRK is not set
209# CONFIG_SLAB is not set
210CONFIG_SLUB=y
211# CONFIG_SLOB is not set
212CONFIG_PROFILING=y
213CONFIG_TRACEPOINTS=y
214CONFIG_MARKERS=y
215# CONFIG_OPROFILE is not set
216CONFIG_HAVE_OPROFILE=y
217CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
218CONFIG_HAVE_IOREMAP_PROT=y
219CONFIG_HAVE_KPROBES=y
220CONFIG_HAVE_KRETPROBES=y
221CONFIG_HAVE_ARCH_TRACEHOOK=y
222CONFIG_HAVE_GENERIC_DMA_COHERENT=y
223CONFIG_SLABINFO=y
224CONFIG_RT_MUTEXES=y
225CONFIG_BASE_SMALL=0
226CONFIG_MODULES=y
227CONFIG_STOP_MACHINE=y
228CONFIG_BLOCK=y
229# CONFIG_LBD is not set
230CONFIG_BLK_DEV_IO_TRACE=y
231# CONFIG_BLK_DEV_BSG is not set
232# CONFIG_BLK_DEV_INTEGRITY is not set
233
234#
235# IO Schedulers
236#
237CONFIG_IOSCHED_NOOP=y
238CONFIG_IOSCHED_AS=y
239CONFIG_IOSCHED_DEADLINE=y
240CONFIG_IOSCHED_CFQ=y
241# CONFIG_DEFAULT_AS is not set
242# CONFIG_DEFAULT_DEADLINE is not set
243CONFIG_DEFAULT_CFQ=y
244# CONFIG_DEFAULT_NOOP is not set
245CONFIG_DEFAULT_IOSCHED="cfq"
246# CONFIG_FREEZER is not set
247
248#
249# Processor type and features
250#
251CONFIG_TICK_ONESHOT=y
252CONFIG_NO_HZ=y
253CONFIG_HIGH_RES_TIMERS=y
254CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
255CONFIG_SMP=y
256# CONFIG_SPARSE_IRQ is not set
257CONFIG_X86_FIND_SMP_CONFIG=y
258CONFIG_X86_MPPARSE=y
259CONFIG_X86_PC=y
260# CONFIG_X86_ELAN is not set
261# CONFIG_X86_VOYAGER is not set
262# CONFIG_X86_GENERICARCH is not set
263# CONFIG_X86_VSMP is not set
264# CONFIG_X86_RDC321X is not set
265CONFIG_SCHED_OMIT_FRAME_POINTER=y
266# CONFIG_PARAVIRT_GUEST is not set
267# CONFIG_MEMTEST is not set
268# CONFIG_M386 is not set
269# CONFIG_M486 is not set
270CONFIG_M586=y
271# CONFIG_M586TSC is not set
272# CONFIG_M586MMX is not set
273# CONFIG_M686 is not set
274# CONFIG_MPENTIUMII is not set
275# CONFIG_MPENTIUMIII is not set
276# CONFIG_MPENTIUMM is not set
277# CONFIG_MPENTIUM4 is not set
278# CONFIG_MK6 is not set
279# CONFIG_MK7 is not set
280# CONFIG_MK8 is not set
281# CONFIG_MCRUSOE is not set
282# CONFIG_MEFFICEON is not set
283# CONFIG_MWINCHIPC6 is not set
284# CONFIG_MWINCHIP3D is not set
285# CONFIG_MGEODEGX1 is not set
286# CONFIG_MGEODE_LX is not set
287# CONFIG_MCYRIXIII is not set
288# CONFIG_MVIAC3_2 is not set
289# CONFIG_MVIAC7 is not set
290# CONFIG_MPSC is not set
291# CONFIG_MCORE2 is not set
292# CONFIG_GENERIC_CPU is not set
293CONFIG_X86_GENERIC=y
294CONFIG_X86_CPU=y
295CONFIG_X86_CMPXCHG=y
296CONFIG_X86_L1_CACHE_SHIFT=7
297CONFIG_X86_XADD=y
298# CONFIG_X86_PPRO_FENCE is not set
299CONFIG_X86_F00F_BUG=y
300CONFIG_X86_WP_WORKS_OK=y
301CONFIG_X86_INVLPG=y
302CONFIG_X86_BSWAP=y
303CONFIG_X86_POPAD_OK=y
304CONFIG_X86_ALIGNMENT_16=y
305CONFIG_X86_INTEL_USERCOPY=y
306CONFIG_X86_MINIMUM_CPU_FAMILY=4
307# CONFIG_PROCESSOR_SELECT is not set
308CONFIG_CPU_SUP_INTEL=y
309CONFIG_CPU_SUP_CYRIX_32=y
310CONFIG_CPU_SUP_AMD=y
311CONFIG_CPU_SUP_CENTAUR_32=y
312CONFIG_CPU_SUP_TRANSMETA_32=y
313CONFIG_CPU_SUP_UMC_32=y
314# CONFIG_HPET_TIMER is not set
315CONFIG_APB_TIMER=y
316CONFIG_LNW_IPC=y
317# CONFIG_DMI is not set
318# CONFIG_IOMMU_HELPER is not set
319# CONFIG_IOMMU_API is not set
320CONFIG_NR_CPUS=64
321CONFIG_SCHED_SMT=y
322# CONFIG_SCHED_MC is not set
323# CONFIG_PREEMPT_NONE is not set
324CONFIG_PREEMPT_VOLUNTARY=y
325# CONFIG_PREEMPT is not set
326CONFIG_X86_LOCAL_APIC=y
327CONFIG_X86_IO_APIC=y
328# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
329# CONFIG_X86_MCE is not set
330# CONFIG_VM86 is not set
331# CONFIG_TOSHIBA is not set
332# CONFIG_I8K is not set
333CONFIG_X86_REBOOTFIXUPS=y
334CONFIG_MICROCODE=y
335CONFIG_MICROCODE_INTEL=y
336# CONFIG_MICROCODE_AMD is not set
337CONFIG_MICROCODE_OLD_INTERFACE=y
338CONFIG_X86_MSR=y
339CONFIG_X86_CPUID=y
340CONFIG_NOHIGHMEM=y
341# CONFIG_HIGHMEM4G is not set
342# CONFIG_HIGHMEM64G is not set
343CONFIG_VMSPLIT_3G=y
344# CONFIG_VMSPLIT_3G_OPT is not set
345# CONFIG_VMSPLIT_2G is not set
346# CONFIG_VMSPLIT_2G_OPT is not set
347# CONFIG_VMSPLIT_1G is not set
348CONFIG_PAGE_OFFSET=0xC0000000
349# CONFIG_X86_PAE is not set
350# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
351CONFIG_ARCH_FLATMEM_ENABLE=y
352CONFIG_ARCH_SPARSEMEM_ENABLE=y
353CONFIG_ARCH_SELECT_MEMORY_MODEL=y
354CONFIG_SELECT_MEMORY_MODEL=y
355CONFIG_FLATMEM_MANUAL=y
356# CONFIG_DISCONTIGMEM_MANUAL is not set
357# CONFIG_SPARSEMEM_MANUAL is not set
358CONFIG_FLATMEM=y
359CONFIG_FLAT_NODE_MEM_MAP=y
360CONFIG_SPARSEMEM_STATIC=y
361CONFIG_PAGEFLAGS_EXTENDED=y
362CONFIG_SPLIT_PTLOCK_CPUS=4
363# CONFIG_PHYS_ADDR_T_64BIT is not set
364CONFIG_ZONE_DMA_FLAG=1
365CONFIG_BOUNCE=y
366CONFIG_VIRT_TO_BUS=y
367CONFIG_UNEVICTABLE_LRU=y
368# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
369# CONFIG_X86_RESERVE_LOW_64K is not set
370# CONFIG_MATH_EMULATION is not set
371CONFIG_MTRR=y
372# CONFIG_MTRR_SANITIZER is not set
373# CONFIG_X86_PAT is not set
374# CONFIG_SECCOMP is not set
375CONFIG_HZ_100=y
376# CONFIG_HZ_250 is not set
377# CONFIG_HZ_300 is not set
378# CONFIG_HZ_1000 is not set
379CONFIG_HZ=100
380CONFIG_SCHED_HRTICK=y
381CONFIG_KEXEC=y
382CONFIG_PHYSICAL_START=0x100000
383CONFIG_RELOCATABLE=y
384CONFIG_PHYSICAL_ALIGN=0x100000
385CONFIG_HOTPLUG_CPU=y
386# CONFIG_COMPAT_VDSO is not set
387# CONFIG_CMDLINE_BOOL is not set
388CONFIG_MRST=y
389CONFIG_MRST_SPI_UART_BOOT_MSG=y
390
391#
392# Power management and ACPI options
393#
394CONFIG_PM=y
395CONFIG_PM_DEBUG=y
396CONFIG_PM_VERBOSE=y
397# CONFIG_SUSPEND is not set
398# CONFIG_HIBERNATION is not set
399# CONFIG_ACPI is not set
400CONFIG_SFI=y
401# CONFIG_SFI_DEBUG is not set
402
403#
404# CPU Frequency scaling
405#
406CONFIG_CPU_FREQ=y
407CONFIG_CPU_IDLE=n
408
409#
410# Bus options (PCI etc.)
411#
412CONFIG_PCI=y
413# CONFIG_PCI_GOBIOS is not set
414# CONFIG_PCI_GOMMCONFIG is not set
415# CONFIG_PCI_GODIRECT is not set
416# CONFIG_PCI_GOOLPC is not set
417CONFIG_PCI_GOANY=y
418CONFIG_PCI_BIOS=y
419CONFIG_PCI_DIRECT=y
420CONFIG_PCI_MMCONFIG=y
421CONFIG_PCI_DOMAINS=y
422CONFIG_PCIEPORTBUS=y
423# CONFIG_PCIEAER is not set
424# CONFIG_PCIEASPM is not set
425CONFIG_ARCH_SUPPORTS_MSI=y
426CONFIG_PCI_MSI=y
427# CONFIG_PCI_LEGACY is not set
428CONFIG_PCI_DEBUG=y
429# CONFIG_PCI_STUB is not set
430# CONFIG_HT_IRQ is not set
431CONFIG_ISA_DMA_API=y
432# CONFIG_ISA is not set
433# CONFIG_MCA is not set
434# CONFIG_SCx200 is not set
435# CONFIG_OLPC is not set
436CONFIG_K8_NB=y
437# CONFIG_PCCARD is not set
438# CONFIG_HOTPLUG_PCI is not set
439
440#
441# Executable file formats / Emulations
442#
443CONFIG_BINFMT_ELF=y
444# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
445CONFIG_HAVE_AOUT=y
446# CONFIG_BINFMT_AOUT is not set
447CONFIG_BINFMT_MISC=y
448CONFIG_HAVE_ATOMIC_IOMAP=y
449CONFIG_NET=y
450
451#
452# Networking options
453#
454CONFIG_COMPAT_NET_DEV_OPS=y
455CONFIG_PACKET=y
456CONFIG_PACKET_MMAP=y
457CONFIG_UNIX=y
458CONFIG_XFRM=y
459CONFIG_XFRM_USER=y
460# CONFIG_XFRM_SUB_POLICY is not set
461# CONFIG_XFRM_MIGRATE is not set
462# CONFIG_XFRM_STATISTICS is not set
463# CONFIG_NET_KEY is not set
464CONFIG_INET=y
465CONFIG_IP_MULTICAST=y
466CONFIG_IP_ADVANCED_ROUTER=y
467CONFIG_ASK_IP_FIB_HASH=y
468# CONFIG_IP_FIB_TRIE is not set
469CONFIG_IP_FIB_HASH=y
470CONFIG_IP_MULTIPLE_TABLES=y
471CONFIG_IP_ROUTE_MULTIPATH=y
472CONFIG_IP_ROUTE_VERBOSE=y
473CONFIG_IP_PNP=y
474CONFIG_IP_PNP_DHCP=y
475CONFIG_IP_PNP_BOOTP=y
476CONFIG_IP_PNP_RARP=y
477# CONFIG_NET_IPIP is not set
478# CONFIG_NET_IPGRE is not set
479CONFIG_IP_MROUTE=y
480CONFIG_IP_PIMSM_V1=y
481CONFIG_IP_PIMSM_V2=y
482# CONFIG_ARPD is not set
483CONFIG_SYN_COOKIES=y
484# CONFIG_INET_AH is not set
485# CONFIG_INET_ESP is not set
486# CONFIG_INET_IPCOMP is not set
487# CONFIG_INET_XFRM_TUNNEL is not set
488CONFIG_INET_TUNNEL=y
489# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
490# CONFIG_INET_XFRM_MODE_TUNNEL is not set
491# CONFIG_INET_XFRM_MODE_BEET is not set
492CONFIG_INET_LRO=y
493# CONFIG_INET_DIAG is not set
494CONFIG_TCP_CONG_ADVANCED=y
495# CONFIG_TCP_CONG_BIC is not set
496CONFIG_TCP_CONG_CUBIC=y
497# CONFIG_TCP_CONG_WESTWOOD is not set
498# CONFIG_TCP_CONG_HTCP is not set
499# CONFIG_TCP_CONG_HSTCP is not set
500# CONFIG_TCP_CONG_HYBLA is not set
501# CONFIG_TCP_CONG_VEGAS is not set
502# CONFIG_TCP_CONG_SCALABLE is not set
503# CONFIG_TCP_CONG_LP is not set
504# CONFIG_TCP_CONG_VENO is not set
505# CONFIG_TCP_CONG_YEAH is not set
506# CONFIG_TCP_CONG_ILLINOIS is not set
507# CONFIG_DEFAULT_BIC is not set
508CONFIG_DEFAULT_CUBIC=y
509# CONFIG_DEFAULT_HTCP is not set
510# CONFIG_DEFAULT_VEGAS is not set
511# CONFIG_DEFAULT_WESTWOOD is not set
512# CONFIG_DEFAULT_RENO is not set
513CONFIG_DEFAULT_TCP_CONG="cubic"
514CONFIG_TCP_MD5SIG=y
515CONFIG_IPV6=y
516# CONFIG_IPV6_PRIVACY is not set
517# CONFIG_IPV6_ROUTER_PREF is not set
518# CONFIG_IPV6_OPTIMISTIC_DAD is not set
519CONFIG_INET6_AH=y
520CONFIG_INET6_ESP=y
521# CONFIG_INET6_IPCOMP is not set
522# CONFIG_IPV6_MIP6 is not set
523# CONFIG_INET6_XFRM_TUNNEL is not set
524# CONFIG_INET6_TUNNEL is not set
525CONFIG_INET6_XFRM_MODE_TRANSPORT=y
526CONFIG_INET6_XFRM_MODE_TUNNEL=y
527CONFIG_INET6_XFRM_MODE_BEET=y
528# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
529CONFIG_IPV6_SIT=y
530CONFIG_IPV6_NDISC_NODETYPE=y
531# CONFIG_IPV6_TUNNEL is not set
532# CONFIG_IPV6_MULTIPLE_TABLES is not set
533# CONFIG_IPV6_MROUTE is not set
534CONFIG_NETLABEL=y
535CONFIG_NETWORK_SECMARK=y
536CONFIG_NETFILTER=y
537# CONFIG_NETFILTER_DEBUG is not set
538# CONFIG_NETFILTER_ADVANCED is not set
539
540#
541# Core Netfilter Configuration
542#
543CONFIG_NETFILTER_NETLINK=y
544CONFIG_NETFILTER_NETLINK_LOG=y
545CONFIG_NF_CONNTRACK=y
546CONFIG_NF_CONNTRACK_SECMARK=y
547CONFIG_NF_CONNTRACK_FTP=y
548CONFIG_NF_CONNTRACK_IRC=y
549CONFIG_NF_CONNTRACK_SIP=y
550CONFIG_NF_CT_NETLINK=y
551CONFIG_NETFILTER_XTABLES=y
552CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
553CONFIG_NETFILTER_XT_TARGET_MARK=y
554CONFIG_NETFILTER_XT_TARGET_NFLOG=y
555CONFIG_NETFILTER_XT_TARGET_SECMARK=y
556CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
557CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
558CONFIG_NETFILTER_XT_MATCH_MARK=y
559CONFIG_NETFILTER_XT_MATCH_POLICY=y
560CONFIG_NETFILTER_XT_MATCH_STATE=y
561# CONFIG_IP_VS is not set
562
563#
564# IP: Netfilter Configuration
565#
566CONFIG_NF_DEFRAG_IPV4=y
567CONFIG_NF_CONNTRACK_IPV4=y
568CONFIG_NF_CONNTRACK_PROC_COMPAT=y
569CONFIG_IP_NF_IPTABLES=y
570CONFIG_IP_NF_FILTER=y
571CONFIG_IP_NF_TARGET_REJECT=y
572CONFIG_IP_NF_TARGET_LOG=y
573CONFIG_IP_NF_TARGET_ULOG=y
574CONFIG_NF_NAT=y
575CONFIG_NF_NAT_NEEDED=y
576CONFIG_IP_NF_TARGET_MASQUERADE=y
577CONFIG_NF_NAT_FTP=y
578CONFIG_NF_NAT_IRC=y
579# CONFIG_NF_NAT_TFTP is not set
580# CONFIG_NF_NAT_AMANDA is not set
581# CONFIG_NF_NAT_PPTP is not set
582# CONFIG_NF_NAT_H323 is not set
583CONFIG_NF_NAT_SIP=y
584CONFIG_IP_NF_MANGLE=y
585
586#
587# IPv6: Netfilter Configuration
588#
589CONFIG_NF_CONNTRACK_IPV6=y
590CONFIG_IP6_NF_IPTABLES=y
591CONFIG_IP6_NF_MATCH_IPV6HEADER=y
592CONFIG_IP6_NF_TARGET_LOG=y
593CONFIG_IP6_NF_FILTER=y
594CONFIG_IP6_NF_TARGET_REJECT=y
595CONFIG_IP6_NF_MANGLE=y
596# CONFIG_IP_DCCP is not set
597# CONFIG_IP_SCTP is not set
598# CONFIG_TIPC is not set
599# CONFIG_ATM is not set
600# CONFIG_BRIDGE is not set
601# CONFIG_NET_DSA is not set
602# CONFIG_VLAN_8021Q is not set
603# CONFIG_DECNET is not set
604# CONFIG_LLC2 is not set
605# CONFIG_IPX is not set
606# CONFIG_ATALK is not set
607# CONFIG_X25 is not set
608# CONFIG_LAPB is not set
609# CONFIG_ECONET is not set
610# CONFIG_WAN_ROUTER is not set
611CONFIG_NET_SCHED=y
612
613#
614# Queueing/Scheduling
615#
616# CONFIG_NET_SCH_CBQ is not set
617# CONFIG_NET_SCH_HTB is not set
618# CONFIG_NET_SCH_HFSC is not set
619# CONFIG_NET_SCH_PRIO is not set
620# CONFIG_NET_SCH_MULTIQ is not set
621# CONFIG_NET_SCH_RED is not set
622# CONFIG_NET_SCH_SFQ is not set
623# CONFIG_NET_SCH_TEQL is not set
624# CONFIG_NET_SCH_TBF is not set
625# CONFIG_NET_SCH_GRED is not set
626# CONFIG_NET_SCH_DSMARK is not set
627# CONFIG_NET_SCH_NETEM is not set
628# CONFIG_NET_SCH_DRR is not set
629# CONFIG_NET_SCH_INGRESS is not set
630
631#
632# Classification
633#
634CONFIG_NET_CLS=y
635# CONFIG_NET_CLS_BASIC is not set
636# CONFIG_NET_CLS_TCINDEX is not set
637# CONFIG_NET_CLS_ROUTE4 is not set
638# CONFIG_NET_CLS_FW is not set
639# CONFIG_NET_CLS_U32 is not set
640# CONFIG_NET_CLS_RSVP is not set
641# CONFIG_NET_CLS_RSVP6 is not set
642# CONFIG_NET_CLS_FLOW is not set
643# CONFIG_NET_CLS_CGROUP is not set
644CONFIG_NET_EMATCH=y
645CONFIG_NET_EMATCH_STACK=32
646# CONFIG_NET_EMATCH_CMP is not set
647# CONFIG_NET_EMATCH_NBYTE is not set
648# CONFIG_NET_EMATCH_U32 is not set
649# CONFIG_NET_EMATCH_META is not set
650# CONFIG_NET_EMATCH_TEXT is not set
651CONFIG_NET_CLS_ACT=y
652# CONFIG_NET_ACT_POLICE is not set
653# CONFIG_NET_ACT_GACT is not set
654# CONFIG_NET_ACT_MIRRED is not set
655# CONFIG_NET_ACT_IPT is not set
656# CONFIG_NET_ACT_NAT is not set
657# CONFIG_NET_ACT_PEDIT is not set
658# CONFIG_NET_ACT_SIMP is not set
659# CONFIG_NET_ACT_SKBEDIT is not set
660CONFIG_NET_SCH_FIFO=y
661# CONFIG_DCB is not set
662
663#
664# Network testing
665#
666# CONFIG_NET_PKTGEN is not set
667CONFIG_HAMRADIO=y
668
669#
670# Packet Radio protocols
671#
672# CONFIG_AX25 is not set
673# CONFIG_CAN is not set
674# CONFIG_IRDA is not set
675CONFIG_BT=y
676CONFIG_BT_L2CAP=y
677CONFIG_BT_SCO=y
678CONFIG_BT_RFCOMM=y
679CONFIG_BT_RFCOMM_TTY=y
680CONFIG_BT_BNEP=y
681CONFIG_BT_BNEP_MC_FILTER=y
682CONFIG_BT_BNEP_PROTO_FILTER=y
683CONFIG_BT_HIDP=y
684
685#
686# Bluetooth device drivers
687#
688CONFIG_BT_HCIBTUSB=y
689CONFIG_BT_HCIBTSDIO=y
690CONFIG_BT_HCIUART=y
691CONFIG_BT_HCIUART_H4=y
692CONFIG_BT_HCIUART_BCSP=y
693CONFIG_BT_HCIUART_LL=y
694CONFIG_BT_HCIBCM203X=y
695CONFIG_BT_HCIBPA10X=y
696CONFIG_BT_HCIBFUSB=y
697CONFIG_BT_HCIVHCI=y
698# CONFIG_AF_RXRPC is not set
699# CONFIG_PHONET is not set
700CONFIG_FIB_RULES=y
701CONFIG_WIRELESS=y
702CONFIG_CFG80211=y
703# CONFIG_CFG80211_REG_DEBUG is not set
704CONFIG_NL80211=y
705CONFIG_WIRELESS_OLD_REGULATORY=y
706CONFIG_WIRELESS_EXT=y
707CONFIG_WIRELESS_EXT_SYSFS=y
708# CONFIG_LIB80211 is not set
709CONFIG_MAC80211=y
710
711#
712# Rate control algorithm selection
713#
714# CONFIG_MAC80211_RC_PID is not set
715CONFIG_MAC80211_RC_MINSTREL=y
716# CONFIG_MAC80211_RC_DEFAULT_PID is not set
717CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
718CONFIG_MAC80211_RC_DEFAULT="minstrel"
719# CONFIG_MAC80211_MESH is not set
720CONFIG_MAC80211_LEDS=y
721# CONFIG_MAC80211_DEBUGFS is not set
722# CONFIG_MAC80211_DEBUG_MENU is not set
723# CONFIG_WIMAX is not set
724# CONFIG_RFKILL is not set
725# CONFIG_NET_9P is not set
726
727#
728# Device Drivers
729#
730
731#
732# Generic Driver Options
733#
734CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
735CONFIG_STANDALONE=y
736CONFIG_PREVENT_FIRMWARE_BUILD=y
737CONFIG_FW_LOADER=y
738CONFIG_FIRMWARE_IN_KERNEL=y
739CONFIG_EXTRA_FIRMWARE="mrvl/sd8688.bin mrvl/helper_sd.bin"
740CONFIG_EXTRA_FIRMWARE_DIR="firmware"
741# CONFIG_DEBUG_DRIVER is not set
742CONFIG_DEBUG_DEVRES=y
743# CONFIG_SYS_HYPERVISOR is not set
744CONFIG_CONNECTOR=y
745CONFIG_PROC_EVENTS=y
746# CONFIG_MTD is not set
747# CONFIG_PARPORT is not set
748CONFIG_BLK_DEV=y
749# CONFIG_BLK_DEV_FD is not set
750# CONFIG_BLK_CPQ_DA is not set
751# CONFIG_BLK_CPQ_CISS_DA is not set
752# CONFIG_BLK_DEV_DAC960 is not set
753# CONFIG_BLK_DEV_UMEM is not set
754# CONFIG_BLK_DEV_COW_COMMON is not set
755CONFIG_BLK_DEV_LOOP=y
756# CONFIG_BLK_DEV_CRYPTOLOOP is not set
757# CONFIG_BLK_DEV_NBD is not set
758# CONFIG_BLK_DEV_SX8 is not set
759# CONFIG_BLK_DEV_UB is not set
760CONFIG_BLK_DEV_RAM=y
761CONFIG_BLK_DEV_RAM_COUNT=16
762CONFIG_BLK_DEV_RAM_SIZE=16384
763# CONFIG_BLK_DEV_XIP is not set
764# CONFIG_CDROM_PKTCDVD is not set
765# CONFIG_ATA_OVER_ETH is not set
766# CONFIG_BLK_DEV_HD is not set
767CONFIG_MISC_DEVICES=y
768# CONFIG_IBM_ASM is not set
769# CONFIG_PHANTOM is not set
770# CONFIG_SGI_IOC4 is not set
771# CONFIG_TIFM_CORE is not set
772# CONFIG_ICS932S401 is not set
773# CONFIG_ENCLOSURE_SERVICES is not set
774# CONFIG_HP_ILO is not set
775# CONFIG_C2PORT is not set
776
777#
778# EEPROM support
779#
780# CONFIG_EEPROM_AT24 is not set
781# CONFIG_EEPROM_AT25 is not set
782# CONFIG_EEPROM_LEGACY is not set
783# CONFIG_EEPROM_93CX6 is not set
784CONFIG_HAVE_IDE=y
785# CONFIG_IDE is not set
786
787#
788# SCSI device support
789#
790# CONFIG_RAID_ATTRS is not set
791CONFIG_SCSI=y
792CONFIG_SCSI_DMA=y
793# CONFIG_SCSI_TGT is not set
794# CONFIG_SCSI_NETLINK is not set
795CONFIG_SCSI_PROC_FS=y
796
797#
798# SCSI support type (disk, tape, CD-ROM)
799#
800CONFIG_BLK_DEV_SD=y
801# CONFIG_CHR_DEV_ST is not set
802# CONFIG_CHR_DEV_OSST is not set
803CONFIG_BLK_DEV_SR=y
804CONFIG_BLK_DEV_SR_VENDOR=y
805CONFIG_CHR_DEV_SG=y
806# CONFIG_CHR_DEV_SCH is not set
807
808#
809# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
810#
811# CONFIG_SCSI_MULTI_LUN is not set
812CONFIG_SCSI_CONSTANTS=y
813# CONFIG_SCSI_LOGGING is not set
814# CONFIG_SCSI_SCAN_ASYNC is not set
815
816#
817# SCSI Transports
818#
819CONFIG_SCSI_SPI_ATTRS=y
820# CONFIG_SCSI_FC_ATTRS is not set
821CONFIG_SCSI_ISCSI_ATTRS=y
822# CONFIG_SCSI_SAS_LIBSAS is not set
823# CONFIG_SCSI_SRP_ATTRS is not set
824# CONFIG_SCSI_LOWLEVEL is not set
825# CONFIG_SCSI_DH is not set
826# CONFIG_ATA is not set
827# CONFIG_MD is not set
828# CONFIG_FUSION is not set
829
830#
831# IEEE 1394 (FireWire) support
832#
833
834#
835# Enable only one of the two stacks, unless you know what you are doing
836#
837# CONFIG_FIREWIRE is not set
838# CONFIG_IEEE1394 is not set
839# CONFIG_I2O is not set
840# CONFIG_MACINTOSH_DRIVERS is not set
841CONFIG_NETDEVICES=y
842# CONFIG_IFB is not set
843# CONFIG_DUMMY is not set
844# CONFIG_BONDING is not set
845# CONFIG_MACVLAN is not set
846# CONFIG_EQUALIZER is not set
847# CONFIG_TUN is not set
848# CONFIG_VETH is not set
849# CONFIG_ARCNET is not set
850# CONFIG_NET_ETHERNET is not set
851CONFIG_MII=y
852# CONFIG_NETDEV_1000 is not set
853# CONFIG_NETDEV_10000 is not set
854# CONFIG_TR is not set
855
856#
857# Wireless LAN
858#
859# CONFIG_WLAN_PRE80211 is not set
860CONFIG_WLAN_80211=y
861# CONFIG_IWLWIFI_LEDS is not set
862
863#
864# Enable WiMAX (Networking options) to see the WiMAX drivers
865#
866
867#
868# USB Network Adapters
869#
870# CONFIG_USB_CATC is not set
871# CONFIG_USB_KAWETH is not set
872# CONFIG_USB_PEGASUS is not set
873# CONFIG_USB_RTL8150 is not set
874CONFIG_USB_USBNET=y
875CONFIG_USB_NET_AX8817X=y
876CONFIG_USB_NET_CDCETHER=y
877# CONFIG_USB_NET_DM9601 is not set
878# CONFIG_USB_NET_SMSC95XX is not set
879# CONFIG_USB_NET_GL620A is not set
880CONFIG_USB_NET_NET1080=y
881# CONFIG_USB_NET_PLUSB is not set
882# CONFIG_USB_NET_MCS7830 is not set
883# CONFIG_USB_NET_RNDIS_HOST is not set
884CONFIG_USB_NET_CDC_SUBSET=y
885# CONFIG_USB_ALI_M5632 is not set
886# CONFIG_USB_AN2720 is not set
887# CONFIG_USB_BELKIN is not set
888# CONFIG_USB_ARMLINUX is not set
889# CONFIG_USB_EPSON2888 is not set
890# CONFIG_USB_KC2190 is not set
891# CONFIG_USB_NET_ZAURUS is not set
892# CONFIG_WAN is not set
893# CONFIG_FDDI is not set
894# CONFIG_HIPPI is not set
895# CONFIG_PPP is not set
896# CONFIG_SLIP is not set
897# CONFIG_NET_FC is not set
898# CONFIG_NETCONSOLE is not set
899# CONFIG_NETPOLL is not set
900# CONFIG_NET_POLL_CONTROLLER is not set
901# CONFIG_ISDN is not set
902# CONFIG_PHONE is not set
903
904#
905# Input device support
906#
907CONFIG_INPUT=y
908CONFIG_INPUT_FF_MEMLESS=y
909CONFIG_INPUT_POLLDEV=y
910
911#
912# Userland interfaces
913#
914CONFIG_INPUT_MOUSEDEV=y
915# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
916CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
917CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
918# CONFIG_INPUT_JOYDEV is not set
919CONFIG_INPUT_EVDEV=y
920# CONFIG_INPUT_EVBUG is not set
921
922#
923# Input Device Drivers
924#
925CONFIG_INPUT_KEYBOARD=y
926CONFIG_KEYBOARD_ATKBD=y
927# CONFIG_KEYBOARD_SUNKBD is not set
928# CONFIG_KEYBOARD_LKKBD is not set
929# CONFIG_KEYBOARD_XTKBD is not set
930# CONFIG_KEYBOARD_NEWTON is not set
931# CONFIG_KEYBOARD_STOWAWAY is not set
932CONFIG_KEYBOARD_GPIO=y
933CONFIG_INPUT_MOUSE=y
934CONFIG_MOUSE_PS2=y
935CONFIG_MOUSE_PS2_ALPS=y
936CONFIG_MOUSE_PS2_LOGIPS2PP=y
937CONFIG_MOUSE_PS2_SYNAPTICS=y
938CONFIG_MOUSE_PS2_LIFEBOOK=y
939CONFIG_MOUSE_PS2_TRACKPOINT=y
940# CONFIG_MOUSE_PS2_ELANTECH is not set
941# CONFIG_MOUSE_PS2_TOUCHKIT is not set
942# CONFIG_MOUSE_SERIAL is not set
943# CONFIG_MOUSE_APPLETOUCH is not set
944# CONFIG_MOUSE_BCM5974 is not set
945# CONFIG_MOUSE_VSXXXAA is not set
946# CONFIG_MOUSE_GPIO is not set
947CONFIG_INPUT_JOYSTICK=y
948# CONFIG_JOYSTICK_ANALOG is not set
949# CONFIG_JOYSTICK_A3D is not set
950# CONFIG_JOYSTICK_ADI is not set
951# CONFIG_JOYSTICK_COBRA is not set
952# CONFIG_JOYSTICK_GF2K is not set
953# CONFIG_JOYSTICK_GRIP is not set
954# CONFIG_JOYSTICK_GRIP_MP is not set
955# CONFIG_JOYSTICK_GUILLEMOT is not set
956# CONFIG_JOYSTICK_INTERACT is not set
957# CONFIG_JOYSTICK_SIDEWINDER is not set
958# CONFIG_JOYSTICK_TMDC is not set
959# CONFIG_JOYSTICK_IFORCE is not set
960# CONFIG_JOYSTICK_WARRIOR is not set
961# CONFIG_JOYSTICK_MAGELLAN is not set
962# CONFIG_JOYSTICK_SPACEORB is not set
963# CONFIG_JOYSTICK_SPACEBALL is not set
964# CONFIG_JOYSTICK_STINGER is not set
965# CONFIG_JOYSTICK_TWIDJOY is not set
966# CONFIG_JOYSTICK_ZHENHUA is not set
967# CONFIG_JOYSTICK_JOYDUMP is not set
968# CONFIG_JOYSTICK_XPAD is not set
969CONFIG_INPUT_TABLET=y
970# CONFIG_TABLET_USB_ACECAD is not set
971# CONFIG_TABLET_USB_AIPTEK is not set
972# CONFIG_TABLET_USB_GTCO is not set
973# CONFIG_TABLET_USB_KBTAB is not set
974# CONFIG_TABLET_USB_WACOM is not set
975CONFIG_INPUT_TOUCHSCREEN=y
976# CONFIG_TOUCHSCREEN_ADS7846 is not set
977# CONFIG_TOUCHSCREEN_FUJITSU is not set
978# CONFIG_TOUCHSCREEN_GUNZE is not set
979# CONFIG_TOUCHSCREEN_ELO is not set
980# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
981# CONFIG_TOUCHSCREEN_MTOUCH is not set
982# CONFIG_TOUCHSCREEN_INEXIO is not set
983# CONFIG_TOUCHSCREEN_MK712 is not set
984# CONFIG_TOUCHSCREEN_PENMOUNT is not set
985# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
986# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
987# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
988# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
989# CONFIG_TOUCHSCREEN_TSC2007 is not set
990CONFIG_INPUT_MISC=y
991# CONFIG_INPUT_PCSPKR is not set
992# CONFIG_INPUT_WISTRON_BTNS is not set
993# CONFIG_INPUT_ATI_REMOTE is not set
994# CONFIG_INPUT_ATI_REMOTE2 is not set
995# CONFIG_INPUT_KEYSPAN_REMOTE is not set
996# CONFIG_INPUT_POWERMATE is not set
997# CONFIG_INPUT_YEALINK is not set
998# CONFIG_INPUT_CM109 is not set
999# CONFIG_INPUT_UINPUT is not set
1000
1001#
1002# Hardware I/O ports
1003#
1004CONFIG_SERIO=y
1005CONFIG_SERIO_SERPORT=y
1006# CONFIG_SERIO_CT82C710 is not set
1007# CONFIG_SERIO_PCIPS2 is not set
1008CONFIG_SERIO_LIBPS2=y
1009# CONFIG_SERIO_RAW is not set
1010# CONFIG_GAMEPORT is not set
1011
1012#
1013# Character devices
1014#
1015CONFIG_VT=y
1016CONFIG_CONSOLE_TRANSLATIONS=y
1017CONFIG_VT_CONSOLE=y
1018CONFIG_HW_CONSOLE=y
1019CONFIG_VT_HW_CONSOLE_BINDING=y
1020CONFIG_DEVKMEM=y
1021CONFIG_SERIAL_NONSTANDARD=y
1022# CONFIG_COMPUTONE is not set
1023# CONFIG_ROCKETPORT is not set
1024# CONFIG_CYCLADES is not set
1025# CONFIG_DIGIEPCA is not set
1026# CONFIG_MOXA_INTELLIO is not set
1027# CONFIG_MOXA_SMARTIO is not set
1028# CONFIG_ISI is not set
1029# CONFIG_SYNCLINK is not set
1030# CONFIG_SYNCLINKMP is not set
1031# CONFIG_SYNCLINK_GT is not set
1032# CONFIG_N_HDLC is not set
1033# CONFIG_RISCOM8 is not set
1034# CONFIG_SPECIALIX is not set
1035# CONFIG_SX is not set
1036# CONFIG_RIO is not set
1037# CONFIG_STALDRV is not set
1038# CONFIG_NOZOMI is not set
1039
1040#
1041# Serial drivers
1042#
1043CONFIG_FIX_EARLYCON_MEM=y
1044
1045#
1046# Non-8250 serial port support
1047#
1048CONFIG_SERIAL_CORE=y
1049CONFIG_SERIAL_CORE_CONSOLE=y
1050# CONFIG_SERIAL_JSM is not set
1051CONFIG_UNIX98_PTYS=y
1052# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1053# CONFIG_LEGACY_PTYS is not set
1054# CONFIG_IPMI_HANDLER is not set
1055CONFIG_HW_RANDOM=y
1056CONFIG_HW_RANDOM_INTEL=y
1057CONFIG_HW_RANDOM_AMD=y
1058CONFIG_HW_RANDOM_GEODE=y
1059CONFIG_HW_RANDOM_VIA=y
1060CONFIG_NVRAM=y
1061# CONFIG_R3964 is not set
1062# CONFIG_APPLICOM is not set
1063# CONFIG_SONYPI is not set
1064# CONFIG_MWAVE is not set
1065# CONFIG_PC8736x_GPIO is not set
1066# CONFIG_NSC_GPIO is not set
1067# CONFIG_CS5535_GPIO is not set
1068# CONFIG_RAW_DRIVER is not set
1069# CONFIG_HANGCHECK_TIMER is not set
1070# CONFIG_TCG_TPM is not set
1071# CONFIG_TELCLOCK is not set
1072CONFIG_DEVPORT=y
1073CONFIG_I2C=y
1074CONFIG_I2C_BOARDINFO=y
1075# CONFIG_I2C_CHARDEV is not set
1076CONFIG_I2C_HELPER_AUTO=y
1077CONFIG_I2C_ALGOBIT=y
1078
1079#
1080# I2C Hardware Bus support
1081#
1082
1083#
1084# PC SMBus host controller drivers
1085#
1086# CONFIG_I2C_ALI1535 is not set
1087# CONFIG_I2C_ALI1563 is not set
1088# CONFIG_I2C_ALI15X3 is not set
1089# CONFIG_I2C_AMD756 is not set
1090# CONFIG_I2C_AMD8111 is not set
1091CONFIG_I2C_I801=y
1092# CONFIG_I2C_ISCH is not set
1093# CONFIG_I2C_PIIX4 is not set
1094# CONFIG_I2C_NFORCE2 is not set
1095# CONFIG_I2C_SIS5595 is not set
1096# CONFIG_I2C_SIS630 is not set
1097# CONFIG_I2C_SIS96X is not set
1098# CONFIG_I2C_VIA is not set
1099# CONFIG_I2C_VIAPRO is not set
1100
1101#
1102# I2C system bus drivers (mostly embedded / system-on-chip)
1103#
1104# CONFIG_I2C_GPIO is not set
1105# CONFIG_I2C_OCORES is not set
1106# CONFIG_I2C_SIMTEC is not set
1107
1108#
1109# External I2C/SMBus adapter drivers
1110#
1111# CONFIG_I2C_PARPORT_LIGHT is not set
1112# CONFIG_I2C_TAOS_EVM is not set
1113# CONFIG_I2C_TINY_USB is not set
1114
1115#
1116# Graphics adapter I2C/DDC channel drivers
1117#
1118# CONFIG_I2C_VOODOO3 is not set
1119
1120#
1121# Other I2C/SMBus bus drivers
1122#
1123# CONFIG_I2C_PCA_PLATFORM is not set
1124# CONFIG_SCx200_ACB is not set
1125
1126#
1127# Miscellaneous I2C Chip support
1128#
1129# CONFIG_DS1682 is not set
1130# CONFIG_SENSORS_PCF8574 is not set
1131# CONFIG_PCF8575 is not set
1132# CONFIG_SENSORS_PCA9539 is not set
1133# CONFIG_SENSORS_PCF8591 is not set
1134# CONFIG_SENSORS_MAX6875 is not set
1135# CONFIG_SENSORS_TSL2550 is not set
1136# CONFIG_I2C_DEBUG_CORE is not set
1137# CONFIG_I2C_DEBUG_ALGO is not set
1138# CONFIG_I2C_DEBUG_BUS is not set
1139# CONFIG_I2C_DEBUG_CHIP is not set
1140CONFIG_SPI=y
1141# CONFIG_SPI_DEBUG is not set
1142CONFIG_SPI_MASTER=y
1143
1144#
1145# SPI Master Controller Drivers
1146#
1147CONFIG_SPI_BITBANG=y
1148# CONFIG_SPI_GPIO is not set
1149CONFIG_SPI_MRST=y
1150CONFIG_SPI_MRST_DMA=y
1151
1152#
1153# SPI Protocol Masters
1154#
1155CONFIG_SPI_MRST_MAX3110=y
1156# CONFIG_MRST_MAX3110_IRQ is not set
1157# CONFIG_SPI_SPIDEV is not set
1158# CONFIG_SPI_TLE62X0 is not set
1159CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1160CONFIG_GPIOLIB=y
1161# CONFIG_DEBUG_GPIO is not set
1162CONFIG_GPIO_SYSFS=y
1163CONFIG_GPE=y
1164CONFIG_GPIO_LANGWELL=y
1165CONFIG_GPIO_LNWPMIC=y
1166# CONFIG_GPIO_LNWPMIC_NEC_WORKAROUND is not set
1167CONFIG_MRST_PMIC_BUTTON=y
1168
1169#
1170# Memory mapped GPIO expanders:
1171#
1172
1173#
1174# I2C GPIO expanders:
1175#
1176# CONFIG_GPIO_MAX732X is not set
1177# CONFIG_GPIO_PCA953X is not set
1178# CONFIG_GPIO_PCF857X is not set
1179
1180#
1181# PCI GPIO expanders:
1182#
1183# CONFIG_GPIO_BT8XX is not set
1184
1185#
1186# SPI GPIO expanders:
1187#
1188# CONFIG_GPIO_MAX7301 is not set
1189# CONFIG_GPIO_MCP23S08 is not set
1190# CONFIG_W1 is not set
1191# CONFIG_POWER_SUPPLY is not set
1192CONFIG_HWMON=y
1193# CONFIG_HWMON_VID is not set
1194# CONFIG_SENSORS_ABITUGURU is not set
1195# CONFIG_SENSORS_ABITUGURU3 is not set
1196# CONFIG_SENSORS_AD7414 is not set
1197# CONFIG_SENSORS_AD7418 is not set
1198# CONFIG_SENSORS_ADCXX is not set
1199# CONFIG_SENSORS_ADM1021 is not set
1200# CONFIG_SENSORS_ADM1025 is not set
1201# CONFIG_SENSORS_ADM1026 is not set
1202# CONFIG_SENSORS_ADM1029 is not set
1203# CONFIG_SENSORS_ADM1031 is not set
1204# CONFIG_SENSORS_ADM9240 is not set
1205# CONFIG_SENSORS_ADT7462 is not set
1206# CONFIG_SENSORS_ADT7470 is not set
1207# CONFIG_SENSORS_ADT7473 is not set
1208# CONFIG_SENSORS_ADT7475 is not set
1209# CONFIG_SENSORS_K8TEMP is not set
1210# CONFIG_SENSORS_ASB100 is not set
1211# CONFIG_SENSORS_ATXP1 is not set
1212# CONFIG_SENSORS_DS1621 is not set
1213# CONFIG_SENSORS_I5K_AMB is not set
1214# CONFIG_SENSORS_F71805F is not set
1215# CONFIG_SENSORS_F71882FG is not set
1216# CONFIG_SENSORS_F75375S is not set
1217# CONFIG_SENSORS_FSCHER is not set
1218# CONFIG_SENSORS_FSCPOS is not set
1219# CONFIG_SENSORS_FSCHMD is not set
1220# CONFIG_SENSORS_GL518SM is not set
1221# CONFIG_SENSORS_GL520SM is not set
1222# CONFIG_SENSORS_CORETEMP is not set
1223# CONFIG_SENSORS_IT87 is not set
1224# CONFIG_SENSORS_LM63 is not set
1225# CONFIG_SENSORS_LM70 is not set
1226# CONFIG_SENSORS_LM75 is not set
1227# CONFIG_SENSORS_LM77 is not set
1228# CONFIG_SENSORS_LM78 is not set
1229# CONFIG_SENSORS_LM80 is not set
1230# CONFIG_SENSORS_LM83 is not set
1231# CONFIG_SENSORS_LM85 is not set
1232# CONFIG_SENSORS_LM87 is not set
1233# CONFIG_SENSORS_LM90 is not set
1234# CONFIG_SENSORS_LM92 is not set
1235# CONFIG_SENSORS_LM93 is not set
1236# CONFIG_SENSORS_LTC4245 is not set
1237# CONFIG_SENSORS_MAX1111 is not set
1238# CONFIG_SENSORS_MAX1619 is not set
1239# CONFIG_SENSORS_MAX6650 is not set
1240# CONFIG_SENSORS_PC87360 is not set
1241# CONFIG_SENSORS_PC87427 is not set
1242# CONFIG_SENSORS_SIS5595 is not set
1243# CONFIG_SENSORS_DME1737 is not set
1244# CONFIG_SENSORS_SMSC47M1 is not set
1245# CONFIG_SENSORS_SMSC47M192 is not set
1246# CONFIG_SENSORS_SMSC47B397 is not set
1247# CONFIG_SENSORS_ADS7828 is not set
1248# CONFIG_SENSORS_THMC50 is not set
1249# CONFIG_SENSORS_VIA686A is not set
1250# CONFIG_SENSORS_VT1211 is not set
1251# CONFIG_SENSORS_VT8231 is not set
1252# CONFIG_SENSORS_W83781D is not set
1253# CONFIG_SENSORS_W83791D is not set
1254# CONFIG_SENSORS_W83792D is not set
1255# CONFIG_SENSORS_W83793 is not set
1256# CONFIG_SENSORS_W83L785TS is not set
1257# CONFIG_SENSORS_W83L786NG is not set
1258# CONFIG_SENSORS_W83627HF is not set
1259# CONFIG_SENSORS_W83627EHF is not set
1260# CONFIG_SENSORS_HDAPS is not set
1261# CONFIG_SENSORS_APPLESMC is not set
1262# CONFIG_HWMON_DEBUG_CHIP is not set
1263# CONFIG_THERMAL is not set
1264# CONFIG_THERMAL_HWMON is not set
1265# CONFIG_WATCHDOG is not set
1266CONFIG_SSB_POSSIBLE=y
1267
1268#
1269# Sonics Silicon Backplane
1270#
1271# CONFIG_SSB is not set
1272
1273#
1274# Multifunction device drivers
1275#
1276# CONFIG_MFD_CORE is not set
1277# CONFIG_MFD_SM501 is not set
1278# CONFIG_HTC_PASIC3 is not set
1279# CONFIG_TPS65010 is not set
1280# CONFIG_TWL4030_CORE is not set
1281# CONFIG_MFD_TMIO is not set
1282# CONFIG_PMIC_DA903X is not set
1283# CONFIG_MFD_WM8400 is not set
1284# CONFIG_MFD_WM8350_I2C is not set
1285# CONFIG_MFD_PCF50633 is not set
1286# CONFIG_REGULATOR is not set
1287
1288#
1289# Multimedia devices
1290#
1291
1292#
1293# Multimedia core support
1294#
1295CONFIG_VIDEO_DEV=y
1296CONFIG_VIDEO_V4L2_COMMON=y
1297# CONFIG_VIDEO_ALLOW_V4L1 is not set
1298# CONFIG_VIDEO_V4L1_COMPAT is not set
1299# CONFIG_DVB_CORE is not set
1300CONFIG_VIDEO_MEDIA=y
1301
1302#
1303# Multimedia drivers
1304#
1305# CONFIG_MEDIA_ATTACH is not set
1306CONFIG_MEDIA_TUNER=y
1307CONFIG_MEDIA_TUNER_CUSTOMIZE=y
1308# CONFIG_MEDIA_TUNER_SIMPLE is not set
1309# CONFIG_MEDIA_TUNER_TDA8290 is not set
1310# CONFIG_MEDIA_TUNER_TDA827X is not set
1311# CONFIG_MEDIA_TUNER_TDA18271 is not set
1312# CONFIG_MEDIA_TUNER_TDA9887 is not set
1313# CONFIG_MEDIA_TUNER_TEA5761 is not set
1314# CONFIG_MEDIA_TUNER_TEA5767 is not set
1315# CONFIG_MEDIA_TUNER_MT20XX is not set
1316# CONFIG_MEDIA_TUNER_MT2060 is not set
1317# CONFIG_MEDIA_TUNER_MT2266 is not set
1318# CONFIG_MEDIA_TUNER_MT2131 is not set
1319# CONFIG_MEDIA_TUNER_QT1010 is not set
1320# CONFIG_MEDIA_TUNER_XC2028 is not set
1321# CONFIG_MEDIA_TUNER_XC5000 is not set
1322# CONFIG_MEDIA_TUNER_MXL5005S is not set
1323# CONFIG_MEDIA_TUNER_MXL5007T is not set
1324CONFIG_VIDEO_V4L2=y
1325CONFIG_VIDEO_CAPTURE_DRIVERS=y
1326# CONFIG_VIDEO_ADV_DEBUG is not set
1327# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1328# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
1329
1330#
1331# Encoders/decoders and other helper chips
1332#
1333
1334#
1335# Audio decoders
1336#
1337# CONFIG_VIDEO_TVAUDIO is not set
1338# CONFIG_VIDEO_TDA7432 is not set
1339# CONFIG_VIDEO_TDA9840 is not set
1340# CONFIG_VIDEO_TDA9875 is not set
1341# CONFIG_VIDEO_TEA6415C is not set
1342# CONFIG_VIDEO_TEA6420 is not set
1343# CONFIG_VIDEO_MSP3400 is not set
1344# CONFIG_VIDEO_CS5345 is not set
1345# CONFIG_VIDEO_CS53L32A is not set
1346# CONFIG_VIDEO_M52790 is not set
1347# CONFIG_VIDEO_TLV320AIC23B is not set
1348# CONFIG_VIDEO_WM8775 is not set
1349# CONFIG_VIDEO_WM8739 is not set
1350# CONFIG_VIDEO_VP27SMPX is not set
1351
1352#
1353# Video decoders
1354#
1355# CONFIG_VIDEO_OV7670 is not set
1356# CONFIG_VIDEO_TCM825X is not set
1357# CONFIG_VIDEO_SAA711X is not set
1358# CONFIG_VIDEO_SAA717X is not set
1359# CONFIG_VIDEO_TVP514X is not set
1360# CONFIG_VIDEO_TVP5150 is not set
1361
1362#
1363# Video and audio decoders
1364#
1365# CONFIG_VIDEO_CX25840 is not set
1366
1367#
1368# MPEG video encoders
1369#
1370# CONFIG_VIDEO_CX2341X is not set
1371
1372#
1373# Video encoders
1374#
1375# CONFIG_VIDEO_SAA7127 is not set
1376
1377#
1378# Video improvement chips
1379#
1380# CONFIG_VIDEO_UPD64031A is not set
1381# CONFIG_VIDEO_UPD64083 is not set
1382# CONFIG_VIDEO_VIVI is not set
1383# CONFIG_VIDEO_BT848 is not set
1384# CONFIG_VIDEO_SAA5246A is not set
1385# CONFIG_VIDEO_SAA5249 is not set
1386# CONFIG_VIDEO_SAA7134 is not set
1387# CONFIG_VIDEO_HEXIUM_ORION is not set
1388# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1389# CONFIG_VIDEO_CX88 is not set
1390# CONFIG_VIDEO_IVTV is not set
1391# CONFIG_VIDEO_CAFE_CCIC is not set
1392# CONFIG_SOC_CAMERA is not set
1393# CONFIG_V4L_USB_DRIVERS is not set
1394CONFIG_VIDEO_MRSTCI=y
1395CONFIG_VIDEO_MRST_ISP=y
1396CONFIG_VIDEO_MRST_SENSOR=y
1397CONFIG_VIDEO_MRST_OV2650=y
1398CONFIG_VIDEO_MRST_OV5630=y
1399# CONFIG_RADIO_ADAPTERS is not set
1400CONFIG_DAB=y
1401# CONFIG_USB_DABUSB is not set
1402
1403#
1404# Graphics support
1405#
1406CONFIG_AGP=y
1407# CONFIG_AGP_ALI is not set
1408# CONFIG_AGP_ATI is not set
1409# CONFIG_AGP_AMD is not set
1410CONFIG_AGP_AMD64=y
1411CONFIG_AGP_INTEL=y
1412# CONFIG_AGP_NVIDIA is not set
1413# CONFIG_AGP_SIS is not set
1414# CONFIG_AGP_SWORKS is not set
1415# CONFIG_AGP_VIA is not set
1416# CONFIG_AGP_EFFICEON is not set
1417CONFIG_DRM=y
1418# CONFIG_DRM_TDFX is not set
1419# CONFIG_DRM_R128 is not set
1420# CONFIG_DRM_RADEON is not set
1421# CONFIG_DRM_I810 is not set
1422# CONFIG_DRM_I830 is not set
1423CONFIG_DRM_I915=y
1424# CONFIG_DRM_I915_KMS is not set
1425# CONFIG_DRM_MGA is not set
1426# CONFIG_DRM_SIS is not set
1427# CONFIG_DRM_VIA is not set
1428# CONFIG_DRM_SAVAGE is not set
1429# CONFIG_VGASTATE is not set
1430# CONFIG_VIDEO_OUTPUT_CONTROL is not set
1431CONFIG_FB=y
1432# CONFIG_FIRMWARE_EDID is not set
1433# CONFIG_FB_DDC is not set
1434# CONFIG_FB_BOOT_VESA_SUPPORT is not set
1435CONFIG_FB_CFB_FILLRECT=y
1436CONFIG_FB_CFB_COPYAREA=y
1437CONFIG_FB_CFB_IMAGEBLIT=y
1438# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
1439# CONFIG_FB_SYS_FILLRECT is not set
1440# CONFIG_FB_SYS_COPYAREA is not set
1441# CONFIG_FB_SYS_IMAGEBLIT is not set
1442# CONFIG_FB_FOREIGN_ENDIAN is not set
1443# CONFIG_FB_SYS_FOPS is not set
1444# CONFIG_FB_SVGALIB is not set
1445# CONFIG_FB_MACMODES is not set
1446# CONFIG_FB_BACKLIGHT is not set
1447CONFIG_FB_MODE_HELPERS=y
1448CONFIG_FB_TILEBLITTING=y
1449
1450#
1451# Frame buffer hardware drivers
1452#
1453# CONFIG_FB_CIRRUS is not set
1454# CONFIG_FB_PM2 is not set
1455# CONFIG_FB_CYBER2000 is not set
1456# CONFIG_FB_ARC is not set
1457# CONFIG_FB_ASILIANT is not set
1458# CONFIG_FB_IMSTT is not set
1459# CONFIG_FB_VGA16 is not set
1460# CONFIG_FB_UVESA is not set
1461# CONFIG_FB_VESA is not set
1462# CONFIG_FB_N411 is not set
1463# CONFIG_FB_HGA is not set
1464# CONFIG_FB_S1D13XXX is not set
1465# CONFIG_FB_NVIDIA is not set
1466# CONFIG_FB_RIVA is not set
1467# CONFIG_FB_I810 is not set
1468# CONFIG_FB_LE80578 is not set
1469# CONFIG_FB_INTEL is not set
1470# CONFIG_FB_MATROX is not set
1471# CONFIG_FB_RADEON is not set
1472# CONFIG_FB_ATY128 is not set
1473# CONFIG_FB_ATY is not set
1474# CONFIG_FB_S3 is not set
1475# CONFIG_FB_SAVAGE is not set
1476# CONFIG_FB_SIS is not set
1477# CONFIG_FB_VIA is not set
1478# CONFIG_FB_NEOMAGIC is not set
1479# CONFIG_FB_KYRO is not set
1480# CONFIG_FB_3DFX is not set
1481# CONFIG_FB_VOODOO1 is not set
1482# CONFIG_FB_VT8623 is not set
1483# CONFIG_FB_CYBLA is not set
1484# CONFIG_FB_TRIDENT is not set
1485# CONFIG_FB_ARK is not set
1486# CONFIG_FB_PM3 is not set
1487# CONFIG_FB_CARMINE is not set
1488# CONFIG_FB_GEODE is not set
1489# CONFIG_FB_VIRTUAL is not set
1490# CONFIG_FB_METRONOME is not set
1491# CONFIG_FB_MB862XX is not set
1492CONFIG_BACKLIGHT_LCD_SUPPORT=y
1493# CONFIG_LCD_CLASS_DEVICE is not set
1494CONFIG_BACKLIGHT_CLASS_DEVICE=y
1495CONFIG_BACKLIGHT_GENERIC=y
1496# CONFIG_BACKLIGHT_PROGEAR is not set
1497# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1498# CONFIG_BACKLIGHT_SAHARA is not set
1499
1500#
1501# Display device support
1502#
1503CONFIG_DISPLAY_SUPPORT=y
1504
1505#
1506# Console display driver support
1507#
1508CONFIG_VGA_CONSOLE=y
1509CONFIG_VGACON_SOFT_SCROLLBACK=y
1510CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
1511CONFIG_DUMMY_CONSOLE=y
1512CONFIG_FRAMEBUFFER_CONSOLE=y
1513CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
1514CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
1515# CONFIG_FONTS is not set
1516CONFIG_FONT_8x8=y
1517CONFIG_FONT_8x16=y
1518CONFIG_LOGO=y
1519# CONFIG_LOGO_LINUX_MONO is not set
1520# CONFIG_LOGO_LINUX_VGA16 is not set
1521CONFIG_LOGO_LINUX_CLUT224=y
1522CONFIG_SOUND=y
1523CONFIG_SOUND_OSS_CORE=y
1524CONFIG_SND=y
1525CONFIG_SND_TIMER=y
1526CONFIG_SND_PCM=y
1527# CONFIG_SND_SEQUENCER is not set
1528CONFIG_SND_OSSEMUL=y
1529CONFIG_SND_MIXER_OSS=y
1530CONFIG_SND_PCM_OSS=y
1531# CONFIG_SND_PCM_OSS_PLUGINS is not set
1532# CONFIG_SND_HRTIMER is not set
1533# CONFIG_SND_DYNAMIC_MINORS is not set
1534# CONFIG_SND_SUPPORT_OLD_API is not set
1535# CONFIG_SND_VERBOSE_PROCFS is not set
1536# CONFIG_SND_VERBOSE_PRINTK is not set
1537# CONFIG_SND_DEBUG is not set
1538# CONFIG_SND_DRIVERS is not set
1539CONFIG_SND_PCI=y
1540# CONFIG_SND_AD1889 is not set
1541# CONFIG_SND_ALS300 is not set
1542# CONFIG_SND_ALS4000 is not set
1543# CONFIG_SND_ALI5451 is not set
1544# CONFIG_SND_ATIIXP is not set
1545# CONFIG_SND_ATIIXP_MODEM is not set
1546# CONFIG_SND_AU8810 is not set
1547# CONFIG_SND_AU8820 is not set
1548# CONFIG_SND_AU8830 is not set
1549# CONFIG_SND_AW2 is not set
1550# CONFIG_SND_AZT3328 is not set
1551# CONFIG_SND_BT87X is not set
1552# CONFIG_SND_CA0106 is not set
1553# CONFIG_SND_CMIPCI is not set
1554# CONFIG_SND_OXYGEN is not set
1555# CONFIG_SND_CS4281 is not set
1556# CONFIG_SND_CS46XX is not set
1557# CONFIG_SND_CS5530 is not set
1558# CONFIG_SND_CS5535AUDIO is not set
1559# CONFIG_SND_DARLA20 is not set
1560# CONFIG_SND_GINA20 is not set
1561# CONFIG_SND_LAYLA20 is not set
1562# CONFIG_SND_DARLA24 is not set
1563# CONFIG_SND_GINA24 is not set
1564# CONFIG_SND_LAYLA24 is not set
1565# CONFIG_SND_MONA is not set
1566# CONFIG_SND_MIA is not set
1567# CONFIG_SND_ECHO3G is not set
1568# CONFIG_SND_INDIGO is not set
1569# CONFIG_SND_INDIGOIO is not set
1570# CONFIG_SND_INDIGODJ is not set
1571# CONFIG_SND_EMU10K1 is not set
1572# CONFIG_SND_EMU10K1X is not set
1573# CONFIG_SND_ENS1370 is not set
1574# CONFIG_SND_ENS1371 is not set
1575# CONFIG_SND_ES1938 is not set
1576# CONFIG_SND_ES1968 is not set
1577# CONFIG_SND_FM801 is not set
1578# CONFIG_SND_HDA_INTEL is not set
1579# CONFIG_SND_HDSP is not set
1580# CONFIG_SND_HDSPM is not set
1581# CONFIG_SND_HIFIER is not set
1582# CONFIG_SND_ICE1712 is not set
1583# CONFIG_SND_ICE1724 is not set
1584# CONFIG_SND_INTEL8X0 is not set
1585# CONFIG_SND_INTEL8X0M is not set
1586# CONFIG_SND_KORG1212 is not set
1587# CONFIG_SND_MAESTRO3 is not set
1588# CONFIG_SND_MIXART is not set
1589# CONFIG_SND_NM256 is not set
1590# CONFIG_SND_PCXHR is not set
1591# CONFIG_SND_RIPTIDE is not set
1592# CONFIG_SND_RME32 is not set
1593# CONFIG_SND_RME96 is not set
1594# CONFIG_SND_RME9652 is not set
1595# CONFIG_SND_SIS7019 is not set
1596# CONFIG_SND_SONICVIBES is not set
1597# CONFIG_SND_TRIDENT is not set
1598# CONFIG_SND_VIA82XX is not set
1599# CONFIG_SND_VIA82XX_MODEM is not set
1600# CONFIG_SND_VIRTUOSO is not set
1601# CONFIG_SND_VX222 is not set
1602# CONFIG_SND_YMFPCI is not set
1603# CONFIG_SND_SPI is not set
1604# CONFIG_SND_USB is not set
1605# CONFIG_SND_SOC is not set
1606# CONFIG_SOUND_PRIME is not set
1607CONFIG_HID_SUPPORT=y
1608CONFIG_HID=y
1609CONFIG_HID_DEBUG=y
1610CONFIG_HIDRAW=y
1611
1612#
1613# USB Input Devices
1614#
1615CONFIG_USB_HID=y
1616CONFIG_HID_PID=y
1617# CONFIG_USB_HIDDEV is not set
1618
1619#
1620# Special HID drivers
1621#
1622# CONFIG_HID_COMPAT is not set
1623# CONFIG_HID_A4TECH is not set
1624# CONFIG_HID_APPLE is not set
1625# CONFIG_HID_BELKIN is not set
1626# CONFIG_HID_CHERRY is not set
1627# CONFIG_HID_CHICONY is not set
1628# CONFIG_HID_CYPRESS is not set
1629# CONFIG_HID_EZKEY is not set
1630# CONFIG_HID_GYRATION is not set
1631# CONFIG_HID_LOGITECH is not set
1632# CONFIG_HID_MICROSOFT is not set
1633# CONFIG_HID_MONTEREY is not set
1634# CONFIG_HID_NTRIG is not set
1635# CONFIG_HID_PANTHERLORD is not set
1636# CONFIG_HID_PETALYNX is not set
1637# CONFIG_HID_SAMSUNG is not set
1638# CONFIG_HID_SONY is not set
1639# CONFIG_HID_SUNPLUS is not set
1640# CONFIG_GREENASIA_FF is not set
1641# CONFIG_HID_TOPSEED is not set
1642# CONFIG_THRUSTMASTER_FF is not set
1643# CONFIG_ZEROPLUS_FF is not set
1644CONFIG_USB_SUPPORT=y
1645CONFIG_USB_ARCH_HAS_HCD=y
1646CONFIG_USB_ARCH_HAS_OHCI=y
1647CONFIG_USB_ARCH_HAS_EHCI=y
1648CONFIG_USB=y
1649CONFIG_USB_DEBUG=y
1650CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
1651
1652#
1653# Miscellaneous USB options
1654#
1655CONFIG_USB_DEVICEFS=y
1656# CONFIG_USB_DEVICE_CLASS is not set
1657# CONFIG_USB_DYNAMIC_MINORS is not set
1658CONFIG_USB_SUSPEND=y
1659CONFIG_USB_OTG=y
1660# CONFIG_USB_OTG_WHITELIST is not set
1661# CONFIG_USB_OTG_BLACKLIST_HUB is not set
1662CONFIG_USB_MON=y
1663# CONFIG_USB_WUSB is not set
1664# CONFIG_USB_WUSB_CBAF is not set
1665
1666#
1667# OTG and related infrastructure
1668#
1669CONFIG_USB_OTG_UTILS=y
1670# CONFIG_USB_GPIO_VBUS is not set
1671CONFIG_USB_LANGWELL_OTG=y
1672
1673#
1674# USB Host Controller Drivers
1675#
1676# CONFIG_USB_C67X00_HCD is not set
1677CONFIG_USB_EHCI_HCD=y
1678CONFIG_USB_EHCI_ROOT_HUB_TT=y
1679# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1680# CONFIG_USB_OXU210HP_HCD is not set
1681# CONFIG_USB_ISP116X_HCD is not set
1682# CONFIG_USB_ISP1760_HCD is not set
1683# CONFIG_USB_OHCI_HCD is not set
1684# CONFIG_USB_UHCI_HCD is not set
1685# CONFIG_USB_SL811_HCD is not set
1686# CONFIG_USB_R8A66597_HCD is not set
1687# CONFIG_USB_WHCI_HCD is not set
1688# CONFIG_USB_HWA_HCD is not set
1689# CONFIG_USB_GADGET_MUSB_HDRC is not set
1690
1691#
1692# USB Device Class drivers
1693#
1694# CONFIG_USB_ACM is not set
1695CONFIG_USB_PRINTER=y
1696# CONFIG_USB_WDM is not set
1697# CONFIG_USB_TMC is not set
1698
1699#
1700# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
1701#
1702
1703#
1704# see USB_STORAGE Help for more information
1705#
1706CONFIG_USB_STORAGE=y
1707# CONFIG_USB_STORAGE_DEBUG is not set
1708# CONFIG_USB_STORAGE_DATAFAB is not set
1709# CONFIG_USB_STORAGE_FREECOM is not set
1710# CONFIG_USB_STORAGE_ISD200 is not set
1711# CONFIG_USB_STORAGE_USBAT is not set
1712# CONFIG_USB_STORAGE_SDDR09 is not set
1713# CONFIG_USB_STORAGE_SDDR55 is not set
1714# CONFIG_USB_STORAGE_JUMPSHOT is not set
1715# CONFIG_USB_STORAGE_ALAUDA is not set
1716# CONFIG_USB_STORAGE_ONETOUCH is not set
1717# CONFIG_USB_STORAGE_KARMA is not set
1718# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
1719CONFIG_USB_LIBUSUAL=y
1720
1721#
1722# USB Imaging devices
1723#
1724# CONFIG_USB_MDC800 is not set
1725# CONFIG_USB_MICROTEK is not set
1726
1727#
1728# USB port drivers
1729#
1730# CONFIG_USB_SERIAL is not set
1731
1732#
1733# USB Miscellaneous drivers
1734#
1735# CONFIG_USB_EMI62 is not set
1736# CONFIG_USB_EMI26 is not set
1737# CONFIG_USB_ADUTUX is not set
1738# CONFIG_USB_SEVSEG is not set
1739# CONFIG_USB_RIO500 is not set
1740# CONFIG_USB_LEGOTOWER is not set
1741# CONFIG_USB_LCD is not set
1742# CONFIG_USB_BERRY_CHARGE is not set
1743# CONFIG_USB_LED is not set
1744# CONFIG_USB_CYPRESS_CY7C63 is not set
1745# CONFIG_USB_CYTHERM is not set
1746# CONFIG_USB_PHIDGET is not set
1747# CONFIG_USB_IDMOUSE is not set
1748# CONFIG_USB_FTDI_ELAN is not set
1749# CONFIG_USB_APPLEDISPLAY is not set
1750# CONFIG_USB_SISUSBVGA is not set
1751# CONFIG_USB_LD is not set
1752# CONFIG_USB_TRANCEVIBRATOR is not set
1753# CONFIG_USB_IOWARRIOR is not set
1754# CONFIG_USB_TEST is not set
1755# CONFIG_USB_ISIGHTFW is not set
1756# CONFIG_USB_VST is not set
1757CONFIG_USB_GADGET=m
1758# CONFIG_USB_GADGET_DEBUG is not set
1759# CONFIG_USB_GADGET_DEBUG_FILES is not set
1760# CONFIG_USB_GADGET_DEBUG_FS is not set
1761CONFIG_USB_GADGET_VBUS_DRAW=2
1762CONFIG_USB_GADGET_SELECTED=y
1763# CONFIG_USB_GADGET_AT91 is not set
1764# CONFIG_USB_GADGET_ATMEL_USBA is not set
1765# CONFIG_USB_GADGET_FSL_USB2 is not set
1766# CONFIG_USB_GADGET_LH7A40X is not set
1767# CONFIG_USB_GADGET_OMAP is not set
1768# CONFIG_USB_GADGET_PXA25X is not set
1769# CONFIG_USB_GADGET_PXA27X is not set
1770# CONFIG_USB_GADGET_S3C2410 is not set
1771# CONFIG_USB_GADGET_IMX is not set
1772# CONFIG_USB_GADGET_M66592 is not set
1773# CONFIG_USB_GADGET_AMD5536UDC is not set
1774# CONFIG_USB_GADGET_FSL_QE is not set
1775# CONFIG_USB_GADGET_CI13XXX is not set
1776# CONFIG_USB_GADGET_NET2280 is not set
1777# CONFIG_USB_GADGET_GOKU is not set
1778# CONFIG_USB_GADGET_DUMMY_HCD is not set
1779CONFIG_USB_GADGET_DUALSPEED=y
1780# CONFIG_USB_ZERO is not set
1781CONFIG_USB_ETH=m
1782CONFIG_USB_ETH_RNDIS=y
1783# CONFIG_USB_GADGETFS is not set
1784CONFIG_USB_FILE_STORAGE=m
1785# CONFIG_USB_FILE_STORAGE_TEST is not set
1786# CONFIG_USB_G_SERIAL is not set
1787# CONFIG_USB_MIDI_GADGET is not set
1788# CONFIG_USB_G_PRINTER is not set
1789# CONFIG_USB_CDC_COMPOSITE is not set
1790# CONFIG_UWB is not set
1791CONFIG_MMC=y
1792# CONFIG_MEMSTICK is not set
1793CONFIG_NEW_LEDS=y
1794# CONFIG_LEDS_CLASS is not set
1795
1796#
1797# LED drivers
1798#
1799
1800#
1801# LED Triggers
1802#
1803CONFIG_LEDS_TRIGGERS=y
1804# CONFIG_LEDS_TRIGGER_TIMER is not set
1805# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
1806# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
1807# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1808# CONFIG_ACCESSIBILITY is not set
1809# CONFIG_INFINIBAND is not set
1810# CONFIG_EDAC is not set
1811CONFIG_RTC_LIB=y
1812CONFIG_RTC_CLASS=y
1813# CONFIG_RTC_HCTOSYS is not set
1814# CONFIG_RTC_DEBUG is not set
1815
1816#
1817# RTC interfaces
1818#
1819CONFIG_RTC_INTF_SYSFS=y
1820CONFIG_RTC_INTF_PROC=y
1821CONFIG_RTC_INTF_DEV=y
1822# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1823# CONFIG_RTC_DRV_TEST is not set
1824
1825#
1826# I2C RTC drivers
1827#
1828# CONFIG_RTC_DRV_DS1307 is not set
1829# CONFIG_RTC_DRV_DS1374 is not set
1830# CONFIG_RTC_DRV_DS1672 is not set
1831# CONFIG_RTC_DRV_MAX6900 is not set
1832# CONFIG_RTC_DRV_RS5C372 is not set
1833# CONFIG_RTC_DRV_ISL1208 is not set
1834# CONFIG_RTC_DRV_X1205 is not set
1835# CONFIG_RTC_DRV_PCF8563 is not set
1836# CONFIG_RTC_DRV_PCF8583 is not set
1837# CONFIG_RTC_DRV_M41T80 is not set
1838# CONFIG_RTC_DRV_S35390A is not set
1839# CONFIG_RTC_DRV_FM3130 is not set
1840# CONFIG_RTC_DRV_RX8581 is not set
1841
1842#
1843# SPI RTC drivers
1844#
1845# CONFIG_RTC_DRV_M41T94 is not set
1846# CONFIG_RTC_DRV_DS1305 is not set
1847# CONFIG_RTC_DRV_DS1390 is not set
1848# CONFIG_RTC_DRV_MAX6902 is not set
1849# CONFIG_RTC_DRV_R9701 is not set
1850# CONFIG_RTC_DRV_RS5C348 is not set
1851# CONFIG_RTC_DRV_DS3234 is not set
1852
1853#
1854# Platform RTC drivers
1855#
1856# CONFIG_RTC_DRV_CMOS is not set
1857CONFIG_RTC_DRV_VRTC=y
1858# CONFIG_RTC_DRV_DS1286 is not set
1859# CONFIG_RTC_DRV_DS1511 is not set
1860# CONFIG_RTC_DRV_DS1553 is not set
1861# CONFIG_RTC_DRV_DS1742 is not set
1862# CONFIG_RTC_DRV_STK17TA8 is not set
1863# CONFIG_RTC_DRV_M48T86 is not set
1864# CONFIG_RTC_DRV_M48T35 is not set
1865# CONFIG_RTC_DRV_M48T59 is not set
1866# CONFIG_RTC_DRV_BQ4802 is not set
1867# CONFIG_RTC_DRV_V3020 is not set
1868
1869#
1870# on-CPU RTC drivers
1871#
1872CONFIG_DMADEVICES=y
1873
1874#
1875# DMA Devices
1876#
1877# CONFIG_INTEL_IOATDMA is not set
1878# CONFIG_UIO is not set
1879# CONFIG_STAGING is not set
1880# CONFIG_X86_PLATFORM_DEVICES is not set
1881
1882#
1883# Firmware Drivers
1884#
1885# CONFIG_EDD is not set
1886CONFIG_FIRMWARE_MEMMAP=y
1887# CONFIG_DELL_RBU is not set
1888# CONFIG_DCDBAS is not set
1889# CONFIG_ISCSI_IBFT_FIND is not set
1890
1891#
1892# File systems
1893#
1894# CONFIG_EXT2_FS is not set
1895CONFIG_EXT3_FS=y
1896CONFIG_EXT3_FS_XATTR=y
1897CONFIG_EXT3_FS_POSIX_ACL=y
1898CONFIG_EXT3_FS_SECURITY=y
1899# CONFIG_EXT4_FS is not set
1900CONFIG_JBD=y
1901# CONFIG_JBD_DEBUG is not set
1902CONFIG_FS_MBCACHE=y
1903# CONFIG_REISERFS_FS is not set
1904# CONFIG_JFS_FS is not set
1905CONFIG_FS_POSIX_ACL=y
1906CONFIG_FILE_LOCKING=y
1907# CONFIG_XFS_FS is not set
1908# CONFIG_OCFS2_FS is not set
1909# CONFIG_BTRFS_FS is not set
1910CONFIG_DNOTIFY=y
1911CONFIG_INOTIFY=y
1912CONFIG_INOTIFY_USER=y
1913CONFIG_QUOTA=y
1914CONFIG_QUOTA_NETLINK_INTERFACE=y
1915# CONFIG_PRINT_QUOTA_WARNING is not set
1916CONFIG_QUOTA_TREE=y
1917# CONFIG_QFMT_V1 is not set
1918CONFIG_QFMT_V2=y
1919CONFIG_QUOTACTL=y
1920# CONFIG_AUTOFS_FS is not set
1921CONFIG_AUTOFS4_FS=y
1922# CONFIG_FUSE_FS is not set
1923CONFIG_GENERIC_ACL=y
1924
1925#
1926# CD-ROM/DVD Filesystems
1927#
1928CONFIG_ISO9660_FS=y
1929CONFIG_JOLIET=y
1930CONFIG_ZISOFS=y
1931# CONFIG_UDF_FS is not set
1932
1933#
1934# DOS/FAT/NT Filesystems
1935#
1936CONFIG_FAT_FS=y
1937CONFIG_MSDOS_FS=y
1938CONFIG_VFAT_FS=y
1939CONFIG_FAT_DEFAULT_CODEPAGE=437
1940CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1941# CONFIG_NTFS_FS is not set
1942
1943#
1944# Pseudo filesystems
1945#
1946CONFIG_PROC_FS=y
1947CONFIG_PROC_KCORE=y
1948CONFIG_PROC_SYSCTL=y
1949CONFIG_PROC_PAGE_MONITOR=y
1950CONFIG_SYSFS=y
1951CONFIG_TMPFS=y
1952CONFIG_TMPFS_POSIX_ACL=y
1953CONFIG_HUGETLBFS=y
1954CONFIG_HUGETLB_PAGE=y
1955# CONFIG_CONFIGFS_FS is not set
1956CONFIG_MISC_FILESYSTEMS=y
1957# CONFIG_ADFS_FS is not set
1958# CONFIG_AFFS_FS is not set
1959# CONFIG_ECRYPT_FS is not set
1960# CONFIG_HFS_FS is not set
1961# CONFIG_HFSPLUS_FS is not set
1962# CONFIG_BEFS_FS is not set
1963# CONFIG_BFS_FS is not set
1964# CONFIG_EFS_FS is not set
1965# CONFIG_CRAMFS is not set
1966# CONFIG_SQUASHFS is not set
1967# CONFIG_VXFS_FS is not set
1968# CONFIG_MINIX_FS is not set
1969# CONFIG_OMFS_FS is not set
1970# CONFIG_HPFS_FS is not set
1971# CONFIG_QNX4FS_FS is not set
1972# CONFIG_ROMFS_FS is not set
1973# CONFIG_SYSV_FS is not set
1974# CONFIG_UFS_FS is not set
1975CONFIG_NETWORK_FILESYSTEMS=y
1976CONFIG_NFS_FS=y
1977CONFIG_NFS_V3=y
1978CONFIG_NFS_V3_ACL=y
1979CONFIG_NFS_V4=y
1980CONFIG_ROOT_NFS=y
1981# CONFIG_NFSD is not set
1982CONFIG_LOCKD=y
1983CONFIG_LOCKD_V4=y
1984CONFIG_NFS_ACL_SUPPORT=y
1985CONFIG_NFS_COMMON=y
1986CONFIG_SUNRPC=y
1987CONFIG_SUNRPC_GSS=y
1988# CONFIG_SUNRPC_REGISTER_V4 is not set
1989CONFIG_RPCSEC_GSS_KRB5=y
1990# CONFIG_RPCSEC_GSS_SPKM3 is not set
1991# CONFIG_SMB_FS is not set
1992# CONFIG_CIFS is not set
1993# CONFIG_NCP_FS is not set
1994# CONFIG_CODA_FS is not set
1995# CONFIG_AFS_FS is not set
1996
1997#
1998# Partition Types
1999#
2000CONFIG_PARTITION_ADVANCED=y
2001# CONFIG_ACORN_PARTITION is not set
2002CONFIG_OSF_PARTITION=y
2003CONFIG_AMIGA_PARTITION=y
2004# CONFIG_ATARI_PARTITION is not set
2005CONFIG_MAC_PARTITION=y
2006CONFIG_MSDOS_PARTITION=y
2007CONFIG_BSD_DISKLABEL=y
2008CONFIG_MINIX_SUBPARTITION=y
2009CONFIG_SOLARIS_X86_PARTITION=y
2010CONFIG_UNIXWARE_DISKLABEL=y
2011# CONFIG_LDM_PARTITION is not set
2012CONFIG_SGI_PARTITION=y
2013# CONFIG_ULTRIX_PARTITION is not set
2014CONFIG_SUN_PARTITION=y
2015CONFIG_KARMA_PARTITION=y
2016CONFIG_EFI_PARTITION=y
2017# CONFIG_SYSV68_PARTITION is not set
2018CONFIG_NLS=y
2019CONFIG_NLS_DEFAULT="utf8"
2020CONFIG_NLS_CODEPAGE_437=y
2021# CONFIG_NLS_CODEPAGE_737 is not set
2022# CONFIG_NLS_CODEPAGE_775 is not set
2023# CONFIG_NLS_CODEPAGE_850 is not set
2024# CONFIG_NLS_CODEPAGE_852 is not set
2025# CONFIG_NLS_CODEPAGE_855 is not set
2026# CONFIG_NLS_CODEPAGE_857 is not set
2027# CONFIG_NLS_CODEPAGE_860 is not set
2028# CONFIG_NLS_CODEPAGE_861 is not set
2029# CONFIG_NLS_CODEPAGE_862 is not set
2030# CONFIG_NLS_CODEPAGE_863 is not set
2031# CONFIG_NLS_CODEPAGE_864 is not set
2032# CONFIG_NLS_CODEPAGE_865 is not set
2033# CONFIG_NLS_CODEPAGE_866 is not set
2034# CONFIG_NLS_CODEPAGE_869 is not set
2035# CONFIG_NLS_CODEPAGE_936 is not set
2036# CONFIG_NLS_CODEPAGE_950 is not set
2037# CONFIG_NLS_CODEPAGE_932 is not set
2038# CONFIG_NLS_CODEPAGE_949 is not set
2039# CONFIG_NLS_CODEPAGE_874 is not set
2040# CONFIG_NLS_ISO8859_8 is not set
2041# CONFIG_NLS_CODEPAGE_1250 is not set
2042# CONFIG_NLS_CODEPAGE_1251 is not set
2043CONFIG_NLS_ASCII=y
2044CONFIG_NLS_ISO8859_1=y
2045# CONFIG_NLS_ISO8859_2 is not set
2046# CONFIG_NLS_ISO8859_3 is not set
2047# CONFIG_NLS_ISO8859_4 is not set
2048# CONFIG_NLS_ISO8859_5 is not set
2049# CONFIG_NLS_ISO8859_6 is not set
2050# CONFIG_NLS_ISO8859_7 is not set
2051# CONFIG_NLS_ISO8859_9 is not set
2052# CONFIG_NLS_ISO8859_13 is not set
2053# CONFIG_NLS_ISO8859_14 is not set
2054# CONFIG_NLS_ISO8859_15 is not set
2055# CONFIG_NLS_KOI8_R is not set
2056# CONFIG_NLS_KOI8_U is not set
2057CONFIG_NLS_UTF8=y
2058# CONFIG_DLM is not set
2059
2060#
2061# Kernel hacking
2062#
2063CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2064CONFIG_PRINTK_TIME=y
2065CONFIG_ENABLE_WARN_DEPRECATED=y
2066CONFIG_ENABLE_MUST_CHECK=y
2067CONFIG_FRAME_WARN=2048
2068CONFIG_MAGIC_SYSRQ=y
2069# CONFIG_UNUSED_SYMBOLS is not set
2070CONFIG_DEBUG_FS=y
2071# CONFIG_HEADERS_CHECK is not set
2072CONFIG_DEBUG_KERNEL=y
2073CONFIG_DEBUG_SHIRQ=y
2074CONFIG_DETECT_SOFTLOCKUP=y
2075# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2076CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2077# CONFIG_SCHED_DEBUG is not set
2078CONFIG_SCHEDSTATS=y
2079CONFIG_TIMER_STATS=y
2080# CONFIG_DEBUG_OBJECTS is not set
2081# CONFIG_SLUB_DEBUG_ON is not set
2082# CONFIG_SLUB_STATS is not set
2083# CONFIG_DEBUG_RT_MUTEXES is not set
2084# CONFIG_RT_MUTEX_TESTER is not set
2085CONFIG_DEBUG_SPINLOCK=y
2086CONFIG_DEBUG_MUTEXES=y
2087CONFIG_DEBUG_LOCK_ALLOC=y
2088# CONFIG_PROVE_LOCKING is not set
2089CONFIG_LOCKDEP=y
2090# CONFIG_LOCK_STAT is not set
2091# CONFIG_DEBUG_LOCKDEP is not set
2092CONFIG_DEBUG_SPINLOCK_SLEEP=y
2093# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2094CONFIG_STACKTRACE=y
2095# CONFIG_DEBUG_KOBJECT is not set
2096CONFIG_DEBUG_BUGVERBOSE=y
2097
2098# CONFIG_DEBUG_INFO is not set
2099# CONFIG_DEBUG_VM is not set
2100# CONFIG_DEBUG_VIRTUAL is not set
2101# CONFIG_DEBUG_WRITECOUNT is not set
2102# CONFIG_DEBUG_MEMORY_INIT is not set
2103# CONFIG_DEBUG_LIST is not set
2104# CONFIG_DEBUG_SG is not set
2105# CONFIG_DEBUG_NOTIFIERS is not set
2106CONFIG_ARCH_WANT_FRAME_POINTERS=y
2107CONFIG_FRAME_POINTER=y
2108# CONFIG_BOOT_PRINTK_DELAY is not set
2109# CONFIG_RCU_TORTURE_TEST is not set
2110# CONFIG_RCU_CPU_STALL_DETECTOR is not set
2111# CONFIG_BACKTRACE_SELF_TEST is not set
2112# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
2113# CONFIG_FAULT_INJECTION is not set
2114# CONFIG_LATENCYTOP is not set
2115CONFIG_SYSCTL_SYSCALL_CHECK=y
2116CONFIG_USER_STACKTRACE_SUPPORT=y
2117CONFIG_HAVE_FUNCTION_TRACER=y
2118CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
2119CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
2120CONFIG_HAVE_DYNAMIC_FTRACE=y
2121CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2122
2123#
2124# Tracers
2125#
2126# CONFIG_FUNCTION_TRACER is not set
2127# CONFIG_IRQSOFF_TRACER is not set
2128# CONFIG_SYSPROF_TRACER is not set
2129# CONFIG_SCHED_TRACER is not set
2130# CONFIG_CONTEXT_SWITCH_TRACER is not set
2131# CONFIG_BOOT_TRACER is not set
2132# CONFIG_TRACE_BRANCH_PROFILING is not set
2133# CONFIG_POWER_TRACER is not set
2134# CONFIG_STACK_TRACER is not set
2135# CONFIG_MMIOTRACE is not set
2136CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
2137# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
2138# CONFIG_SAMPLES is not set
2139CONFIG_HAVE_ARCH_KGDB=y
2140# CONFIG_KGDB is not set
2141# CONFIG_STRICT_DEVMEM is not set
2142CONFIG_X86_VERBOSE_BOOTUP=y
2143CONFIG_EARLY_PRINTK=y
2144CONFIG_X86_MRST_EARLY_PRINTK=y
2145# CONFIG_EARLY_PRINTK_DBGP is not set
2146CONFIG_DEBUG_STACKOVERFLOW=y
2147CONFIG_DEBUG_STACK_USAGE=y
2148# CONFIG_DEBUG_PAGEALLOC is not set
2149# CONFIG_DEBUG_PER_CPU_MAPS is not set
2150# CONFIG_X86_PTDUMP is not set
2151CONFIG_DEBUG_RODATA=y
2152# CONFIG_DEBUG_RODATA_TEST is not set
2153# CONFIG_4KSTACKS is not set
2154CONFIG_DOUBLEFAULT=y
2155CONFIG_HAVE_MMIOTRACE_SUPPORT=y
2156CONFIG_IO_DELAY_TYPE_0X80=0
2157CONFIG_IO_DELAY_TYPE_0XED=1
2158CONFIG_IO_DELAY_TYPE_UDELAY=2
2159CONFIG_IO_DELAY_TYPE_NONE=3
2160CONFIG_IO_DELAY_0X80=y
2161# CONFIG_IO_DELAY_0XED is not set
2162# CONFIG_IO_DELAY_UDELAY is not set
2163# CONFIG_IO_DELAY_NONE is not set
2164CONFIG_DEFAULT_IO_DELAY_TYPE=0
2165CONFIG_DEBUG_BOOT_PARAMS=y
2166# CONFIG_CPA_DEBUG is not set
2167CONFIG_OPTIMIZE_INLINING=y
2168
2169#
2170# Security options
2171#
2172CONFIG_KEYS=y
2173CONFIG_KEYS_DEBUG_PROC_KEYS=y
2174CONFIG_SECURITY=y
2175# CONFIG_SECURITYFS is not set
2176CONFIG_SECURITY_NETWORK=y
2177# CONFIG_SECURITY_NETWORK_XFRM is not set
2178# CONFIG_SECURITY_PATH is not set
2179CONFIG_SECURITY_FILE_CAPABILITIES=y
2180# CONFIG_SECURITY_ROOTPLUG is not set
2181CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
2182# CONFIG_SECURITY_SMACK is not set
2183CONFIG_CRYPTO=y
2184# CONFIG_SECURITY_SELINUX is not set
2185
2186#
2187# Crypto core or helper
2188#
2189# CONFIG_CRYPTO_FIPS is not set
2190CONFIG_CRYPTO_ALGAPI=y
2191CONFIG_CRYPTO_ALGAPI2=y
2192CONFIG_CRYPTO_AEAD=y
2193CONFIG_CRYPTO_AEAD2=y
2194CONFIG_CRYPTO_BLKCIPHER=y
2195CONFIG_CRYPTO_BLKCIPHER2=y
2196CONFIG_CRYPTO_HASH=y
2197CONFIG_CRYPTO_HASH2=y
2198CONFIG_CRYPTO_RNG2=y
2199CONFIG_CRYPTO_MANAGER=y
2200CONFIG_CRYPTO_MANAGER2=y
2201# CONFIG_CRYPTO_GF128MUL is not set
2202# CONFIG_CRYPTO_NULL is not set
2203# CONFIG_CRYPTO_CRYPTD is not set
2204CONFIG_CRYPTO_AUTHENC=y
2205
2206#
2207# Authenticated Encryption with Associated Data
2208#
2209# CONFIG_CRYPTO_CCM is not set
2210# CONFIG_CRYPTO_GCM is not set
2211# CONFIG_CRYPTO_SEQIV is not set
2212
2213#
2214# Block modes
2215#
2216CONFIG_CRYPTO_CBC=y
2217# CONFIG_CRYPTO_CTR is not set
2218# CONFIG_CRYPTO_CTS is not set
2219CONFIG_CRYPTO_ECB=y
2220# CONFIG_CRYPTO_LRW is not set
2221# CONFIG_CRYPTO_PCBC is not set
2222# CONFIG_CRYPTO_XTS is not set
2223
2224#
2225# Hash modes
2226#
2227CONFIG_CRYPTO_HMAC=y
2228# CONFIG_CRYPTO_XCBC is not set
2229
2230#
2231# Digest
2232#
2233# CONFIG_CRYPTO_CRC32C is not set
2234# CONFIG_CRYPTO_CRC32C_INTEL is not set
2235# CONFIG_CRYPTO_MD4 is not set
2236CONFIG_CRYPTO_MD5=y
2237# CONFIG_CRYPTO_MICHAEL_MIC is not set
2238# CONFIG_CRYPTO_RMD128 is not set
2239# CONFIG_CRYPTO_RMD160 is not set
2240# CONFIG_CRYPTO_RMD256 is not set
2241# CONFIG_CRYPTO_RMD320 is not set
2242CONFIG_CRYPTO_SHA1=y
2243# CONFIG_CRYPTO_SHA256 is not set
2244# CONFIG_CRYPTO_SHA512 is not set
2245# CONFIG_CRYPTO_TGR192 is not set
2246# CONFIG_CRYPTO_WP512 is not set
2247
2248#
2249# Ciphers
2250#
2251CONFIG_CRYPTO_AES=y
2252CONFIG_CRYPTO_AES_586=y
2253# CONFIG_CRYPTO_ANUBIS is not set
2254CONFIG_CRYPTO_ARC4=y
2255# CONFIG_CRYPTO_BLOWFISH is not set
2256# CONFIG_CRYPTO_CAMELLIA is not set
2257# CONFIG_CRYPTO_CAST5 is not set
2258# CONFIG_CRYPTO_CAST6 is not set
2259CONFIG_CRYPTO_DES=y
2260# CONFIG_CRYPTO_FCRYPT is not set
2261# CONFIG_CRYPTO_KHAZAD is not set
2262# CONFIG_CRYPTO_SALSA20 is not set
2263# CONFIG_CRYPTO_SALSA20_586 is not set
2264# CONFIG_CRYPTO_SEED is not set
2265# CONFIG_CRYPTO_SERPENT is not set
2266# CONFIG_CRYPTO_TEA is not set
2267# CONFIG_CRYPTO_TWOFISH is not set
2268# CONFIG_CRYPTO_TWOFISH_586 is not set
2269
2270#
2271# Compression
2272#
2273# CONFIG_CRYPTO_DEFLATE is not set
2274# CONFIG_CRYPTO_LZO is not set
2275
2276#
2277# Random Number Generation
2278#
2279# CONFIG_CRYPTO_ANSI_CPRNG is not set
2280CONFIG_CRYPTO_HW=y
2281# CONFIG_CRYPTO_DEV_PADLOCK is not set
2282# CONFIG_CRYPTO_DEV_GEODE is not set
2283# CONFIG_CRYPTO_DEV_HIFN_795X is not set
2284CONFIG_HAVE_KVM=y
2285CONFIG_VIRTUALIZATION=y
2286# CONFIG_KVM is not set
2287# CONFIG_LGUEST is not set
2288# CONFIG_VIRTIO_PCI is not set
2289# CONFIG_VIRTIO_BALLOON is not set
2290
2291#
2292# Library routines
2293#
2294CONFIG_BITREVERSE=y
2295CONFIG_GENERIC_FIND_FIRST_BIT=y
2296CONFIG_GENERIC_FIND_NEXT_BIT=y
2297CONFIG_GENERIC_FIND_LAST_BIT=y
2298# CONFIG_CRC_CCITT is not set
2299# CONFIG_CRC16 is not set
2300CONFIG_CRC_T10DIF=y
2301# CONFIG_CRC_ITU_T is not set
2302CONFIG_CRC32=y
2303# CONFIG_CRC7 is not set
2304# CONFIG_LIBCRC32C is not set
2305CONFIG_AUDIT_GENERIC=y
2306CONFIG_ZLIB_INFLATE=y
2307CONFIG_PLIST=y
2308CONFIG_HAS_IOMEM=y
2309CONFIG_HAS_IOPORT=y
2310CONFIG_HAS_DMA=y
2311
2312CONFIG_INTEL_LNW_DMAC1=y
2313CONFIG_INTEL_LNW_DMAC2=y
2314# CONFIG_LNW_DMA_DEBUG is not set
2315# CONFIG_NET_DMA is not set
2316# CONFIG_DMATEST is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook
new file mode 100644
index 0000000000..9174ff6d5b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook
@@ -0,0 +1,52 @@
1CONFIG_LOCALVERSION="-netbook"
2
3CONFIG_ACER_WMI=y
4
5CONFIG_EEEPC_LAPTOP=m
6
7CONFIG_R8169=y
8# CONFIG_R8169_VLAN is not set
9
10CONFIG_ATL1E=y
11
12CONFIG_ATH5K=y
13# CONFIG_ATH5K_DEBUG is not set
14
15CONFIG_RT2860=m
16
17CONFIG_RT2860=m
18
19CONFIG_RTL8187SE=m
20
21
22CONFIG_DRM_I915_KMS=y
23CONFIG_FRAMEBUFFER_CONSOLE=y
24CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
25# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
26CONFIG_FONTS=y
27CONFIG_FONT_8x8=y
28CONFIG_FONT_6x11=y
29CONFIG_FONT_7x14=y
30# CONFIG_FONT_PEARL_8x8 is not set
31# CONFIG_FONT_ACORN_8x8 is not set
32# CONFIG_FONT_MINI_4x6 is not set
33# CONFIG_FONT_SUN8x16 is not set
34# CONFIG_FONT_SUN12x22 is not set
35CONFIG_FONT_10x18=y
36
37
38#
39# Enable KVM
40#
41CONFIG_VIRTUALIZATION=y
42CONFIG_KVM=m
43CONFIG_KVM_INTEL=m
44# CONFIG_KVM_AMD is not set
45# CONFIG_KVM_TRACE is not set
46# CONFIG_VIRTIO_PCI is not set
47# CONFIG_VIRTIO_BALLOON is not set
48
49#
50# For VMWARE support
51#
52CONFIG_FUSION_SPI=y
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow
index 30c1656220..ec7e1d66ea 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-menlow
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow
@@ -1,14 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27 3# Linux kernel version: 2.6.29-rc8
4# Wed Jan 14 11:45:36 2009 4# Wed Apr 1 13:38:03 2009
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
8# CONFIG_X86_64 is not set 8# CONFIG_X86_64 is not set
9CONFIG_X86=y 9CONFIG_X86=y
10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" 10CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
11# CONFIG_GENERIC_LOCKBREAK is not set
12CONFIG_GENERIC_TIME=y 11CONFIG_GENERIC_TIME=y
13CONFIG_GENERIC_CMOS_UPDATE=y 12CONFIG_GENERIC_CMOS_UPDATE=y
14CONFIG_CLOCKSOURCE_WATCHDOG=y 13CONFIG_CLOCKSOURCE_WATCHDOG=y
@@ -24,16 +23,14 @@ CONFIG_GENERIC_ISA_DMA=y
24CONFIG_GENERIC_IOMAP=y 23CONFIG_GENERIC_IOMAP=y
25CONFIG_GENERIC_BUG=y 24CONFIG_GENERIC_BUG=y
26CONFIG_GENERIC_HWEIGHT=y 25CONFIG_GENERIC_HWEIGHT=y
27# CONFIG_GENERIC_GPIO is not set
28CONFIG_ARCH_MAY_HAVE_PC_FDC=y 26CONFIG_ARCH_MAY_HAVE_PC_FDC=y
29# CONFIG_RWSEM_GENERIC_SPINLOCK is not set 27# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
30CONFIG_RWSEM_XCHGADD_ALGORITHM=y 28CONFIG_RWSEM_XCHGADD_ALGORITHM=y
31# CONFIG_ARCH_HAS_ILOG2_U32 is not set
32# CONFIG_ARCH_HAS_ILOG2_U64 is not set
33CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y 29CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
34CONFIG_GENERIC_CALIBRATE_DELAY=y 30CONFIG_GENERIC_CALIBRATE_DELAY=y
35# CONFIG_GENERIC_TIME_VSYSCALL is not set 31# CONFIG_GENERIC_TIME_VSYSCALL is not set
36CONFIG_ARCH_HAS_CPU_RELAX=y 32CONFIG_ARCH_HAS_CPU_RELAX=y
33CONFIG_ARCH_HAS_DEFAULT_IDLE=y
37CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y 34CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
38CONFIG_HAVE_SETUP_PER_CPU_AREA=y 35CONFIG_HAVE_SETUP_PER_CPU_AREA=y
39# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set 36# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
@@ -42,12 +39,12 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
42# CONFIG_ZONE_DMA32 is not set 39# CONFIG_ZONE_DMA32 is not set
43CONFIG_ARCH_POPULATES_NODE_MAP=y 40CONFIG_ARCH_POPULATES_NODE_MAP=y
44# CONFIG_AUDIT_ARCH is not set 41# CONFIG_AUDIT_ARCH is not set
45CONFIG_ARCH_SUPPORTS_AOUT=y
46CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y 42CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
47CONFIG_GENERIC_HARDIRQS=y 43CONFIG_GENERIC_HARDIRQS=y
48CONFIG_GENERIC_IRQ_PROBE=y 44CONFIG_GENERIC_IRQ_PROBE=y
49CONFIG_GENERIC_PENDING_IRQ=y 45CONFIG_GENERIC_PENDING_IRQ=y
50CONFIG_X86_SMP=y 46CONFIG_X86_SMP=y
47CONFIG_USE_GENERIC_SMP_HELPERS=y
51CONFIG_X86_32_SMP=y 48CONFIG_X86_32_SMP=y
52CONFIG_X86_HT=y 49CONFIG_X86_HT=y
53CONFIG_X86_BIOS_REBOOT=y 50CONFIG_X86_BIOS_REBOOT=y
@@ -75,12 +72,21 @@ CONFIG_TASK_DELAY_ACCT=y
75CONFIG_AUDIT=y 72CONFIG_AUDIT=y
76CONFIG_AUDITSYSCALL=y 73CONFIG_AUDITSYSCALL=y
77CONFIG_AUDIT_TREE=y 74CONFIG_AUDIT_TREE=y
75
76#
77# RCU Subsystem
78#
79CONFIG_CLASSIC_RCU=y
80# CONFIG_TREE_RCU is not set
81# CONFIG_PREEMPT_RCU is not set
82# CONFIG_TREE_RCU_TRACE is not set
83# CONFIG_PREEMPT_RCU_TRACE is not set
78CONFIG_IKCONFIG=y 84CONFIG_IKCONFIG=y
79CONFIG_IKCONFIG_PROC=y 85CONFIG_IKCONFIG_PROC=y
80CONFIG_LOG_BUF_SHIFT=15 86CONFIG_LOG_BUF_SHIFT=15
81# CONFIG_CGROUPS is not set
82CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y 87CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
83# CONFIG_GROUP_SCHED is not set 88# CONFIG_GROUP_SCHED is not set
89# CONFIG_CGROUPS is not set
84CONFIG_SYSFS_DEPRECATED=y 90CONFIG_SYSFS_DEPRECATED=y
85CONFIG_SYSFS_DEPRECATED_V2=y 91CONFIG_SYSFS_DEPRECATED_V2=y
86CONFIG_RELAY=y 92CONFIG_RELAY=y
@@ -89,11 +95,12 @@ CONFIG_NAMESPACES=y
89# CONFIG_IPC_NS is not set 95# CONFIG_IPC_NS is not set
90# CONFIG_USER_NS is not set 96# CONFIG_USER_NS is not set
91# CONFIG_PID_NS is not set 97# CONFIG_PID_NS is not set
98# CONFIG_NET_NS is not set
92CONFIG_BLK_DEV_INITRD=y 99CONFIG_BLK_DEV_INITRD=y
93CONFIG_INITRAMFS_SOURCE="" 100CONFIG_INITRAMFS_SOURCE=""
94CONFIG_CC_OPTIMIZE_FOR_SIZE=y 101CONFIG_CC_OPTIMIZE_FOR_SIZE=y
95# CONFIG_FASTBOOT is not set
96CONFIG_SYSCTL=y 102CONFIG_SYSCTL=y
103CONFIG_ANON_INODES=y
97# CONFIG_EMBEDDED is not set 104# CONFIG_EMBEDDED is not set
98CONFIG_UID16=y 105CONFIG_UID16=y
99CONFIG_SYSCTL_SYSCALL=y 106CONFIG_SYSCTL_SYSCALL=y
@@ -105,20 +112,22 @@ CONFIG_PRINTK=y
105CONFIG_BUG=y 112CONFIG_BUG=y
106CONFIG_ELF_CORE=y 113CONFIG_ELF_CORE=y
107CONFIG_PCSPKR_PLATFORM=y 114CONFIG_PCSPKR_PLATFORM=y
108CONFIG_COMPAT_BRK=y
109CONFIG_BASE_FULL=y 115CONFIG_BASE_FULL=y
110CONFIG_FUTEX=y 116CONFIG_FUTEX=y
111CONFIG_ANON_INODES=y
112CONFIG_EPOLL=y 117CONFIG_EPOLL=y
113CONFIG_SIGNALFD=y 118CONFIG_SIGNALFD=y
114CONFIG_TIMERFD=y 119CONFIG_TIMERFD=y
115CONFIG_EVENTFD=y 120CONFIG_EVENTFD=y
116CONFIG_SHMEM=y 121CONFIG_SHMEM=y
122CONFIG_AIO=y
117CONFIG_VM_EVENT_COUNTERS=y 123CONFIG_VM_EVENT_COUNTERS=y
124CONFIG_PCI_QUIRKS=y
125CONFIG_COMPAT_BRK=y
118CONFIG_SLAB=y 126CONFIG_SLAB=y
119# CONFIG_SLUB is not set 127# CONFIG_SLUB is not set
120# CONFIG_SLOB is not set 128# CONFIG_SLOB is not set
121CONFIG_PROFILING=y 129CONFIG_PROFILING=y
130CONFIG_TRACEPOINTS=y
122# CONFIG_MARKERS is not set 131# CONFIG_MARKERS is not set
123# CONFIG_OPROFILE is not set 132# CONFIG_OPROFILE is not set
124CONFIG_HAVE_OPROFILE=y 133CONFIG_HAVE_OPROFILE=y
@@ -127,15 +136,10 @@ CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
127CONFIG_HAVE_IOREMAP_PROT=y 136CONFIG_HAVE_IOREMAP_PROT=y
128CONFIG_HAVE_KPROBES=y 137CONFIG_HAVE_KPROBES=y
129CONFIG_HAVE_KRETPROBES=y 138CONFIG_HAVE_KRETPROBES=y
130# CONFIG_HAVE_ARCH_TRACEHOOK is not set 139CONFIG_HAVE_ARCH_TRACEHOOK=y
131# CONFIG_HAVE_DMA_ATTRS is not set
132CONFIG_USE_GENERIC_SMP_HELPERS=y
133# CONFIG_HAVE_CLK is not set
134CONFIG_PROC_PAGE_MONITOR=y
135CONFIG_HAVE_GENERIC_DMA_COHERENT=y 140CONFIG_HAVE_GENERIC_DMA_COHERENT=y
136CONFIG_SLABINFO=y 141CONFIG_SLABINFO=y
137CONFIG_RT_MUTEXES=y 142CONFIG_RT_MUTEXES=y
138# CONFIG_TINY_SHMEM is not set
139CONFIG_BASE_SMALL=0 143CONFIG_BASE_SMALL=0
140CONFIG_MODULES=y 144CONFIG_MODULES=y
141# CONFIG_MODULE_FORCE_LOAD is not set 145# CONFIG_MODULE_FORCE_LOAD is not set
@@ -143,12 +147,10 @@ CONFIG_MODULE_UNLOAD=y
143CONFIG_MODULE_FORCE_UNLOAD=y 147CONFIG_MODULE_FORCE_UNLOAD=y
144CONFIG_MODVERSIONS=y 148CONFIG_MODVERSIONS=y
145CONFIG_MODULE_SRCVERSION_ALL=y 149CONFIG_MODULE_SRCVERSION_ALL=y
146CONFIG_KMOD=y
147CONFIG_STOP_MACHINE=y 150CONFIG_STOP_MACHINE=y
148CONFIG_BLOCK=y 151CONFIG_BLOCK=y
149CONFIG_LBD=y 152CONFIG_LBD=y
150CONFIG_BLK_DEV_IO_TRACE=y 153CONFIG_BLK_DEV_IO_TRACE=y
151CONFIG_LSF=y
152# CONFIG_BLK_DEV_BSG is not set 154# CONFIG_BLK_DEV_BSG is not set
153# CONFIG_BLK_DEV_INTEGRITY is not set 155# CONFIG_BLK_DEV_INTEGRITY is not set
154 156
@@ -164,7 +166,7 @@ CONFIG_IOSCHED_CFQ=y
164CONFIG_DEFAULT_CFQ=y 166CONFIG_DEFAULT_CFQ=y
165# CONFIG_DEFAULT_NOOP is not set 167# CONFIG_DEFAULT_NOOP is not set
166CONFIG_DEFAULT_IOSCHED="cfq" 168CONFIG_DEFAULT_IOSCHED="cfq"
167CONFIG_CLASSIC_RCU=y 169CONFIG_FREEZER=y
168 170
169# 171#
170# Processor type and features 172# Processor type and features
@@ -174,6 +176,7 @@ CONFIG_NO_HZ=y
174CONFIG_HIGH_RES_TIMERS=y 176CONFIG_HIGH_RES_TIMERS=y
175CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 177CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
176CONFIG_SMP=y 178CONFIG_SMP=y
179# CONFIG_SPARSE_IRQ is not set
177CONFIG_X86_FIND_SMP_CONFIG=y 180CONFIG_X86_FIND_SMP_CONFIG=y
178CONFIG_X86_MPPARSE=y 181CONFIG_X86_MPPARSE=y
179# CONFIG_X86_PC is not set 182# CONFIG_X86_PC is not set
@@ -186,7 +189,7 @@ CONFIG_X86_GENERICARCH=y
186# CONFIG_X86_BIGSMP is not set 189# CONFIG_X86_BIGSMP is not set
187# CONFIG_X86_VSMP is not set 190# CONFIG_X86_VSMP is not set
188# CONFIG_X86_RDC321X is not set 191# CONFIG_X86_RDC321X is not set
189CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 192CONFIG_SCHED_OMIT_FRAME_POINTER=y
190# CONFIG_PARAVIRT_GUEST is not set 193# CONFIG_PARAVIRT_GUEST is not set
191# CONFIG_MEMTEST is not set 194# CONFIG_MEMTEST is not set
192CONFIG_X86_CYCLONE_TIMER=y 195CONFIG_X86_CYCLONE_TIMER=y
@@ -206,7 +209,6 @@ CONFIG_M586=y
206# CONFIG_MCRUSOE is not set 209# CONFIG_MCRUSOE is not set
207# CONFIG_MEFFICEON is not set 210# CONFIG_MEFFICEON is not set
208# CONFIG_MWINCHIPC6 is not set 211# CONFIG_MWINCHIPC6 is not set
209# CONFIG_MWINCHIP2 is not set
210# CONFIG_MWINCHIP3D is not set 212# CONFIG_MWINCHIP3D is not set
211# CONFIG_MGEODEGX1 is not set 213# CONFIG_MGEODEGX1 is not set
212# CONFIG_MGEODE_LX is not set 214# CONFIG_MGEODE_LX is not set
@@ -230,9 +232,16 @@ CONFIG_X86_POPAD_OK=y
230CONFIG_X86_ALIGNMENT_16=y 232CONFIG_X86_ALIGNMENT_16=y
231CONFIG_X86_INTEL_USERCOPY=y 233CONFIG_X86_INTEL_USERCOPY=y
232CONFIG_X86_MINIMUM_CPU_FAMILY=4 234CONFIG_X86_MINIMUM_CPU_FAMILY=4
235CONFIG_CPU_SUP_INTEL=y
236CONFIG_CPU_SUP_CYRIX_32=y
237CONFIG_CPU_SUP_AMD=y
238CONFIG_CPU_SUP_CENTAUR_32=y
239CONFIG_CPU_SUP_TRANSMETA_32=y
240CONFIG_CPU_SUP_UMC_32=y
233CONFIG_HPET_TIMER=y 241CONFIG_HPET_TIMER=y
234CONFIG_DMI=y 242CONFIG_DMI=y
235# CONFIG_IOMMU_HELPER is not set 243# CONFIG_IOMMU_HELPER is not set
244# CONFIG_IOMMU_API is not set
236CONFIG_NR_CPUS=8 245CONFIG_NR_CPUS=8
237# CONFIG_SCHED_SMT is not set 246# CONFIG_SCHED_SMT is not set
238CONFIG_SCHED_MC=y 247CONFIG_SCHED_MC=y
@@ -241,6 +250,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
241# CONFIG_PREEMPT is not set 250# CONFIG_PREEMPT is not set
242CONFIG_X86_LOCAL_APIC=y 251CONFIG_X86_LOCAL_APIC=y
243CONFIG_X86_IO_APIC=y 252CONFIG_X86_IO_APIC=y
253# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
244CONFIG_X86_MCE=y 254CONFIG_X86_MCE=y
245CONFIG_X86_MCE_NONFATAL=y 255CONFIG_X86_MCE_NONFATAL=y
246# CONFIG_X86_MCE_P4THERMAL is not set 256# CONFIG_X86_MCE_P4THERMAL is not set
@@ -249,6 +259,8 @@ CONFIG_VM86=y
249# CONFIG_I8K is not set 259# CONFIG_I8K is not set
250CONFIG_X86_REBOOTFIXUPS=y 260CONFIG_X86_REBOOTFIXUPS=y
251CONFIG_MICROCODE=m 261CONFIG_MICROCODE=m
262CONFIG_MICROCODE_INTEL=y
263# CONFIG_MICROCODE_AMD is not set
252CONFIG_MICROCODE_OLD_INTERFACE=y 264CONFIG_MICROCODE_OLD_INTERFACE=y
253CONFIG_X86_MSR=m 265CONFIG_X86_MSR=m
254CONFIG_X86_CPUID=m 266CONFIG_X86_CPUID=m
@@ -257,27 +269,32 @@ CONFIG_HIGHMEM4G=y
257# CONFIG_HIGHMEM64G is not set 269# CONFIG_HIGHMEM64G is not set
258CONFIG_PAGE_OFFSET=0xC0000000 270CONFIG_PAGE_OFFSET=0xC0000000
259CONFIG_HIGHMEM=y 271CONFIG_HIGHMEM=y
272# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
273CONFIG_ARCH_FLATMEM_ENABLE=y
274CONFIG_ARCH_SPARSEMEM_ENABLE=y
275CONFIG_ARCH_SELECT_MEMORY_MODEL=y
260CONFIG_SELECT_MEMORY_MODEL=y 276CONFIG_SELECT_MEMORY_MODEL=y
261CONFIG_FLATMEM_MANUAL=y 277CONFIG_FLATMEM_MANUAL=y
262# CONFIG_DISCONTIGMEM_MANUAL is not set 278# CONFIG_DISCONTIGMEM_MANUAL is not set
263# CONFIG_SPARSEMEM_MANUAL is not set 279# CONFIG_SPARSEMEM_MANUAL is not set
264CONFIG_FLATMEM=y 280CONFIG_FLATMEM=y
265CONFIG_FLAT_NODE_MEM_MAP=y 281CONFIG_FLAT_NODE_MEM_MAP=y
266# CONFIG_SPARSEMEM_STATIC is not set 282CONFIG_SPARSEMEM_STATIC=y
267# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
268CONFIG_PAGEFLAGS_EXTENDED=y 283CONFIG_PAGEFLAGS_EXTENDED=y
269CONFIG_SPLIT_PTLOCK_CPUS=4 284CONFIG_SPLIT_PTLOCK_CPUS=4
270# CONFIG_RESOURCES_64BIT is not set 285# CONFIG_PHYS_ADDR_T_64BIT is not set
271CONFIG_ZONE_DMA_FLAG=1 286CONFIG_ZONE_DMA_FLAG=1
272CONFIG_BOUNCE=y 287CONFIG_BOUNCE=y
273CONFIG_VIRT_TO_BUS=y 288CONFIG_VIRT_TO_BUS=y
289CONFIG_UNEVICTABLE_LRU=y
274CONFIG_HIGHPTE=y 290CONFIG_HIGHPTE=y
291# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
292CONFIG_X86_RESERVE_LOW_64K=y
275# CONFIG_MATH_EMULATION is not set 293# CONFIG_MATH_EMULATION is not set
276CONFIG_MTRR=y 294CONFIG_MTRR=y
277# CONFIG_MTRR_SANITIZER is not set 295# CONFIG_MTRR_SANITIZER is not set
278# CONFIG_X86_PAT is not set 296# CONFIG_X86_PAT is not set
279CONFIG_EFI=y 297CONFIG_EFI=y
280# CONFIG_IRQBALANCE is not set
281CONFIG_SECCOMP=y 298CONFIG_SECCOMP=y
282# CONFIG_HZ_100 is not set 299# CONFIG_HZ_100 is not set
283CONFIG_HZ_250=y 300CONFIG_HZ_250=y
@@ -293,13 +310,18 @@ CONFIG_PHYSICAL_START=0x100000
293CONFIG_PHYSICAL_ALIGN=0x100000 310CONFIG_PHYSICAL_ALIGN=0x100000
294CONFIG_HOTPLUG_CPU=y 311CONFIG_HOTPLUG_CPU=y
295CONFIG_COMPAT_VDSO=y 312CONFIG_COMPAT_VDSO=y
313# CONFIG_CMDLINE_BOOL is not set
296CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 314CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
297 315
298# 316#
299# Power management options 317# Power management and ACPI options
300# 318#
301CONFIG_PM=y 319CONFIG_PM=y
302# CONFIG_PM_DEBUG is not set 320CONFIG_PM_DEBUG=y
321CONFIG_PM_VERBOSE=y
322CONFIG_CAN_PM_TRACE=y
323CONFIG_PM_TRACE=y
324CONFIG_PM_TRACE_RTC=y
303CONFIG_PM_SLEEP_SMP=y 325CONFIG_PM_SLEEP_SMP=y
304CONFIG_PM_SLEEP=y 326CONFIG_PM_SLEEP=y
305CONFIG_SUSPEND=y 327CONFIG_SUSPEND=y
@@ -318,21 +340,14 @@ CONFIG_ACPI_BUTTON=y
318CONFIG_ACPI_VIDEO=y 340CONFIG_ACPI_VIDEO=y
319CONFIG_ACPI_FAN=y 341CONFIG_ACPI_FAN=y
320CONFIG_ACPI_DOCK=y 342CONFIG_ACPI_DOCK=y
321# CONFIG_ACPI_BAY is not set
322CONFIG_ACPI_PROCESSOR=y 343CONFIG_ACPI_PROCESSOR=y
323CONFIG_ACPI_HOTPLUG_CPU=y 344CONFIG_ACPI_HOTPLUG_CPU=y
324CONFIG_ACPI_THERMAL=y 345CONFIG_ACPI_THERMAL=y
325# CONFIG_ACPI_WMI is not set
326# CONFIG_ACPI_ASUS is not set
327# CONFIG_ACPI_TOSHIBA is not set
328CONFIG_ACPI_CUSTOM_DSDT_FILE="" 346CONFIG_ACPI_CUSTOM_DSDT_FILE=""
329# CONFIG_ACPI_CUSTOM_DSDT is not set 347# CONFIG_ACPI_CUSTOM_DSDT is not set
330CONFIG_ACPI_BLACKLIST_YEAR=2001 348CONFIG_ACPI_BLACKLIST_YEAR=2001
331# CONFIG_ACPI_DEBUG is not set 349# CONFIG_ACPI_DEBUG is not set
332CONFIG_ACPI_EC=y
333# CONFIG_ACPI_PCI_SLOT is not set 350# CONFIG_ACPI_PCI_SLOT is not set
334CONFIG_ACPI_POWER=y
335CONFIG_ACPI_SYSTEM=y
336CONFIG_X86_PM_TIMER=y 351CONFIG_X86_PM_TIMER=y
337CONFIG_ACPI_CONTAINER=y 352CONFIG_ACPI_CONTAINER=y
338CONFIG_ACPI_SBS=y 353CONFIG_ACPI_SBS=y
@@ -343,7 +358,6 @@ CONFIG_APM_DO_ENABLE=y
343# CONFIG_APM_CPU_IDLE is not set 358# CONFIG_APM_CPU_IDLE is not set
344CONFIG_APM_DISPLAY_BLANK=y 359CONFIG_APM_DISPLAY_BLANK=y
345CONFIG_APM_ALLOW_INTS=y 360CONFIG_APM_ALLOW_INTS=y
346# CONFIG_APM_REAL_MODE_POWER_OFF is not set
347 361
348# 362#
349# CPU Frequency scaling 363# CPU Frequency scaling
@@ -385,7 +399,6 @@ CONFIG_X86_P4_CLOCKMOD=m
385# 399#
386# shared options 400# shared options
387# 401#
388# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
389CONFIG_X86_SPEEDSTEP_LIB=m 402CONFIG_X86_SPEEDSTEP_LIB=m
390CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y 403CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y
391CONFIG_CPU_IDLE=y 404CONFIG_CPU_IDLE=y
@@ -413,6 +426,7 @@ CONFIG_ARCH_SUPPORTS_MSI=y
413CONFIG_PCI_MSI=y 426CONFIG_PCI_MSI=y
414CONFIG_PCI_LEGACY=y 427CONFIG_PCI_LEGACY=y
415# CONFIG_PCI_DEBUG is not set 428# CONFIG_PCI_DEBUG is not set
429# CONFIG_PCI_STUB is not set
416CONFIG_HT_IRQ=y 430CONFIG_HT_IRQ=y
417CONFIG_ISA_DMA_API=y 431CONFIG_ISA_DMA_API=y
418CONFIG_ISA=y 432CONFIG_ISA=y
@@ -436,13 +450,17 @@ CONFIG_HOTPLUG_PCI_SHPC=m
436# Executable file formats / Emulations 450# Executable file formats / Emulations
437# 451#
438CONFIG_BINFMT_ELF=y 452CONFIG_BINFMT_ELF=y
453# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
454CONFIG_HAVE_AOUT=y
439CONFIG_BINFMT_AOUT=m 455CONFIG_BINFMT_AOUT=m
440CONFIG_BINFMT_MISC=m 456CONFIG_BINFMT_MISC=m
457CONFIG_HAVE_ATOMIC_IOMAP=y
441CONFIG_NET=y 458CONFIG_NET=y
442 459
443# 460#
444# Networking options 461# Networking options
445# 462#
463CONFIG_COMPAT_NET_DEV_OPS=y
446CONFIG_PACKET=m 464CONFIG_PACKET=m
447CONFIG_PACKET_MMAP=y 465CONFIG_PACKET_MMAP=y
448CONFIG_UNIX=y 466CONFIG_UNIX=y
@@ -507,36 +525,6 @@ CONFIG_TCP_CONG_VENO=m
507CONFIG_DEFAULT_RENO=y 525CONFIG_DEFAULT_RENO=y
508CONFIG_DEFAULT_TCP_CONG="reno" 526CONFIG_DEFAULT_TCP_CONG="reno"
509# CONFIG_TCP_MD5SIG is not set 527# CONFIG_TCP_MD5SIG is not set
510CONFIG_IP_VS=m
511# CONFIG_IP_VS_DEBUG is not set
512CONFIG_IP_VS_TAB_BITS=12
513
514#
515# IPVS transport protocol load balancing support
516#
517CONFIG_IP_VS_PROTO_TCP=y
518CONFIG_IP_VS_PROTO_UDP=y
519CONFIG_IP_VS_PROTO_ESP=y
520CONFIG_IP_VS_PROTO_AH=y
521
522#
523# IPVS scheduler
524#
525CONFIG_IP_VS_RR=m
526CONFIG_IP_VS_WRR=m
527CONFIG_IP_VS_LC=m
528CONFIG_IP_VS_WLC=m
529CONFIG_IP_VS_LBLC=m
530CONFIG_IP_VS_LBLCR=m
531CONFIG_IP_VS_DH=m
532CONFIG_IP_VS_SH=m
533CONFIG_IP_VS_SED=m
534CONFIG_IP_VS_NQ=m
535
536#
537# IPVS application helper
538#
539CONFIG_IP_VS_FTP=m
540CONFIG_IPV6=m 528CONFIG_IPV6=m
541CONFIG_IPV6_PRIVACY=y 529CONFIG_IPV6_PRIVACY=y
542CONFIG_IPV6_ROUTER_PREF=y 530CONFIG_IPV6_ROUTER_PREF=y
@@ -575,8 +563,8 @@ CONFIG_NETFILTER_XTABLES=m
575CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 563CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
576# CONFIG_NETFILTER_XT_TARGET_DSCP is not set 564# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
577CONFIG_NETFILTER_XT_TARGET_MARK=m 565CONFIG_NETFILTER_XT_TARGET_MARK=m
578CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
579# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set 566# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
567CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
580# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set 568# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
581# CONFIG_NETFILTER_XT_TARGET_TRACE is not set 569# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
582CONFIG_NETFILTER_XT_TARGET_SECMARK=m 570CONFIG_NETFILTER_XT_TARGET_SECMARK=m
@@ -586,37 +574,70 @@ CONFIG_NETFILTER_XT_MATCH_COMMENT=m
586CONFIG_NETFILTER_XT_MATCH_DCCP=m 574CONFIG_NETFILTER_XT_MATCH_DCCP=m
587# CONFIG_NETFILTER_XT_MATCH_DSCP is not set 575# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
588CONFIG_NETFILTER_XT_MATCH_ESP=m 576CONFIG_NETFILTER_XT_MATCH_ESP=m
577# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
589# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set 578# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
590CONFIG_NETFILTER_XT_MATCH_LENGTH=m 579CONFIG_NETFILTER_XT_MATCH_LENGTH=m
591CONFIG_NETFILTER_XT_MATCH_LIMIT=m 580CONFIG_NETFILTER_XT_MATCH_LIMIT=m
592CONFIG_NETFILTER_XT_MATCH_MAC=m 581CONFIG_NETFILTER_XT_MATCH_MAC=m
593CONFIG_NETFILTER_XT_MATCH_MARK=m 582CONFIG_NETFILTER_XT_MATCH_MARK=m
583CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
594# CONFIG_NETFILTER_XT_MATCH_OWNER is not set 584# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
595CONFIG_NETFILTER_XT_MATCH_POLICY=m 585CONFIG_NETFILTER_XT_MATCH_POLICY=m
596CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
597CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m 586CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
598CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 587CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
599CONFIG_NETFILTER_XT_MATCH_QUOTA=m 588CONFIG_NETFILTER_XT_MATCH_QUOTA=m
600# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set 589# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
601CONFIG_NETFILTER_XT_MATCH_REALM=m 590CONFIG_NETFILTER_XT_MATCH_REALM=m
591# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
602CONFIG_NETFILTER_XT_MATCH_SCTP=m 592CONFIG_NETFILTER_XT_MATCH_SCTP=m
603CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 593CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
604CONFIG_NETFILTER_XT_MATCH_STRING=m 594CONFIG_NETFILTER_XT_MATCH_STRING=m
605CONFIG_NETFILTER_XT_MATCH_TCPMSS=m 595CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
606# CONFIG_NETFILTER_XT_MATCH_TIME is not set 596# CONFIG_NETFILTER_XT_MATCH_TIME is not set
607# CONFIG_NETFILTER_XT_MATCH_U32 is not set 597# CONFIG_NETFILTER_XT_MATCH_U32 is not set
608# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set 598CONFIG_IP_VS=m
599# CONFIG_IP_VS_IPV6 is not set
600# CONFIG_IP_VS_DEBUG is not set
601CONFIG_IP_VS_TAB_BITS=12
602
603#
604# IPVS transport protocol load balancing support
605#
606CONFIG_IP_VS_PROTO_TCP=y
607CONFIG_IP_VS_PROTO_UDP=y
608CONFIG_IP_VS_PROTO_AH_ESP=y
609CONFIG_IP_VS_PROTO_ESP=y
610CONFIG_IP_VS_PROTO_AH=y
611
612#
613# IPVS scheduler
614#
615CONFIG_IP_VS_RR=m
616CONFIG_IP_VS_WRR=m
617CONFIG_IP_VS_LC=m
618CONFIG_IP_VS_WLC=m
619CONFIG_IP_VS_LBLC=m
620CONFIG_IP_VS_LBLCR=m
621CONFIG_IP_VS_DH=m
622CONFIG_IP_VS_SH=m
623CONFIG_IP_VS_SED=m
624CONFIG_IP_VS_NQ=m
625
626#
627# IPVS application helper
628#
629CONFIG_IP_VS_FTP=m
609 630
610# 631#
611# IP: Netfilter Configuration 632# IP: Netfilter Configuration
612# 633#
634# CONFIG_NF_DEFRAG_IPV4 is not set
613CONFIG_IP_NF_QUEUE=m 635CONFIG_IP_NF_QUEUE=m
614CONFIG_IP_NF_IPTABLES=m 636CONFIG_IP_NF_IPTABLES=m
615CONFIG_IP_NF_MATCH_RECENT=m 637CONFIG_IP_NF_MATCH_ADDRTYPE=m
616CONFIG_IP_NF_MATCH_ECN=m
617CONFIG_IP_NF_MATCH_AH=m 638CONFIG_IP_NF_MATCH_AH=m
639CONFIG_IP_NF_MATCH_ECN=m
618CONFIG_IP_NF_MATCH_TTL=m 640CONFIG_IP_NF_MATCH_TTL=m
619CONFIG_IP_NF_MATCH_ADDRTYPE=m
620CONFIG_IP_NF_FILTER=m 641CONFIG_IP_NF_FILTER=m
621CONFIG_IP_NF_TARGET_REJECT=m 642CONFIG_IP_NF_TARGET_REJECT=m
622CONFIG_IP_NF_TARGET_LOG=m 643CONFIG_IP_NF_TARGET_LOG=m
@@ -635,16 +656,16 @@ CONFIG_IP_NF_ARP_MANGLE=m
635# 656#
636CONFIG_IP6_NF_QUEUE=m 657CONFIG_IP6_NF_QUEUE=m
637CONFIG_IP6_NF_IPTABLES=m 658CONFIG_IP6_NF_IPTABLES=m
638CONFIG_IP6_NF_MATCH_RT=m 659CONFIG_IP6_NF_MATCH_AH=m
639CONFIG_IP6_NF_MATCH_OPTS=m 660CONFIG_IP6_NF_MATCH_EUI64=m
640CONFIG_IP6_NF_MATCH_FRAG=m 661CONFIG_IP6_NF_MATCH_FRAG=m
662CONFIG_IP6_NF_MATCH_OPTS=m
641CONFIG_IP6_NF_MATCH_HL=m 663CONFIG_IP6_NF_MATCH_HL=m
642CONFIG_IP6_NF_MATCH_IPV6HEADER=m 664CONFIG_IP6_NF_MATCH_IPV6HEADER=m
643CONFIG_IP6_NF_MATCH_AH=m
644# CONFIG_IP6_NF_MATCH_MH is not set 665# CONFIG_IP6_NF_MATCH_MH is not set
645CONFIG_IP6_NF_MATCH_EUI64=m 666CONFIG_IP6_NF_MATCH_RT=m
646CONFIG_IP6_NF_FILTER=m
647CONFIG_IP6_NF_TARGET_LOG=m 667CONFIG_IP6_NF_TARGET_LOG=m
668CONFIG_IP6_NF_FILTER=m
648CONFIG_IP6_NF_TARGET_REJECT=m 669CONFIG_IP6_NF_TARGET_REJECT=m
649CONFIG_IP6_NF_MANGLE=m 670CONFIG_IP6_NF_MANGLE=m
650CONFIG_IP6_NF_TARGET_HL=m 671CONFIG_IP6_NF_TARGET_HL=m
@@ -655,10 +676,6 @@ CONFIG_IP6_NF_RAW=m
655# DECnet: Netfilter Configuration 676# DECnet: Netfilter Configuration
656# 677#
657CONFIG_DECNET_NF_GRABULATOR=m 678CONFIG_DECNET_NF_GRABULATOR=m
658
659#
660# Bridge: Netfilter Configuration
661#
662CONFIG_BRIDGE_NF_EBTABLES=m 679CONFIG_BRIDGE_NF_EBTABLES=m
663CONFIG_BRIDGE_EBT_BROUTE=m 680CONFIG_BRIDGE_EBT_BROUTE=m
664CONFIG_BRIDGE_EBT_T_FILTER=m 681CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -683,17 +700,15 @@ CONFIG_BRIDGE_EBT_ULOG=m
683# CONFIG_BRIDGE_EBT_NFLOG is not set 700# CONFIG_BRIDGE_EBT_NFLOG is not set
684CONFIG_IP_DCCP=m 701CONFIG_IP_DCCP=m
685CONFIG_INET_DCCP_DIAG=m 702CONFIG_INET_DCCP_DIAG=m
686CONFIG_IP_DCCP_ACKVEC=y
687 703
688# 704#
689# DCCP CCIDs Configuration (EXPERIMENTAL) 705# DCCP CCIDs Configuration (EXPERIMENTAL)
690# 706#
691CONFIG_IP_DCCP_CCID2=m
692# CONFIG_IP_DCCP_CCID2_DEBUG is not set 707# CONFIG_IP_DCCP_CCID2_DEBUG is not set
693CONFIG_IP_DCCP_CCID3=m 708CONFIG_IP_DCCP_CCID3=y
694# CONFIG_IP_DCCP_CCID3_DEBUG is not set 709# CONFIG_IP_DCCP_CCID3_DEBUG is not set
695CONFIG_IP_DCCP_CCID3_RTO=100 710CONFIG_IP_DCCP_CCID3_RTO=100
696CONFIG_IP_DCCP_TFRC_LIB=m 711CONFIG_IP_DCCP_TFRC_LIB=y
697 712
698# 713#
699# DCCP Kernel Hacking 714# DCCP Kernel Hacking
@@ -715,6 +730,7 @@ CONFIG_ATM_BR2684=m
715# CONFIG_ATM_BR2684_IPFILTER is not set 730# CONFIG_ATM_BR2684_IPFILTER is not set
716CONFIG_STP=m 731CONFIG_STP=m
717CONFIG_BRIDGE=m 732CONFIG_BRIDGE=m
733# CONFIG_NET_DSA is not set
718CONFIG_VLAN_8021Q=m 734CONFIG_VLAN_8021Q=m
719# CONFIG_VLAN_8021Q_GVRP is not set 735# CONFIG_VLAN_8021Q_GVRP is not set
720CONFIG_DECNET=m 736CONFIG_DECNET=m
@@ -748,6 +764,7 @@ CONFIG_NET_SCH_HTB=m
748CONFIG_NET_SCH_HFSC=m 764CONFIG_NET_SCH_HFSC=m
749CONFIG_NET_SCH_ATM=m 765CONFIG_NET_SCH_ATM=m
750CONFIG_NET_SCH_PRIO=m 766CONFIG_NET_SCH_PRIO=m
767# CONFIG_NET_SCH_MULTIQ is not set
751CONFIG_NET_SCH_RED=m 768CONFIG_NET_SCH_RED=m
752CONFIG_NET_SCH_SFQ=m 769CONFIG_NET_SCH_SFQ=m
753CONFIG_NET_SCH_TEQL=m 770CONFIG_NET_SCH_TEQL=m
@@ -755,6 +772,7 @@ CONFIG_NET_SCH_TBF=m
755CONFIG_NET_SCH_GRED=m 772CONFIG_NET_SCH_GRED=m
756CONFIG_NET_SCH_DSMARK=m 773CONFIG_NET_SCH_DSMARK=m
757CONFIG_NET_SCH_NETEM=m 774CONFIG_NET_SCH_NETEM=m
775# CONFIG_NET_SCH_DRR is not set
758CONFIG_NET_SCH_INGRESS=m 776CONFIG_NET_SCH_INGRESS=m
759 777
760# 778#
@@ -782,8 +800,10 @@ CONFIG_NET_ACT_IPT=m
782# CONFIG_NET_ACT_NAT is not set 800# CONFIG_NET_ACT_NAT is not set
783CONFIG_NET_ACT_PEDIT=m 801CONFIG_NET_ACT_PEDIT=m
784CONFIG_NET_ACT_SIMP=m 802CONFIG_NET_ACT_SIMP=m
803# CONFIG_NET_ACT_SKBEDIT is not set
785# CONFIG_NET_CLS_IND is not set 804# CONFIG_NET_CLS_IND is not set
786CONFIG_NET_SCH_FIFO=y 805CONFIG_NET_SCH_FIFO=y
806# CONFIG_DCB is not set
787 807
788# 808#
789# Network testing 809# Network testing
@@ -805,8 +825,6 @@ CONFIG_BT_HIDP=m
805# 825#
806# Bluetooth device drivers 826# Bluetooth device drivers
807# 827#
808CONFIG_BT_HCIUSB=m
809CONFIG_BT_HCIUSB_SCO=y
810# CONFIG_BT_HCIBTUSB is not set 828# CONFIG_BT_HCIBTUSB is not set
811# CONFIG_BT_HCIBTSDIO is not set 829# CONFIG_BT_HCIBTSDIO is not set
812CONFIG_BT_HCIUART=m 830CONFIG_BT_HCIUART=m
@@ -818,20 +836,19 @@ CONFIG_BT_HCIBPA10X=m
818CONFIG_BT_HCIBFUSB=m 836CONFIG_BT_HCIBFUSB=m
819CONFIG_BT_HCIVHCI=m 837CONFIG_BT_HCIVHCI=m
820# CONFIG_AF_RXRPC is not set 838# CONFIG_AF_RXRPC is not set
839# CONFIG_PHONET is not set
821CONFIG_FIB_RULES=y 840CONFIG_FIB_RULES=y
822 841CONFIG_WIRELESS=y
823#
824# Wireless
825#
826# CONFIG_CFG80211 is not set 842# CONFIG_CFG80211 is not set
843CONFIG_WIRELESS_OLD_REGULATORY=y
827CONFIG_WIRELESS_EXT=y 844CONFIG_WIRELESS_EXT=y
828CONFIG_WIRELESS_EXT_SYSFS=y 845CONFIG_WIRELESS_EXT_SYSFS=y
846CONFIG_LIB80211=m
847CONFIG_LIB80211_CRYPT_WEP=m
848CONFIG_LIB80211_CRYPT_CCMP=m
849CONFIG_LIB80211_CRYPT_TKIP=m
829# CONFIG_MAC80211 is not set 850# CONFIG_MAC80211 is not set
830CONFIG_IEEE80211=m 851# CONFIG_WIMAX is not set
831# CONFIG_IEEE80211_DEBUG is not set
832CONFIG_IEEE80211_CRYPT_WEP=m
833CONFIG_IEEE80211_CRYPT_CCMP=m
834CONFIG_IEEE80211_CRYPT_TKIP=m
835# CONFIG_RFKILL is not set 852# CONFIG_RFKILL is not set
836# CONFIG_NET_9P is not set 853# CONFIG_NET_9P is not set
837 854
@@ -857,6 +874,7 @@ CONFIG_MTD=m
857# CONFIG_MTD_DEBUG is not set 874# CONFIG_MTD_DEBUG is not set
858CONFIG_MTD_CONCAT=m 875CONFIG_MTD_CONCAT=m
859CONFIG_MTD_PARTITIONS=y 876CONFIG_MTD_PARTITIONS=y
877# CONFIG_MTD_TESTS is not set
860CONFIG_MTD_REDBOOT_PARTS=m 878CONFIG_MTD_REDBOOT_PARTS=m
861CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 879CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
862# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set 880# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
@@ -867,6 +885,7 @@ CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
867# User Modules And Translation Layers 885# User Modules And Translation Layers
868# 886#
869CONFIG_MTD_CHAR=m 887CONFIG_MTD_CHAR=m
888CONFIG_HAVE_MTD_OTP=y
870CONFIG_MTD_BLKDEVS=m 889CONFIG_MTD_BLKDEVS=m
871CONFIG_MTD_BLOCK=m 890CONFIG_MTD_BLOCK=m
872# CONFIG_MTD_BLOCK_RO is not set 891# CONFIG_MTD_BLOCK_RO is not set
@@ -912,9 +931,7 @@ CONFIG_MTD_ABSENT=m
912# 931#
913CONFIG_MTD_COMPLEX_MAPPINGS=y 932CONFIG_MTD_COMPLEX_MAPPINGS=y
914CONFIG_MTD_PHYSMAP=m 933CONFIG_MTD_PHYSMAP=m
915CONFIG_MTD_PHYSMAP_START=0x8000000 934# CONFIG_MTD_PHYSMAP_COMPAT is not set
916CONFIG_MTD_PHYSMAP_LEN=0x4000000
917CONFIG_MTD_PHYSMAP_BANKWIDTH=2
918CONFIG_MTD_SC520CDP=m 935CONFIG_MTD_SC520CDP=m
919CONFIG_MTD_NETSC520=m 936CONFIG_MTD_NETSC520=m
920CONFIG_MTD_TS5500=m 937CONFIG_MTD_TS5500=m
@@ -980,12 +997,17 @@ CONFIG_MTD_ONENAND_OTP=y
980# CONFIG_MTD_ONENAND_SIM is not set 997# CONFIG_MTD_ONENAND_SIM is not set
981 998
982# 999#
1000# LPDDR flash memory drivers
1001#
1002# CONFIG_MTD_LPDDR is not set
1003
1004#
983# UBI - Unsorted block images 1005# UBI - Unsorted block images
984# 1006#
985# CONFIG_MTD_UBI is not set 1007# CONFIG_MTD_UBI is not set
986# CONFIG_PARPORT is not set 1008# CONFIG_PARPORT is not set
987CONFIG_PNP=y 1009CONFIG_PNP=y
988# CONFIG_PNP_DEBUG is not set 1010CONFIG_PNP_DEBUG_MESSAGES=y
989 1011
990# 1012#
991# Protocols 1013# Protocols
@@ -1020,21 +1042,20 @@ CONFIG_ATA_OVER_ETH=m
1020CONFIG_MISC_DEVICES=y 1042CONFIG_MISC_DEVICES=y
1021# CONFIG_IBM_ASM is not set 1043# CONFIG_IBM_ASM is not set
1022# CONFIG_PHANTOM is not set 1044# CONFIG_PHANTOM is not set
1023# CONFIG_EEPROM_93CX6 is not set
1024# CONFIG_SGI_IOC4 is not set 1045# CONFIG_SGI_IOC4 is not set
1025# CONFIG_TIFM_CORE is not set 1046# CONFIG_TIFM_CORE is not set
1026# CONFIG_ACER_WMI is not set 1047# CONFIG_ICS932S401 is not set
1027# CONFIG_ASUS_LAPTOP is not set
1028# CONFIG_FUJITSU_LAPTOP is not set
1029# CONFIG_TC1100_WMI is not set
1030# CONFIG_MSI_LAPTOP is not set
1031# CONFIG_COMPAL_LAPTOP is not set
1032# CONFIG_SONY_LAPTOP is not set
1033# CONFIG_THINKPAD_ACPI is not set
1034# CONFIG_INTEL_MENLOW is not set
1035# CONFIG_EEEPC_LAPTOP is not set
1036# CONFIG_ENCLOSURE_SERVICES is not set 1048# CONFIG_ENCLOSURE_SERVICES is not set
1037# CONFIG_HP_ILO is not set 1049# CONFIG_HP_ILO is not set
1050# CONFIG_C2PORT is not set
1051
1052#
1053# EEPROM support
1054#
1055# CONFIG_EEPROM_AT24 is not set
1056# CONFIG_EEPROM_AT25 is not set
1057# CONFIG_EEPROM_LEGACY is not set
1058# CONFIG_EEPROM_93CX6 is not set
1038CONFIG_HAVE_IDE=y 1059CONFIG_HAVE_IDE=y
1039# CONFIG_IDE is not set 1060# CONFIG_IDE is not set
1040 1061
@@ -1078,6 +1099,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
1078# CONFIG_SCSI_SRP_ATTRS is not set 1099# CONFIG_SCSI_SRP_ATTRS is not set
1079CONFIG_SCSI_LOWLEVEL=y 1100CONFIG_SCSI_LOWLEVEL=y
1080# CONFIG_ISCSI_TCP is not set 1101# CONFIG_ISCSI_TCP is not set
1102# CONFIG_SCSI_CXGB3_ISCSI is not set
1081# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 1103# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
1082# CONFIG_SCSI_3W_9XXX is not set 1104# CONFIG_SCSI_3W_9XXX is not set
1083# CONFIG_SCSI_7000FASST is not set 1105# CONFIG_SCSI_7000FASST is not set
@@ -1098,6 +1120,8 @@ CONFIG_SCSI_LOWLEVEL=y
1098# CONFIG_MEGARAID_SAS is not set 1120# CONFIG_MEGARAID_SAS is not set
1099# CONFIG_SCSI_HPTIOP is not set 1121# CONFIG_SCSI_HPTIOP is not set
1100# CONFIG_SCSI_BUSLOGIC is not set 1122# CONFIG_SCSI_BUSLOGIC is not set
1123# CONFIG_LIBFC is not set
1124# CONFIG_FCOE is not set
1101# CONFIG_SCSI_DMX3191D is not set 1125# CONFIG_SCSI_DMX3191D is not set
1102# CONFIG_SCSI_DTC3280 is not set 1126# CONFIG_SCSI_DTC3280 is not set
1103# CONFIG_SCSI_EATA is not set 1127# CONFIG_SCSI_EATA is not set
@@ -1252,6 +1276,9 @@ CONFIG_SMSC_PHY=m
1252# CONFIG_BROADCOM_PHY is not set 1276# CONFIG_BROADCOM_PHY is not set
1253# CONFIG_ICPLUS_PHY is not set 1277# CONFIG_ICPLUS_PHY is not set
1254# CONFIG_REALTEK_PHY is not set 1278# CONFIG_REALTEK_PHY is not set
1279# CONFIG_NATIONAL_PHY is not set
1280# CONFIG_STE10XP is not set
1281# CONFIG_LSI_ET1011C_PHY is not set
1255# CONFIG_MDIO_BITBANG is not set 1282# CONFIG_MDIO_BITBANG is not set
1256CONFIG_NET_ETHERNET=y 1283CONFIG_NET_ETHERNET=y
1257CONFIG_MII=y 1284CONFIG_MII=y
@@ -1293,6 +1320,9 @@ CONFIG_ULI526X=m
1293# CONFIG_IBM_NEW_EMAC_RGMII is not set 1320# CONFIG_IBM_NEW_EMAC_RGMII is not set
1294# CONFIG_IBM_NEW_EMAC_TAH is not set 1321# CONFIG_IBM_NEW_EMAC_TAH is not set
1295# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 1322# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
1323# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
1324# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
1325# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
1296CONFIG_NET_PCI=y 1326CONFIG_NET_PCI=y
1297# CONFIG_PCNET32 is not set 1327# CONFIG_PCNET32 is not set
1298# CONFIG_AMD8111_ETH is not set 1328# CONFIG_AMD8111_ETH is not set
@@ -1302,7 +1332,6 @@ CONFIG_NET_PCI=y
1302# CONFIG_B44 is not set 1332# CONFIG_B44 is not set
1303# CONFIG_FORCEDETH is not set 1333# CONFIG_FORCEDETH is not set
1304# CONFIG_CS89x0 is not set 1334# CONFIG_CS89x0 is not set
1305# CONFIG_EEPRO100 is not set
1306CONFIG_E100=m 1335CONFIG_E100=m
1307# CONFIG_FEALNX is not set 1336# CONFIG_FEALNX is not set
1308# CONFIG_NATSEMI is not set 1337# CONFIG_NATSEMI is not set
@@ -1316,22 +1345,24 @@ CONFIG_8139TOO_8129=y
1316# CONFIG_R6040 is not set 1345# CONFIG_R6040 is not set
1317# CONFIG_SIS900 is not set 1346# CONFIG_SIS900 is not set
1318CONFIG_EPIC100=m 1347CONFIG_EPIC100=m
1348# CONFIG_SMSC9420 is not set
1319# CONFIG_SUNDANCE is not set 1349# CONFIG_SUNDANCE is not set
1320# CONFIG_TLAN is not set 1350# CONFIG_TLAN is not set
1321# CONFIG_VIA_RHINE is not set 1351# CONFIG_VIA_RHINE is not set
1322# CONFIG_SC92031 is not set 1352# CONFIG_SC92031 is not set
1353# CONFIG_ATL2 is not set
1323CONFIG_NETDEV_1000=y 1354CONFIG_NETDEV_1000=y
1324# CONFIG_ACENIC is not set 1355# CONFIG_ACENIC is not set
1325# CONFIG_DL2K is not set 1356# CONFIG_DL2K is not set
1326CONFIG_E1000=m 1357CONFIG_E1000=m
1327CONFIG_E1000_DISABLE_PACKET_SPLIT=y
1328# CONFIG_E1000E is not set 1358# CONFIG_E1000E is not set
1329# CONFIG_IP1000 is not set 1359# CONFIG_IP1000 is not set
1330# CONFIG_IGB is not set 1360# CONFIG_IGB is not set
1331# CONFIG_NS83820 is not set 1361# CONFIG_NS83820 is not set
1332# CONFIG_HAMACHI is not set 1362# CONFIG_HAMACHI is not set
1333# CONFIG_YELLOWFIN is not set 1363# CONFIG_YELLOWFIN is not set
1334# CONFIG_R8169 is not set 1364CONFIG_R8169=m
1365CONFIG_R8169_VLAN=y
1335# CONFIG_SIS190 is not set 1366# CONFIG_SIS190 is not set
1336CONFIG_SKGE=y 1367CONFIG_SKGE=y
1337# CONFIG_SKGE_DEBUG is not set 1368# CONFIG_SKGE_DEBUG is not set
@@ -1343,18 +1374,24 @@ CONFIG_SKY2=y
1343# CONFIG_QLA3XXX is not set 1374# CONFIG_QLA3XXX is not set
1344# CONFIG_ATL1 is not set 1375# CONFIG_ATL1 is not set
1345# CONFIG_ATL1E is not set 1376# CONFIG_ATL1E is not set
1377# CONFIG_ATL1C is not set
1378# CONFIG_JME is not set
1346CONFIG_NETDEV_10000=y 1379CONFIG_NETDEV_10000=y
1347# CONFIG_CHELSIO_T1 is not set 1380# CONFIG_CHELSIO_T1 is not set
1381CONFIG_CHELSIO_T3_DEPENDS=y
1348# CONFIG_CHELSIO_T3 is not set 1382# CONFIG_CHELSIO_T3 is not set
1383# CONFIG_ENIC is not set
1349# CONFIG_IXGBE is not set 1384# CONFIG_IXGBE is not set
1350CONFIG_IXGB=m 1385CONFIG_IXGB=m
1351# CONFIG_S2IO is not set 1386# CONFIG_S2IO is not set
1352# CONFIG_MYRI10GE is not set 1387# CONFIG_MYRI10GE is not set
1353# CONFIG_NETXEN_NIC is not set 1388# CONFIG_NETXEN_NIC is not set
1354# CONFIG_NIU is not set 1389# CONFIG_NIU is not set
1390# CONFIG_MLX4_EN is not set
1355# CONFIG_MLX4_CORE is not set 1391# CONFIG_MLX4_CORE is not set
1356# CONFIG_TEHUTI is not set 1392# CONFIG_TEHUTI is not set
1357# CONFIG_BNX2X is not set 1393# CONFIG_BNX2X is not set
1394# CONFIG_QLGE is not set
1358# CONFIG_SFC is not set 1395# CONFIG_SFC is not set
1359# CONFIG_TR is not set 1396# CONFIG_TR is not set
1360 1397
@@ -1363,13 +1400,6 @@ CONFIG_IXGB=m
1363# 1400#
1364# CONFIG_WLAN_PRE80211 is not set 1401# CONFIG_WLAN_PRE80211 is not set
1365CONFIG_WLAN_80211=y 1402CONFIG_WLAN_80211=y
1366CONFIG_IPW2100=m
1367# CONFIG_IPW2100_MONITOR is not set
1368# CONFIG_IPW2100_DEBUG is not set
1369CONFIG_IPW2200=m
1370# CONFIG_IPW2200_MONITOR is not set
1371# CONFIG_IPW2200_QOS is not set
1372# CONFIG_IPW2200_DEBUG is not set
1373# CONFIG_LIBERTAS is not set 1403# CONFIG_LIBERTAS is not set
1374# CONFIG_AIRO is not set 1404# CONFIG_AIRO is not set
1375# CONFIG_HERMES is not set 1405# CONFIG_HERMES is not set
@@ -1377,10 +1407,23 @@ CONFIG_IPW2200=m
1377# CONFIG_PRISM54 is not set 1407# CONFIG_PRISM54 is not set
1378# CONFIG_USB_ZD1201 is not set 1408# CONFIG_USB_ZD1201 is not set
1379# CONFIG_USB_NET_RNDIS_WLAN is not set 1409# CONFIG_USB_NET_RNDIS_WLAN is not set
1410CONFIG_IPW2100=m
1411# CONFIG_IPW2100_MONITOR is not set
1412# CONFIG_IPW2100_DEBUG is not set
1413CONFIG_IPW2200=m
1414# CONFIG_IPW2200_MONITOR is not set
1415# CONFIG_IPW2200_QOS is not set
1416# CONFIG_IPW2200_DEBUG is not set
1417CONFIG_LIBIPW=m
1418# CONFIG_LIBIPW_DEBUG is not set
1380# CONFIG_IWLWIFI_LEDS is not set 1419# CONFIG_IWLWIFI_LEDS is not set
1381# CONFIG_HOSTAP is not set 1420# CONFIG_HOSTAP is not set
1382 1421
1383# 1422#
1423# Enable WiMAX (Networking options) to see the WiMAX drivers
1424#
1425
1426#
1384# USB Network Adapters 1427# USB Network Adapters
1385# 1428#
1386CONFIG_USB_CATC=m 1429CONFIG_USB_CATC=m
@@ -1391,6 +1434,7 @@ CONFIG_USB_USBNET=y
1391CONFIG_USB_NET_AX8817X=y 1434CONFIG_USB_NET_AX8817X=y
1392CONFIG_USB_NET_CDCETHER=m 1435CONFIG_USB_NET_CDCETHER=m
1393# CONFIG_USB_NET_DM9601 is not set 1436# CONFIG_USB_NET_DM9601 is not set
1437# CONFIG_USB_NET_SMSC95XX is not set
1394CONFIG_USB_NET_GL620A=m 1438CONFIG_USB_NET_GL620A=m
1395CONFIG_USB_NET_NET1080=m 1439CONFIG_USB_NET_NET1080=m
1396CONFIG_USB_NET_PLUSB=m 1440CONFIG_USB_NET_PLUSB=m
@@ -1419,6 +1463,7 @@ CONFIG_ATM_DRIVERS=y
1419# CONFIG_ATM_IA is not set 1463# CONFIG_ATM_IA is not set
1420# CONFIG_ATM_FORE200E is not set 1464# CONFIG_ATM_FORE200E is not set
1421# CONFIG_ATM_HE is not set 1465# CONFIG_ATM_HE is not set
1466# CONFIG_ATM_SOLOS is not set
1422# CONFIG_FDDI is not set 1467# CONFIG_FDDI is not set
1423# CONFIG_HIPPI is not set 1468# CONFIG_HIPPI is not set
1424CONFIG_PPP=m 1469CONFIG_PPP=m
@@ -1482,6 +1527,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
1482CONFIG_MOUSE_PS2_SYNAPTICS=y 1527CONFIG_MOUSE_PS2_SYNAPTICS=y
1483CONFIG_MOUSE_PS2_LIFEBOOK=y 1528CONFIG_MOUSE_PS2_LIFEBOOK=y
1484CONFIG_MOUSE_PS2_TRACKPOINT=y 1529CONFIG_MOUSE_PS2_TRACKPOINT=y
1530# CONFIG_MOUSE_PS2_ELANTECH is not set
1485# CONFIG_MOUSE_PS2_TOUCHKIT is not set 1531# CONFIG_MOUSE_PS2_TOUCHKIT is not set
1486CONFIG_MOUSE_SERIAL=m 1532CONFIG_MOUSE_SERIAL=m
1487# CONFIG_MOUSE_APPLETOUCH is not set 1533# CONFIG_MOUSE_APPLETOUCH is not set
@@ -1521,6 +1567,7 @@ CONFIG_TOUCHSCREEN_ADS7846=m
1521# CONFIG_TOUCHSCREEN_FUJITSU is not set 1567# CONFIG_TOUCHSCREEN_FUJITSU is not set
1522CONFIG_TOUCHSCREEN_GUNZE=m 1568CONFIG_TOUCHSCREEN_GUNZE=m
1523CONFIG_TOUCHSCREEN_ELO=m 1569CONFIG_TOUCHSCREEN_ELO=m
1570# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
1524CONFIG_TOUCHSCREEN_MTOUCH=m 1571CONFIG_TOUCHSCREEN_MTOUCH=m
1525# CONFIG_TOUCHSCREEN_INEXIO is not set 1572# CONFIG_TOUCHSCREEN_INEXIO is not set
1526CONFIG_TOUCHSCREEN_MK712=m 1573CONFIG_TOUCHSCREEN_MK712=m
@@ -1528,10 +1575,10 @@ CONFIG_TOUCHSCREEN_MK712=m
1528# CONFIG_TOUCHSCREEN_PENMOUNT is not set 1575# CONFIG_TOUCHSCREEN_PENMOUNT is not set
1529# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set 1576# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
1530# CONFIG_TOUCHSCREEN_TOUCHWIN is not set 1577# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
1531# CONFIG_TOUCHSCREEN_UCB1400 is not set
1532# CONFIG_TOUCHSCREEN_WM97XX is not set 1578# CONFIG_TOUCHSCREEN_WM97XX is not set
1533# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set 1579# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
1534# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set 1580# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
1581# CONFIG_TOUCHSCREEN_TSC2007 is not set
1535CONFIG_INPUT_MISC=y 1582CONFIG_INPUT_MISC=y
1536CONFIG_INPUT_PCSPKR=y 1583CONFIG_INPUT_PCSPKR=y
1537# CONFIG_INPUT_APANEL is not set 1584# CONFIG_INPUT_APANEL is not set
@@ -1542,6 +1589,7 @@ CONFIG_INPUT_WISTRON_BTNS=m
1542# CONFIG_INPUT_KEYSPAN_REMOTE is not set 1589# CONFIG_INPUT_KEYSPAN_REMOTE is not set
1543# CONFIG_INPUT_POWERMATE is not set 1590# CONFIG_INPUT_POWERMATE is not set
1544# CONFIG_INPUT_YEALINK is not set 1591# CONFIG_INPUT_YEALINK is not set
1592# CONFIG_INPUT_CM109 is not set
1545CONFIG_INPUT_UINPUT=m 1593CONFIG_INPUT_UINPUT=m
1546 1594
1547# 1595#
@@ -1574,7 +1622,6 @@ CONFIG_SERIAL_NONSTANDARD=y
1574# CONFIG_ROCKETPORT is not set 1622# CONFIG_ROCKETPORT is not set
1575# CONFIG_CYCLADES is not set 1623# CONFIG_CYCLADES is not set
1576# CONFIG_DIGIEPCA is not set 1624# CONFIG_DIGIEPCA is not set
1577# CONFIG_ESPSERIAL is not set
1578# CONFIG_MOXA_INTELLIO is not set 1625# CONFIG_MOXA_INTELLIO is not set
1579# CONFIG_MOXA_SMARTIO is not set 1626# CONFIG_MOXA_SMARTIO is not set
1580# CONFIG_ISI is not set 1627# CONFIG_ISI is not set
@@ -1612,6 +1659,7 @@ CONFIG_SERIAL_CORE=y
1612CONFIG_SERIAL_CORE_CONSOLE=y 1659CONFIG_SERIAL_CORE_CONSOLE=y
1613CONFIG_SERIAL_JSM=y 1660CONFIG_SERIAL_JSM=y
1614CONFIG_UNIX98_PTYS=y 1661CONFIG_UNIX98_PTYS=y
1662# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1615CONFIG_LEGACY_PTYS=y 1663CONFIG_LEGACY_PTYS=y
1616CONFIG_LEGACY_PTY_COUNT=64 1664CONFIG_LEGACY_PTY_COUNT=64
1617CONFIG_IPMI_HANDLER=m 1665CONFIG_IPMI_HANDLER=m
@@ -1704,8 +1752,6 @@ CONFIG_SCx200_ACB=m
1704# Miscellaneous I2C Chip support 1752# Miscellaneous I2C Chip support
1705# 1753#
1706# CONFIG_DS1682 is not set 1754# CONFIG_DS1682 is not set
1707# CONFIG_AT24 is not set
1708CONFIG_SENSORS_EEPROM=m
1709CONFIG_SENSORS_PCF8574=m 1755CONFIG_SENSORS_PCF8574=m
1710# CONFIG_PCF8575 is not set 1756# CONFIG_PCF8575 is not set
1711CONFIG_SENSORS_PCA9539=m 1757CONFIG_SENSORS_PCA9539=m
@@ -1728,7 +1774,6 @@ CONFIG_SPI_BITBANG=m
1728# 1774#
1729# SPI Protocol Masters 1775# SPI Protocol Masters
1730# 1776#
1731# CONFIG_SPI_AT25 is not set
1732# CONFIG_SPI_SPIDEV is not set 1777# CONFIG_SPI_SPIDEV is not set
1733# CONFIG_SPI_TLE62X0 is not set 1778# CONFIG_SPI_TLE62X0 is not set
1734CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 1779CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
@@ -1748,13 +1793,16 @@ CONFIG_W1_MASTER_DS2482=m
1748# 1793#
1749CONFIG_W1_SLAVE_THERM=m 1794CONFIG_W1_SLAVE_THERM=m
1750CONFIG_W1_SLAVE_SMEM=m 1795CONFIG_W1_SLAVE_SMEM=m
1796# CONFIG_W1_SLAVE_DS2431 is not set
1751CONFIG_W1_SLAVE_DS2433=m 1797CONFIG_W1_SLAVE_DS2433=m
1752CONFIG_W1_SLAVE_DS2433_CRC=y 1798CONFIG_W1_SLAVE_DS2433_CRC=y
1753# CONFIG_W1_SLAVE_DS2760 is not set 1799# CONFIG_W1_SLAVE_DS2760 is not set
1800# CONFIG_W1_SLAVE_BQ27000 is not set
1754CONFIG_POWER_SUPPLY=y 1801CONFIG_POWER_SUPPLY=y
1755# CONFIG_POWER_SUPPLY_DEBUG is not set 1802# CONFIG_POWER_SUPPLY_DEBUG is not set
1756# CONFIG_PDA_POWER is not set 1803# CONFIG_PDA_POWER is not set
1757# CONFIG_BATTERY_DS2760 is not set 1804# CONFIG_BATTERY_DS2760 is not set
1805# CONFIG_BATTERY_BQ27x00 is not set
1758CONFIG_HWMON=y 1806CONFIG_HWMON=y
1759CONFIG_HWMON_VID=m 1807CONFIG_HWMON_VID=m
1760# CONFIG_SENSORS_ABITUGURU is not set 1808# CONFIG_SENSORS_ABITUGURU is not set
@@ -1768,8 +1816,10 @@ CONFIG_HWMON_VID=m
1768# CONFIG_SENSORS_ADM1029 is not set 1816# CONFIG_SENSORS_ADM1029 is not set
1769# CONFIG_SENSORS_ADM1031 is not set 1817# CONFIG_SENSORS_ADM1031 is not set
1770# CONFIG_SENSORS_ADM9240 is not set 1818# CONFIG_SENSORS_ADM9240 is not set
1819# CONFIG_SENSORS_ADT7462 is not set
1771# CONFIG_SENSORS_ADT7470 is not set 1820# CONFIG_SENSORS_ADT7470 is not set
1772# CONFIG_SENSORS_ADT7473 is not set 1821# CONFIG_SENSORS_ADT7473 is not set
1822# CONFIG_SENSORS_ADT7475 is not set
1773# CONFIG_SENSORS_K8TEMP is not set 1823# CONFIG_SENSORS_K8TEMP is not set
1774# CONFIG_SENSORS_ASB100 is not set 1824# CONFIG_SENSORS_ASB100 is not set
1775# CONFIG_SENSORS_ATXP1 is not set 1825# CONFIG_SENSORS_ATXP1 is not set
@@ -1799,6 +1849,8 @@ CONFIG_SENSORS_LM85=m
1799# CONFIG_SENSORS_LM90 is not set 1849# CONFIG_SENSORS_LM90 is not set
1800# CONFIG_SENSORS_LM92 is not set 1850# CONFIG_SENSORS_LM92 is not set
1801# CONFIG_SENSORS_LM93 is not set 1851# CONFIG_SENSORS_LM93 is not set
1852# CONFIG_SENSORS_LTC4245 is not set
1853# CONFIG_SENSORS_MAX1111 is not set
1802# CONFIG_SENSORS_MAX1619 is not set 1854# CONFIG_SENSORS_MAX1619 is not set
1803# CONFIG_SENSORS_MAX6650 is not set 1855# CONFIG_SENSORS_MAX6650 is not set
1804# CONFIG_SENSORS_PC87360 is not set 1856# CONFIG_SENSORS_PC87360 is not set
@@ -1822,16 +1874,17 @@ CONFIG_SENSORS_LM85=m
1822# CONFIG_SENSORS_W83627HF is not set 1874# CONFIG_SENSORS_W83627HF is not set
1823# CONFIG_SENSORS_W83627EHF is not set 1875# CONFIG_SENSORS_W83627EHF is not set
1824# CONFIG_SENSORS_HDAPS is not set 1876# CONFIG_SENSORS_HDAPS is not set
1877# CONFIG_SENSORS_LIS3LV02D is not set
1825# CONFIG_SENSORS_APPLESMC is not set 1878# CONFIG_SENSORS_APPLESMC is not set
1826# CONFIG_HWMON_DEBUG_CHIP is not set 1879# CONFIG_HWMON_DEBUG_CHIP is not set
1827CONFIG_THERMAL=y 1880CONFIG_THERMAL=y
1828# CONFIG_THERMAL_HWMON is not set 1881# CONFIG_THERMAL_HWMON is not set
1829# CONFIG_WATCHDOG is not set 1882# CONFIG_WATCHDOG is not set
1883CONFIG_SSB_POSSIBLE=y
1830 1884
1831# 1885#
1832# Sonics Silicon Backplane 1886# Sonics Silicon Backplane
1833# 1887#
1834CONFIG_SSB_POSSIBLE=y
1835# CONFIG_SSB is not set 1888# CONFIG_SSB is not set
1836 1889
1837# 1890#
@@ -1841,6 +1894,10 @@ CONFIG_SSB_POSSIBLE=y
1841# CONFIG_MFD_SM501 is not set 1894# CONFIG_MFD_SM501 is not set
1842# CONFIG_HTC_PASIC3 is not set 1895# CONFIG_HTC_PASIC3 is not set
1843# CONFIG_MFD_TMIO is not set 1896# CONFIG_MFD_TMIO is not set
1897# CONFIG_MFD_WM8400 is not set
1898# CONFIG_MFD_WM8350_I2C is not set
1899# CONFIG_MFD_PCF50633 is not set
1900# CONFIG_REGULATOR is not set
1844 1901
1845# 1902#
1846# Multimedia devices 1903# Multimedia devices
@@ -1881,6 +1938,7 @@ CONFIG_VIDEO_TVEEPROM=m
1881CONFIG_VIDEO_TUNER=m 1938CONFIG_VIDEO_TUNER=m
1882CONFIG_VIDEO_CAPTURE_DRIVERS=y 1939CONFIG_VIDEO_CAPTURE_DRIVERS=y
1883# CONFIG_VIDEO_ADV_DEBUG is not set 1940# CONFIG_VIDEO_ADV_DEBUG is not set
1941# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
1884CONFIG_VIDEO_HELPER_CHIPS_AUTO=y 1942CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1885CONFIG_VIDEO_IR_I2C=m 1943CONFIG_VIDEO_IR_I2C=m
1886CONFIG_VIDEO_MSP3400=m 1944CONFIG_VIDEO_MSP3400=m
@@ -1897,12 +1955,10 @@ CONFIG_VIDEO_CX2341X=m
1897# CONFIG_VIDEO_CPIA2 is not set 1955# CONFIG_VIDEO_CPIA2 is not set
1898# CONFIG_VIDEO_SAA5246A is not set 1956# CONFIG_VIDEO_SAA5246A is not set
1899# CONFIG_VIDEO_SAA5249 is not set 1957# CONFIG_VIDEO_SAA5249 is not set
1900# CONFIG_TUNER_3036 is not set
1901# CONFIG_VIDEO_STRADIS is not set 1958# CONFIG_VIDEO_STRADIS is not set
1902# CONFIG_VIDEO_ZORAN is not set 1959# CONFIG_VIDEO_ZORAN is not set
1903# CONFIG_VIDEO_SAA7134 is not set 1960# CONFIG_VIDEO_SAA7134 is not set
1904# CONFIG_VIDEO_MXB is not set 1961# CONFIG_VIDEO_MXB is not set
1905# CONFIG_VIDEO_DPC is not set
1906# CONFIG_VIDEO_HEXIUM_ORION is not set 1962# CONFIG_VIDEO_HEXIUM_ORION is not set
1907# CONFIG_VIDEO_HEXIUM_GEMINI is not set 1963# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1908# CONFIG_VIDEO_CX88 is not set 1964# CONFIG_VIDEO_CX88 is not set
@@ -1911,6 +1967,7 @@ CONFIG_VIDEO_CX2341X=m
1911# CONFIG_VIDEO_IVTV is not set 1967# CONFIG_VIDEO_IVTV is not set
1912# CONFIG_VIDEO_CX18 is not set 1968# CONFIG_VIDEO_CX18 is not set
1913# CONFIG_VIDEO_CAFE_CCIC is not set 1969# CONFIG_VIDEO_CAFE_CCIC is not set
1970# CONFIG_SOC_CAMERA is not set
1914CONFIG_V4L_USB_DRIVERS=y 1971CONFIG_V4L_USB_DRIVERS=y
1915# CONFIG_USB_VIDEO_CLASS is not set 1972# CONFIG_USB_VIDEO_CLASS is not set
1916# CONFIG_USB_GSPCA is not set 1973# CONFIG_USB_GSPCA is not set
@@ -1940,8 +1997,6 @@ CONFIG_USB_PWC=m
1940# CONFIG_USB_ZR364XX is not set 1997# CONFIG_USB_ZR364XX is not set
1941# CONFIG_USB_STKWEBCAM is not set 1998# CONFIG_USB_STKWEBCAM is not set
1942# CONFIG_USB_S2255 is not set 1999# CONFIG_USB_S2255 is not set
1943# CONFIG_SOC_CAMERA is not set
1944# CONFIG_VIDEO_SH_MOBILE_CEU is not set
1945CONFIG_RADIO_ADAPTERS=y 2000CONFIG_RADIO_ADAPTERS=y
1946# CONFIG_RADIO_CADET is not set 2001# CONFIG_RADIO_CADET is not set
1947# CONFIG_RADIO_RTRACK is not set 2002# CONFIG_RADIO_RTRACK is not set
@@ -1959,6 +2014,9 @@ CONFIG_RADIO_ADAPTERS=y
1959# CONFIG_RADIO_ZOLTRIX is not set 2014# CONFIG_RADIO_ZOLTRIX is not set
1960# CONFIG_USB_DSBR is not set 2015# CONFIG_USB_DSBR is not set
1961# CONFIG_USB_SI470X is not set 2016# CONFIG_USB_SI470X is not set
2017# CONFIG_USB_MR800 is not set
2018# CONFIG_RADIO_TEA5764 is not set
2019# CONFIG_DVB_DYNAMIC_MINORS is not set
1962CONFIG_DVB_CAPTURE_DRIVERS=y 2020CONFIG_DVB_CAPTURE_DRIVERS=y
1963 2021
1964# 2022#
@@ -1993,10 +2051,12 @@ CONFIG_DVB_USB_DTT200U=m
1993# CONFIG_DVB_USB_OPERA1 is not set 2051# CONFIG_DVB_USB_OPERA1 is not set
1994# CONFIG_DVB_USB_AF9005 is not set 2052# CONFIG_DVB_USB_AF9005 is not set
1995# CONFIG_DVB_USB_DW2102 is not set 2053# CONFIG_DVB_USB_DW2102 is not set
2054# CONFIG_DVB_USB_CINERGY_T2 is not set
1996# CONFIG_DVB_USB_ANYSEE is not set 2055# CONFIG_DVB_USB_ANYSEE is not set
2056# CONFIG_DVB_USB_DTV5100 is not set
2057# CONFIG_DVB_USB_AF9015 is not set
1997# CONFIG_DVB_TTUSB_BUDGET is not set 2058# CONFIG_DVB_TTUSB_BUDGET is not set
1998# CONFIG_DVB_TTUSB_DEC is not set 2059# CONFIG_DVB_TTUSB_DEC is not set
1999# CONFIG_DVB_CINERGYT2 is not set
2000# CONFIG_DVB_SIANO_SMS1XXX is not set 2060# CONFIG_DVB_SIANO_SMS1XXX is not set
2001 2061
2002# 2062#
@@ -2014,6 +2074,16 @@ CONFIG_DVB_USB_DTT200U=m
2014# CONFIG_DVB_PLUTO2 is not set 2074# CONFIG_DVB_PLUTO2 is not set
2015 2075
2016# 2076#
2077# Supported SDMC DM1105 Adapters
2078#
2079# CONFIG_DVB_DM1105 is not set
2080
2081#
2082# Supported FireWire (IEEE 1394) Adapters
2083#
2084# CONFIG_DVB_FIREDTV is not set
2085
2086#
2017# Supported DVB Frontends 2087# Supported DVB Frontends
2018# 2088#
2019 2089
@@ -2023,19 +2093,31 @@ CONFIG_DVB_USB_DTT200U=m
2023# CONFIG_DVB_FE_CUSTOMISE is not set 2093# CONFIG_DVB_FE_CUSTOMISE is not set
2024 2094
2025# 2095#
2096# Multistandard (satellite) frontends
2097#
2098# CONFIG_DVB_STB0899 is not set
2099# CONFIG_DVB_STB6100 is not set
2100
2101#
2026# DVB-S (satellite) frontends 2102# DVB-S (satellite) frontends
2027# 2103#
2028CONFIG_DVB_CX24110=m 2104CONFIG_DVB_CX24110=m
2029CONFIG_DVB_CX24123=m 2105CONFIG_DVB_CX24123=m
2030CONFIG_DVB_MT312=m 2106CONFIG_DVB_MT312=m
2031CONFIG_DVB_S5H1420=m 2107CONFIG_DVB_S5H1420=m
2108# CONFIG_DVB_STV0288 is not set
2109# CONFIG_DVB_STB6000 is not set
2032CONFIG_DVB_STV0299=m 2110CONFIG_DVB_STV0299=m
2033CONFIG_DVB_TDA8083=m 2111CONFIG_DVB_TDA8083=m
2034CONFIG_DVB_TDA10086=m 2112CONFIG_DVB_TDA10086=m
2113# CONFIG_DVB_TDA8261 is not set
2035CONFIG_DVB_VES1X93=m 2114CONFIG_DVB_VES1X93=m
2036# CONFIG_DVB_TUNER_ITD1000 is not set 2115# CONFIG_DVB_TUNER_ITD1000 is not set
2116# CONFIG_DVB_TUNER_CX24113 is not set
2037CONFIG_DVB_TDA826X=m 2117CONFIG_DVB_TDA826X=m
2038CONFIG_DVB_TUA6100=m 2118CONFIG_DVB_TUA6100=m
2119# CONFIG_DVB_CX24116 is not set
2120# CONFIG_DVB_SI21XX is not set
2039 2121
2040# 2122#
2041# DVB-T (terrestrial) frontends 2123# DVB-T (terrestrial) frontends
@@ -2072,11 +2154,17 @@ CONFIG_DVB_OR51211=m
2072CONFIG_DVB_OR51132=m 2154CONFIG_DVB_OR51132=m
2073CONFIG_DVB_BCM3510=m 2155CONFIG_DVB_BCM3510=m
2074CONFIG_DVB_LGDT330X=m 2156CONFIG_DVB_LGDT330X=m
2157# CONFIG_DVB_LGDT3304 is not set
2075CONFIG_DVB_S5H1409=m 2158CONFIG_DVB_S5H1409=m
2076# CONFIG_DVB_AU8522 is not set 2159# CONFIG_DVB_AU8522 is not set
2077CONFIG_DVB_S5H1411=m 2160CONFIG_DVB_S5H1411=m
2078 2161
2079# 2162#
2163# ISDB-T (terrestrial) frontends
2164#
2165# CONFIG_DVB_S921 is not set
2166
2167#
2080# Digital terrestrial only tuners/PLL 2168# Digital terrestrial only tuners/PLL
2081# 2169#
2082CONFIG_DVB_PLL=m 2170CONFIG_DVB_PLL=m
@@ -2088,6 +2176,13 @@ CONFIG_DVB_PLL=m
2088CONFIG_DVB_LNBP21=m 2176CONFIG_DVB_LNBP21=m
2089# CONFIG_DVB_ISL6405 is not set 2177# CONFIG_DVB_ISL6405 is not set
2090CONFIG_DVB_ISL6421=m 2178CONFIG_DVB_ISL6421=m
2179# CONFIG_DVB_LGS8GL5 is not set
2180
2181#
2182# Tools to develop new frontends
2183#
2184# CONFIG_DVB_DUMMY_FE is not set
2185# CONFIG_DVB_AF9013 is not set
2091CONFIG_DAB=y 2186CONFIG_DAB=y
2092CONFIG_USB_DABUSB=m 2187CONFIG_USB_DABUSB=m
2093 2188
@@ -2109,22 +2204,24 @@ CONFIG_DRM=m
2109# CONFIG_DRM_TDFX is not set 2204# CONFIG_DRM_TDFX is not set
2110# CONFIG_DRM_R128 is not set 2205# CONFIG_DRM_R128 is not set
2111# CONFIG_DRM_RADEON is not set 2206# CONFIG_DRM_RADEON is not set
2207CONFIG_DRM_INTEL_COMMON=m
2112# CONFIG_DRM_I810 is not set 2208# CONFIG_DRM_I810 is not set
2113# CONFIG_DRM_I830 is not set 2209# CONFIG_DRM_I830 is not set
2114# CONFIG_DRM_I915 is not set 2210CONFIG_DRM_I915=m
2211CONFIG_DRM_I915_KMS=y
2115# CONFIG_DRM_MGA is not set 2212# CONFIG_DRM_MGA is not set
2116# CONFIG_DRM_SIS is not set 2213# CONFIG_DRM_SIS is not set
2117# CONFIG_DRM_VIA is not set 2214# CONFIG_DRM_VIA is not set
2118# CONFIG_DRM_SAVAGE is not set 2215# CONFIG_DRM_SAVAGE is not set
2119CONFIG_DRM_PSB=m
2120CONFIG_VGASTATE=m 2216CONFIG_VGASTATE=m
2121CONFIG_VIDEO_OUTPUT_CONTROL=y 2217CONFIG_VIDEO_OUTPUT_CONTROL=y
2122CONFIG_FB=y 2218CONFIG_FB=y
2123CONFIG_FIRMWARE_EDID=y 2219CONFIG_FIRMWARE_EDID=y
2124CONFIG_FB_DDC=m 2220CONFIG_FB_DDC=m
2125CONFIG_FB_CFB_FILLRECT=y 2221CONFIG_FB_BOOT_VESA_SUPPORT=y
2126CONFIG_FB_CFB_COPYAREA=y 2222CONFIG_FB_CFB_FILLRECT=m
2127CONFIG_FB_CFB_IMAGEBLIT=y 2223CONFIG_FB_CFB_COPYAREA=m
2224CONFIG_FB_CFB_IMAGEBLIT=m
2128# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set 2225# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
2129# CONFIG_FB_SYS_FILLRECT is not set 2226# CONFIG_FB_SYS_FILLRECT is not set
2130# CONFIG_FB_SYS_COPYAREA is not set 2227# CONFIG_FB_SYS_COPYAREA is not set
@@ -2148,9 +2245,8 @@ CONFIG_FB_TILEBLITTING=y
2148# CONFIG_FB_IMSTT is not set 2245# CONFIG_FB_IMSTT is not set
2149CONFIG_FB_VGA16=m 2246CONFIG_FB_VGA16=m
2150# CONFIG_FB_UVESA is not set 2247# CONFIG_FB_UVESA is not set
2151CONFIG_FB_VESA=y 2248# CONFIG_FB_VESA is not set
2152# CONFIG_FB_EFI is not set 2249# CONFIG_FB_EFI is not set
2153# CONFIG_FB_IMAC is not set
2154# CONFIG_FB_N411 is not set 2250# CONFIG_FB_N411 is not set
2155# CONFIG_FB_HGA is not set 2251# CONFIG_FB_HGA is not set
2156# CONFIG_FB_S1D13XXX is not set 2252# CONFIG_FB_S1D13XXX is not set
@@ -2183,6 +2279,7 @@ CONFIG_FB_ATY_BACKLIGHT=y
2183# CONFIG_FB_S3 is not set 2279# CONFIG_FB_S3 is not set
2184# CONFIG_FB_SAVAGE is not set 2280# CONFIG_FB_SAVAGE is not set
2185# CONFIG_FB_SIS is not set 2281# CONFIG_FB_SIS is not set
2282# CONFIG_FB_VIA is not set
2186# CONFIG_FB_NEOMAGIC is not set 2283# CONFIG_FB_NEOMAGIC is not set
2187# CONFIG_FB_KYRO is not set 2284# CONFIG_FB_KYRO is not set
2188# CONFIG_FB_3DFX is not set 2285# CONFIG_FB_3DFX is not set
@@ -2195,16 +2292,20 @@ CONFIG_FB_ATY_BACKLIGHT=y
2195# CONFIG_FB_CARMINE is not set 2292# CONFIG_FB_CARMINE is not set
2196# CONFIG_FB_GEODE is not set 2293# CONFIG_FB_GEODE is not set
2197# CONFIG_FB_VIRTUAL is not set 2294# CONFIG_FB_VIRTUAL is not set
2295# CONFIG_FB_METRONOME is not set
2296# CONFIG_FB_MB862XX is not set
2198CONFIG_BACKLIGHT_LCD_SUPPORT=y 2297CONFIG_BACKLIGHT_LCD_SUPPORT=y
2199CONFIG_LCD_CLASS_DEVICE=m 2298CONFIG_LCD_CLASS_DEVICE=m
2200# CONFIG_LCD_LTV350QV is not set 2299# CONFIG_LCD_LTV350QV is not set
2201# CONFIG_LCD_ILI9320 is not set 2300# CONFIG_LCD_ILI9320 is not set
2301# CONFIG_LCD_TDO24M is not set
2202# CONFIG_LCD_VGG2432A4 is not set 2302# CONFIG_LCD_VGG2432A4 is not set
2203# CONFIG_LCD_PLATFORM is not set 2303# CONFIG_LCD_PLATFORM is not set
2204CONFIG_BACKLIGHT_CLASS_DEVICE=y 2304CONFIG_BACKLIGHT_CLASS_DEVICE=y
2205# CONFIG_BACKLIGHT_CORGI is not set 2305CONFIG_BACKLIGHT_GENERIC=y
2206# CONFIG_BACKLIGHT_PROGEAR is not set 2306# CONFIG_BACKLIGHT_PROGEAR is not set
2207# CONFIG_BACKLIGHT_MBP_NVIDIA is not set 2307# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
2308# CONFIG_BACKLIGHT_SAHARA is not set
2208 2309
2209# 2310#
2210# Display device support 2311# Display device support
@@ -2217,7 +2318,6 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
2217CONFIG_VGA_CONSOLE=y 2318CONFIG_VGA_CONSOLE=y
2218CONFIG_VGACON_SOFT_SCROLLBACK=y 2319CONFIG_VGACON_SOFT_SCROLLBACK=y
2219CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 2320CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
2220CONFIG_VIDEO_SELECT=y
2221CONFIG_MDA_CONSOLE=m 2321CONFIG_MDA_CONSOLE=m
2222CONFIG_DUMMY_CONSOLE=y 2322CONFIG_DUMMY_CONSOLE=y
2223CONFIG_FRAMEBUFFER_CONSOLE=y 2323CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -2228,11 +2328,13 @@ CONFIG_FONT_8x8=y
2228CONFIG_FONT_8x16=y 2328CONFIG_FONT_8x16=y
2229# CONFIG_LOGO is not set 2329# CONFIG_LOGO is not set
2230CONFIG_SOUND=m 2330CONFIG_SOUND=m
2331CONFIG_SOUND_OSS_CORE=y
2231CONFIG_SND=m 2332CONFIG_SND=m
2232CONFIG_SND_TIMER=m 2333CONFIG_SND_TIMER=m
2233CONFIG_SND_PCM=m 2334CONFIG_SND_PCM=m
2234CONFIG_SND_HWDEP=m 2335CONFIG_SND_HWDEP=m
2235CONFIG_SND_RAWMIDI=m 2336CONFIG_SND_RAWMIDI=m
2337CONFIG_SND_JACK=y
2236CONFIG_SND_SEQUENCER=m 2338CONFIG_SND_SEQUENCER=m
2237CONFIG_SND_SEQ_DUMMY=m 2339CONFIG_SND_SEQ_DUMMY=m
2238CONFIG_SND_OSSEMUL=y 2340CONFIG_SND_OSSEMUL=y
@@ -2240,6 +2342,7 @@ CONFIG_SND_MIXER_OSS=m
2240CONFIG_SND_PCM_OSS=m 2342CONFIG_SND_PCM_OSS=m
2241CONFIG_SND_PCM_OSS_PLUGINS=y 2343CONFIG_SND_PCM_OSS_PLUGINS=y
2242CONFIG_SND_SEQUENCER_OSS=y 2344CONFIG_SND_SEQUENCER_OSS=y
2345# CONFIG_SND_HRTIMER is not set
2243CONFIG_SND_DYNAMIC_MINORS=y 2346CONFIG_SND_DYNAMIC_MINORS=y
2244CONFIG_SND_SUPPORT_OLD_API=y 2347CONFIG_SND_SUPPORT_OLD_API=y
2245CONFIG_SND_VERBOSE_PROCFS=y 2348CONFIG_SND_VERBOSE_PROCFS=y
@@ -2330,11 +2433,15 @@ CONFIG_SND_PCI=y
2330# CONFIG_SND_FM801 is not set 2433# CONFIG_SND_FM801 is not set
2331CONFIG_SND_HDA_INTEL=m 2434CONFIG_SND_HDA_INTEL=m
2332# CONFIG_SND_HDA_HWDEP is not set 2435# CONFIG_SND_HDA_HWDEP is not set
2436# CONFIG_SND_HDA_INPUT_BEEP is not set
2333CONFIG_SND_HDA_CODEC_REALTEK=y 2437CONFIG_SND_HDA_CODEC_REALTEK=y
2334CONFIG_SND_HDA_CODEC_ANALOG=y 2438CONFIG_SND_HDA_CODEC_ANALOG=y
2335CONFIG_SND_HDA_CODEC_SIGMATEL=y 2439CONFIG_SND_HDA_CODEC_SIGMATEL=y
2336CONFIG_SND_HDA_CODEC_VIA=y 2440CONFIG_SND_HDA_CODEC_VIA=y
2337CONFIG_SND_HDA_CODEC_ATIHDMI=y 2441CONFIG_SND_HDA_CODEC_ATIHDMI=y
2442CONFIG_SND_HDA_CODEC_NVHDMI=y
2443CONFIG_SND_HDA_CODEC_INTELHDMI=y
2444CONFIG_SND_HDA_ELD=y
2338CONFIG_SND_HDA_CODEC_CONEXANT=y 2445CONFIG_SND_HDA_CODEC_CONEXANT=y
2339CONFIG_SND_HDA_CODEC_CMEDIA=y 2446CONFIG_SND_HDA_CODEC_CMEDIA=y
2340CONFIG_SND_HDA_CODEC_SI3054=y 2447CONFIG_SND_HDA_CODEC_SI3054=y
@@ -2369,6 +2476,7 @@ CONFIG_SND_USB=y
2369CONFIG_SND_USB_AUDIO=m 2476CONFIG_SND_USB_AUDIO=m
2370# CONFIG_SND_USB_USX2Y is not set 2477# CONFIG_SND_USB_USX2Y is not set
2371# CONFIG_SND_USB_CAIAQ is not set 2478# CONFIG_SND_USB_CAIAQ is not set
2479# CONFIG_SND_USB_US122L is not set
2372# CONFIG_SND_SOC is not set 2480# CONFIG_SND_SOC is not set
2373# CONFIG_SOUND_PRIME is not set 2481# CONFIG_SOUND_PRIME is not set
2374CONFIG_AC97_BUS=m 2482CONFIG_AC97_BUS=m
@@ -2381,15 +2489,37 @@ CONFIG_HID=y
2381# USB Input Devices 2489# USB Input Devices
2382# 2490#
2383CONFIG_USB_HID=y 2491CONFIG_USB_HID=y
2384CONFIG_USB_HIDINPUT_POWERBOOK=y
2385CONFIG_HID_FF=y
2386CONFIG_HID_PID=y 2492CONFIG_HID_PID=y
2493CONFIG_USB_HIDDEV=y
2494
2495#
2496# Special HID drivers
2497#
2498CONFIG_HID_COMPAT=y
2499CONFIG_HID_A4TECH=y
2500CONFIG_HID_APPLE=y
2501CONFIG_HID_BELKIN=y
2502CONFIG_HID_CHERRY=y
2503CONFIG_HID_CHICONY=y
2504CONFIG_HID_CYPRESS=y
2505CONFIG_HID_EZKEY=y
2506CONFIG_HID_GYRATION=y
2507CONFIG_HID_LOGITECH=y
2387CONFIG_LOGITECH_FF=y 2508CONFIG_LOGITECH_FF=y
2388# CONFIG_LOGIRUMBLEPAD2_FF is not set 2509# CONFIG_LOGIRUMBLEPAD2_FF is not set
2510CONFIG_HID_MICROSOFT=y
2511CONFIG_HID_MONTEREY=y
2512CONFIG_HID_NTRIG=y
2513CONFIG_HID_PANTHERLORD=y
2389# CONFIG_PANTHERLORD_FF is not set 2514# CONFIG_PANTHERLORD_FF is not set
2515CONFIG_HID_PETALYNX=y
2516CONFIG_HID_SAMSUNG=y
2517CONFIG_HID_SONY=y
2518CONFIG_HID_SUNPLUS=y
2519# CONFIG_GREENASIA_FF is not set
2520CONFIG_HID_TOPSEED=y
2390CONFIG_THRUSTMASTER_FF=y 2521CONFIG_THRUSTMASTER_FF=y
2391# CONFIG_ZEROPLUS_FF is not set 2522# CONFIG_ZEROPLUS_FF is not set
2392CONFIG_USB_HIDDEV=y
2393CONFIG_USB_SUPPORT=y 2523CONFIG_USB_SUPPORT=y
2394CONFIG_USB_ARCH_HAS_HCD=y 2524CONFIG_USB_ARCH_HAS_HCD=y
2395CONFIG_USB_ARCH_HAS_OHCI=y 2525CONFIG_USB_ARCH_HAS_OHCI=y
@@ -2407,6 +2537,8 @@ CONFIG_USB_DEVICE_CLASS=y
2407CONFIG_USB_SUSPEND=y 2537CONFIG_USB_SUSPEND=y
2408# CONFIG_USB_OTG is not set 2538# CONFIG_USB_OTG is not set
2409CONFIG_USB_MON=y 2539CONFIG_USB_MON=y
2540# CONFIG_USB_WUSB is not set
2541# CONFIG_USB_WUSB_CBAF is not set
2410 2542
2411# 2543#
2412# USB Host Controller Drivers 2544# USB Host Controller Drivers
@@ -2415,6 +2547,7 @@ CONFIG_USB_MON=y
2415CONFIG_USB_EHCI_HCD=y 2547CONFIG_USB_EHCI_HCD=y
2416CONFIG_USB_EHCI_ROOT_HUB_TT=y 2548CONFIG_USB_EHCI_ROOT_HUB_TT=y
2417CONFIG_USB_EHCI_TT_NEWSCHED=y 2549CONFIG_USB_EHCI_TT_NEWSCHED=y
2550# CONFIG_USB_OXU210HP_HCD is not set
2418# CONFIG_USB_ISP116X_HCD is not set 2551# CONFIG_USB_ISP116X_HCD is not set
2419# CONFIG_USB_ISP1760_HCD is not set 2552# CONFIG_USB_ISP1760_HCD is not set
2420CONFIG_USB_OHCI_HCD=y 2553CONFIG_USB_OHCI_HCD=y
@@ -2424,6 +2557,8 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
2424CONFIG_USB_UHCI_HCD=y 2557CONFIG_USB_UHCI_HCD=y
2425# CONFIG_USB_SL811_HCD is not set 2558# CONFIG_USB_SL811_HCD is not set
2426# CONFIG_USB_R8A66597_HCD is not set 2559# CONFIG_USB_R8A66597_HCD is not set
2560# CONFIG_USB_WHCI_HCD is not set
2561# CONFIG_USB_HWA_HCD is not set
2427# CONFIG_USB_GADGET_MUSB_HDRC is not set 2562# CONFIG_USB_GADGET_MUSB_HDRC is not set
2428 2563
2429# 2564#
@@ -2432,20 +2567,20 @@ CONFIG_USB_UHCI_HCD=y
2432CONFIG_USB_ACM=m 2567CONFIG_USB_ACM=m
2433CONFIG_USB_PRINTER=m 2568CONFIG_USB_PRINTER=m
2434# CONFIG_USB_WDM is not set 2569# CONFIG_USB_WDM is not set
2570# CONFIG_USB_TMC is not set
2435 2571
2436# 2572#
2437# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 2573# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
2438# 2574#
2439 2575
2440# 2576#
2441# may also be needed; see USB_STORAGE Help for more information 2577# see USB_STORAGE Help for more information
2442# 2578#
2443CONFIG_USB_STORAGE=y 2579CONFIG_USB_STORAGE=y
2444# CONFIG_USB_STORAGE_DEBUG is not set 2580# CONFIG_USB_STORAGE_DEBUG is not set
2445CONFIG_USB_STORAGE_DATAFAB=y 2581CONFIG_USB_STORAGE_DATAFAB=y
2446CONFIG_USB_STORAGE_FREECOM=y 2582CONFIG_USB_STORAGE_FREECOM=y
2447# CONFIG_USB_STORAGE_ISD200 is not set 2583# CONFIG_USB_STORAGE_ISD200 is not set
2448CONFIG_USB_STORAGE_DPCM=y
2449CONFIG_USB_STORAGE_USBAT=y 2584CONFIG_USB_STORAGE_USBAT=y
2450CONFIG_USB_STORAGE_SDDR09=y 2585CONFIG_USB_STORAGE_SDDR09=y
2451CONFIG_USB_STORAGE_SDDR55=y 2586CONFIG_USB_STORAGE_SDDR55=y
@@ -2514,12 +2649,14 @@ CONFIG_USB_SERIAL_PL2303=m
2514CONFIG_USB_SERIAL_HP4X=m 2649CONFIG_USB_SERIAL_HP4X=m
2515CONFIG_USB_SERIAL_SAFE=m 2650CONFIG_USB_SERIAL_SAFE=m
2516CONFIG_USB_SERIAL_SAFE_PADDED=y 2651CONFIG_USB_SERIAL_SAFE_PADDED=y
2652# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
2517CONFIG_USB_SERIAL_SIERRAWIRELESS=m 2653CONFIG_USB_SERIAL_SIERRAWIRELESS=m
2518CONFIG_USB_SERIAL_TI=m 2654CONFIG_USB_SERIAL_TI=m
2519CONFIG_USB_SERIAL_CYBERJACK=m 2655CONFIG_USB_SERIAL_CYBERJACK=m
2520CONFIG_USB_SERIAL_XIRCOM=m 2656CONFIG_USB_SERIAL_XIRCOM=m
2521CONFIG_USB_SERIAL_OPTION=m 2657CONFIG_USB_SERIAL_OPTION=m
2522CONFIG_USB_SERIAL_OMNINET=m 2658CONFIG_USB_SERIAL_OMNINET=m
2659# CONFIG_USB_SERIAL_OPTICON is not set
2523# CONFIG_USB_SERIAL_DEBUG is not set 2660# CONFIG_USB_SERIAL_DEBUG is not set
2524 2661
2525# 2662#
@@ -2528,6 +2665,7 @@ CONFIG_USB_SERIAL_OMNINET=m
2528CONFIG_USB_EMI62=m 2665CONFIG_USB_EMI62=m
2529CONFIG_USB_EMI26=m 2666CONFIG_USB_EMI26=m
2530# CONFIG_USB_ADUTUX is not set 2667# CONFIG_USB_ADUTUX is not set
2668# CONFIG_USB_SEVSEG is not set
2531CONFIG_USB_RIO500=m 2669CONFIG_USB_RIO500=m
2532CONFIG_USB_LEGOTOWER=m 2670CONFIG_USB_LEGOTOWER=m
2533CONFIG_USB_LCD=m 2671CONFIG_USB_LCD=m
@@ -2546,6 +2684,7 @@ CONFIG_USB_LD=m
2546# CONFIG_USB_IOWARRIOR is not set 2684# CONFIG_USB_IOWARRIOR is not set
2547# CONFIG_USB_TEST is not set 2685# CONFIG_USB_TEST is not set
2548# CONFIG_USB_ISIGHTFW is not set 2686# CONFIG_USB_ISIGHTFW is not set
2687# CONFIG_USB_VST is not set
2549CONFIG_USB_ATM=m 2688CONFIG_USB_ATM=m
2550CONFIG_USB_SPEEDTOUCH=m 2689CONFIG_USB_SPEEDTOUCH=m
2551CONFIG_USB_CXACRU=m 2690CONFIG_USB_CXACRU=m
@@ -2555,20 +2694,24 @@ CONFIG_USB_GADGET=y
2555# CONFIG_USB_GADGET_DEBUG is not set 2694# CONFIG_USB_GADGET_DEBUG is not set
2556CONFIG_USB_GADGET_DEBUG_FILES=y 2695CONFIG_USB_GADGET_DEBUG_FILES=y
2557# CONFIG_USB_GADGET_DEBUG_FS is not set 2696# CONFIG_USB_GADGET_DEBUG_FS is not set
2697CONFIG_USB_GADGET_VBUS_DRAW=2
2558CONFIG_USB_GADGET_SELECTED=y 2698CONFIG_USB_GADGET_SELECTED=y
2559CONFIG_USB_GADGET_AMD5536UDC=y 2699# CONFIG_USB_GADGET_AT91 is not set
2560CONFIG_USB_AMD5536UDC=y
2561# CONFIG_USB_GADGET_ATMEL_USBA is not set 2700# CONFIG_USB_GADGET_ATMEL_USBA is not set
2562# CONFIG_USB_GADGET_FSL_USB2 is not set 2701# CONFIG_USB_GADGET_FSL_USB2 is not set
2563# CONFIG_USB_GADGET_NET2280 is not set
2564# CONFIG_USB_GADGET_PXA25X is not set
2565# CONFIG_USB_GADGET_M66592 is not set
2566# CONFIG_USB_GADGET_PXA27X is not set
2567# CONFIG_USB_GADGET_GOKU is not set
2568# CONFIG_USB_GADGET_LH7A40X is not set 2702# CONFIG_USB_GADGET_LH7A40X is not set
2569# CONFIG_USB_GADGET_OMAP is not set 2703# CONFIG_USB_GADGET_OMAP is not set
2704# CONFIG_USB_GADGET_PXA25X is not set
2705# CONFIG_USB_GADGET_PXA27X is not set
2570# CONFIG_USB_GADGET_S3C2410 is not set 2706# CONFIG_USB_GADGET_S3C2410 is not set
2571# CONFIG_USB_GADGET_AT91 is not set 2707# CONFIG_USB_GADGET_IMX is not set
2708# CONFIG_USB_GADGET_M66592 is not set
2709CONFIG_USB_GADGET_AMD5536UDC=y
2710CONFIG_USB_AMD5536UDC=y
2711# CONFIG_USB_GADGET_FSL_QE is not set
2712# CONFIG_USB_GADGET_CI13XXX is not set
2713# CONFIG_USB_GADGET_NET2280 is not set
2714# CONFIG_USB_GADGET_GOKU is not set
2572# CONFIG_USB_GADGET_DUMMY_HCD is not set 2715# CONFIG_USB_GADGET_DUMMY_HCD is not set
2573CONFIG_USB_GADGET_DUALSPEED=y 2716CONFIG_USB_GADGET_DUALSPEED=y
2574# CONFIG_USB_ZERO is not set 2717# CONFIG_USB_ZERO is not set
@@ -2581,12 +2724,17 @@ CONFIG_USB_FILE_STORAGE_TEST=y
2581# CONFIG_USB_MIDI_GADGET is not set 2724# CONFIG_USB_MIDI_GADGET is not set
2582# CONFIG_USB_G_PRINTER is not set 2725# CONFIG_USB_G_PRINTER is not set
2583# CONFIG_USB_CDC_COMPOSITE is not set 2726# CONFIG_USB_CDC_COMPOSITE is not set
2727
2728#
2729# OTG and related infrastructure
2730#
2731# CONFIG_UWB is not set
2584CONFIG_MMC=y 2732CONFIG_MMC=y
2585# CONFIG_MMC_DEBUG is not set 2733# CONFIG_MMC_DEBUG is not set
2586CONFIG_MMC_UNSAFE_RESUME=y 2734CONFIG_MMC_UNSAFE_RESUME=y
2587 2735
2588# 2736#
2589# MMC/SD Card Drivers 2737# MMC/SD/SDIO Card Drivers
2590# 2738#
2591CONFIG_MMC_BLOCK=y 2739CONFIG_MMC_BLOCK=y
2592CONFIG_MMC_BLOCK_BOUNCE=y 2740CONFIG_MMC_BLOCK_BOUNCE=y
@@ -2594,7 +2742,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
2594# CONFIG_MMC_TEST is not set 2742# CONFIG_MMC_TEST is not set
2595 2743
2596# 2744#
2597# MMC/SD Host Controller Drivers 2745# MMC/SD/SDIO Host Controller Drivers
2598# 2746#
2599CONFIG_MMC_SDHCI=y 2747CONFIG_MMC_SDHCI=y
2600# CONFIG_MMC_SDHCI_PCI is not set 2748# CONFIG_MMC_SDHCI_PCI is not set
@@ -2607,6 +2755,7 @@ CONFIG_LEDS_CLASS=m
2607# 2755#
2608# LED drivers 2756# LED drivers
2609# 2757#
2758# CONFIG_LEDS_ALIX2 is not set
2610# CONFIG_LEDS_PCA9532 is not set 2759# CONFIG_LEDS_PCA9532 is not set
2611# CONFIG_LEDS_CLEVO_MAIL is not set 2760# CONFIG_LEDS_CLEVO_MAIL is not set
2612# CONFIG_LEDS_PCA955X is not set 2761# CONFIG_LEDS_PCA955X is not set
@@ -2617,6 +2766,7 @@ CONFIG_LEDS_CLASS=m
2617CONFIG_LEDS_TRIGGERS=y 2766CONFIG_LEDS_TRIGGERS=y
2618CONFIG_LEDS_TRIGGER_TIMER=m 2767CONFIG_LEDS_TRIGGER_TIMER=m
2619CONFIG_LEDS_TRIGGER_HEARTBEAT=m 2768CONFIG_LEDS_TRIGGER_HEARTBEAT=m
2769# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
2620# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set 2770# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
2621# CONFIG_ACCESSIBILITY is not set 2771# CONFIG_ACCESSIBILITY is not set
2622# CONFIG_INFINIBAND is not set 2772# CONFIG_INFINIBAND is not set
@@ -2648,26 +2798,32 @@ CONFIG_RTC_DRV_PCF8583=m
2648# CONFIG_RTC_DRV_M41T80 is not set 2798# CONFIG_RTC_DRV_M41T80 is not set
2649# CONFIG_RTC_DRV_S35390A is not set 2799# CONFIG_RTC_DRV_S35390A is not set
2650# CONFIG_RTC_DRV_FM3130 is not set 2800# CONFIG_RTC_DRV_FM3130 is not set
2801# CONFIG_RTC_DRV_RX8581 is not set
2651 2802
2652# 2803#
2653# SPI RTC drivers 2804# SPI RTC drivers
2654# 2805#
2655# CONFIG_RTC_DRV_M41T94 is not set 2806# CONFIG_RTC_DRV_M41T94 is not set
2656# CONFIG_RTC_DRV_DS1305 is not set 2807# CONFIG_RTC_DRV_DS1305 is not set
2808# CONFIG_RTC_DRV_DS1390 is not set
2657CONFIG_RTC_DRV_MAX6902=m 2809CONFIG_RTC_DRV_MAX6902=m
2658# CONFIG_RTC_DRV_R9701 is not set 2810# CONFIG_RTC_DRV_R9701 is not set
2659CONFIG_RTC_DRV_RS5C348=m 2811CONFIG_RTC_DRV_RS5C348=m
2812# CONFIG_RTC_DRV_DS3234 is not set
2660 2813
2661# 2814#
2662# Platform RTC drivers 2815# Platform RTC drivers
2663# 2816#
2664# CONFIG_RTC_DRV_CMOS is not set 2817# CONFIG_RTC_DRV_CMOS is not set
2818# CONFIG_RTC_DRV_DS1286 is not set
2665# CONFIG_RTC_DRV_DS1511 is not set 2819# CONFIG_RTC_DRV_DS1511 is not set
2666CONFIG_RTC_DRV_DS1553=m 2820CONFIG_RTC_DRV_DS1553=m
2667CONFIG_RTC_DRV_DS1742=m 2821CONFIG_RTC_DRV_DS1742=m
2668# CONFIG_RTC_DRV_STK17TA8 is not set 2822# CONFIG_RTC_DRV_STK17TA8 is not set
2669CONFIG_RTC_DRV_M48T86=m 2823CONFIG_RTC_DRV_M48T86=m
2824# CONFIG_RTC_DRV_M48T35 is not set
2670# CONFIG_RTC_DRV_M48T59 is not set 2825# CONFIG_RTC_DRV_M48T59 is not set
2826# CONFIG_RTC_DRV_BQ4802 is not set
2671CONFIG_RTC_DRV_V3020=m 2827CONFIG_RTC_DRV_V3020=m
2672 2828
2673# 2829#
@@ -2675,6 +2831,21 @@ CONFIG_RTC_DRV_V3020=m
2675# 2831#
2676# CONFIG_DMADEVICES is not set 2832# CONFIG_DMADEVICES is not set
2677# CONFIG_UIO is not set 2833# CONFIG_UIO is not set
2834# CONFIG_STAGING is not set
2835CONFIG_X86_PLATFORM_DEVICES=y
2836# CONFIG_ASUS_LAPTOP is not set
2837# CONFIG_FUJITSU_LAPTOP is not set
2838# CONFIG_TC1100_WMI is not set
2839# CONFIG_MSI_LAPTOP is not set
2840# CONFIG_PANASONIC_LAPTOP is not set
2841# CONFIG_COMPAL_LAPTOP is not set
2842# CONFIG_SONY_LAPTOP is not set
2843# CONFIG_THINKPAD_ACPI is not set
2844# CONFIG_INTEL_MENLOW is not set
2845# CONFIG_EEEPC_LAPTOP is not set
2846# CONFIG_ACPI_WMI is not set
2847# CONFIG_ACPI_ASUS is not set
2848# CONFIG_ACPI_TOSHIBA is not set
2678 2849
2679# 2850#
2680# Firmware Drivers 2851# Firmware Drivers
@@ -2700,7 +2871,7 @@ CONFIG_EXT3_FS=y
2700CONFIG_EXT3_FS_XATTR=y 2871CONFIG_EXT3_FS_XATTR=y
2701CONFIG_EXT3_FS_POSIX_ACL=y 2872CONFIG_EXT3_FS_POSIX_ACL=y
2702CONFIG_EXT3_FS_SECURITY=y 2873CONFIG_EXT3_FS_SECURITY=y
2703# CONFIG_EXT4DEV_FS is not set 2874# CONFIG_EXT4_FS is not set
2704CONFIG_JBD=y 2875CONFIG_JBD=y
2705# CONFIG_JBD_DEBUG is not set 2876# CONFIG_JBD_DEBUG is not set
2706CONFIG_FS_MBCACHE=y 2877CONFIG_FS_MBCACHE=y
@@ -2716,15 +2887,18 @@ CONFIG_JFS_SECURITY=y
2716# CONFIG_JFS_DEBUG is not set 2887# CONFIG_JFS_DEBUG is not set
2717CONFIG_JFS_STATISTICS=y 2888CONFIG_JFS_STATISTICS=y
2718CONFIG_FS_POSIX_ACL=y 2889CONFIG_FS_POSIX_ACL=y
2890CONFIG_FILE_LOCKING=y
2719# CONFIG_XFS_FS is not set 2891# CONFIG_XFS_FS is not set
2720# CONFIG_GFS2_FS is not set 2892# CONFIG_GFS2_FS is not set
2721# CONFIG_OCFS2_FS is not set 2893# CONFIG_OCFS2_FS is not set
2894# CONFIG_BTRFS_FS is not set
2722CONFIG_DNOTIFY=y 2895CONFIG_DNOTIFY=y
2723CONFIG_INOTIFY=y 2896CONFIG_INOTIFY=y
2724CONFIG_INOTIFY_USER=y 2897CONFIG_INOTIFY_USER=y
2725CONFIG_QUOTA=y 2898CONFIG_QUOTA=y
2726# CONFIG_QUOTA_NETLINK_INTERFACE is not set 2899# CONFIG_QUOTA_NETLINK_INTERFACE is not set
2727CONFIG_PRINT_QUOTA_WARNING=y 2900CONFIG_PRINT_QUOTA_WARNING=y
2901CONFIG_QUOTA_TREE=m
2728CONFIG_QFMT_V1=m 2902CONFIG_QFMT_V1=m
2729CONFIG_QFMT_V2=m 2903CONFIG_QFMT_V2=m
2730CONFIG_QUOTACTL=y 2904CONFIG_QUOTACTL=y
@@ -2760,16 +2934,14 @@ CONFIG_NTFS_RW=y
2760CONFIG_PROC_FS=y 2934CONFIG_PROC_FS=y
2761CONFIG_PROC_KCORE=y 2935CONFIG_PROC_KCORE=y
2762CONFIG_PROC_SYSCTL=y 2936CONFIG_PROC_SYSCTL=y
2937CONFIG_PROC_PAGE_MONITOR=y
2763CONFIG_SYSFS=y 2938CONFIG_SYSFS=y
2764CONFIG_TMPFS=y 2939CONFIG_TMPFS=y
2765CONFIG_TMPFS_POSIX_ACL=y 2940CONFIG_TMPFS_POSIX_ACL=y
2766CONFIG_HUGETLBFS=y 2941CONFIG_HUGETLBFS=y
2767CONFIG_HUGETLB_PAGE=y 2942CONFIG_HUGETLB_PAGE=y
2768CONFIG_CONFIGFS_FS=m 2943CONFIG_CONFIGFS_FS=m
2769 2944CONFIG_MISC_FILESYSTEMS=y
2770#
2771# Miscellaneous filesystems
2772#
2773CONFIG_ADFS_FS=m 2945CONFIG_ADFS_FS=m
2774# CONFIG_ADFS_FS_RW is not set 2946# CONFIG_ADFS_FS_RW is not set
2775CONFIG_AFFS_FS=m 2947CONFIG_AFFS_FS=m
@@ -2798,6 +2970,7 @@ CONFIG_JFFS2_CMODE_PRIORITY=y
2798# CONFIG_JFFS2_CMODE_SIZE is not set 2970# CONFIG_JFFS2_CMODE_SIZE is not set
2799# CONFIG_JFFS2_CMODE_FAVOURLZO is not set 2971# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
2800CONFIG_CRAMFS=y 2972CONFIG_CRAMFS=y
2973# CONFIG_SQUASHFS is not set
2801CONFIG_VXFS_FS=m 2974CONFIG_VXFS_FS=m
2802# CONFIG_MINIX_FS is not set 2975# CONFIG_MINIX_FS is not set
2803# CONFIG_OMFS_FS is not set 2976# CONFIG_OMFS_FS is not set
@@ -2825,6 +2998,7 @@ CONFIG_NFS_ACL_SUPPORT=m
2825CONFIG_NFS_COMMON=y 2998CONFIG_NFS_COMMON=y
2826CONFIG_SUNRPC=m 2999CONFIG_SUNRPC=m
2827CONFIG_SUNRPC_GSS=m 3000CONFIG_SUNRPC_GSS=m
3001# CONFIG_SUNRPC_REGISTER_V4 is not set
2828CONFIG_RPCSEC_GSS_KRB5=m 3002CONFIG_RPCSEC_GSS_KRB5=m
2829CONFIG_RPCSEC_GSS_SPKM3=m 3003CONFIG_RPCSEC_GSS_SPKM3=m
2830CONFIG_SMB_FS=y 3004CONFIG_SMB_FS=y
@@ -2942,31 +3116,51 @@ CONFIG_TIMER_STATS=y
2942CONFIG_DEBUG_BUGVERBOSE=y 3116CONFIG_DEBUG_BUGVERBOSE=y
2943# CONFIG_DEBUG_INFO is not set 3117# CONFIG_DEBUG_INFO is not set
2944# CONFIG_DEBUG_VM is not set 3118# CONFIG_DEBUG_VM is not set
3119# CONFIG_DEBUG_VIRTUAL is not set
2945# CONFIG_DEBUG_WRITECOUNT is not set 3120# CONFIG_DEBUG_WRITECOUNT is not set
2946CONFIG_DEBUG_MEMORY_INIT=y 3121CONFIG_DEBUG_MEMORY_INIT=y
2947# CONFIG_DEBUG_LIST is not set 3122# CONFIG_DEBUG_LIST is not set
2948# CONFIG_DEBUG_SG is not set 3123# CONFIG_DEBUG_SG is not set
3124# CONFIG_DEBUG_NOTIFIERS is not set
3125CONFIG_ARCH_WANT_FRAME_POINTERS=y
2949# CONFIG_FRAME_POINTER is not set 3126# CONFIG_FRAME_POINTER is not set
2950# CONFIG_BOOT_PRINTK_DELAY is not set 3127# CONFIG_BOOT_PRINTK_DELAY is not set
2951# CONFIG_RCU_TORTURE_TEST is not set 3128# CONFIG_RCU_TORTURE_TEST is not set
3129# CONFIG_RCU_CPU_STALL_DETECTOR is not set
2952# CONFIG_BACKTRACE_SELF_TEST is not set 3130# CONFIG_BACKTRACE_SELF_TEST is not set
3131# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
2953# CONFIG_FAULT_INJECTION is not set 3132# CONFIG_FAULT_INJECTION is not set
2954# CONFIG_LATENCYTOP is not set 3133# CONFIG_LATENCYTOP is not set
2955# CONFIG_SYSCTL_SYSCALL_CHECK is not set 3134# CONFIG_SYSCTL_SYSCALL_CHECK is not set
2956CONFIG_HAVE_FTRACE=y 3135CONFIG_USER_STACKTRACE_SUPPORT=y
3136CONFIG_HAVE_FUNCTION_TRACER=y
3137CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
3138CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
2957CONFIG_HAVE_DYNAMIC_FTRACE=y 3139CONFIG_HAVE_DYNAMIC_FTRACE=y
2958# CONFIG_FTRACE is not set 3140CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
3141
3142#
3143# Tracers
3144#
3145# CONFIG_FUNCTION_TRACER is not set
2959# CONFIG_IRQSOFF_TRACER is not set 3146# CONFIG_IRQSOFF_TRACER is not set
2960# CONFIG_SYSPROF_TRACER is not set 3147# CONFIG_SYSPROF_TRACER is not set
2961# CONFIG_SCHED_TRACER is not set 3148# CONFIG_SCHED_TRACER is not set
2962# CONFIG_CONTEXT_SWITCH_TRACER is not set 3149# CONFIG_CONTEXT_SWITCH_TRACER is not set
3150# CONFIG_BOOT_TRACER is not set
3151# CONFIG_TRACE_BRANCH_PROFILING is not set
3152# CONFIG_POWER_TRACER is not set
3153# CONFIG_STACK_TRACER is not set
3154# CONFIG_MMIOTRACE is not set
2963# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set 3155# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
3156# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
2964# CONFIG_SAMPLES is not set 3157# CONFIG_SAMPLES is not set
2965CONFIG_HAVE_ARCH_KGDB=y 3158CONFIG_HAVE_ARCH_KGDB=y
2966# CONFIG_KGDB is not set 3159# CONFIG_KGDB is not set
2967# CONFIG_STRICT_DEVMEM is not set 3160# CONFIG_STRICT_DEVMEM is not set
2968CONFIG_X86_VERBOSE_BOOTUP=y 3161CONFIG_X86_VERBOSE_BOOTUP=y
2969CONFIG_EARLY_PRINTK=y 3162CONFIG_EARLY_PRINTK=y
3163# CONFIG_EARLY_PRINTK_DBGP is not set
2970# CONFIG_DEBUG_STACKOVERFLOW is not set 3164# CONFIG_DEBUG_STACKOVERFLOW is not set
2971# CONFIG_DEBUG_STACK_USAGE is not set 3165# CONFIG_DEBUG_STACK_USAGE is not set
2972# CONFIG_DEBUG_PAGEALLOC is not set 3166# CONFIG_DEBUG_PAGEALLOC is not set
@@ -2976,7 +3170,7 @@ CONFIG_EARLY_PRINTK=y
2976# CONFIG_DEBUG_NX_TEST is not set 3170# CONFIG_DEBUG_NX_TEST is not set
2977# CONFIG_4KSTACKS is not set 3171# CONFIG_4KSTACKS is not set
2978CONFIG_DOUBLEFAULT=y 3172CONFIG_DOUBLEFAULT=y
2979# CONFIG_MMIOTRACE is not set 3173CONFIG_HAVE_MMIOTRACE_SUPPORT=y
2980CONFIG_IO_DELAY_TYPE_0X80=0 3174CONFIG_IO_DELAY_TYPE_0X80=0
2981CONFIG_IO_DELAY_TYPE_0XED=1 3175CONFIG_IO_DELAY_TYPE_0XED=1
2982CONFIG_IO_DELAY_TYPE_UDELAY=2 3176CONFIG_IO_DELAY_TYPE_UDELAY=2
@@ -2996,8 +3190,10 @@ CONFIG_DEFAULT_IO_DELAY_TYPE=0
2996CONFIG_KEYS=y 3190CONFIG_KEYS=y
2997CONFIG_KEYS_DEBUG_PROC_KEYS=y 3191CONFIG_KEYS_DEBUG_PROC_KEYS=y
2998CONFIG_SECURITY=y 3192CONFIG_SECURITY=y
3193# CONFIG_SECURITYFS is not set
2999CONFIG_SECURITY_NETWORK=y 3194CONFIG_SECURITY_NETWORK=y
3000# CONFIG_SECURITY_NETWORK_XFRM is not set 3195# CONFIG_SECURITY_NETWORK_XFRM is not set
3196# CONFIG_SECURITY_PATH is not set
3001# CONFIG_SECURITY_FILE_CAPABILITIES is not set 3197# CONFIG_SECURITY_FILE_CAPABILITIES is not set
3002# CONFIG_SECURITY_ROOTPLUG is not set 3198# CONFIG_SECURITY_ROOTPLUG is not set
3003CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 3199CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
@@ -3008,18 +3204,24 @@ CONFIG_SECURITY_SELINUX_DISABLE=y
3008CONFIG_SECURITY_SELINUX_DEVELOP=y 3204CONFIG_SECURITY_SELINUX_DEVELOP=y
3009CONFIG_SECURITY_SELINUX_AVC_STATS=y 3205CONFIG_SECURITY_SELINUX_AVC_STATS=y
3010CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 3206CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
3011# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
3012# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set 3207# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
3013CONFIG_CRYPTO=y 3208CONFIG_CRYPTO=y
3014 3209
3015# 3210#
3016# Crypto core or helper 3211# Crypto core or helper
3017# 3212#
3213# CONFIG_CRYPTO_FIPS is not set
3018CONFIG_CRYPTO_ALGAPI=y 3214CONFIG_CRYPTO_ALGAPI=y
3215CONFIG_CRYPTO_ALGAPI2=y
3019CONFIG_CRYPTO_AEAD=m 3216CONFIG_CRYPTO_AEAD=m
3217CONFIG_CRYPTO_AEAD2=y
3020CONFIG_CRYPTO_BLKCIPHER=y 3218CONFIG_CRYPTO_BLKCIPHER=y
3219CONFIG_CRYPTO_BLKCIPHER2=y
3021CONFIG_CRYPTO_HASH=y 3220CONFIG_CRYPTO_HASH=y
3221CONFIG_CRYPTO_HASH2=y
3222CONFIG_CRYPTO_RNG2=y
3022CONFIG_CRYPTO_MANAGER=y 3223CONFIG_CRYPTO_MANAGER=y
3224CONFIG_CRYPTO_MANAGER2=y
3023# CONFIG_CRYPTO_GF128MUL is not set 3225# CONFIG_CRYPTO_GF128MUL is not set
3024CONFIG_CRYPTO_NULL=m 3226CONFIG_CRYPTO_NULL=m
3025# CONFIG_CRYPTO_CRYPTD is not set 3227# CONFIG_CRYPTO_CRYPTD is not set
@@ -3054,6 +3256,7 @@ CONFIG_CRYPTO_HMAC=y
3054# Digest 3256# Digest
3055# 3257#
3056CONFIG_CRYPTO_CRC32C=m 3258CONFIG_CRYPTO_CRC32C=m
3259# CONFIG_CRYPTO_CRC32C_INTEL is not set
3057CONFIG_CRYPTO_MD4=m 3260CONFIG_CRYPTO_MD4=m
3058CONFIG_CRYPTO_MD5=y 3261CONFIG_CRYPTO_MD5=y
3059CONFIG_CRYPTO_MICHAEL_MIC=m 3262CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -3095,6 +3298,11 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m
3095# 3298#
3096CONFIG_CRYPTO_DEFLATE=m 3299CONFIG_CRYPTO_DEFLATE=m
3097# CONFIG_CRYPTO_LZO is not set 3300# CONFIG_CRYPTO_LZO is not set
3301
3302#
3303# Random Number Generation
3304#
3305# CONFIG_CRYPTO_ANSI_CPRNG is not set
3098CONFIG_CRYPTO_HW=y 3306CONFIG_CRYPTO_HW=y
3099CONFIG_CRYPTO_DEV_PADLOCK=m 3307CONFIG_CRYPTO_DEV_PADLOCK=m
3100CONFIG_CRYPTO_DEV_PADLOCK_AES=m 3308CONFIG_CRYPTO_DEV_PADLOCK_AES=m
@@ -3114,6 +3322,7 @@ CONFIG_VIRTUALIZATION=y
3114CONFIG_BITREVERSE=y 3322CONFIG_BITREVERSE=y
3115CONFIG_GENERIC_FIND_FIRST_BIT=y 3323CONFIG_GENERIC_FIND_FIRST_BIT=y
3116CONFIG_GENERIC_FIND_NEXT_BIT=y 3324CONFIG_GENERIC_FIND_NEXT_BIT=y
3325CONFIG_GENERIC_FIND_LAST_BIT=y
3117CONFIG_CRC_CCITT=m 3326CONFIG_CRC_CCITT=m
3118CONFIG_CRC16=m 3327CONFIG_CRC16=m
3119# CONFIG_CRC_T10DIF is not set 3328# CONFIG_CRC_T10DIF is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook
index b520435082..67373a29d0 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-netbook
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook
@@ -1003,7 +1003,7 @@ CONFIG_NETDEV_1000=y
1003# CONFIG_NS83820 is not set 1003# CONFIG_NS83820 is not set
1004# CONFIG_HAMACHI is not set 1004# CONFIG_HAMACHI is not set
1005# CONFIG_YELLOWFIN is not set 1005# CONFIG_YELLOWFIN is not set
1006# CONFIG_R8169 is not set 1006CONFIG_R8169=y
1007# CONFIG_SIS190 is not set 1007# CONFIG_SIS190 is not set
1008# CONFIG_SKGE is not set 1008# CONFIG_SKGE is not set
1009# CONFIG_SKY2 is not set 1009# CONFIG_SKY2 is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch
new file mode 100644
index 0000000000..bd65daf516
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch
@@ -0,0 +1,128 @@
1diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
2index 32e8c5a..8020453 100644
3--- a/scripts/kconfig/Makefile
4+++ b/scripts/kconfig/Makefile
5@@ -24,6 +24,11 @@ oldconfig: $(obj)/conf
6 silentoldconfig: $(obj)/conf
7 $< -s $(Kconfig)
8
9+nonint_oldconfig: $(obj)/conf
10+ $< -b $(Kconfig)
11+loose_nonint_oldconfig: $(obj)/conf
12+ $< -B $(Kconfig)
13+
14 # Create new linux.pot file
15 # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files
16 # The symlink is used to repair a deficiency in arch/um
17diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
18index fda6313..ed33b66 100644
19--- a/scripts/kconfig/conf.c
20+++ b/scripts/kconfig/conf.c
21@@ -22,6 +22,8 @@
22 ask_all,
23 ask_new,
24 ask_silent,
25+ dont_ask,
26+ dont_ask_dont_tell,
27 set_default,
28 set_yes,
29 set_mod,
30@@ -39,6 +41,8 @@
31
32 static char nohelp_text[] = N_("Sorry, no help available for this option yet.\n");
33
34+static int return_value = 0;
35+
36 static const char *get_help(struct menu *menu)
37 {
38 if (menu_has_help(menu))
39@@ -359,7 +363,10 @@
40
41 switch (prop->type) {
42 case P_MENU:
43- if (input_mode == ask_silent && rootEntry != menu) {
44+ if ((input_mode == ask_silent ||
45+ input_mode == dont_ask ||
46+ input_mode == dont_ask_dont_tell) &&
47+ rootEntry != menu) {
48 check_conf(menu);
49 return;
50 }
51@@ -417,12 +424,21 @@
52 if (sym && !sym_has_value(sym)) {
53 if (sym_is_changable(sym) ||
54 (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
55+ if (input_mode == dont_ask ||
56+ input_mode == dont_ask_dont_tell) {
57+ if (input_mode == dont_ask &&
58+ sym->name && !sym_is_choice_value(sym)) {
59+ fprintf(stderr,"CONFIG_%s\n",sym->name);
60+ ++return_value;
61+ }
62+ } else {
63 if (!conf_cnt++)
64 printf(_("*\n* Restart config...\n*\n"));
65 rootEntry = menu_get_parent_menu(menu);
66 conf(rootEntry);
67 }
68 }
69+ }
70
71 for (child = menu->list; child; child = child->next)
72 check_conf(child);
73@@ -438,7 +454,7 @@
74 bindtextdomain(PACKAGE, LOCALEDIR);
75 textdomain(PACKAGE);
76
77- while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) {
78+ while ((opt = getopt(ac, av, "osbBdD:nmyrh")) != -1) {
79 switch (opt) {
80 case 'o':
81 input_mode = ask_silent;
82@@ -447,6 +463,12 @@
83 input_mode = ask_silent;
84 sync_kconfig = 1;
85 break;
86+ case 'b':
87+ input_mode = dont_ask;
88+ break;
89+ case 'B':
90+ input_mode = dont_ask_dont_tell;
91+ break;
92 case 'd':
93 input_mode = set_default;
94 break;
95@@ -510,6 +532,8 @@
96 case ask_silent:
97 case ask_all:
98 case ask_new:
99+ case dont_ask:
100+ case dont_ask_dont_tell:
101 conf_read(NULL);
102 break;
103 case set_no:
104@@ -571,12 +595,16 @@
105 conf(&rootmenu);
106 input_mode = ask_silent;
107 /* fall through */
108+ case dont_ask:
109+ case dont_ask_dont_tell:
110 case ask_silent:
111 /* Update until a loop caused no more changes */
112 do {
113 conf_cnt = 0;
114 check_conf(&rootmenu);
115- } while (conf_cnt);
116+ } while (conf_cnt &&
117+ (input_mode != dont_ask &&
118+ input_mode != dont_ask_dont_tell));
119 break;
120 }
121
122@@ -598,5 +626,5 @@
123 exit(1);
124 }
125 }
126- return 0;
127+ return return_value;
128 }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch
new file mode 100644
index 0000000000..32b99a99b8
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch
@@ -0,0 +1,11 @@
1--- linux-2.6.28/drivers/gpu/drm/i915/i915_drv.c~ 2009-02-20 21:36:06.000000000 -0800
2+++ linux-2.6.28/drivers/gpu/drm/i915/i915_drv.c 2009-02-20 21:36:06.000000000 -0800
3@@ -35,7 +35,7 @@
4 #include "drm_pciids.h"
5 #include <linux/console.h>
6
7-static unsigned int i915_modeset = -1;
8+static unsigned int i915_modeset = 1;
9 module_param_named(modeset, i915_modeset, int, 0400);
10
11 unsigned int i915_fbpercrtc = 0;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0002-fastboot-remove-wait-for-all-devices-before-mounti.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch
index 9ea6d62a63..02a4474cae 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0002-fastboot-remove-wait-for-all-devices-before-mounti.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch
@@ -22,16 +22,21 @@ Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
22--- 22---
23--- a/init/do_mounts.c 2009-01-07 18:42:10.000000000 -0800 23--- a/init/do_mounts.c 2009-01-07 18:42:10.000000000 -0800
24+++ b/init/do_mounts.c 2009-01-07 18:43:02.000000000 -0800 24+++ b/init/do_mounts.c 2009-01-07 18:43:02.000000000 -0800
25@@ -370,10 +370,12 @@ void __init prepare_namespace(void) 25@@ -370,14 +370,17 @@ void __init prepare_namespace(void)
26 ssleep(root_delay); 26 ssleep(root_delay);
27 } 27 }
28 28
29+#if 0 29+#if 0
30 /* wait for the known devices to complete their probing */ 30 /*
31 while (driver_probe_done() != 0) 31 * wait for the known devices to complete their probing
32 msleep(100); 32 *
33 * Note: this is a potential source of long boot delays.
34 * For example, it is not atypical to wait 5 seconds here
35 * for the touchpad of a laptop to initialize.
36 */
37 wait_for_device_probe();
33+#endif 38+#endif
34 async_synchronize_full(); 39+ async_synchronize_full();
35 40
36 md_run_setup(); 41 md_run_setup();
37 42
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch
new file mode 100644
index 0000000000..a8d68338b5
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch
@@ -0,0 +1,56 @@
1From 2b5cde2b272f56ec67b56a2af8c067d42eff7328 Mon Sep 17 00:00:00 2001
2From: Li Peng <peng.li@intel.com>
3Date: Fri, 13 Mar 2009 10:25:07 +0800
4Subject: drm/i915: Fix LVDS dither setting
5
6Update bdb_lvds_options structure according to its defination in
72D driver. Then we can parse and set 'lvds_dither' bit correctly
8on non-965 chips.
9
10Signed-off-by: Li Peng <peng.li@intel.com>
11Signed-off-by: Eric Anholt <eric@anholt.net>
12---
13 drivers/gpu/drm/i915/intel_bios.h | 12 ++++++------
14 drivers/gpu/drm/i915/intel_lvds.c | 2 +-
15 2 files changed, 7 insertions(+), 7 deletions(-)
16
17diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
18index 5ea715a..de621aa 100644
19--- a/drivers/gpu/drm/i915/intel_bios.h
20+++ b/drivers/gpu/drm/i915/intel_bios.h
21@@ -162,13 +162,13 @@ struct bdb_lvds_options {
22 u8 panel_type;
23 u8 rsvd1;
24 /* LVDS capabilities, stored in a dword */
25- u8 rsvd2:1;
26- u8 lvds_edid:1;
27- u8 pixel_dither:1;
28- u8 pfit_ratio_auto:1;
29- u8 pfit_gfx_mode_enhanced:1;
30- u8 pfit_text_mode_enhanced:1;
31 u8 pfit_mode:2;
32+ u8 pfit_text_mode_enhanced:1;
33+ u8 pfit_gfx_mode_enhanced:1;
34+ u8 pfit_ratio_auto:1;
35+ u8 pixel_dither:1;
36+ u8 lvds_edid:1;
37+ u8 rsvd2:1;
38 u8 rsvd4;
39 } __attribute__((packed));
40
41diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
42index 0d211af..6619f26 100644
43--- a/drivers/gpu/drm/i915/intel_lvds.c
44+++ b/drivers/gpu/drm/i915/intel_lvds.c
45@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
46 pfit_control = 0;
47
48 if (!IS_I965G(dev)) {
49- if (dev_priv->panel_wants_dither)
50+ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
51 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
52 }
53 else
54--
551.6.1.3
56
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0006-Revert-drm-i915-GEM-on-PAE-has-problems-disable.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch
index 77c9fa6ef3..850fa161e9 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0006-Revert-drm-i915-GEM-on-PAE-has-problems-disable.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch
@@ -23,8 +23,8 @@ index afa8a12..553dd4b 100644
23 default: 23 default:
24 DRM_ERROR("Unknown parameter %d\n", param->param); 24 DRM_ERROR("Unknown parameter %d\n", param->param);
25@@ -830,14 +830,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) 25@@ -830,14 +830,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
26 26 "performance may suffer.\n");
27 dev_priv->regs = ioremap(base, size); 27 }
28 28
29-#ifdef CONFIG_HIGHMEM64G 29-#ifdef CONFIG_HIGHMEM64G
30- /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ 30- /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
@@ -34,9 +34,9 @@ index afa8a12..553dd4b 100644
34- dev_priv->has_gem = 1; 34- dev_priv->has_gem = 1;
35-#endif 35-#endif
36- 36-
37 i915_gem_load(dev); 37 dev->driver->get_vblank_counter = i915_get_vblank_counter;
38 38 if (IS_GM45(dev))
39 /* Init HWS */ 39 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
40diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h 40diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
41index b3cc473..adc972c 100644 41index b3cc473..adc972c 100644
42--- a/drivers/gpu/drm/i915/i915_drv.h 42--- a/drivers/gpu/drm/i915/i915_drv.h
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch
new file mode 100644
index 0000000000..9291362f04
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch
@@ -0,0 +1,208 @@
1From b55de80e49892002a1878013ab9aee1a30970be6 Mon Sep 17 00:00:00 2001
2From: Bruce Allan <bruce.w.allan@intel.com>
3Date: Sat, 21 Mar 2009 13:25:25 -0700
4Subject: [PATCH] e100: add support for 82552 10/100 adapter
5
6This patch enables support for the new Intel 82552 adapter (new PHY paired
7with the existing MAC in the ICH7 chipset). No new features are added to
8the driver, however there are minor changes due to updated registers and a
9few workarounds for hardware errata.
10
11Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
12Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
13Signed-off-by: David S. Miller <davem@davemloft.net>
14---
15 drivers/net/e100.c | 93 +++++++++++++++++++++++++++++++++++++++++++---------
16 1 files changed, 77 insertions(+), 16 deletions(-)
17
18diff --git a/drivers/net/e100.c b/drivers/net/e100.c
19index 861d2ee..0504db9 100644
20--- a/drivers/net/e100.c
21+++ b/drivers/net/e100.c
22@@ -167,7 +167,7 @@
23
24 #define DRV_NAME "e100"
25 #define DRV_EXT "-NAPI"
26-#define DRV_VERSION "3.5.23-k6"DRV_EXT
27+#define DRV_VERSION "3.5.24-k2"DRV_EXT
28 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
29 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
30 #define PFX DRV_NAME ": "
31@@ -240,6 +240,7 @@ static struct pci_device_id e100_id_table[] = {
32 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
33 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
34 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
35+ INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
36 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
37 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
38 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
39@@ -275,6 +276,7 @@ enum phy {
40 phy_82562_em = 0x032002A8,
41 phy_82562_ek = 0x031002A8,
42 phy_82562_eh = 0x017002A8,
43+ phy_82552_v = 0xd061004d,
44 phy_unknown = 0xFFFFFFFF,
45 };
46
47@@ -943,6 +945,22 @@ static int mdio_read(struct net_device *netdev, int addr, int reg)
48
49 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
50 {
51+ struct nic *nic = netdev_priv(netdev);
52+
53+ if ((nic->phy == phy_82552_v) && (reg == MII_BMCR) &&
54+ (data & (BMCR_ANRESTART | BMCR_ANENABLE))) {
55+ u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
56+
57+ /*
58+ * Workaround Si issue where sometimes the part will not
59+ * autoneg to 100Mbps even when advertised.
60+ */
61+ if (advert & ADVERTISE_100FULL)
62+ data |= BMCR_SPEED100 | BMCR_FULLDPLX;
63+ else if (advert & ADVERTISE_100HALF)
64+ data |= BMCR_SPEED100;
65+ }
66+
67 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
68 }
69
70@@ -1276,16 +1294,12 @@ static int e100_phy_init(struct nic *nic)
71 if (addr == 32)
72 return -EAGAIN;
73
74- /* Selected the phy and isolate the rest */
75- for (addr = 0; addr < 32; addr++) {
76- if (addr != nic->mii.phy_id) {
77- mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
78- } else {
79- bmcr = mdio_read(netdev, addr, MII_BMCR);
80- mdio_write(netdev, addr, MII_BMCR,
81- bmcr & ~BMCR_ISOLATE);
82- }
83- }
84+ /* Isolate all the PHY ids */
85+ for (addr = 0; addr < 32; addr++)
86+ mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
87+ /* Select the discovered PHY */
88+ bmcr &= ~BMCR_ISOLATE;
89+ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
90
91 /* Get phy ID */
92 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
93@@ -1303,7 +1317,18 @@ static int e100_phy_init(struct nic *nic)
94 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
95 }
96
97- if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
98+ if (nic->phy == phy_82552_v) {
99+ u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
100+
101+ /* Workaround Si not advertising flow-control during autoneg */
102+ advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
103+ mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
104+
105+ /* Reset for the above changes to take effect */
106+ bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
107+ bmcr |= BMCR_RESET;
108+ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
109+ } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
110 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
111 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
112 /* enable/disable MDI/MDI-X auto-switching. */
113@@ -2134,6 +2159,9 @@ err_clean_rx:
114 }
115
116 #define MII_LED_CONTROL 0x1B
117+#define E100_82552_LED_OVERRIDE 0x19
118+#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
119+#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
120 static void e100_blink_led(unsigned long data)
121 {
122 struct nic *nic = (struct nic *)data;
123@@ -2143,10 +2171,19 @@ static void e100_blink_led(unsigned long data)
124 led_on_559 = 0x05,
125 led_on_557 = 0x07,
126 };
127+ u16 led_reg = MII_LED_CONTROL;
128+
129+ if (nic->phy == phy_82552_v) {
130+ led_reg = E100_82552_LED_OVERRIDE;
131
132- nic->leds = (nic->leds & led_on) ? led_off :
133- (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
134- mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
135+ nic->leds = (nic->leds == E100_82552_LED_ON) ?
136+ E100_82552_LED_OFF : E100_82552_LED_ON;
137+ } else {
138+ nic->leds = (nic->leds & led_on) ? led_off :
139+ (nic->mac < mac_82559_D101M) ? led_on_557 :
140+ led_on_559;
141+ }
142+ mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
143 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
144 }
145
146@@ -2375,13 +2412,15 @@ static void e100_diag_test(struct net_device *netdev,
147 static int e100_phys_id(struct net_device *netdev, u32 data)
148 {
149 struct nic *nic = netdev_priv(netdev);
150+ u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
151+ MII_LED_CONTROL;
152
153 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
154 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
155 mod_timer(&nic->blink_timer, jiffies);
156 msleep_interruptible(data * 1000);
157 del_timer_sync(&nic->blink_timer);
158- mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
159+ mdio_write(netdev, nic->mii.phy_id, led_reg, 0);
160
161 return 0;
162 }
163@@ -2686,6 +2725,9 @@ static void __devexit e100_remove(struct pci_dev *pdev)
164 }
165 }
166
167+#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
168+#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
169+#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
170 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
171 {
172 struct net_device *netdev = pci_get_drvdata(pdev);
173@@ -2698,6 +2740,15 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
174 pci_save_state(pdev);
175
176 if ((nic->flags & wol_magic) | e100_asf(nic)) {
177+ /* enable reverse auto-negotiation */
178+ if (nic->phy == phy_82552_v) {
179+ u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
180+ E100_82552_SMARTSPEED);
181+
182+ mdio_write(netdev, nic->mii.phy_id,
183+ E100_82552_SMARTSPEED, smartspeed |
184+ E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
185+ }
186 if (pci_enable_wake(pdev, PCI_D3cold, true))
187 pci_enable_wake(pdev, PCI_D3hot, true);
188 } else {
189@@ -2721,6 +2772,16 @@ static int e100_resume(struct pci_dev *pdev)
190 /* ack any pending wake events, disable PME */
191 pci_enable_wake(pdev, 0, 0);
192
193+	/* disable reverse auto-negotiation */
194+ if (nic->phy == phy_82552_v) {
195+ u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
196+ E100_82552_SMARTSPEED);
197+
198+ mdio_write(netdev, nic->mii.phy_id,
199+ E100_82552_SMARTSPEED,
200+ smartspeed & ~(E100_82552_REV_ANEG));
201+ }
202+
203 netif_device_attach(netdev);
204 if (netif_running(netdev))
205 e100_up(nic);
206--
2071.5.5.1
208
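The mdio_write() hunk above encodes a silicon workaround: when an
autonegotiation (re)start is requested on an 82552, the driver also forces the
speed/duplex bits so a part that fails to autonegotiate to 100Mbps still links
at the advertised rate. A minimal sketch of that logic (the helper name is
hypothetical; the BMCR_* and ADVERTISE_* constants come from
include/linux/mii.h):

	#include <linux/mii.h>

	/* Mirror of the workaround: force speed/duplex to the advertised
	 * 100Mbps mode whenever autoneg is (re)enabled. */
	static u16 e100_82552_fixup_bmcr(u16 bmcr, u16 advert)
	{
		if (bmcr & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			if (advert & ADVERTISE_100FULL)
				bmcr |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				bmcr |= BMCR_SPEED100;
		}
		return bmcr;
	}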
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0005-fastboot-async-enable-default.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch
index 6eea4f6e17..6eea4f6e17 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0005-fastboot-async-enable-default.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch
new file mode 100644
index 0000000000..80d1edf0aa
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch
@@ -0,0 +1,20 @@
1--- linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c.org 2009-03-21 19:57:13.000000000 -0700
2+++ linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c 2009-03-21 19:57:25.000000000 -0700
3@@ -221,7 +221,7 @@ static void intel_lvds_prepare(struct dr
4 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
5 BACKLIGHT_DUTY_CYCLE_MASK);
6
7- intel_lvds_set_power(dev, false);
8+// intel_lvds_set_power(dev, false);
9 }
10
11 static void intel_lvds_commit( struct drm_encoder *encoder)
12@@ -233,7 +233,7 @@ static void intel_lvds_commit( struct dr
13 dev_priv->backlight_duty_cycle =
14 intel_lvds_get_max_backlight(dev);
15
16- intel_lvds_set_power(dev, true);
17+// intel_lvds_set_power(dev, true);
18 }
19
20 static void intel_lvds_mode_set(struct drm_encoder *encoder,
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0003-fastboot-remove-duplicate-unpack_to_rootfs.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch
index ea4c617ed9..ea4c617ed9 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0003-fastboot-remove-duplicate-unpack_to_rootfs.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch
new file mode 100644
index 0000000000..f213958bf5
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch
@@ -0,0 +1,285 @@
1diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
2index 1c3a8c5..144624a 100644
3--- a/drivers/gpu/drm/drm_crtc_helper.c
4+++ b/drivers/gpu/drm/drm_crtc_helper.c
5@@ -29,6 +29,8 @@
6 * Jesse Barnes <jesse.barnes@intel.com>
7 */
8
9+#include <linux/async.h>
10+
11 #include "drmP.h"
12 #include "drm_crtc.h"
13 #include "drm_crtc_helper.h"
14@@ -42,6 +44,8 @@ static struct drm_display_mode std_modes[] = {
15 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
16 };
17
18+LIST_HEAD(drm_async_list);
19+
20 /**
21 * drm_helper_probe_connector_modes - get complete set of display modes
22 * @dev: DRM device
23@@ -137,6 +141,26 @@ int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
24 }
25 EXPORT_SYMBOL(drm_helper_probe_connector_modes);
26
27+int drm_helper_probe_connector_modes_fast(struct drm_device *dev, uint32_t maxX,
28+ uint32_t maxY)
29+{
30+ struct drm_connector *connector;
31+ int count = 0;
32+
33+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
34+ count += drm_helper_probe_single_connector_modes(connector,
35+ maxX, maxY);
36+ /*
37+ * If we found a 'good' connector, we stop probing futher.
38+ */
39+ if (count > 0)
40+ break;
41+ }
42+
43+ return count;
44+}
45+EXPORT_SYMBOL(drm_helper_probe_connector_modes_fast);
46+
47 static void drm_helper_add_std_modes(struct drm_device *dev,
48 struct drm_connector *connector)
49 {
50@@ -882,6 +906,24 @@ bool drm_helper_plugged_event(struct drm_device *dev)
51 /* FIXME: send hotplug event */
52 return true;
53 }
54+
55+static void async_notify_fb_changed(void *data, async_cookie_t cookie)
56+{
57+ struct drm_device *dev = data;
58+ dev->mode_config.funcs->fb_changed(dev);
59+}
60+
61+static void async_probe_hard(void *data, async_cookie_t cookie)
62+{
63+ struct drm_device *dev = data;
64+ /* Need to wait for async_notify_fb_changed to be done */
65+ async_synchronize_cookie_domain(cookie, &drm_async_list);
66+ drm_helper_probe_connector_modes(dev,
67+ dev->mode_config.max_width,
68+ dev->mode_config.max_height);
69+}
70+
71+
72 /**
73 * drm_initial_config - setup a sane initial connector configuration
74 * @dev: DRM device
75@@ -902,7 +944,7 @@ bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
76 struct drm_connector *connector;
77 int count = 0;
78
79- count = drm_helper_probe_connector_modes(dev,
80+ count = drm_helper_probe_connector_modes_fast(dev,
81 dev->mode_config.max_width,
82 dev->mode_config.max_height);
83
84@@ -921,7 +963,9 @@ bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
85 drm_setup_crtcs(dev);
86
87 /* alert the driver fb layer */
88- dev->mode_config.funcs->fb_changed(dev);
89+ async_schedule_domain(async_notify_fb_changed, dev, &drm_async_list);
90+ /* probe further outputs */
91+ async_schedule_domain(async_probe_hard, dev, &drm_async_list);
92
93 return 0;
94 }
95diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
96index 14c7a23..ef52021 100644
97--- a/drivers/gpu/drm/drm_drv.c
98+++ b/drivers/gpu/drm/drm_drv.c
99@@ -48,6 +48,7 @@
100
101 #include "drmP.h"
102 #include "drm_core.h"
103+#include <linux/async.h>
104
105 static int drm_version(struct drm_device *dev, void *data,
106 struct drm_file *file_priv);
107@@ -345,6 +346,9 @@ void drm_exit(struct drm_driver *driver)
108 struct drm_device *dev, *tmp;
109 DRM_DEBUG("\n");
110
111+ /* make sure all async DRM operations are finished */
112+ async_synchronize_full_domain(&drm_async_list);
113+
114 list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
115 drm_cleanup(dev);
116
117diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
118index a839a28..069b189 100644
119--- a/drivers/gpu/drm/drm_edid.c
120+++ b/drivers/gpu/drm/drm_edid.c
121@@ -588,20 +588,22 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
122 {
123 struct i2c_algo_bit_data *algo_data = adapter->algo_data;
124 unsigned char *edid = NULL;
125+ int divider = 5;
126 int i, j;
127
128 algo_data->setscl(algo_data->data, 1);
129
130- for (i = 0; i < 1; i++) {
131+ for (i = 0; i < 2; i++) {
132 /* For some old monitors we need the
133 * following process to initialize/stop DDC
134 */
135+
136 algo_data->setsda(algo_data->data, 1);
137- msleep(13);
138+ msleep(13 / divider);
139
140 algo_data->setscl(algo_data->data, 1);
141 for (j = 0; j < 5; j++) {
142- msleep(10);
143+ msleep(10 / divider);
144 if (algo_data->getscl(algo_data->data))
145 break;
146 }
147@@ -609,31 +611,33 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
148 continue;
149
150 algo_data->setsda(algo_data->data, 0);
151- msleep(15);
152+ msleep(15 / divider);
153 algo_data->setscl(algo_data->data, 0);
154- msleep(15);
155+ msleep(15 / divider);
156 algo_data->setsda(algo_data->data, 1);
157- msleep(15);
158+ msleep(15 / divider);
159
160 /* Do the real work */
161 edid = drm_do_probe_ddc_edid(adapter);
162 algo_data->setsda(algo_data->data, 0);
163 algo_data->setscl(algo_data->data, 0);
164- msleep(15);
165+ msleep(15 / divider);
166
167 algo_data->setscl(algo_data->data, 1);
168 for (j = 0; j < 10; j++) {
169- msleep(10);
170+ msleep(10 / divider);
171 if (algo_data->getscl(algo_data->data))
172 break;
173 }
174
175 algo_data->setsda(algo_data->data, 1);
176- msleep(15);
177+ msleep(15 / divider);
178 algo_data->setscl(algo_data->data, 0);
179 algo_data->setsda(algo_data->data, 0);
180+
181 if (edid)
182 break;
183+ divider = 1;
184 }
185 /* Release the DDC lines when done or the Apple Cinema HD display
186 * will switch off
187diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
188index a283427..6f2eced 100644
189--- a/drivers/gpu/drm/i915/intel_display.c
190+++ b/drivers/gpu/drm/i915/intel_display.c
191@@ -319,7 +319,7 @@ void
192 intel_wait_for_vblank(struct drm_device *dev)
193 {
194 /* Wait for 20ms, i.e. one cycle at 50hz. */
195- udelay(20000);
196+ mdelay(20);
197 }
198
199 static int
200@@ -1466,12 +1466,12 @@ static void intel_setup_outputs(struct drm_device *dev)
201 struct drm_i915_private *dev_priv = dev->dev_private;
202 struct drm_connector *connector;
203
204- intel_crt_init(dev);
205-
206- /* Set up integrated LVDS */
207+ /* Set up integrated LVDS -- will skip if the lid is closed */
208 if (IS_MOBILE(dev) && !IS_I830(dev))
209 intel_lvds_init(dev);
210
211+ intel_crt_init(dev);
212+
213 if (IS_I9XX(dev)) {
214 int found;
215
216diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
217index 957daef..22a74bd 100644
218--- a/drivers/gpu/drm/i915/intel_drv.h
219+++ b/drivers/gpu/drm/i915/intel_drv.h
220@@ -81,6 +81,7 @@ struct intel_output {
221 int type;
222 struct intel_i2c_chan *i2c_bus; /* for control functions */
223 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
224+ struct edid *edid;
225 bool load_detect_temp;
226 bool needs_tv_clock;
227 void *dev_priv;
228diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
229index 0d211af..dc4fecc 100644
230--- a/drivers/gpu/drm/i915/intel_lvds.c
231+++ b/drivers/gpu/drm/i915/intel_lvds.c
232@@ -336,6 +336,7 @@ static void intel_lvds_destroy(struct drm_connector *connector)
233 intel_i2c_destroy(intel_output->ddc_bus);
234 drm_sysfs_connector_remove(connector);
235 drm_connector_cleanup(connector);
236+ kfree(intel_output->edid);
237 kfree(connector);
238 }
239
240@@ -516,5 +517,6 @@ failed:
241 if (intel_output->ddc_bus)
242 intel_i2c_destroy(intel_output->ddc_bus);
243 drm_connector_cleanup(connector);
244+ kfree(intel_output->edid);
245 kfree(connector);
246 }
247diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
248index e42019e..8c0d5f6 100644
249--- a/drivers/gpu/drm/i915/intel_modes.c
250+++ b/drivers/gpu/drm/i915/intel_modes.c
251@@ -70,13 +70,21 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
252 struct edid *edid;
253 int ret = 0;
254
255+ if (intel_output->edid) {
256+ printk(KERN_INFO "Skipping EDID probe due to cached edid\n");
257+ return ret;
258+ }
259+
260 edid = drm_get_edid(&intel_output->base,
261 &intel_output->ddc_bus->adapter);
262 if (edid) {
263 drm_mode_connector_update_edid_property(&intel_output->base,
264 edid);
265 ret = drm_add_edid_modes(&intel_output->base, edid);
266- kfree(edid);
267+ if (intel_output->type == INTEL_OUTPUT_LVDS)
268+ intel_output->edid = edid;
269+ else
270+ kfree(edid);
271 }
272
273 return ret;
274diff --git a/include/drm/drmP.h b/include/drm/drmP.h
275index e5f4ae9..69ce4f4 100644
276--- a/include/drm/drmP.h
277+++ b/include/drm/drmP.h
278@@ -304,6 +304,7 @@ struct drm_vma_entry {
279 pid_t pid;
280 };
281
282+extern struct list_head drm_async_list;
283 /**
284 * DMA buffer.
285 */
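The scheme above relies on the 2.6.29 async infrastructure: work is scheduled
into a private cookie domain (drm_async_list) so the driver can later
synchronize against only its own outstanding work rather than the whole
system. A minimal sketch of that pattern under assumed names (slow_probe,
kick_probe and teardown are hypothetical; async_schedule_domain() and
async_synchronize_full_domain() are the kernel APIs the patch itself uses):

	#include <linux/async.h>

	static LIST_HEAD(my_async_domain);	/* private cookie domain */

	static void slow_probe(void *data, async_cookie_t cookie)
	{
		/* runs concurrently with the rest of boot */
	}

	static void kick_probe(void *dev)
	{
		async_schedule_domain(slow_probe, dev, &my_async_domain);
	}

	static void teardown(void)
	{
		/* wait only for our own async work before cleanup */
		async_synchronize_full_domain(&my_async_domain);
	}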
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch
new file mode 100644
index 0000000000..eda77564ce
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch
@@ -0,0 +1,40 @@
1From: Arjan van de Ven <arjan@linux.intel.com>
2Date: Mon, 26 Jan 2009 18:58:11 -0800
3Subject: [PATCH] ide/net: flip the order of SATA and network init
4
5This patch flips the order in which SATA and network drivers are initialized.
6
7SATA probing takes quite a bit of time, and with the asynchronous infrastructure
8other drivers that run after it can execute in parallel. Network drivers do tend
9to take some real time talking to the hardware, so running them later is
10a good thing (the SATA probe then runs concurrently).
11
12This saves about 15% of my kernel's boot time.
13
14Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
15---
16 drivers/Makefile | 5 +++--
17 1 files changed, 3 insertions(+), 2 deletions(-)
18
19diff --git a/drivers/Makefile b/drivers/Makefile
20index c1bf417..2618a61 100644
21--- a/drivers/Makefile
22+++ b/drivers/Makefile
23@@ -36,13 +36,14 @@
24 obj-$(CONFIG_FB_INTEL) += video/intelfb/
25 obj-y += serial/
26 obj-$(CONFIG_PARPORT) += parport/
27-obj-y += base/ block/ misc/ mfd/ net/ media/
28+obj-y += base/ block/ misc/ mfd/ media/
29 obj-$(CONFIG_NUBUS) += nubus/
30-obj-$(CONFIG_ATM) += atm/
31 obj-y += macintosh/
32 obj-$(CONFIG_IDE) += ide/
33 obj-$(CONFIG_SCSI) += scsi/
34 obj-$(CONFIG_ATA) += ata/
35+obj-y += net/
36+obj-$(CONFIG_ATM) += atm/
37 obj-$(CONFIG_FUSION) += message/
38 obj-$(CONFIG_FIREWIRE) += firewire/
39 obj-y += ieee1394/
40
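The reordering works because built-in drivers' module_init() functions all
become device-level initcalls, and initcalls within one level run in the order
their objects are linked. A sketch of the effect (comments only, not part of
the patch):

	/* Link order in drivers/Makefile now decides probe order:
	 *   obj-$(CONFIG_ATA) += ata/   -- SATA probing kicks off first
	 *   obj-y             += net/   -- network init then overlaps the
	 *                                  (asynchronous) SATA probe
	 */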
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch
new file mode 100644
index 0000000000..1ae8257203
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch
@@ -0,0 +1,92 @@
1From 2c5ccde448ae5f4062802bcd6002f856acbd268f Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Tue, 3 Feb 2009 16:26:16 -0800
4Subject: [PATCH] input: introduce a tougher i8042.reset
5
6Some bad touchpads don't reset right the first time (MSI Wind U-100 for
7example). This patch will retry the reset up to 5 times.
8
9In addition, this patch adds a module parameter so that reset failures
10are not treated as fatal to the use of the device. This prevents a
11touchpad failure from also disabling the keyboard.
12
13Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
14---
15 Documentation/kernel-parameters.txt | 2 ++
16 drivers/input/serio/i8042.c | 33 ++++++++++++++++++++++++---------
17 2 files changed, 26 insertions(+), 9 deletions(-)
18
19diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
20index ac613a6..a43e3bd 100644
21--- a/Documentation/kernel-parameters.txt
22+++ b/Documentation/kernel-parameters.txt
23@@ -855,6 +855,8 @@ and is between 256 and 4096 characters. It is defined in the file
24 [HW] Frequency with which keyboard LEDs should blink
25 when kernel panics (default is 0.5 sec)
26 i8042.reset [HW] Reset the controller during init and cleanup
27+ i8042.nonfatal [HW] Don't treat i8042.reset failures as fatal for the
28+ device initialization.
29 i8042.unlock [HW] Unlock (ignore) the keylock
30
31 i810= [HW,DRM]
32diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
33index 170f71e..2473a9a 100644
34--- a/drivers/input/serio/i8042.c
35+++ b/drivers/input/serio/i8042.c
36@@ -47,6 +47,10 @@ static unsigned int i8042_reset;
37 module_param_named(reset, i8042_reset, bool, 0);
38 MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
39
40+static unsigned int i8042_nonfatal;
41+module_param_named(nonfatal, i8042_nonfatal, bool, 0);
42+MODULE_PARM_DESC(nonfatal, "Treat controller test failures as non-fatal.");
43+
44 static unsigned int i8042_direct;
45 module_param_named(direct, i8042_direct, bool, 0);
46 MODULE_PARM_DESC(direct, "Put keyboard port into non-translated mode.");
47@@ -712,22 +716,33 @@ static int i8042_controller_check(void)
48 static int i8042_controller_selftest(void)
49 {
50 unsigned char param;
51+ int i = 0;
52
53 if (!i8042_reset)
54 return 0;
55
56- if (i8042_command(&param, I8042_CMD_CTL_TEST)) {
57- printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n");
58- return -ENODEV;
59- }
60+ /*
61+ * We try this 5 times; on some really fragile systems this does not
62+ * take the first time...
63+ */
64+ do {
65+
66+ if (i8042_command(&param, I8042_CMD_CTL_TEST)) {
67+ printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n");
68+ return -ENODEV;
69+ }
70+
71+ if (param == I8042_RET_CTL_TEST)
72+ return 0;
73
74- if (param != I8042_RET_CTL_TEST) {
75 printk(KERN_ERR "i8042.c: i8042 controller selftest failed. (%#x != %#x)\n",
76- param, I8042_RET_CTL_TEST);
77- return -EIO;
78- }
79+ param, I8042_RET_CTL_TEST);
80+ msleep(50);
81+ } while (i++ < 5);
82
83- return 0;
84+ if (i8042_nonfatal)
85+ return 0;
86+ return -EIO;
87 }
88
89 /*
90--
911.6.0.6
92
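Usage note: with the change above applied, booting with the parameters
i8042.reset and i8042.nonfatal retries the controller self-test up to five
times and, if it still fails, continues probing instead of returning -EIO, so
a flaky touchpad no longer disables the keyboard as well.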
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch
new file mode 100644
index 0000000000..d7bd92151b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch
@@ -0,0 +1,28 @@
1From 0143f8eb8afcaccba5a78196fb3db4361e0097a7 Mon Sep 17 00:00:00 2001
2From: Arjan van de Ven <arjan@linux.intel.com>
3Date: Mon, 9 Feb 2009 21:25:32 -0800
4Subject: [PATCH] jbd: longer commit interval
5
6A commit interval of 5 seconds is rather harsh on SSDs; raise the default to 15.
7
8Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
9---
10 include/linux/jbd.h | 2 +-
11 1 files changed, 1 insertions(+), 1 deletions(-)
12
13diff --git a/include/linux/jbd.h b/include/linux/jbd.h
14index 64246dc..d64b7fd 100644
15--- a/include/linux/jbd.h
16+++ b/include/linux/jbd.h
17@@ -46,7 +46,7 @@
18 /*
19 * The default maximum commit age, in seconds.
20 */
21-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
22+#define JBD_DEFAULT_MAX_COMMIT_AGE 15
23
24 #ifdef CONFIG_JBD_DEBUG
25 /*
26--
271.6.0.6
28
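Trade-off note: raising JBD_DEFAULT_MAX_COMMIT_AGE from 5 to 15 seconds
batches journal commits into fewer, larger writes (friendlier to SSDs and to
power management), at the cost that a crash can lose up to 15 seconds of
buffered changes. The default can still be overridden per filesystem with the
ext3 commit= mount option (e.g. commit=5).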
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch
new file mode 100644
index 0000000000..663b367971
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch
@@ -0,0 +1,32 @@
1--- linux-2.6.28/drivers/Makefile~ 2009-03-21 21:23:28.000000000 -0700
2+++ linux-2.6.28/drivers/Makefile 2009-03-21 21:23:28.000000000 -0700
3@@ -25,15 +25,8 @@
4 # default.
5 obj-y += char/
6
7-# gpu/ comes after char for AGP vs DRM startup
8-obj-y += gpu/
9-
10 obj-$(CONFIG_CONNECTOR) += connector/
11
12-# i810fb and intelfb depend on char/agp/
13-obj-$(CONFIG_FB_I810) += video/i810/
14-obj-$(CONFIG_FB_INTEL) += video/intelfb/
15-
16 obj-y += serial/
17 obj-$(CONFIG_PARPORT) += parport/
18 obj-y += base/ block/ misc/ mfd/ media/
19@@ -43,6 +36,13 @@
20 obj-$(CONFIG_SCSI) += scsi/
21 obj-$(CONFIG_ATA) += ata/
22 obj-y += net/
23+
24+# gpu/ comes after char for AGP vs DRM startup
25+obj-y += gpu/
26+# i810fb and intelfb depend on char/agp/
27+obj-$(CONFIG_FB_I810) += video/i810/
28+obj-$(CONFIG_FB_INTEL) += video/intelfb/
29+
30 obj-$(CONFIG_ATM) += atm/
31 obj-$(CONFIG_FUSION) += message/
32 obj-$(CONFIG_FIREWIRE) += firewire/
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch
new file mode 100644
index 0000000000..e7fded41e8
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch
@@ -0,0 +1,57 @@
1Patch to get the touchpad on the MSI Wind U-100 working: DMI quirks for the MSI
2Wind U-100 and LG X110 force i8042_reset plus the i8042_nonfatal flag (above).
3
4--- linux-2.6.28/drivers/input/serio/i8042-x86ia64io.h.org 2009-02-01 18:31:29.000000000 -0800
5+++ linux-2.6.28/drivers/input/serio/i8042-x86ia64io.h 2009-02-01 18:35:26.000000000 -0800
6@@ -378,6 +378,13 @@ static struct dmi_system_id __initdata i
7 DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
8 },
9 },
10+ {
11+ .ident = "MSI Wind U-100",
12+ .matches = {
13+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
14+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
15+ },
16+ },
17 { }
18 };
19 #endif
20@@ -448,6 +455,25 @@ static struct dmi_system_id __initdata i
21 { }
22 };
23
24+static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
25+ {
26+ .ident = "MSI Wind U-100",
27+ .matches = {
28+ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
29+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
30+ },
31+ },
32+ {
33+ .ident = "LG Electronics X110",
34+ .matches = {
35+ DMI_MATCH(DMI_BOARD_NAME, "X110"),
36+ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
37+ },
38+ },
39+ { }
40+};
41+
42+
43 #endif /* CONFIG_X86 */
44
45 #ifdef CONFIG_PNP
46@@ -564,6 +583,11 @@ static int __init i8042_pnp_init(void)
47 i8042_nopnp = 1;
48 #endif
49
50+ if (dmi_check_system(i8042_dmi_reset_table)) {
51+ i8042_reset = 1;
52+ i8042_nonfatal = 1;
53+ }
54+
55 if (i8042_nopnp) {
56 printk(KERN_INFO "i8042: PNP detection disabled\n");
57 return 0;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch
new file mode 100644
index 0000000000..77e553956c
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch
@@ -0,0 +1,83 @@
1From eaf05431b9ea8676d23106e6373b7d2b8ff2d97d Mon Sep 17 00:00:00 2001
2From: Shaohua Li <shaohua.li@intel.com>
3Date: Mon, 23 Feb 2009 15:19:16 +0800
4Subject: agp/intel: Add support for new intel chipset.
5
6This is a G33-like desktop and mobile chipset.
7
8Signed-off-by: Shaohua Li <shaohua.li@intel.com>
9Signed-off-by: Eric Anholt <eric@anholt.net>
10---
11 drivers/char/agp/intel-agp.c | 21 ++++++++++++++++++---
12 1 files changed, 18 insertions(+), 3 deletions(-)
13
14diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
15index c771418..0232cfc 100644
16--- a/drivers/char/agp/intel-agp.c
17+++ b/drivers/char/agp/intel-agp.c
18@@ -26,6 +26,10 @@
19 #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
20 #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
21 #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
22+#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
23+#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
24+#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
25+#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
26 #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
27 #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
28 #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
29@@ -60,7 +64,12 @@
30
31 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
32 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
33- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
34+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
35+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
36+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
37+
38+#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
39+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
40
41 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
42 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
43@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
44 size = 512;
45 }
46 size += 4; /* add in BIOS popup space */
47- } else if (IS_G33) {
48+ } else if (IS_G33 && !IS_IGD) {
49 /* G33's GTT size defined in gmch_ctrl */
50 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
51 case G33_PGETBL_SIZE_1M:
52@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
53 size = 512;
54 }
55 size += 4;
56- } else if (IS_G4X) {
57+ } else if (IS_G4X || IS_IGD) {
58 /* On 4 series hardware, GTT stolen is separate from graphics
59 * stolen, ignore it in stolen gtt entries counting. However,
60 * 4KB of the stolen memory doesn't get mapped to the GTT.
61@@ -2159,6 +2168,10 @@ static const struct intel_driver_description {
62 NULL, &intel_g33_driver },
63 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
64 NULL, &intel_g33_driver },
65+ { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
66+ NULL, &intel_g33_driver },
67+ { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
68+ NULL, &intel_g33_driver },
69 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
70 "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
71 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
72@@ -2353,6 +2366,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
73 ID(PCI_DEVICE_ID_INTEL_82945G_HB),
74 ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
75 ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
76+ ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
77+ ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
78 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
79 ID(PCI_DEVICE_ID_INTEL_82G35_HB),
80 ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
81--
821.6.1.3
83
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch
new file mode 100644
index 0000000000..1e7b866949
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch
@@ -0,0 +1,336 @@
1From 8b941bea1d0fe0c5cf0de938cd0bd89ce6640dbb Mon Sep 17 00:00:00 2001
2From: Shaohua Li <shaohua.li@intel.com>
3Date: Mon, 23 Feb 2009 15:19:19 +0800
4Subject: drm/i915: Add support for new G33-like chipset.
5
6This chip is nearly the same as G33, but requires new clock settings.
7
8Signed-off-by: Shaohua Li <shaohua.li@intel.com>
9Signed-off-by: Eric Anholt <eric@anholt.net>
10---
11 drivers/gpu/drm/i915/i915_drv.h | 10 +++-
12 drivers/gpu/drm/i915/i915_reg.h | 4 +
13 drivers/gpu/drm/i915/intel_display.c | 111 +++++++++++++++++++++++++++++-----
14 include/drm/drm_pciids.h | 2 +
15 4 files changed, 109 insertions(+), 18 deletions(-)
16
17diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
18index 0e27854..36d6bc3 100644
19--- a/drivers/gpu/drm/i915/i915_drv.h
20+++ b/drivers/gpu/drm/i915/i915_drv.h
21@@ -787,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
22 (dev)->pci_device == 0x2E22 || \
23 IS_GM45(dev))
24
25+#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
26+#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
27+#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
28+
29 #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
30 (dev)->pci_device == 0x29B2 || \
31- (dev)->pci_device == 0x29D2)
32+ (dev)->pci_device == 0x29D2 || \
33+ (IS_IGD(dev)))
34
35 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
36 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
37
38 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
39- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
40+ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
41+ IS_IGD(dev))
42
43 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
44 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
45diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
46index 9d6539a..f07d315 100644
47--- a/drivers/gpu/drm/i915/i915_reg.h
48+++ b/drivers/gpu/drm/i915/i915_reg.h
49@@ -358,6 +358,7 @@
50 #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
51 #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
52 #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
53+#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
54
55 #define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
56 #define I915_CRC_ERROR_ENABLE (1UL<<29)
57@@ -434,6 +435,7 @@
58 */
59 #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
60 #define DPLL_FPA01_P1_POST_DIV_SHIFT 16
61+#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
62 /* i830, required in DVO non-gang */
63 #define PLL_P2_DIVIDE_BY_4 (1 << 23)
64 #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
65@@ -500,10 +502,12 @@
66 #define FPB0 0x06048
67 #define FPB1 0x0604c
68 #define FP_N_DIV_MASK 0x003f0000
69+#define FP_N_IGD_DIV_MASK 0x00ff0000
70 #define FP_N_DIV_SHIFT 16
71 #define FP_M1_DIV_MASK 0x00003f00
72 #define FP_M1_DIV_SHIFT 8
73 #define FP_M2_DIV_MASK 0x0000003f
74+#define FP_M2_IGD_DIV_MASK 0x000000ff
75 #define FP_M2_DIV_SHIFT 0
76 #define DPLL_TEST 0x606c
77 #define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
78diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
79index a283427..1702564 100644
80--- a/drivers/gpu/drm/i915/intel_display.c
81+++ b/drivers/gpu/drm/i915/intel_display.c
82@@ -90,18 +90,32 @@ typedef struct {
83 #define I9XX_DOT_MAX 400000
84 #define I9XX_VCO_MIN 1400000
85 #define I9XX_VCO_MAX 2800000
86+#define IGD_VCO_MIN 1700000
87+#define IGD_VCO_MAX 3500000
88 #define I9XX_N_MIN 1
89 #define I9XX_N_MAX 6
90+/* IGD's Ncounter is a ring counter */
91+#define IGD_N_MIN 3
92+#define IGD_N_MAX 6
93 #define I9XX_M_MIN 70
94 #define I9XX_M_MAX 120
95+#define IGD_M_MIN 2
96+#define IGD_M_MAX 256
97 #define I9XX_M1_MIN 10
98 #define I9XX_M1_MAX 22
99 #define I9XX_M2_MIN 5
100 #define I9XX_M2_MAX 9
101+/* IGD M1 is reserved, and must be 0 */
102+#define IGD_M1_MIN 0
103+#define IGD_M1_MAX 0
104+#define IGD_M2_MIN 0
105+#define IGD_M2_MAX 254
106 #define I9XX_P_SDVO_DAC_MIN 5
107 #define I9XX_P_SDVO_DAC_MAX 80
108 #define I9XX_P_LVDS_MIN 7
109 #define I9XX_P_LVDS_MAX 98
110+#define IGD_P_LVDS_MIN 7
111+#define IGD_P_LVDS_MAX 112
112 #define I9XX_P1_MIN 1
113 #define I9XX_P1_MAX 8
114 #define I9XX_P2_SDVO_DAC_SLOW 10
115@@ -115,6 +129,8 @@ typedef struct {
116 #define INTEL_LIMIT_I8XX_LVDS 1
117 #define INTEL_LIMIT_I9XX_SDVO_DAC 2
118 #define INTEL_LIMIT_I9XX_LVDS 3
119+#define INTEL_LIMIT_IGD_SDVO_DAC 4
120+#define INTEL_LIMIT_IGD_LVDS 5
121
122 static const intel_limit_t intel_limits[] = {
123 { /* INTEL_LIMIT_I8XX_DVO_DAC */
124@@ -168,6 +184,32 @@ static const intel_limit_t intel_limits[] = {
125 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
126 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
127 },
128+ { /* INTEL_LIMIT_IGD_SDVO */
129+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
130+ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
131+ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
132+ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
133+ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
134+ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
135+ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
136+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
137+ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
138+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
139+ },
140+ { /* INTEL_LIMIT_IGD_LVDS */
141+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
142+ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
143+ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
144+ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
145+ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
146+ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
147+ .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
148+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
149+ /* IGD only supports single-channel mode. */
150+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
151+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
152+ },
153+
154 };
155
156 static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
157@@ -175,11 +217,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
158 struct drm_device *dev = crtc->dev;
159 const intel_limit_t *limit;
160
161- if (IS_I9XX(dev)) {
162+ if (IS_I9XX(dev) && !IS_IGD(dev)) {
163 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
164 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
165 else
166 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
167+ } else if (IS_IGD(dev)) {
168+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
169+ limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
170+ else
171+ limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
172 } else {
173 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
174 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
175@@ -189,8 +236,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
176 return limit;
177 }
178
179-static void intel_clock(int refclk, intel_clock_t *clock)
180+/* m1 is reserved as 0 in IGD, n is a ring counter */
181+static void igd_clock(int refclk, intel_clock_t *clock)
182 {
183+ clock->m = clock->m2 + 2;
184+ clock->p = clock->p1 * clock->p2;
185+ clock->vco = refclk * clock->m / clock->n;
186+ clock->dot = clock->vco / clock->p;
187+}
188+
189+static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
190+{
191+ if (IS_IGD(dev)) {
192+ igd_clock(refclk, clock);
193+ return;
194+ }
195 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
196 clock->p = clock->p1 * clock->p2;
197 clock->vco = refclk * clock->m / (clock->n + 2);
198@@ -226,6 +286,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
199 static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
200 {
201 const intel_limit_t *limit = intel_limit (crtc);
202+ struct drm_device *dev = crtc->dev;
203
204 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
205 INTELPllInvalid ("p1 out of range\n");
206@@ -235,7 +296,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
207 INTELPllInvalid ("m2 out of range\n");
208 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
209 INTELPllInvalid ("m1 out of range\n");
210- if (clock->m1 <= clock->m2)
211+ if (clock->m1 <= clock->m2 && !IS_IGD(dev))
212 INTELPllInvalid ("m1 <= m2\n");
213 if (clock->m < limit->m.min || limit->m.max < clock->m)
214 INTELPllInvalid ("m out of range\n");
215@@ -289,15 +350,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
216 memset (best_clock, 0, sizeof (*best_clock));
217
218 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
219- for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
220- clock.m2 <= limit->m2.max; clock.m2++) {
221+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
222+ /* m1 is always 0 in IGD */
223+ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
224+ break;
225 for (clock.n = limit->n.min; clock.n <= limit->n.max;
226 clock.n++) {
227 for (clock.p1 = limit->p1.min;
228 clock.p1 <= limit->p1.max; clock.p1++) {
229 int this_err;
230
231- intel_clock(refclk, &clock);
232+ intel_clock(dev, refclk, &clock);
233
234 if (!intel_PLL_is_valid(crtc, &clock))
235 continue;
236@@ -634,7 +697,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
237 return 400000;
238 else if (IS_I915G(dev))
239 return 333000;
240- else if (IS_I945GM(dev) || IS_845G(dev))
241+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
242 return 200000;
243 else if (IS_I915GM(dev)) {
244 u16 gcfgc = 0;
245@@ -782,7 +845,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
246 return -EINVAL;
247 }
248
249- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
250+ if (IS_IGD(dev))
251+ fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
252+ else
253+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
254
255 dpll = DPLL_VGA_MODE_DIS;
256 if (IS_I9XX(dev)) {
257@@ -799,7 +865,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
258 }
259
260 /* compute bitmask from p1 value */
261- dpll |= (1 << (clock.p1 - 1)) << 16;
262+ if (IS_IGD(dev))
263+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
264+ else
265+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
266 switch (clock.p2) {
267 case 5:
268 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
269@@ -1279,10 +1348,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
270 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
271
272 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
273- clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
274- clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
275+ if (IS_IGD(dev)) {
276+ clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
277+ clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
278+ } else {
279+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
280+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
281+ }
282+
283 if (IS_I9XX(dev)) {
284- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
285+ if (IS_IGD(dev))
286+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
287+ DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
288+ else
289+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
290 DPLL_FPA01_P1_POST_DIV_SHIFT);
291
292 switch (dpll & DPLL_MODE_MASK) {
293@@ -1301,7 +1380,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
294 }
295
296 /* XXX: Handle the 100Mhz refclk */
297- intel_clock(96000, &clock);
298+ intel_clock(dev, 96000, &clock);
299 } else {
300 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
301
302@@ -1313,9 +1392,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
303 if ((dpll & PLL_REF_INPUT_MASK) ==
304 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
305 /* XXX: might not be 66MHz */
306- intel_clock(66000, &clock);
307+ intel_clock(dev, 66000, &clock);
308 } else
309- intel_clock(48000, &clock);
310+ intel_clock(dev, 48000, &clock);
311 } else {
312 if (dpll & PLL_P1_DIVIDE_BY_TWO)
313 clock.p1 = 2;
314@@ -1328,7 +1407,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
315 else
316 clock.p2 = 2;
317
318- intel_clock(48000, &clock);
319+ intel_clock(dev, 48000, &clock);
320 }
321 }
322
323diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
324index 5165f24..76c4c82 100644
325--- a/include/drm/drm_pciids.h
326+++ b/include/drm/drm_pciids.h
327@@ -418,4 +418,6 @@
328 {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
329 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
330 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
331+ {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
332+ {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
333 {0, 0, 0}
334--
3351.6.1.3
336
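A worked example of the igd_clock() formula introduced above, where m1 is
reserved and n is a ring counter (the numbers are illustrative, chosen to fall
inside the IGD limits defined in the patch):

	/* refclk = 96000 kHz, m2 = 100, n = 3, p1 = 2, p2 = 10 */
	/* m   = m2 + 2         = 102                                   */
	/* vco = refclk * m / n = 96000 * 102 / 3 = 3264000 kHz         */
	/*                        (inside IGD_VCO_MIN..IGD_VCO_MAX)     */
	/* p   = p1 * p2        = 20                                    */
	/* dot = vco / p        = 163200 kHz, i.e. ~163 MHz pixel clock */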
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch
new file mode 100644
index 0000000000..c16350f9fd
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch
@@ -0,0 +1,21 @@
1The IGD device only has the last page (4K) used by the GTT; this should align with the AGP GART code.
2
3Signed-off-by: Shaohua Li <shaohua.li@intel.com>
4---
5 drivers/gpu/drm/i915/i915_dma.c | 2 +-
6 1 file changed, 1 insertion(+), 1 deletion(-)
7
8Index: linux/drivers/gpu/drm/i915/i915_dma.c
9===================================================================
10--- linux.orig/drivers/gpu/drm/i915/i915_dma.c 2009-03-13 15:36:12.000000000 +0800
11+++ linux/drivers/gpu/drm/i915/i915_dma.c 2009-03-13 15:37:26.000000000 +0800
12@@ -880,7 +880,7 @@ static int i915_probe_agp(struct drm_dev
13 * Some of the preallocated space is taken by the GTT
14 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
15 */
16- if (IS_G4X(dev))
17+ if (IS_G4X(dev) || IS_IGD(dev))
18 overhead = 4096;
19 else
20 overhead = (*aperture_size / 1024) + 4096;
21
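For scale (illustrative figures): with a 256MB aperture, the non-IGD branch
above reserves 256MB/1024 + 4096 bytes, i.e. 256KB of GTT plus the 4K popup,
while IGD and G4X parts reserve only the fixed 4K because their GTT stolen
memory is accounted separately.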
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch
new file mode 100644
index 0000000000..00a6cf481f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch
@@ -0,0 +1,38 @@
1In IGD, the DPCUNIT_CLOCK_GATE_DISABLE bit should be set; otherwise I2C
2accesses will not work correctly.
3
4Signed-off-by: Shaohua Li <shaohua.li@intel.com>
5---
6 drivers/gpu/drm/i915/i915_reg.h | 1 +
7 drivers/gpu/drm/i915/intel_display.c | 5 +++++
8 2 files changed, 6 insertions(+)
9
10Index: linux/drivers/gpu/drm/i915/i915_reg.h
11===================================================================
12--- linux.orig/drivers/gpu/drm/i915/i915_reg.h 2009-03-16 14:18:27.000000000 +0800
13+++ linux/drivers/gpu/drm/i915/i915_reg.h 2009-03-16 14:28:09.000000000 +0800
14@@ -523,6 +523,7 @@
15 #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
16 #define D_STATE 0x6104
17 #define CG_2D_DIS 0x6200
18+#define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
19 #define CG_3D_DIS 0x6204
20
21 /*
22Index: linux/drivers/gpu/drm/i915/intel_display.c
23===================================================================
24--- linux.orig/drivers/gpu/drm/i915/intel_display.c 2009-03-16 14:16:11.000000000 +0800
25+++ linux/drivers/gpu/drm/i915/intel_display.c 2009-03-16 14:27:46.000000000 +0800
26@@ -1545,6 +1545,11 @@ static void intel_setup_outputs(struct d
27 struct drm_i915_private *dev_priv = dev->dev_private;
28 struct drm_connector *connector;
29
30+ /* When using bit bashing for I2C, this bit needs to be set to 1 */
31+ if (IS_IGD(dev))
32+ I915_WRITE(CG_2D_DIS,
33+ I915_READ(CG_2D_DIS) | DPCUNIT_CLOCK_GATE_DISABLE);
34+
35 intel_crt_init(dev);
36
37 /* Set up integrated LVDS */
38
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch
new file mode 100644
index 0000000000..1003765535
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch
@@ -0,0 +1,28 @@
1diff --git a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c
2index 67934c0..8fc5221 100644
3--- a/drivers/gpu/drm/psb/psb_fb.c
4+++ b/drivers/gpu/drm/psb/psb_fb.c
5@@ -896,8 +896,10 @@ static int psbfb_kms_off(struct drm_device *dev, int suspend)
6 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
7 struct fb_info *info = fb->fbdev;
8
9- if (suspend)
10+ if (suspend) {
11 fb_set_suspend(info, 1);
12+ psbfb_blank(FB_BLANK_POWERDOWN, info);
13+ }
14 }
15 mutex_unlock(&dev->mode_config.mutex);
16
17@@ -928,8 +930,10 @@ static int psbfb_kms_on(struct drm_device *dev, int resume)
18 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
19 struct fb_info *info = fb->fbdev;
20
21- if (resume)
22+ if (resume) {
23 fb_set_suspend(info, 0);
24+ psbfb_blank(FB_BLANK_UNBLANK, info);
25+ }
26
27 }
28 mutex_unlock(&dev->mode_config.mutex);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch
new file mode 100644
index 0000000000..4ffda75e15
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch
@@ -0,0 +1,37524 @@
1diff -uNr a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
2--- a/drivers/gpu/drm/Kconfig 2009-03-23 15:12:14.000000000 -0800
3+++ b/drivers/gpu/drm/Kconfig 2009-04-07 13:28:38.000000000 -0700
4@@ -122,3 +122,14 @@
5 help
6 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
7 chipset. If M is selected the module will be called savage.
8+
9+config DRM_PSB
10+ tristate "Intel Poulsbo/Moorestown"
11+ depends on DRM && PCI
12+ select FB_CFB_COPYAREA
13+ select FB_CFB_FILLRECT
14+ select FB_CFB_IMAGEBLIT
15+ help
16+ Choose this option if you have a Poulsbo or Moorestown platform.
17+ If M is selected the module will be called psb.
18+
19diff -uNr a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
20--- a/drivers/gpu/drm/Makefile 2009-03-23 15:12:14.000000000 -0800
21+++ b/drivers/gpu/drm/Makefile 2009-04-07 13:28:38.000000000 -0700
22@@ -25,4 +25,5 @@
23 obj-$(CONFIG_DRM_SIS) += sis/
24 obj-$(CONFIG_DRM_SAVAGE)+= savage/
25 obj-$(CONFIG_DRM_VIA) +=via/
26+obj-$(CONFIG_DRM_PSB) +=psb/
27
28diff -uNr a/drivers/gpu/drm/psb/lnc_topaz.c b/drivers/gpu/drm/psb/lnc_topaz.c
29--- a/drivers/gpu/drm/psb/lnc_topaz.c 1969-12-31 16:00:00.000000000 -0800
30+++ b/drivers/gpu/drm/psb/lnc_topaz.c 2009-04-07 13:28:38.000000000 -0700
31@@ -0,0 +1,695 @@
32+/**
33+ * file lnc_topaz.c
34+ * TOPAZ I/O operations and IRQ handling
35+ *
36+ */
37+
38+/**************************************************************************
39+ *
40+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
41+ * Copyright (c) Imagination Technologies Limited, UK
42+ * All Rights Reserved.
43+ *
44+ * Permission is hereby granted, free of charge, to any person obtaining a
45+ * copy of this software and associated documentation files (the
46+ * "Software"), to deal in the Software without restriction, including
47+ * without limitation the rights to use, copy, modify, merge, publish,
48+ * distribute, sub license, and/or sell copies of the Software, and to
49+ * permit persons to whom the Software is furnished to do so, subject to
50+ * the following conditions:
51+ *
52+ * The above copyright notice and this permission notice (including the
53+ * next paragraph) shall be included in all copies or substantial portions
54+ * of the Software.
55+ *
56+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
57+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
58+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
59+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
60+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
61+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
62+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
63+ *
64+ **************************************************************************/
65+
66+/* include headers */
67+/* #define DRM_DEBUG_CODE 2 */
68+
69+#include <drm/drmP.h>
70+#include <drm/drm_os_linux.h>
71+
72+#include "psb_drv.h"
73+#include "psb_drm.h"
74+#include "lnc_topaz.h"
75+
76+#include <linux/io.h>
77+#include <linux/delay.h>
78+
79+static int drm_psb_ospmxxx = 0x0;
80+
81+/* static function define */
82+static int lnc_topaz_deliver_command(struct drm_device *dev,
83+ struct ttm_buffer_object *cmd_buffer,
84+ unsigned long cmd_offset,
85+ unsigned long cmd_size,
86+ void **topaz_cmd, uint32_t sequence,
87+ int copy_cmd);
88+static int lnc_topaz_send(struct drm_device *dev, void *cmd,
89+ unsigned long cmd_size, uint32_t sync_seq);
90+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
91+static int lnc_topaz_dequeue_send(struct drm_device *dev);
92+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
93+ unsigned long cmd_size, uint32_t sequence);
94+
95+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat)
96+{
97+ struct drm_psb_private *dev_priv =
98+ (struct drm_psb_private *)dev->dev_private;
99+ uint32_t clr_flag = lnc_topaz_queryirq(dev);
100+
101+ lnc_topaz_clearirq(dev, clr_flag);
102+
103+ /* ignore non-SYNC interrupts */
104+ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
105+ return;
106+
107+ dev_priv->topaz_current_sequence =
108+ *(uint32_t *)dev_priv->topaz_sync_addr;
109+
110+ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n",
111+ dev_priv->topaz_current_sequence,
112+ dev_priv->sequence[LNC_ENGINE_ENCODE]);
113+
114+ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
115+
116+ dev_priv->topaz_busy = 1;
117+ lnc_topaz_dequeue_send(dev);
118+}
119+
120+static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
121+ struct ttm_buffer_object *cmd_buffer,
122+ unsigned long cmd_offset, unsigned long cmd_size,
123+ struct ttm_fence_object *fence)
124+{
125+ struct drm_psb_private *dev_priv = dev->dev_private;
126+ unsigned long irq_flags;
127+ int ret = 0;
128+ void *cmd;
129+ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
130+
131+ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
132+
133+	/* # lock topaz's mutex [topaz_mutex] */
134+ mutex_lock(&dev_priv->topaz_mutex);
135+
136+ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", dev_priv->topaz_busy);
137+
138+ if (dev_priv->topaz_fw_loaded == 0) {
139+ /* #.# load fw to driver */
140+ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
141+ ret = topaz_init_fw(dev);
142+ if (ret != 0) {
143+ mutex_unlock(&dev_priv->topaz_mutex);
144+
145+ /* FIXME: find a proper return value */
146+			DRM_ERROR("TOPAZ: load /lib/firmware/topaz_fw.bin failed,"
147+ "ensure udevd is configured correctly!\n");
148+
149+ return -EFAULT;
150+ }
151+ dev_priv->topaz_fw_loaded = 1;
152+ } else {
153+ /* OSPM power state change */
154+ /* FIXME: why here? why not in the NEW_CODEC case? */
155+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX) {
156+ psb_power_up_topaz(dev);
157+ lnc_topaz_restore_mtx_state(dev);
158+ }
159+ }
160+
161+ /* # schedule watchdog */
162+ /* psb_schedule_watchdog(dev_priv); */
163+
164+ /* # spin lock irq save [msvdx_lock] */
165+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
166+	/* # if topaz needs a reset, reset it */
167+ /* # if topaz need to reset, reset it */
168+ if (dev_priv->topaz_needs_reset) {
169+ /* #.# reset it */
170+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
171+ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
172+
173+ if (lnc_topaz_reset(dev_priv)) {
174+ mutex_unlock(&dev_priv->topaz_mutex);
175+ ret = -EBUSY;
176+ DRM_ERROR("TOPAZ: reset failed.\n");
177+ return ret;
178+ }
179+
180+ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
181+
182+ /* #.# reset any related flags */
183+ dev_priv->topaz_needs_reset = 0;
184+ dev_priv->topaz_busy = 0;
185+		PSB_DEBUG_GENERAL("XXX: do we need idle flag?\n");
186+ dev_priv->topaz_start_idle = 0;
187+
188+ /* #.# init topaz */
189+ lnc_topaz_init(dev);
190+
191+ /* avoid another fw init */
192+ dev_priv->topaz_fw_loaded = 1;
193+
194+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
195+ }
196+
197+ if (!dev_priv->topaz_busy) {
198+ /* # direct map topaz command if topaz is free */
199+ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
200+ sequence);
201+
202+ dev_priv->topaz_busy = 1;
203+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
204+
205+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
206+ cmd_size, NULL, sequence, 0);
207+
208+ if (ret) {
209+ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
210+ mutex_unlock(&dev_priv->topaz_mutex);
211+ return ret;
212+ }
213+ } else {
214+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
215+ sequence);
216+ cmd = NULL;
217+
218+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
219+
220+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
221+ cmd_size, &cmd, sequence, 1);
222+ if (cmd == NULL || ret) {
223+ DRM_ERROR("TOPAZ: map command for save fialed\n");
224+ mutex_unlock(&dev_priv->topaz_mutex);
225+ return ret;
226+ }
227+
228+ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
229+ if (ret)
230+ DRM_ERROR("TOPAZ: save command failed\n");
231+ }
232+
233+ /* OSPM D0IX power state change */
234+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
235+ lnc_topaz_save_mtx_state(dev);
236+
237+ mutex_unlock(&dev_priv->topaz_mutex);
238+
239+ return ret;
240+}
241+
242+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
243+ unsigned long cmd_size, uint32_t sequence)
244+{
245+ struct drm_psb_private *dev_priv = dev->dev_private;
246+ struct lnc_topaz_cmd_queue *topaz_cmd;
247+ unsigned long irq_flags;
248+
249+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
250+ sequence);
251+
252+ topaz_cmd = drm_calloc(1, sizeof(struct lnc_topaz_cmd_queue),
253+ DRM_MEM_DRIVER);
254+ if (topaz_cmd == NULL) {
256+ DRM_ERROR("TOPAZ: out of memory\n");
257+ return -ENOMEM;
258+ }
259+
260+ topaz_cmd->cmd = cmd;
261+ topaz_cmd->cmd_size = cmd_size;
262+ topaz_cmd->sequence = sequence;
263+
264+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
265+ list_add_tail(&topaz_cmd->head, &dev_priv->topaz_queue);
266+ if (!dev_priv->topaz_busy) {
267+ /* dev_priv->topaz_busy = 1; */
268+ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
269+ lnc_topaz_dequeue_send(dev);
270+ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
271+ }
272+
273+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
274+
275+ return 0;
276+}
277+
278+
279+int lnc_cmdbuf_video(struct drm_file *priv,
280+ struct list_head *validate_list,
281+ uint32_t fence_type,
282+ struct drm_psb_cmdbuf_arg *arg,
283+ struct ttm_buffer_object *cmd_buffer,
284+ struct psb_ttm_fence_rep *fence_arg)
285+{
286+ struct drm_device *dev = priv->minor->dev;
287+ struct ttm_fence_object *fence = NULL;
288+ int ret;
289+
290+ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
291+ arg->cmdbuf_size, fence);
292+ if (ret)
293+ return ret;
294+
295+#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
296+ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
297+ validate_list, fence_arg, &fence);
298+
299+ if (fence)
300+ ttm_fence_object_unref(&fence);
301+#endif
302+
303+ mutex_lock(&cmd_buffer->mutex);
304+ if (cmd_buffer->sync_obj != NULL)
305+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
306+ mutex_unlock(&cmd_buffer->mutex);
307+
308+ return 0;
309+}
310+
311+static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
312+{
313+ struct drm_psb_private *dev_priv = dev->dev_private;
314+ uint32_t sync_cmd[3];
315+ int count = 10000;
316+#if 0
317+ struct ttm_fence_device *fdev = &dev_priv->fdev;
318+ struct ttm_fence_class_manager *fc =
319+ &fdev->fence_class[LNC_ENGINE_ENCODE];
320+ unsigned long irq_flags;
321+#endif
322+ uint32_t *sync_p = (uint32_t *)dev_priv->topaz_sync_addr;
323+
324+ /* insert a SYNC command here */
325+ dev_priv->topaz_sync_cmd_seq = (1 << 15) | dev_priv->topaz_cmd_seq++;
326+ sync_cmd[0] = MTX_CMDID_SYNC | (3 << 8) |
327+ (dev_priv->topaz_sync_cmd_seq << 16);
328+ sync_cmd[1] = dev_priv->topaz_sync_offset;
329+ sync_cmd[2] = sync_seq;
330+
331+ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x),"
332+ "sync_seq (0x%08x)\n",
333+ dev_priv->topaz_sync_cmd_seq, sync_seq);
334+
335+ lnc_mtx_send(dev_priv, sync_cmd);
336+
337+#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
338+ /* # poll topaz register for certain times */
339+ while (count && *sync_p != sync_seq) {
340+ DRM_UDELAY(100);
341+ --count;
342+ }
343+ if ((count == 0) && (*sync_p != sync_seq)) {
344+ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
345+ sync_seq, *sync_p);
346+ return -EBUSY;
347+ }
348+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
349+
350+ dev_priv->topaz_busy = 0;
351+
352+ /* XXX: check whether psb_fence_handler is suitable for topaz */
353+ dev_priv->topaz_current_sequence = *sync_p;
354+#if 0
355+ write_lock_irqsave(&fc->lock, irq_flags);
356+ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
357+ dev_priv->topaz_current_sequence,
358+ _PSB_FENCE_TYPE_EXE, 0);
359+ write_unlock_irqrestore(&fc->lock, irq_flags);
360+#endif
361+#endif
362+ return 0;
363+}
364+
365+int
366+lnc_topaz_deliver_command(struct drm_device *dev,
367+ struct ttm_buffer_object *cmd_buffer,
368+ unsigned long cmd_offset, unsigned long cmd_size,
369+ void **topaz_cmd, uint32_t sequence,
370+ int copy_cmd)
371+{
372+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
373+ struct ttm_bo_kmap_obj cmd_kmap;
374+ bool is_iomem;
375+ int ret;
376+ unsigned char *cmd_start, *tmp;
377+
378+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
379+ &cmd_kmap);
380+ if (ret) {
381+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
382+ return ret;
383+ }
384+ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
385+ &is_iomem) + cmd_page_offset;
386+
387+ if (copy_cmd) {
388+ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
389+ tmp = drm_calloc(1, cmd_size, DRM_MEM_DRIVER);
390+ if (tmp == NULL) {
391+ ret = -ENOMEM;
392+ goto out;
393+ }
394+ memcpy(tmp, cmd_start, cmd_size);
395+ *topaz_cmd = tmp;
396+ } else {
397+ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
398+ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
399+ if (ret) {
400+ DRM_ERROR("TOPAZ: commit commands failed.\n");
401+ ret = -EINVAL;
402+ }
403+ }
404+
405+out:
406+ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
407+ cmd_size, sequence, copy_cmd);
408+
409+ ttm_bo_kunmap(&cmd_kmap);
410+
411+ return ret;
412+}
413+
414+int
415+lnc_topaz_send(struct drm_device *dev, void *cmd,
416+ unsigned long cmd_size, uint32_t sync_seq)
417+{
418+ struct drm_psb_private *dev_priv = dev->dev_private;
419+ int ret = 0;
420+ unsigned char *command = (unsigned char *) cmd;
421+ struct topaz_cmd_header *cur_cmd_header;
422+ uint32_t cur_cmd_size, cur_cmd_id;
423+ uint32_t codec;
424+
425+ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
426+
427+ while (cmd_size > 0) {
428+ cur_cmd_header = (struct topaz_cmd_header *) command;
429+ cur_cmd_size = cur_cmd_header->size * 4;
430+ cur_cmd_id = cur_cmd_header->id;
431+
432+ switch (cur_cmd_id) {
433+ case MTX_CMDID_SW_NEW_CODEC:
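+ /* NB: the codec argument is read via 'cmd' (the start of
+ * the buffer), so this assumes NEW_CODEC is the first
+ * command in the buffer */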
434+ codec = *((uint32_t *) cmd + 1);
435+
436+ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
437+ codec_to_string(codec), codec);
438+ if (topaz_setup_fw(dev, codec)) {
439+ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
440+ return -EBUSY;
441+ }
442+
443+ dev_priv->topaz_cur_codec = codec;
444+ break;
445+
446+ case MTX_CMDID_SW_ENTER_LOWPOWER:
447+ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
448+ PSB_DEBUG_GENERAL("XXX: implement it\n");
449+ break;
450+
451+ case MTX_CMDID_SW_LEAVE_LOWPOWER:
452+ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
453+ PSB_DEBUG_GENERAL("XXX: implement it\n");
454+ break;
455+
456+ /* ordinary command */
457+ case MTX_CMDID_START_PIC:
458+ /* XXX: specially handle START_PIC hw command */
459+ CCB_CTRL_SET_QP(dev_priv,
460+ *(command + cur_cmd_size - 4));
461+ /* strip the QP parameter (it's a software arg) */
462+ cur_cmd_header->size--;
+ /* fall through */
463+ default:
464+ cur_cmd_header->seq = 0x7fff &
465+ dev_priv->topaz_cmd_seq++;
466+
467+ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
468+ " seq (0x%04x)\n",
469+ cmd_to_string(cur_cmd_id),
470+ cur_cmd_size, cur_cmd_header->seq);
471+ ret = lnc_mtx_send(dev_priv, command);
472+ if (ret) {
473+ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
474+ goto out;
475+ }
476+ break;
477+ }
478+
479+ command += cur_cmd_size;
480+ cmd_size -= cur_cmd_size;
481+ }
482+ ret = lnc_topaz_sync(dev, sync_seq);
483+out:
484+ return ret;
485+}
486+
487+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
488+{
489+ struct topaz_cmd_header *cur_cmd_header =
490+ (struct topaz_cmd_header *) cmd;
491+ uint32_t cmd_size = cur_cmd_header->size;
492+ uint32_t read_index, write_index;
493+ const uint32_t *cmd_pointer = (uint32_t *) cmd;
494+
495+ int ret = 0;
496+
497+ /* <msvdx does> # enable all clock */
498+
499+ write_index = dev_priv->topaz_cmd_windex;
500+ if (write_index + cmd_size + 1 > dev_priv->topaz_ccb_size) {
501+ int free_space = dev_priv->topaz_ccb_size - write_index;
502+
503+ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
504+ if (free_space > 0) {
505+ struct topaz_cmd_header pad_cmd;
506+
507+ pad_cmd.id = MTX_CMDID_NULL;
508+ pad_cmd.size = free_space;
509+ pad_cmd.seq = 0x7fff & dev_priv->topaz_cmd_seq++;
510+
511+ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
512+ " size(%d),seq (0x%04x)\n",
513+ pad_cmd.size, pad_cmd.seq);
514+
515+ TOPAZ_BEGIN_CCB(dev_priv);
516+ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
517+ TOPAZ_END_CCB(dev_priv, 1);
518+ }
519+ POLL_WB_RINDEX(dev_priv, 0);
520+ if (ret == 0)
521+ dev_priv->topaz_cmd_windex = 0;
522+ else {
523+ DRM_ERROR("TOPAZ: poll rindex timeout\n");
524+ return ret; /* HW may hang, need reset */
525+ }
526+ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
527+ }
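+ /*
+ * Worked example of the wrap above (illustrative numbers): with
+ * a 64-word CCB, windex 60 and an 8-word command, a 4-word
+ * MTX_CMDID_NULL pad fills the tail, the read index is polled
+ * back to 0 and windex wraps to 0 before the real command is
+ * written below.
+ */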
528+
529+ read_index = CCB_CTRL_RINDEX(dev_priv); /* temporarily use CCB CTRL */
530+ write_index = dev_priv->topaz_cmd_windex;
531+
532+ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
533+ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
534+ TOPAZ_BEGIN_CCB(dev_priv);
535+ while (cmd_size > 0) {
536+ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
537+ --cmd_size;
538+ }
539+ TOPAZ_END_CCB(dev_priv, 1);
540+
541+ POLL_WB_RINDEX(dev_priv, dev_priv->topaz_cmd_windex);
542+
543+#if 0
544+ DRM_UDELAY(1000);
545+ lnc_topaz_clearirq(dev,
546+ lnc_topaz_queryirq(dev));
547+ LNC_TRACEL("TOPAZ: after clear, query again\n");
548+ lnc_topaz_queryirq(dev_priv);
549+#endif
550+
551+ return ret;
552+}
553+
554+int lnc_topaz_dequeue_send(struct drm_device *dev)
555+{
556+ struct drm_psb_private *dev_priv = dev->dev_private;
557+ struct lnc_topaz_cmd_queue *topaz_cmd = NULL;
558+ int ret;
559+
560+ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
561+
562+ if (list_empty(&dev_priv->topaz_queue)) {
563+ dev_priv->topaz_busy = 0;
564+ return 0;
565+ }
566+
567+ topaz_cmd = list_first_entry(&dev_priv->topaz_queue,
568+ struct lnc_topaz_cmd_queue, head);
569+
570+ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
571+ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
572+ topaz_cmd->sequence);
573+ if (ret) {
574+ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n");
575+ ret = -EINVAL;
576+ }
577+
578+ list_del(&topaz_cmd->head);
579+ drm_free(topaz_cmd->cmd, topaz_cmd->cmd_size, DRM_MEM_DRIVER);
580+ drm_free(topaz_cmd, sizeof(struct lnc_topaz_cmd_queue),
581+ DRM_MEM_DRIVER);
582+
583+ return ret;
584+}
585+
586+void
587+lnc_topaz_lockup(struct drm_psb_private *dev_priv,
588+ int *topaz_lockup, int *topaz_idle)
589+{
590+ unsigned long irq_flags;
591+ uint32_t tmp;
592+
593+ /* if you add a printk in this function, you will get plenty of output */
594+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
595+ *topaz_lockup = 0;
596+ *topaz_idle = 1;
597+
598+ if (!dev_priv->has_topaz) {
599+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
600+ return;
601+ }
602+
603+ tmp = dev_priv->topaz_current_sequence
604+ - dev_priv->sequence[LNC_ENGINE_ENCODE];
605+ if (tmp > 0x0FFFFFFF) {
606+ if (dev_priv->topaz_current_sequence ==
607+ dev_priv->topaz_last_sequence) {
608+ *topaz_lockup = 1;
609+ } else {
610+ dev_priv->topaz_last_sequence =
611+ dev_priv->topaz_current_sequence;
612+ *topaz_idle = 0;
613+ }
614+
615+ if (dev_priv->topaz_start_idle)
616+ dev_priv->topaz_start_idle = 0;
617+ } else {
618+ if (dev_priv->topaz_needs_reset == 0) {
619+ if (dev_priv->topaz_start_idle &&
620+ (dev_priv->topaz_finished_sequence
621+ == dev_priv->topaz_current_sequence)) {
622+ if (time_after_eq(jiffies,
623+ dev_priv->topaz_idle_start_jiffies +
624+ TOPAZ_MAX_IDELTIME)) {
625+
626+ /* XXX: disable clock <msvdx does> */
627+ dev_priv->topaz_needs_reset = 1;
628+ } else
629+ *topaz_idle = 0;
630+ } else {
631+ dev_priv->topaz_start_idle = 1;
632+ dev_priv->topaz_idle_start_jiffies = jiffies;
633+ dev_priv->topaz_finished_sequence =
634+ dev_priv->topaz_current_sequence;
635+ *topaz_idle = 0;
636+ }
637+ }
638+ }
639+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
640+}
641+
642+
643+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
644+{
645+ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
646+ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
647+}
648+
649+/* power up topaz, OSPM function */
650+int psb_power_up_topaz(struct drm_device *dev)
651+{
652+ struct drm_psb_private *dev_priv =
653+ (struct drm_psb_private *)dev->dev_private;
654+
655+ if (dev_priv->topaz_power_state == LNC_TOPAZ_POWERON)
656+ return 0;
657+
658+ psb_up_island_power(dev, PSB_VIDEO_ENC_ISLAND);
659+
660+ PSB_DEBUG_GENERAL("FIXME: how to write clock state for topaz?"
661+ " so many clock\n");
662+ /* PSB_WMSVDX32(dev_priv->topaz_clk_state, MSVDX_MAN_CLK_ENABLE); */
663+
664+ PSB_DEBUG_GENERAL("FIXME restore registers or init msvdx\n");
665+
666+ PSB_DEBUG_GENERAL("FIXME: flush all mmu\n");
667+
668+ dev_priv->topaz_power_state = LNC_TOPAZ_POWERON;
669+
670+ return 0;
671+}
672+
673+int psb_power_down_topaz(struct drm_device *dev)
674+{
675+ struct drm_psb_private *dev_priv =
676+ (struct drm_psb_private *)dev->dev_private;
677+
678+ if (dev_priv->topaz_power_state == LNC_TOPAZ_POWEROFF)
679+ return 0;
680+
681+ if (dev_priv->topaz_busy) {
682+ PSB_DEBUG_GENERAL("FIXME: MSVDX is busy, should wait it\n");
683+ return -EBUSY;
684+ }
685+ PSB_DEBUG_GENERAL("FIXME: how to read clock state for topaz?"
686+ " so many clock\n");
687+ /* dev_priv->topaz_clk_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE); */
688+ PSB_DEBUG_GENERAL("FIXME: save MSVDX register\n");
689+ PSB_DEBUG_GENERAL("FIXME: save MSVDX context\n");
690+
691+ psb_down_island_power(dev, PSB_VIDEO_ENC_ISLAND);
692+
693+ dev_priv->topaz_power_state = LNC_TOPAZ_POWEROFF;
694+
695+ return 0;
696+}
697+
698+int lnc_prepare_topaz_suspend(struct drm_device *dev)
699+{
700+ /* FIXME: do we need a reset on resume?
701+ * Is an mtx restore enough for the encoder to continue running? */
702+ /* dev_priv->topaz_needs_reset = 1; */
703+
704+ /* make sure all IRQs are serviced */
705+
706+ /* make sure all fences are signaled */
707+
708+ /* save mtx context into somewhere */
709+ /* lnc_topaz_save_mtx_state(dev); */
710+
711+ return 0;
712+}
713+
714+int lnc_prepare_topaz_resume(struct drm_device *dev)
715+{
716+ /* FIXME: do we need a reset on resume?
717+ * Is an mtx restore enough for the encoder to continue running? */
718+ /* dev_priv->topaz_needs_reset = 1; */
719+
720+ /* make sure IRQs are enabled */
721+
722+ /* restore mtx context */
723+ /* lnc_topaz_restore_mtx_state(dev); */
724+
725+ return 0;
726+}
727diff -uNr a/drivers/gpu/drm/psb/lnc_topaz.h b/drivers/gpu/drm/psb/lnc_topaz.h
728--- a/drivers/gpu/drm/psb/lnc_topaz.h 1969-12-31 16:00:00.000000000 -0800
729+++ b/drivers/gpu/drm/psb/lnc_topaz.h 2009-04-07 13:28:38.000000000 -0700
730@@ -0,0 +1,803 @@
731+/**************************************************************************
732+ *
733+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
734+ * Copyright (c) Imagination Technologies Limited, UK
735+ * All Rights Reserved.
736+ *
737+ * Permission is hereby granted, free of charge, to any person obtaining a
738+ * copy of this software and associated documentation files (the
739+ * "Software"), to deal in the Software without restriction, including
740+ * without limitation the rights to use, copy, modify, merge, publish,
741+ * distribute, sub license, and/or sell copies of the Software, and to
742+ * permit persons to whom the Software is furnished to do so, subject to
743+ * the following conditions:
744+ *
745+ * The above copyright notice and this permission notice (including the
746+ * next paragraph) shall be included in all copies or substantial portions
747+ * of the Software.
748+ *
749+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
750+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
751+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
752+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
753+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
754+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
755+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
756+ *
757+ **************************************************************************/
758+
759+#ifndef _LNC_TOPAZ_H_
760+#define _LNC_TOPAZ_H_
761+
762+#include "psb_drv.h"
763+
764+#define LNC_TOPAZ_NO_IRQ 1
765+#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
766+#define ENABLE_TOPAZ_OSPM_D0IX (0x10)
767+
768+/* extern int drm_psb_ospm; */
769+
770+int psb_power_up_topaz(struct drm_device *dev);
771+int psb_power_down_topaz(struct drm_device *dev);
772+int lnc_prepare_topaz_suspend(struct drm_device *dev);
773+int lnc_prepare_topaz_resume(struct drm_device *dev);
774+
775+/*
776+ * MACROS to insert values into fields within a word. The basename of the
777+ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
778+ */
779+#define MM_WRITE32(base, offset, value) \
780+do { \
781+ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
782+ + base + offset)) = value; \
783+} while (0)
784+
785+#define MM_READ32(base, offset, pointer) \
786+do { \
787+ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
788+ + base + offset)); \
789+} while (0)
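+/* NB: MM_WRITE32/MM_READ32 (and the per-block wrappers below) pick up
+ * 'dev_priv' from the caller's scope rather than taking it as a
+ * parameter. */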
790+
791+#define F_MASK(basename) (MASK_##basename)
792+#define F_SHIFT(basename) (SHIFT_##basename)
793+
794+#define F_ENCODE(val, basename) \
795+ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
796+
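+/*
+ * Usage sketch: with MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT (0x18) and its
+ * shift (3), F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT) evaluates to
+ * ((2 << 3) & 0x18) == 0x10, i.e. the value is shifted into the
+ * field and clipped by the mask.
+ */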
797+/* MVEA macro */
798+#define MVEA_START 0x03000
799+
800+#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value)
801+#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer)
802+
803+#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
804+#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
805+#define F_ENCODE_MVEA(val, basename) \
806+ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
807+
808+/* VLC macro */
809+#define TOPAZ_VLC_START 0x05000
810+
811+/* TOPAZ macro */
812+#define TOPAZ_START 0x02000
813+
814+#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value)
815+#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer)
816+
817+#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
818+#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
819+#define F_ENCODE_TOPAZ(val,basename) \
820+ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
821+
822+/* MTX macro */
823+#define MTX_START 0x0
824+
825+#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value)
826+#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer)
827+
828+/* DMAC macro */
829+#define DMAC_START 0x0f000
830+
831+#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value)
832+#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer)
833+
834+#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
835+#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
836+#define F_ENCODE_DMAC(val,basename) \
837+ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
838+
839+
840+/* Register CR_IMG_TOPAZ_INTENAB */
841+#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
842+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
843+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
844+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
845+
846+#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
847+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
848+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
849+
850+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008
851+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
852+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
853+
854+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
855+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
856+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
857+
858+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
859+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
860+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
861+
862+#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
863+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
864+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
865+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
866+
867+#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
868+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
869+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
870+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
871+
872+#define MTX_CCBCTRL_ROFF 0
873+#define MTX_CCBCTRL_COMPLETE 4
874+#define MTX_CCBCTRL_CCBSIZE 8
875+#define MTX_CCBCTRL_QP 12
876+#define MTX_CCBCTRL_INITQP 24
877+
878+#define TOPAZ_CR_MMU_STATUS 0x001C
879+#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
880+#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
881+#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
882+
883+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
884+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
885+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
886+
887+#define TOPAZ_CR_MMU_MEM_REQ 0x0020
888+#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
889+#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
890+#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
891+
892+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
893+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
894+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
895+
896+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
897+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
898+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
899+
900+#define MTX_CR_MTX_KICK 0x0080
901+#define MASK_MTX_MTX_KICK 0x0000FFFF
902+#define SHIFT_MTX_MTX_KICK 0
903+#define REGNUM_MTX_MTX_KICK 0x0080
904+
905+#define MTX_DATA_MEM_BASE 0x82880000
906+
907+#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
908+#define MASK_MTX_MTX_MCMR 0x00000001
909+#define SHIFT_MTX_MTX_MCMR 0
910+#define REGNUM_MTX_MTX_MCMR 0x0108
911+
912+#define MASK_MTX_MTX_MCMID 0x0FF00000
913+#define SHIFT_MTX_MTX_MCMID 20
914+#define REGNUM_MTX_MTX_MCMID 0x0108
915+
916+#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
917+#define SHIFT_MTX_MTX_MCM_ADDR 2
918+#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
919+
920+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
921+#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
922+#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
923+#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
924+
925+#define MASK_MTX_MTX_MCMAI 0x00000002
926+#define SHIFT_MTX_MTX_MCMAI 1
927+#define REGNUM_MTX_MTX_MCMAI 0x0108
928+
929+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
930+
931+#define MVEA_CR_IMG_MVEA_SRST 0x0000
932+#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
933+#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
934+#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
935+
936+#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
937+#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
938+#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
939+
940+#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
941+#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
942+#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
943+
944+#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
945+#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
946+#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
947+
948+#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
949+#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
950+#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
951+
952+#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
953+#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
954+#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
955+
956+#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
957+#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
958+
959+#define TOPAZ_MTX_PC (0x00000005)
960+#define PC_START_ADDRESS (0x80900000)
961+
962+#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
963+#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
964+#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
965+#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
966+
967+#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
968+#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
969+#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
970+
971+#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
972+#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
973+#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
974+
975+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
976+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
977+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
978+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
979+
980+#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C
981+
982+#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
983+#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
984+#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
985+
986+#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
987+#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
988+#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
989+
990+#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
991+
992+#define TOPAZ_CR_MMU_CONTROL0 0x0024
993+#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800
994+#define SHIFT_TOPAZ_CR_MMU_BYPASS 11
995+#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
996+
997+#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
998+#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
999+#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
1000+#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
1001+
1002+#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
1003+#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
1004+#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
1005+
1006+#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
1007+#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
1008+#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
1009+
1010+#define TOPAZ_CR_MMU_BANK_INDEX 0x0038
1011+#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
1012+#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
1013+#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038
1014+
1015+#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
1016+#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001
1017+#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0
1018+#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010
1019+
1020+#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
1021+#define TXRPT_WAITONKICK_VALUE 0x8ade0000
1022+
1023+#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
1024+
1025+#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
1026+#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
1027+
1028+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
1029+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
1030+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
1031+
1032+#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
1033+#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
1034+
1035+#define MTX_CR_MTX_SYSC_CDMAA 0x0344
1036+#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
1037+#define SHIFT_MTX_CDMAA_ADDRESS 2
1038+#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
1039+
1040+#define MTX_CR_MTX_SYSC_CDMAC 0x0340
1041+#define MASK_MTX_LENGTH 0x0000FFFF
1042+#define SHIFT_MTX_LENGTH 0
1043+#define REGNUM_MTX_LENGTH 0x0340
1044+
1045+#define MASK_MTX_BURSTSIZE 0x07000000
1046+#define SHIFT_MTX_BURSTSIZE 24
1047+#define REGNUM_MTX_BURSTSIZE 0x0340
1048+
1049+#define MASK_MTX_RNW 0x00020000
1050+#define SHIFT_MTX_RNW 17
1051+#define REGNUM_MTX_RNW 0x0340
1052+
1053+#define MASK_MTX_ENABLE 0x00010000
1054+#define SHIFT_MTX_ENABLE 16
1055+#define REGNUM_MTX_ENABLE 0x0340
1056+
1061+#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
1062+#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
1063+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
1064+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
1065+
1066+#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
1067+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
1068+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
1069+
1070+#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
1071+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
1072+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
1073+
1074+#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
1075+#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
1076+#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
1077+#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
1078+
1079+#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
1080+#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
1081+#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
1082+
1083+#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
1084+#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
1085+#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
1086+
1087+#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
1088+#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
1089+#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
1090+
1091+#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
1092+#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
1093+#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
1094+#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040
1095+
1096+#define MTX_CR_MTX_SYSC_CDMAT 0x0350
1097+#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
1098+#define SHIFT_MTX_TRANSFERDATA 0
1099+#define REGNUM_MTX_TRANSFERDATA 0x0350
1100+
1101+#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
1102+#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
1103+#define SHIFT_IMG_SOC_TRANSFER_FIN 17
1104+#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
1105+
1106+#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
1107+#define MASK_IMG_SOC_CNT 0x0000FFFF
1108+#define SHIFT_IMG_SOC_CNT 0
1109+#define REGNUM_IMG_SOC_CNT 0x0004
1110+
1111+#define MASK_IMG_SOC_EN 0x00010000
1112+#define SHIFT_IMG_SOC_EN 16
1113+#define REGNUM_IMG_SOC_EN 0x0004
1114+
1115+#define MASK_IMG_SOC_LIST_EN 0x00040000
1116+#define SHIFT_IMG_SOC_LIST_EN 18
1117+#define REGNUM_IMG_SOC_LIST_EN 0x0004
1118+
1119+#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
1120+#define MASK_IMG_SOC_PER_HOLD 0x0000007F
1121+#define SHIFT_IMG_SOC_PER_HOLD 0
1122+#define REGNUM_IMG_SOC_PER_HOLD 0x0018
1123+
1124+#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
1125+#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
1126+#define SHIFT_IMG_SOC_START_ADDRESS 0
1127+#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
1128+
1129+#define MASK_IMG_SOC_BSWAP 0x40000000
1130+#define SHIFT_IMG_SOC_BSWAP 30
1131+#define REGNUM_IMG_SOC_BSWAP 0x0004
1132+
1133+#define MASK_IMG_SOC_PW 0x18000000
1134+#define SHIFT_IMG_SOC_PW 27
1135+#define REGNUM_IMG_SOC_PW 0x0004
1136+
1137+#define MASK_IMG_SOC_DIR 0x04000000
1138+#define SHIFT_IMG_SOC_DIR 26
1139+#define REGNUM_IMG_SOC_DIR 0x0004
1140+
1141+#define MASK_IMG_SOC_PI 0x03000000
1142+#define SHIFT_IMG_SOC_PI 24
1143+#define REGNUM_IMG_SOC_PI 0x0004
1144+#define IMG_SOC_PI_1 0x00000002
1145+#define IMG_SOC_PI_2 0x00000001
1146+#define IMG_SOC_PI_4 0x00000000
1147+
1148+#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
1149+#define SHIFT_IMG_SOC_TRANSFER_IEN 29
1150+#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
1151+
1152+#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
1153+ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
1154+ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
1155+ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
1156+ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
1157+ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
1158+
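+/*
+ * Illustrative use (not taken from real call sites): a 32-bit,
+ * non-swapped transfer of n words might be programmed as
+ * DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT, 0,
+ * IMG_SOC_PI_1, n), with the enable bit (MASK_IMG_SOC_EN) set when
+ * the transfer is started.
+ */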
1159+#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
1160+#define MASK_IMG_SOC_EXT_SA 0x0000000F
1161+#define SHIFT_IMG_SOC_EXT_SA 0
1162+#define REGNUM_IMG_SOC_EXT_SA 0x0008
1163+
1164+#define MASK_IMG_SOC_ACC_DEL 0xE0000000
1165+#define SHIFT_IMG_SOC_ACC_DEL 29
1166+#define REGNUM_IMG_SOC_ACC_DEL 0x0008
1167+
1168+#define MASK_IMG_SOC_INCR 0x08000000
1169+#define SHIFT_IMG_SOC_INCR 27
1170+#define REGNUM_IMG_SOC_INCR 0x0008
1171+
1172+#define MASK_IMG_SOC_BURST 0x07000000
1173+#define SHIFT_IMG_SOC_BURST 24
1174+#define REGNUM_IMG_SOC_BURST 0x0008
1175+
1176+#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
1177+((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
1178+(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
1179+(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
1180+
1181+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
1182+#define MASK_IMG_SOC_ADDR 0x007FFFFF
1183+#define SHIFT_IMG_SOC_ADDR 0
1184+#define REGNUM_IMG_SOC_ADDR 0x0014
1185+
1186+/* **************** DMAC define **************** */
1187+enum DMAC_eBSwap {
1188+ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
1189+ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
1190+};
1191+
1192+enum DMAC_ePW {
1193+ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
1194+ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
1195+ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
1196+};
1197+
1198+enum DMAC_eAccDel {
1199+ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
1200+ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
1201+ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
1202+ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
1203+ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
1204+ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
1205+ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
1206+ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
1207+};
1208+
1209+enum DMAC_eBurst {
1210+ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
1211+ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
1212+ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
1213+ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
1214+ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
1215+ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
1216+ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
1217+ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
1218+};
1219+
1220+/* commands for topaz, shared with user space driver */
1221+enum drm_lnc_topaz_cmd {
1222+ MTX_CMDID_NULL = 0,
1223+ MTX_CMDID_DO_HEADER = 1,
1224+ MTX_CMDID_ENCODE_SLICE = 2,
1225+ MTX_CMDID_WRITEREG = 3,
1226+ MTX_CMDID_START_PIC = 4,
1227+ MTX_CMDID_END_PIC = 5,
1228+ MTX_CMDID_SYNC = 6,
1229+ MTX_CMDID_ENCODE_ONE_ROW = 7,
1230+ MTX_CMDID_FLUSH = 8,
1231+ MTX_CMDID_SW_LEAVE_LOWPOWER = 0xfc,
1232+ MTX_CMDID_SW_ENTER_LOWPOWER = 0xfe,
1233+ MTX_CMDID_SW_NEW_CODEC = 0xff
1234+};
1235+
1236+/* codecs topaz supports, shared with user space driver */
1237+enum drm_lnc_topaz_codec {
1238+ IMG_CODEC_JPEG = 0,
1239+ IMG_CODEC_H264_NO_RC,
1240+ IMG_CODEC_H264_VBR,
1241+ IMG_CODEC_H264_CBR,
1242+ IMG_CODEC_H263_NO_RC,
1243+ IMG_CODEC_H263_VBR,
1244+ IMG_CODEC_H263_CBR,
1245+ IMG_CODEC_MPEG4_NO_RC,
1246+ IMG_CODEC_MPEG4_VBR,
1247+ IMG_CODEC_MPEG4_CBR,
1248+ IMG_CODEC_NUM
1249+};
1250+
1251+/* XXX: this is a copy of the msvdx cmd queue. should it be changed? */
1252+struct lnc_topaz_cmd_queue {
1253+ struct list_head head;
1254+ void *cmd;
1255+ unsigned long cmd_size;
1256+ uint32_t sequence;
1257+};
1258+
1259+
1260+struct topaz_cmd_header {
1261+ union {
1262+ struct {
1263+ unsigned long id:8;
1264+ unsigned long size:8;
1265+ unsigned long seq:16;
1266+ };
1267+ uint32_t val;
1268+ };
1269+};
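+/*
+ * Layout note: on this little-endian target a command word packs id
+ * into bits 0-7, size (in words) into bits 8-15 and seq into bits
+ * 16-31, matching the way lnc_topaz_sync() builds
+ * MTX_CMDID_SYNC | (3 << 8) | (seq << 16).
+ */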
1270+
1271+/* external function declare */
1272+/* lnc_topazinit.c */
1273+int lnc_topaz_init(struct drm_device *dev);
1274+int lnc_topaz_uninit(struct drm_device *dev);
1275+int lnc_topaz_reset(struct drm_psb_private *dev_priv);
1276+int topaz_init_fw(struct drm_device *dev);
1277+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec);
1278+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
1279+ uint32_t addr, uint32_t value,
1280+ uint32_t enable);
1281+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
1282+ uint32_t byte_addr, uint32_t val);
1283+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
1284+ uint32_t byte_addr);
1285+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
1286+ uint32_t addr);
1287+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
1288+ uint32_t val);
1289+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
1290+int lnc_topaz_save_mtx_state(struct drm_device *dev);
1291+int lnc_topaz_restore_mtx_state(struct drm_device *dev);
1292+
1293+/* lnc_topaz.c */
1294+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat);
1295+
1296+int lnc_cmdbuf_video(struct drm_file *priv,
1297+ struct list_head *validate_list,
1298+ uint32_t fence_type,
1299+ struct drm_psb_cmdbuf_arg *arg,
1300+ struct ttm_buffer_object *cmd_buffer,
1301+ struct psb_ttm_fence_rep *fence_arg);
1302+
1303+void lnc_topaz_flush_cmd_queue(struct drm_device *dev);
1304+void lnc_topaz_lockup(struct drm_psb_private *dev_priv, int *topaz_lockup,
1305+ int *topaz_idle);
1306+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count);
1307+
1308+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
1309+
1310+/* macros to get/set CCB control data */
1311+#define WB_CCB_CTRL_RINDEX(dev_priv) (*((uint32_t *)dev_priv->topaz_ccb_wb))
1312+#define WB_CCB_CTRL_SEQ(dev_priv) (*((uint32_t *)dev_priv->topaz_ccb_wb+1))
1313+
1314+#define POLL_WB_RINDEX(dev_priv,value) \
1315+do { \
1316+ int i; \
1317+ for (i = 0; i < 10000; i++) { \
1318+ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \
1319+ break; \
1320+ else \
1321+ DRM_UDELAY(100); \
1322+ } \
1323+ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \
1324+ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \
1325+ ret = -EBUSY; \
1326+ } \
1327+} while (0)
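+/* NB: POLL_WB_RINDEX and POLL_WB_SEQ (below) expect an 'int ret' in the
+ * enclosing scope and set it to -EBUSY on timeout; lnc_mtx_send() relies
+ * on this. */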
1328+
1329+#define POLL_WB_SEQ(dev_priv,value) \
1330+do { \
1331+ int i; \
1332+ for (i = 0; i < 10000; i++) { \
1333+ if (WB_CCB_CTRL_SEQ(dev_priv) == value) \
1334+ break; \
1335+ else \
1336+ DRM_UDELAY(1000); \
1337+ } \
1338+ if (WB_CCB_CTRL_SEQ(dev_priv) != value) { \
1339+ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%04x(mtx) vs 0x%04x\n",\
1340+ WB_CCB_CTRL_SEQ(dev_priv), value); \
1341+ ret = -EBUSY; \
1342+ } \
1343+} while (0)
1344+
1345+#define CCB_CTRL_RINDEX(dev_priv) \
1346+ topaz_read_mtx_mem(dev_priv, \
1347+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_ROFF)
1348+
1353+#define CCB_CTRL_QP(dev_priv) \
1354+ topaz_read_mtx_mem(dev_priv, \
1355+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_QP)
1356+
1357+#define CCB_CTRL_SEQ(dev_priv) \
1358+ topaz_read_mtx_mem(dev_priv, \
1359+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_COMPLETE)
1360+
1361+#define CCB_CTRL_FRAMESKIP(dev_priv) \
1362+ topaz_read_mtx_mem(dev_priv, \
1363+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_FRAMESKIP)
1364+
1365+#define CCB_CTRL_SET_QP(dev_priv, qp) \
1366+ topaz_write_mtx_mem(dev_priv, \
1367+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_QP, qp)
1368+
1369+#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \
1370+ topaz_write_mtx_mem(dev_priv, \
1371+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP, qp)
1372+
1373+
1374+#define TOPAZ_BEGIN_CCB(dev_priv) \
1375+ topaz_write_mtx_mem_multiple_setup(dev_priv, \
1376+ dev_priv->topaz_ccb_buffer_addr + \
1377+ dev_priv->topaz_cmd_windex * 4)
1378+
1379+#define TOPAZ_OUT_CCB(dev_priv, cmd) \
1380+do { \
1381+ topaz_write_mtx_mem_multiple(dev_priv, cmd); \
1382+ dev_priv->topaz_cmd_windex++; \
1383+} while (0)
1384+
1385+#define TOPAZ_END_CCB(dev_priv, kick_count) \
1386+ topaz_mtx_kick(dev_priv, kick_count)
1387+
1388+static inline const char *cmd_to_string(int cmd_id)
1389+{
1390+ switch (cmd_id) {
1391+ case MTX_CMDID_START_PIC:
1392+ return "MTX_CMDID_START_PIC";
1393+ case MTX_CMDID_END_PIC:
1394+ return "MTX_CMDID_END_PIC";
1395+ case MTX_CMDID_DO_HEADER:
1396+ return "MTX_CMDID_DO_HEADER";
1397+ case MTX_CMDID_ENCODE_SLICE:
1398+ return "MTX_CMDID_ENCODE_SLICE";
1399+ case MTX_CMDID_SYNC:
1400+ return "MTX_CMDID_SYNC";
1401+
1402+ default:
1403+ return "Undefined command";
1404+
1405+ }
1406+}
1407+
1408+static inline const char *codec_to_string(int codec)
1409+{
1410+ switch (codec) {
1411+ case IMG_CODEC_H264_NO_RC:
1412+ return "H264_NO_RC";
1413+ case IMG_CODEC_H264_VBR:
1414+ return "H264_VBR";
1415+ case IMG_CODEC_H264_CBR:
1416+ return "H264_CBR";
1417+ case IMG_CODEC_H263_NO_RC:
1418+ return "H263_NO_RC";
1419+ case IMG_CODEC_H263_VBR:
1420+ return "H263_VBR";
1421+ case IMG_CODEC_H263_CBR:
1422+ return "H263_CBR";
1423+ case IMG_CODEC_MPEG4_NO_RC:
1424+ return "MPEG4_NO_RC";
1425+ case IMG_CODEC_MPEG4_VBR:
1426+ return "MPEG4_VBR";
1427+ case IMG_CODEC_MPEG4_CBR:
1428+ return "MPEG4_CBR";
1429+ default:
1430+ return "Undefined codec";
1431+ }
1432+}
1433+
1434+static inline void lnc_topaz_enableirq(struct drm_device *dev)
1435+{
1436+ struct drm_psb_private *dev_priv = dev->dev_private;
1437+ uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG;
1438+
1439+ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
1440+
1441+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
1442+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
1443+ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
1444+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
1445+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
1446+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT));
1447+
1448+ PSB_WVDC32(ier, PSB_INT_ENABLE_R); /* essential */
1449+}
1450+
1451+static inline void lnc_topaz_disableirq(struct drm_device *dev)
1452+{
1453+
1454+ struct drm_psb_private *dev_priv = dev->dev_private;
1455+ uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG);
1456+
1457+ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
1458+
1459+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
1460+ PSB_WVDC32(ier, PSB_INT_ENABLE_R); /* essential */
1461+}
1462+
1463+static inline void lnc_topaz_clearirq(struct drm_device *dev,
1464+ uint32_t clear_topaz)
1465+{
1466+ struct drm_psb_private *dev_priv = dev->dev_private;
1467+
1468+ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
1469+ if (clear_topaz != 0)
1470+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz);
1471+
1472+ PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R);
1473+}
1474+
1475+static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev)
1476+{
1477+ struct drm_psb_private *dev_priv = dev->dev_private;
1478+ uint32_t val, iir, clear = 0;
1479+
1480+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val);
1481+ iir = PSB_RVDC32(PSB_INT_IDENTITY_R);
1482+
1483+ if ((val == 0) && (iir == 0)) {/* no interrupt */
1484+ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
1485+ return 0;
1486+ }
1487+
1488+ PSB_DEBUG_IRQ("TOPAZ: TOPAZ_INTSTAT=0x%08x, IIR=0x%08x\n", val, iir);
1489+
1490+ if (val & (1<<31))
1491+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
1492+ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
1493+ CCB_CTRL_SEQ(dev_priv),
1494+ dev_priv->sequence[LNC_ENGINE_ENCODE],
1495+ *(uint32_t *)dev_priv->topaz_sync_addr);
1496+ else
1497+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
1498+ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
1499+ CCB_CTRL_SEQ(dev_priv),
1500+ dev_priv->sequence[LNC_ENGINE_ENCODE],
1501+ *(uint32_t *)dev_priv->topaz_sync_addr);
1502+
1503+ if (val & 0x8) {
1504+ uint32_t mmu_status, mmu_req;
1505+
1506+ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status);
1507+ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req);
1508+
1509+ PSB_DEBUG_IRQ("TOPAZ: detected a page fault interrupt, "
1510+ "address=0x%08x,mem req=0x%08x\n",
1511+ mmu_status, mmu_req);
1512+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
1513+ }
1514+
1515+ if (val & 0x4) {
1516+ PSB_DEBUG_IRQ("TOPAZ: detected an MTX_HALT interrupt\n");
1517+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
1518+ }
1519+
1520+ if (val & 0x2) {
1521+ PSB_DEBUG_IRQ("TOPAZ: detected an MTX interrupt\n");
1522+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
1523+ }
1524+
1525+ if (val & 0x1) {
1526+ PSB_DEBUG_IRQ("TOPAZ: detected an MVEA interrupt\n");
1527+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
1528+ }
1529+
1530+ return clear;
1531+}
1532+
1533+#endif /* _LNC_TOPAZ_H_ */
1534diff -uNr a/drivers/gpu/drm/psb/lnc_topazinit.c b/drivers/gpu/drm/psb/lnc_topazinit.c
1535--- a/drivers/gpu/drm/psb/lnc_topazinit.c 1969-12-31 16:00:00.000000000 -0800
1536+++ b/drivers/gpu/drm/psb/lnc_topazinit.c 2009-04-07 13:28:38.000000000 -0700
1537@@ -0,0 +1,1896 @@
1538+/**
1539+ * file lnc_topazinit.c
1540+ * TOPAZ initialization and mtx-firmware upload
1541+ *
1542+ */
1543+
1544+/**************************************************************************
1545+ *
1546+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
1547+ * Copyright (c) Imagination Technologies Limited, UK
1548+ * All Rights Reserved.
1549+ *
1550+ * Permission is hereby granted, free of charge, to any person obtaining a
1551+ * copy of this software and associated documentation files (the
1552+ * "Software"), to deal in the Software without restriction, including
1553+ * without limitation the rights to use, copy, modify, merge, publish,
1554+ * distribute, sub license, and/or sell copies of the Software, and to
1555+ * permit persons to whom the Software is furnished to do so, subject to
1556+ * the following conditions:
1557+ *
1558+ * The above copyright notice and this permission notice (including the
1559+ * next paragraph) shall be included in all copies or substantial portions
1560+ * of the Software.
1561+ *
1562+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1563+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1564+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
1565+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
1566+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
1567+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
1568+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
1569+ *
1570+ **************************************************************************/
1571+
1572+/* NOTE: (READ BEFORE REFINING THIS CODE)
1573+ * 1. The FIRMWARE's SIZE is measured in bytes, but we have to pass the
1574+ * size measured in words to the DMAC.
1578+ */
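+/*
+ * e.g. a 1024-byte firmware section is handed to the DMAC as
+ * 1024 / 4 = 256 32-bit words.
+ */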
1579+
1580+/* include headers */
1581+
1582+/* #define DRM_DEBUG_CODE 2 */
1583+
1584+#include <linux/firmware.h>
1585+
1586+#include <drm/drmP.h>
1587+#include <drm/drm.h>
1588+
1589+#include "psb_drv.h"
1590+#include "lnc_topaz.h"
1591+
1592+/* WARNING: this define is very important */
1593+#define RAM_SIZE (1024 * 24)
1594+
1595+static int drm_psb_ospmxxx = 0x10;
1596+
1597+/* register default values
1598+ * THIS HEADER IS ONLY INCLUDED ONCE */
1599+static unsigned long topaz_default_regs[183][3] = {
1600+ {MVEA_START, 0x00000000, 0x00000000},
1601+ {MVEA_START, 0x00000004, 0x00000400},
1602+ {MVEA_START, 0x00000008, 0x00000000},
1603+ {MVEA_START, 0x0000000C, 0x00000000},
1604+ {MVEA_START, 0x00000010, 0x00000000},
1605+ {MVEA_START, 0x00000014, 0x00000000},
1606+ {MVEA_START, 0x00000018, 0x00000000},
1607+ {MVEA_START, 0x0000001C, 0x00000000},
1608+ {MVEA_START, 0x00000020, 0x00000120},
1609+ {MVEA_START, 0x00000024, 0x00000000},
1610+ {MVEA_START, 0x00000028, 0x00000000},
1611+ {MVEA_START, 0x00000100, 0x00000000},
1612+ {MVEA_START, 0x00000104, 0x00000000},
1613+ {MVEA_START, 0x00000108, 0x00000000},
1614+ {MVEA_START, 0x0000010C, 0x00000000},
1615+ {MVEA_START, 0x0000011C, 0x00000001},
1616+ {MVEA_START, 0x0000012C, 0x00000000},
1617+ {MVEA_START, 0x00000180, 0x00000000},
1618+ {MVEA_START, 0x00000184, 0x00000000},
1619+ {MVEA_START, 0x00000188, 0x00000000},
1620+ {MVEA_START, 0x0000018C, 0x00000000},
1621+ {MVEA_START, 0x00000190, 0x00000000},
1622+ {MVEA_START, 0x00000194, 0x00000000},
1623+ {MVEA_START, 0x00000198, 0x00000000},
1624+ {MVEA_START, 0x0000019C, 0x00000000},
1625+ {MVEA_START, 0x000001A0, 0x00000000},
1626+ {MVEA_START, 0x000001A4, 0x00000000},
1627+ {MVEA_START, 0x000001A8, 0x00000000},
1628+ {MVEA_START, 0x000001AC, 0x00000000},
1629+ {MVEA_START, 0x000001B0, 0x00000000},
1630+ {MVEA_START, 0x000001B4, 0x00000000},
1631+ {MVEA_START, 0x000001B8, 0x00000000},
1632+ {MVEA_START, 0x000001BC, 0x00000000},
1633+ {MVEA_START, 0x000001F8, 0x00000000},
1634+ {MVEA_START, 0x000001FC, 0x00000000},
1635+ {MVEA_START, 0x00000200, 0x00000000},
1636+ {MVEA_START, 0x00000204, 0x00000000},
1637+ {MVEA_START, 0x00000208, 0x00000000},
1638+ {MVEA_START, 0x0000020C, 0x00000000},
1639+ {MVEA_START, 0x00000210, 0x00000000},
1640+ {MVEA_START, 0x00000220, 0x00000001},
1641+ {MVEA_START, 0x00000224, 0x0000001F},
1642+ {MVEA_START, 0x00000228, 0x00000100},
1643+ {MVEA_START, 0x0000022C, 0x00001F00},
1644+ {MVEA_START, 0x00000230, 0x00000101},
1645+ {MVEA_START, 0x00000234, 0x00001F1F},
1646+ {MVEA_START, 0x00000238, 0x00001F01},
1647+ {MVEA_START, 0x0000023C, 0x0000011F},
1648+ {MVEA_START, 0x00000240, 0x00000200},
1649+ {MVEA_START, 0x00000244, 0x00001E00},
1650+ {MVEA_START, 0x00000248, 0x00000002},
1651+ {MVEA_START, 0x0000024C, 0x0000001E},
1652+ {MVEA_START, 0x00000250, 0x00000003},
1653+ {MVEA_START, 0x00000254, 0x0000001D},
1654+ {MVEA_START, 0x00000258, 0x00001F02},
1655+ {MVEA_START, 0x0000025C, 0x00000102},
1656+ {MVEA_START, 0x00000260, 0x0000011E},
1657+ {MVEA_START, 0x00000264, 0x00000000},
1658+ {MVEA_START, 0x00000268, 0x00000000},
1659+ {MVEA_START, 0x0000026C, 0x00000000},
1660+ {MVEA_START, 0x00000270, 0x00000000},
1661+ {MVEA_START, 0x00000274, 0x00000000},
1662+ {MVEA_START, 0x00000278, 0x00000000},
1663+ {MVEA_START, 0x00000280, 0x00008000},
1664+ {MVEA_START, 0x00000284, 0x00000000},
1665+ {MVEA_START, 0x00000288, 0x00000000},
1666+ {MVEA_START, 0x0000028C, 0x00000000},
1667+ {MVEA_START, 0x00000314, 0x00000000},
1668+ {MVEA_START, 0x00000318, 0x00000000},
1669+ {MVEA_START, 0x0000031C, 0x00000000},
1670+ {MVEA_START, 0x00000320, 0x00000000},
1671+ {MVEA_START, 0x00000324, 0x00000000},
1672+ {MVEA_START, 0x00000348, 0x00000000},
1673+ {MVEA_START, 0x00000380, 0x00000000},
1674+ {MVEA_START, 0x00000384, 0x00000000},
1675+ {MVEA_START, 0x00000388, 0x00000000},
1676+ {MVEA_START, 0x0000038C, 0x00000000},
1677+ {MVEA_START, 0x00000390, 0x00000000},
1678+ {MVEA_START, 0x00000394, 0x00000000},
1679+ {MVEA_START, 0x00000398, 0x00000000},
1680+ {MVEA_START, 0x0000039C, 0x00000000},
1681+ {MVEA_START, 0x000003A0, 0x00000000},
1682+ {MVEA_START, 0x000003A4, 0x00000000},
1683+ {MVEA_START, 0x000003A8, 0x00000000},
1684+ {MVEA_START, 0x000003B0, 0x00000000},
1685+ {MVEA_START, 0x000003B4, 0x00000000},
1686+ {MVEA_START, 0x000003B8, 0x00000000},
1687+ {MVEA_START, 0x000003BC, 0x00000000},
1688+ {MVEA_START, 0x000003D4, 0x00000000},
1689+ {MVEA_START, 0x000003D8, 0x00000000},
1690+ {MVEA_START, 0x000003DC, 0x00000000},
1691+ {MVEA_START, 0x000003E0, 0x00000000},
1692+ {MVEA_START, 0x000003E4, 0x00000000},
1693+ {MVEA_START, 0x000003EC, 0x00000000},
1694+ {MVEA_START, 0x000002D0, 0x00000000},
1695+ {MVEA_START, 0x000002D4, 0x00000000},
1696+ {MVEA_START, 0x000002D8, 0x00000000},
1697+ {MVEA_START, 0x000002DC, 0x00000000},
1698+ {MVEA_START, 0x000002E0, 0x00000000},
1699+ {MVEA_START, 0x000002E4, 0x00000000},
1700+ {MVEA_START, 0x000002E8, 0x00000000},
1701+ {MVEA_START, 0x000002EC, 0x00000000},
1702+ {MVEA_START, 0x000002F0, 0x00000000},
1703+ {MVEA_START, 0x000002F4, 0x00000000},
1704+ {MVEA_START, 0x000002F8, 0x00000000},
1705+ {MVEA_START, 0x000002FC, 0x00000000},
1706+ {MVEA_START, 0x00000300, 0x00000000},
1707+ {MVEA_START, 0x00000304, 0x00000000},
1708+ {MVEA_START, 0x00000308, 0x00000000},
1709+ {MVEA_START, 0x0000030C, 0x00000000},
1710+ {MVEA_START, 0x00000290, 0x00000000},
1711+ {MVEA_START, 0x00000294, 0x00000000},
1712+ {MVEA_START, 0x00000298, 0x00000000},
1713+ {MVEA_START, 0x0000029C, 0x00000000},
1714+ {MVEA_START, 0x000002A0, 0x00000000},
1715+ {MVEA_START, 0x000002A4, 0x00000000},
1716+ {MVEA_START, 0x000002A8, 0x00000000},
1717+ {MVEA_START, 0x000002AC, 0x00000000},
1718+ {MVEA_START, 0x000002B0, 0x00000000},
1719+ {MVEA_START, 0x000002B4, 0x00000000},
1720+ {MVEA_START, 0x000002B8, 0x00000000},
1721+ {MVEA_START, 0x000002BC, 0x00000000},
1722+ {MVEA_START, 0x000002C0, 0x00000000},
1723+ {MVEA_START, 0x000002C4, 0x00000000},
1724+ {MVEA_START, 0x000002C8, 0x00000000},
1725+ {MVEA_START, 0x000002CC, 0x00000000},
1726+ {MVEA_START, 0x00000080, 0x00000000},
1727+ {MVEA_START, 0x00000084, 0x80705700},
1728+ {MVEA_START, 0x00000088, 0x00000000},
1729+ {MVEA_START, 0x0000008C, 0x00000000},
1730+ {MVEA_START, 0x00000090, 0x00000000},
1731+ {MVEA_START, 0x00000094, 0x00000000},
1732+ {MVEA_START, 0x00000098, 0x00000000},
1733+ {MVEA_START, 0x0000009C, 0x00000000},
1734+ {MVEA_START, 0x000000A0, 0x00000000},
1735+ {MVEA_START, 0x000000A4, 0x00000000},
1736+ {MVEA_START, 0x000000A8, 0x00000000},
1737+ {MVEA_START, 0x000000AC, 0x00000000},
1738+ {MVEA_START, 0x000000B0, 0x00000000},
1739+ {MVEA_START, 0x000000B4, 0x00000000},
1740+ {MVEA_START, 0x000000B8, 0x00000000},
1741+ {MVEA_START, 0x000000BC, 0x00000000},
1742+ {MVEA_START, 0x000000C0, 0x00000000},
1743+ {MVEA_START, 0x000000C4, 0x00000000},
1744+ {MVEA_START, 0x000000C8, 0x00000000},
1745+ {MVEA_START, 0x000000CC, 0x00000000},
1746+ {MVEA_START, 0x000000D0, 0x00000000},
1747+ {MVEA_START, 0x000000D4, 0x00000000},
1748+ {MVEA_START, 0x000000D8, 0x00000000},
1749+ {MVEA_START, 0x000000DC, 0x00000000},
1750+ {MVEA_START, 0x000000E0, 0x00000000},
1751+ {MVEA_START, 0x000000E4, 0x00000000},
1752+ {MVEA_START, 0x000000E8, 0x00000000},
1753+ {MVEA_START, 0x000000EC, 0x00000000},
1754+ {MVEA_START, 0x000000F0, 0x00000000},
1755+ {MVEA_START, 0x000000F4, 0x00000000},
1756+ {MVEA_START, 0x000000F8, 0x00000000},
1757+ {MVEA_START, 0x000000FC, 0x00000000},
1758+ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
1759+ {TOPAZ_VLC_START, 0x00000004, 0x00000000},
1760+ {TOPAZ_VLC_START, 0x00000008, 0x00000000},
1761+ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
1762+ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
1763+ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
1764+ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
1765+ {TOPAZ_VLC_START, 0x00000020, 0x00000000},
1766+ {TOPAZ_VLC_START, 0x00000024, 0x00000000},
1767+ {TOPAZ_VLC_START, 0x0000002C, 0x00000000},
1768+ {TOPAZ_VLC_START, 0x00000034, 0x00000000},
1769+ {TOPAZ_VLC_START, 0x00000038, 0x00000000},
1770+ {TOPAZ_VLC_START, 0x0000003C, 0x00000000},
1771+ {TOPAZ_VLC_START, 0x00000040, 0x00000000},
1772+ {TOPAZ_VLC_START, 0x00000044, 0x00000000},
1773+ {TOPAZ_VLC_START, 0x00000048, 0x00000000},
1774+ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
1775+ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
1776+ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
1777+ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
1778+ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
1779+ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
1780+ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
1781+ {TOPAZ_VLC_START, 0x00000068, 0x00000000},
1782+ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
1783+};
1784+
1785+#define FIRMWARE_NAME "topaz_fw.bin"
1786+
1787+/* structure definitions */
1788+/* firmware file's info header */
1789+struct topaz_fwinfo {
1790+ unsigned int ver:16;
1791+ unsigned int codec:16;
1792+
1793+ unsigned int text_size;
1794+ unsigned int data_size;
1795+ unsigned int data_location;
1796+};
1797+
1798+/* firmware data array define */
1799+struct topaz_codec_fw {
1800+ uint32_t ver;
1801+ uint32_t codec;
1802+
1803+ uint32_t text_size;
1804+ uint32_t data_size;
1805+ uint32_t data_location;
1806+
1807+ struct ttm_buffer_object *text;
1808+ struct ttm_buffer_object *data;
1809+};
1810+
1811+
1812+
1813+/* static function declarations */
1814+static int topaz_upload_fw(struct drm_device *dev,
1815+ enum drm_lnc_topaz_codec codec);
1816+static inline void topaz_set_default_regs(struct drm_psb_private
1817+ *dev_priv);
1818+
1819+#define UPLOAD_FW_BY_DMA 1
1820+
1821+#if UPLOAD_FW_BY_DMA
1822+static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
1823+ uint32_t channel, uint32_t src_phy_addr,
1824+ uint32_t offset, uint32_t dst_addr,
1825+ uint32_t byte_num, uint32_t is_increment,
1826+ uint32_t is_write);
1827+#else
1828+static void topaz_mtx_upload_by_register(struct drm_device *dev,
1829+ uint32_t mtx_mem, uint32_t addr,
1830+ uint32_t size,
1831+ struct ttm_buffer_object *buf);
1832+#endif
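+
+/*
+ * Two alternative firmware upload paths are compiled here: the default
+ * DMA path streams each section through DMAC channel 0 into the MTX
+ * CDMA port, while the register path writes the image one word at a
+ * time through the MTX RAM access port.
+ */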
1833+
1834+static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
1835+ uint32_t reg, const uint32_t val);
1836+static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
1837+ uint32_t reg, uint32_t *ret_val);
1838+static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
1839+static void release_mtx_control_from_dash(struct drm_psb_private
1840+ *dev_priv);
1841+static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
1842+static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr,
1843+ uint32_t size);
1844+static void mtx_dma_write(struct drm_device *dev);
1845+
1846+
1847+#if 0 /* DEBUG_FUNCTION */
1848+static int topaz_test_null(struct drm_device *dev, uint32_t seq);
1849+static void topaz_mmu_flush(struct drm_device *dev);
1850+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
1851+#endif
1852+#if 0
1853+static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
1854+ uint32_t *data);
1855+static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
1856+ uint32_t *data);
1857+#endif
1858+
1859+/* global variable definitions */
1860+struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM];
1861+
1862+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
1863+ uint32_t byte_addr)
1864+{
1865+ uint32_t read_val;
1866+ uint32_t reg, bank_size, ram_bank_size, ram_id;
1867+
1868+ TOPAZ_READ32(0x3c, &reg);
1869+ reg = 0x0a0a0606; /* hard-coded MTX debug value; the register read above is discarded */
1870+ bank_size = (reg & 0xF0000) >> 16;
1871+
1872+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
1873+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
1874+
1875+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
1876+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
1877+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
1878+ F_ENCODE(1, MTX_MTX_MCMR));
1879+
1880+ /* poll until the RAM access request completes */
1881+ topaz_wait_for_register(dev_priv,
1882+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
1883+ 1, 1);
1884+
1885+ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
1886+
1887+ return read_val;
1888+}
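+
+/*
+ * Worked example of the bank arithmetic above, using the hard-coded
+ * debug value 0x0a0a0606: bank_size = (0x0a0a0606 & 0xF0000) >> 16 =
+ * 0xA, so ram_bank_size = 1 << (0xA + 2) = 4096 bytes, and RAM id
+ * 0x18 + n selects the n-th 4KB bank above MTX_DATA_MEM_BASE.
+ */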
1889+
1890+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
1891+ uint32_t byte_addr, uint32_t val)
1892+{
1893+ uint32_t ram_id = 0;
1894+ uint32_t reg, bank_size, ram_bank_size;
1895+
1896+ TOPAZ_READ32(0x3c, &reg);
1897+
1898+ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
1899+ reg = 0x0a0a0606;
1900+
1901+ bank_size = (reg & 0xF0000) >> 16;
1902+
1903+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
1904+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
1905+
1906+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
1907+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
1908+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
1909+
1910+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
1911+
1912+ /* poll until the RAM access request completes */
1913+ topaz_wait_for_register(dev_priv,
1914+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
1915+ 1, 1);
1916+
1917+ return;
1918+}
1919+
1920+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
1921+ uint32_t byte_addr)
1922+{
1923+ uint32_t ram_id = 0;
1924+ uint32_t reg, bank_size, ram_bank_size;
1925+
1926+ TOPAZ_READ32(0x3c, &reg);
1927+
1928+ reg = 0x0a0a0606;
1929+
1930+ bank_size = (reg & 0xF0000) >> 16;
1931+
1932+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
1933+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
1934+
1935+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
1936+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
1937+ F_ENCODE(1, MTX_MTX_MCMAI) |
1938+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
1939+}
1940+
1941+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
1942+ uint32_t val)
1943+{
1944+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
1945+}
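+
+/*
+ * Minimal usage sketch of the multiple-write pair above (ccb_addr and
+ * friends are placeholder names); this is the pattern topaz_mmu_test()
+ * uses to emit a 3-DW SYNC command:
+ *
+ *   topaz_write_mtx_mem_multiple_setup(dev_priv, ccb_addr);
+ *   topaz_write_mtx_mem_multiple(dev_priv, cmd_word);
+ *   topaz_write_mtx_mem_multiple(dev_priv, sync_offset);
+ *   topaz_write_mtx_mem_multiple(dev_priv, sync_value);
+ *
+ * The MCMAI bit set in the setup call makes the MCM address
+ * auto-increment after every data-transfer write.
+ */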
1946+
1947+
1948+int topaz_wait_for_register(struct drm_psb_private *dev_priv,
1949+ uint32_t addr, uint32_t value, uint32_t mask)
1950+{
1951+ uint32_t tmp;
1952+ uint32_t count = 10000;
1953+
1954+ /* # poll the topaz register a bounded number of times */
1955+ while (count) {
1956+ /* #.# read */
1957+ MM_READ32(addr, 0, &tmp);
1958+
1959+ if (value == (tmp & mask))
1960+ return 0;
1961+
1962+ /* #.# delay and loop */
1963+ DRM_UDELAY(100);
1964+ --count;
1965+ }
1966+
1967+ /* # the wait timed out; return an error to indicate failure */
1968+ /* XXX: the DDK test suite uses a timeout of 10000 polls */
1969+
1970+ DRM_ERROR("TOPAZ: timed out polling addr(0x%x) for value(0x%08x), "
1971+ "actual 0x%08x (0x%08x & 0x%08x)\n",
1972+ addr, value, tmp & mask, tmp, mask);
1973+
1974+ return -EBUSY;
1975+
1976+}
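+
+/*
+ * Note: with count = 10000 and DRM_UDELAY(100) per iteration, the poll
+ * above busy-waits for at most roughly one second before giving up.
+ */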
1977+
1978+
1979+void lnc_topaz_reset_wq(struct work_struct *work)
1980+{
1981+ struct drm_psb_private *dev_priv =
1982+ container_of(work, struct drm_psb_private, topaz_watchdog_wq);
1983+
1984+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
1985+ unsigned long irq_flags;
1986+
1987+ mutex_lock(&dev_priv->topaz_mutex);
1988+ dev_priv->topaz_needs_reset = 1;
1989+ dev_priv->topaz_current_sequence++;
1990+ PSB_DEBUG_GENERAL
1991+ ("MSVDXFENCE: incremented topaz_current_sequence to :%d\n",
1992+ dev_priv->topaz_current_sequence);
1993+
1994+ psb_fence_error(scheduler->dev, LNC_ENGINE_ENCODE,
1995+ dev_priv->topaz_current_sequence, _PSB_FENCE_TYPE_EXE,
1996+ DRM_CMD_HANG);
1997+
1998+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
1999+ dev_priv->timer_available = 1;
2000+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
2001+
2002+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
2003+
2004+ /* psb_msvdx_flush_cmd_queue(scheduler->dev); */
2005+
2006+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
2007+
2008+ psb_schedule_watchdog(dev_priv);
2009+ mutex_unlock(&dev_priv->topaz_mutex);
2010+}
2011+
2012+
2013+/* this function finishes the first part of initialization; the rest
2014+ * should be done in topaz_setup_fw
2015+ */
2016+int lnc_topaz_init(struct drm_device *dev)
2017+{
2018+ struct drm_psb_private *dev_priv = dev->dev_private;
2019+ struct ttm_bo_device *bdev = &dev_priv->bdev;
2020+ uint32_t core_id, core_rev;
2021+ void *topaz_bo_virt;
2022+ int ret = 0;
2023+ bool is_iomem;
2024+
2025+ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n");
2026+
2027+ /* # initialize topaz command queueing [msvdx_queue] */
2028+ INIT_LIST_HEAD(&dev_priv->topaz_queue);
2029+ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
2030+ mutex_init(&dev_priv->topaz_mutex);
2031+ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
2032+ spin_lock_init(&dev_priv->topaz_lock);
2033+
2034+ /* # topaz status init. [msvdx_busy] */
2035+ dev_priv->topaz_busy = 0;
2036+ dev_priv->topaz_cmd_seq = 0;
2037+ dev_priv->topaz_fw_loaded = 0;
2038+ dev_priv->topaz_cur_codec = 0;
2039+ dev_priv->topaz_mtx_data_mem = NULL;
2040+ dev_priv->cur_mtx_data_size = 0;
2041+
2042+ dev_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE,
2043+ GFP_KERNEL);
2044+ if (dev_priv->topaz_mtx_reg_state == NULL) {
2045+ DRM_ERROR("TOPAZ: failed to allocate space "
2046+ "for mtx register\n");
2047+ return -1;
2048+ }
2049+
2050+ /* # allocate the write-back structure; we may only need 32 + 4 = 36 DWs */
2051+ if (!dev_priv->topaz_bo) {
2052+ ret = ttm_buffer_object_create(bdev, 4096,
2053+ ttm_bo_type_kernel,
2054+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2055+ 0, 0, 0, NULL, &(dev_priv->topaz_bo));
2056+ if (ret != 0) {
2057+ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
2058+ return ret;
2059+ }
2060+ }
2061+
2062+ ret = ttm_bo_kmap(dev_priv->topaz_bo, 0,
2063+ dev_priv->topaz_bo->num_pages,
2064+ &dev_priv->topaz_bo_kmap);
2065+ if (ret) {
2066+ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
2067+ ttm_bo_unref(&dev_priv->topaz_bo);
2068+ return ret;
2069+ }
2070+
2071+ topaz_bo_virt = ttm_kmap_obj_virtual(&dev_priv->topaz_bo_kmap,
2072+ &is_iomem);
2073+ dev_priv->topaz_ccb_wb = (void *) topaz_bo_virt;
2074+ dev_priv->topaz_wb_offset = dev_priv->topaz_bo->offset;
2075+ dev_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt + 2048);
2076+ dev_priv->topaz_sync_offset = dev_priv->topaz_wb_offset + 2048;
2077+ PSB_DEBUG_GENERAL("TOPAZ: allocated BO for WriteBack and SYNC command,"
2078+ "WB offset=0x%08x, SYNC offset=0x%08x\n",
2079+ dev_priv->topaz_wb_offset, dev_priv->topaz_sync_offset);
2080+
2081+ *(dev_priv->topaz_sync_addr) = ~0; /* reset sync seq */
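+
+ /*
+ * Resulting layout of the single 4096-byte topaz_bo allocated above:
+ * bytes 0 .. 2047 hold the CCB write-back (WB) area, bytes
+ * 2048 .. 4095 the SYNC command write-back word.
+ */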
2082+
2083+ /* # reset topaz */
2084+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2085+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2086+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2087+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2088+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2089+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2090+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2091+
2092+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2093+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2094+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2095+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2096+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2097+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2098+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2099+
2100+ /* # set up MMU */
2101+ topaz_mmu_hwsetup(dev_priv);
2102+
2103+ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading to the place"
2104+ "when receiving user space commands\n");
2105+
2106+#if 0 /* can't load FW here */
2107+ /* #.# load fw to driver */
2108+ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
2109+ ret = topaz_init_fw(dev);
2110+ if (ret != 0)
2111+ return -1;
2112+
2113+ topaz_setup_fw(dev, FW_H264_NO_RC);/* just for test */
2114+#endif
2115+ /* <msvdx does> # minimal clock */
2116+
2117+ /* <msvdx does> # return 0 */
2118+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
2119+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
2120+
2121+ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
2122+ core_id, core_rev);
2123+
2124+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
2125+ psb_power_down_topaz(dev);
2126+
2127+ return 0;
2128+}
2129+
2130+int lnc_topaz_uninit(struct drm_device *dev)
2131+{
2132+ struct drm_psb_private *dev_priv = dev->dev_private;
2133+ /* int n;*/
2134+
2135+ /* flush MMU */
2136+ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
2137+ /* topaz_mmu_flushcache (dev_priv); */
2138+
2139+ /* # reset TOPAZ chip */
2140+ lnc_topaz_reset(dev_priv);
2141+
2142+ /* release resources */
2143+ /* # release write back memory */
2144+ dev_priv->topaz_ccb_wb = NULL;
2145+
2147+
2148+ /* release mtx register save space */
2149+ kfree(dev_priv->topaz_mtx_reg_state);
2150+
2151+ /* release mtx data memory save space */
2152+ if (dev_priv->topaz_mtx_data_mem)
2153+ ttm_bo_unref(&dev_priv->topaz_mtx_data_mem);
2154+
2155+ /* # release firmware */
2156+ /* XXX: but this handling should be reconsidered */
2157+ /* XXX: there is no JPEG firmware */
2158+#if 0 /* FIX WHEN FIRMWARE IS LOADED */
2159+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2160+ ttm_bo_unref(&topaz_fw[n].text);
2161+ ttm_bo_unref(&topaz_fw[n].data);
2162+ }
2163+#endif
2164+ ttm_bo_kunmap(&dev_priv->topaz_bo_kmap);
2165+ ttm_bo_unref(&dev_priv->topaz_bo);
2166+
2167+ return 0;
2168+}
2169+
2170+int lnc_topaz_reset(struct drm_psb_private *dev_priv)
2171+{
2172+ return 0;
2173+#if 0
2174+ int ret = 0;
2175+ /* # software reset */
2176+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
2177+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
2178+
2179+ /* # call lnc_wait_for_register, wait reset finished */
2180+ topaz_wait_for_register(dev_priv,
2181+ MTX_START + MTX_CORE_CR_MTX_ENABLE_OFFSET,
2182+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK,
2183+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
2184+
2185+ /* # if reset finished */
2186+ PSB_DEBUG_GENERAL("XXX: add condition judgement for topaz wait...\n");
2187+ /* #.# clear interrupt enable flag */
2188+
2189+ /* #.# clear pending interrupt flags */
2190+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2191+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX) |
2192+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT) |
2193+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA) |
2194+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT)
2195+ );
2196+ /* # destroy topaz mutex in drm_psb_private [msvdx_mutex] */
2197+
2198+ /* # return register value which is waited above */
2199+
2200+ PSB_DEBUG_GENERAL("called\n");
2201+ return 0;
2202+#endif
2203+}
2204+
2205+/* read firmware bin file and load all data into driver */
2206+int topaz_init_fw(struct drm_device *dev)
2207+{
2208+ struct drm_psb_private *dev_priv = dev->dev_private;
2209+ struct ttm_bo_device *bdev = &dev_priv->bdev;
2210+ const struct firmware *raw = NULL;
2211+ unsigned char *ptr;
2212+ int ret = 0;
2213+ int n;
2214+ struct topaz_fwinfo *cur_fw;
2215+ int cur_size;
2216+ struct topaz_codec_fw *cur_codec;
2217+ struct ttm_buffer_object **cur_drm_obj;
2218+ struct ttm_bo_kmap_obj tmp_kmap;
2219+ bool is_iomem;
2220+
2221+ dev_priv->stored_initial_qp = 0;
2222+
2223+ /* # get firmware */
2224+ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
2225+ if (ret != 0) {
2226+ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
2227+ return ret;
2228+ }
2229+
2230+ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
2231+
2232+ if (raw && (raw->size < sizeof(struct topaz_fwinfo))) {
2233+ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
2234+ goto out;
2235+ }
2236+
2237+ ptr = (unsigned char *) raw->data;
2238+
2239+ if (!ptr) {
2240+ DRM_ERROR("TOPAZ: failed to load firmware.\n");
2241+ goto out;
2242+ }
2243+
2244+ /* # load fw from file */
2245+ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
2246+ cur_fw = NULL;
2247+ /* didn't use the first element */
2248+ for (n = 1; n < IMG_CODEC_NUM; ++n) {
2249+ cur_fw = (struct topaz_fwinfo *) ptr;
2250+
2251+ cur_codec = &topaz_fw[cur_fw->codec];
2252+ cur_codec->ver = cur_fw->ver;
2253+ cur_codec->codec = cur_fw->codec;
2254+ cur_codec->text_size = cur_fw->text_size;
2255+ cur_codec->data_size = cur_fw->data_size;
2256+ cur_codec->data_location = cur_fw->data_location;
2257+
2258+ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
2259+ codec_to_string(cur_fw->codec));
2260+
2261+ /* #.# handle text section */
2262+ cur_codec->text = NULL;
2263+ ptr += sizeof(struct topaz_fwinfo);
2264+ cur_drm_obj = &cur_codec->text;
2265+ cur_size = cur_fw->text_size;
2266+
2267+ /* #.# allocate a TTM buffer object for fw storage */
2268+ ret = ttm_buffer_object_create(bdev, cur_size,
2269+ ttm_bo_type_kernel,
2270+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2271+ 0, 0, 0, NULL, cur_drm_obj);
2272+ if (ret) {
2273+ DRM_ERROR("Failed to allocate firmware.\n");
2274+ goto out;
2275+ }
2276+
2277+ /* #.# fill DRM object with firmware data */
2278+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
2279+ &tmp_kmap);
2280+ if (ret) {
2281+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
2282+ ttm_bo_unref(cur_drm_obj);
2283+ *cur_drm_obj = NULL;
2284+ goto out;
2285+ }
2286+
2287+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
2288+ cur_size);
2289+
2290+ ttm_bo_kunmap(&tmp_kmap);
2291+
2292+ /* #.# handle data section */
2293+ cur_codec->data = NULL;
2294+ ptr += cur_fw->text_size;
2295+ cur_drm_obj = &cur_codec->data;
2296+ cur_size = cur_fw->data_size;
2297+
2298+ /* #.# allocate a TTM buffer object for fw storage */
2299+ ret = ttm_buffer_object_create(bdev, cur_size,
2300+ ttm_bo_type_kernel,
2301+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
2302+ 0, 0, 0, NULL, cur_drm_obj);
2303+ if (ret) {
2304+ DRM_ERROR("Failed to allocate firmware.\n");
2305+ goto out;
2306+ }
2307+
2308+ /* #.# fill DRM object with firmware data */
2309+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
2310+ &tmp_kmap);
2311+ if (ret) {
2312+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
2313+ ttm_bo_unref(cur_drm_obj);
2314+ *cur_drm_obj = NULL;
2315+ goto out;
2316+ }
2317+
2318+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
2319+ cur_size);
2320+
2321+ ttm_bo_kunmap(&tmp_kmap);
2322+
2323+ /* #.# validate firmware */
2324+
2325+ /* #.# update ptr */
2326+ ptr += cur_fw->data_size;
2327+ }
2328+
2329+ release_firmware(raw);
2330+
2331+ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
2332+
2333+ return 0;
2334+
2335+out:
2336+ if (raw) {
2337+ PSB_DEBUG_GENERAL("release firmware....\n");
2338+ release_firmware(raw);
2339+ }
2340+
2341+ return -1;
2342+}
2343+
2344+/* setup fw when start a new context */
2345+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
2346+{
2347+ struct drm_psb_private *dev_priv = dev->dev_private;
2348+ struct ttm_bo_device *bdev = &dev_priv->bdev;
2349+ uint32_t mem_size = RAM_SIZE; /* follow DDK */
2350+ uint32_t verify_pc;
2351+ int ret;
2352+
2353+#if 0
2354+ if (codec == dev_priv->topaz_current_codec) {
2355+ LNC_TRACEL("TOPAZ: reuse previous codec\n");
2356+ return 0;
2357+ }
2358+#endif
2359+
2360+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
2361+ psb_power_up_topaz(dev);
2362+
2363+ /* XXX: need to reset topaz? */
2364+ PSB_DEBUG_GENERAL("XXX: should reset topaz on context change?\n");
2365+
2366+ /* XXX: interrupts shouldn't be enabled here; this function
2367+ * is called with interrupts enabled, but we have no
2368+ * choice since we must call setup_fw manually */
2369+
2370+ /* # upload firmware, clear interrupts and start the firmware
2371+ * -- from hostutils.c in the DDK test suite */
2372+
2373+ /* # reset MVEA */
2374+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2375+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2376+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2377+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2378+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2379+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2380+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2381+
2382+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
2383+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
2384+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
2385+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
2386+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
2387+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
2388+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
2389+
2390+
2391+ topaz_mmu_hwsetup(dev_priv);
2392+
2393+#if !LNC_TOPAZ_NO_IRQ
2394+ lnc_topaz_disableirq(dev);
2395+#endif
2396+
2397+ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
2398+
2399+ topaz_set_default_regs(dev_priv);
2400+
2401+ /* # reset mtx */
2402+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
2403+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
2404+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
2405+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
2406+
2407+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
2408+
2409+ /* # upload fw by drm */
2410+ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
2411+
2412+ topaz_upload_fw(dev, codec);
2413+
2414+ /* allocate the space for context save & restore if needed */
2415+ if (dev_priv->topaz_mtx_data_mem == NULL) {
2416+ ret = ttm_buffer_object_create(bdev,
2417+ dev_priv->cur_mtx_data_size * 4,
2418+ ttm_bo_type_kernel,
2419+ DRM_PSB_FLAG_MEM_MMU |
2420+ TTM_PL_FLAG_NO_EVICT,
2421+ 0, 0, 0, NULL,
2422+ &dev_priv->topaz_mtx_data_mem);
2423+ if (ret) {
2424+ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
2425+ "mtx data save\n");
2426+ return -1;
2427+ }
2428+ }
2429+ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
2430+
2431+ /* XXX: In power save mode, we need to save the complete data memory
2432+ * and restore it. MTX_FWIF.c records the data size */
2433+ PSB_DEBUG_GENERAL("TOPAZ: in power save mode, need to save memory?\n");
2434+
2435+ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n");
2436+ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS);
2437+
2438+ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n");
2439+
2440+ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc);
2441+
2442+ /* enabling auto clock gating is essential for this driver */
2443+ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
2444+ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
2445+ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE));
2446+ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
2447+ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
2448+ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
2449+ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
2450+ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE));
2451+
2452+ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n",
2453+ verify_pc, PC_START_ADDRESS);
2454+
2455+ /* # turn on MTX */
2456+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2457+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
2458+
2459+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
2460+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
2461+
2462+ /* # poll on the interrupt which the firmware will generate */
2463+ topaz_wait_for_register(dev_priv,
2464+ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
2465+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
2466+ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
2467+
2468+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
2469+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
2470+
2471+ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
2472+
2473+ /* # get ccb buffer addr -- file hostutils.c */
2474+ dev_priv->topaz_ccb_buffer_addr =
2475+ topaz_read_mtx_mem(dev_priv,
2476+ MTX_DATA_MEM_BASE + mem_size - 4);
2477+ dev_priv->topaz_ccb_ctrl_addr =
2478+ topaz_read_mtx_mem(dev_priv,
2479+ MTX_DATA_MEM_BASE + mem_size - 8);
2480+ dev_priv->topaz_ccb_size =
2481+ topaz_read_mtx_mem(dev_priv,
2482+ dev_priv->topaz_ccb_ctrl_addr +
2483+ MTX_CCBCTRL_CCBSIZE);
2484+
2485+ dev_priv->topaz_cmd_windex = 0;
2486+
2487+ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n",
2488+ dev_priv->topaz_ccb_buffer_addr,
2489+ dev_priv->topaz_ccb_ctrl_addr,
2490+ dev_priv->topaz_ccb_size);
2491+
2492+ /* # write back the initial QP Value */
2493+ topaz_write_mtx_mem(dev_priv,
2494+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP,
2495+ dev_priv->stored_initial_qp);
2496+
2497+ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n",
2498+ dev_priv->topaz_wb_offset);
2499+ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12,
2500+ dev_priv->topaz_wb_offset);
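+
+ /*
+ * Per the DDK convention used above, the last DWs of MTX data
+ * memory (MTX_DATA_MEM_BASE + RAM_SIZE - n) hold, in order:
+ * n = 4 the CCB buffer address, n = 8 the CCB control structure
+ * address, and n = 12 the write-back (WB) offset written here.
+ */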
2501+
2502+ /* this kick is essential for mtx.... */
2503+ *((uint32_t *) dev_priv->topaz_ccb_wb) = 0x01020304;
2504+ topaz_mtx_kick(dev_priv, 1);
2505+ DRM_UDELAY(1000);
2506+ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory,"
2507+ " and here it is 0x%08x\n",
2508+ *((uint32_t *) dev_priv->topaz_ccb_wb));
2509+
2510+ *((uint32_t *) dev_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */
2511+ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
2512+
2513+ /* XXX: is there any need to record next cmd num??
2514+ * we use the fence sequence number to record it
2515+ */
2516+ dev_priv->topaz_busy = 0;
2517+ dev_priv->topaz_cmd_seq = 0;
2518+
2519+#if !LNC_TOPAZ_NO_IRQ
2520+ lnc_topaz_enableirq(dev);
2521+#endif
2522+
2523+#if 0
2524+ /* test sync command */
2525+ {
2526+ uint32_t sync_cmd[3];
2527+ uint32_t *sync_p = (uint32_t *)dev_priv->topaz_sync_addr;
2528+ int count = 10000;
2529+
2530+ /* insert a SYNC command here */
2531+ sync_cmd[0] = MTX_CMDID_SYNC | (3 << 8) |
2532+ (0x5b << 16);
2533+ sync_cmd[1] = dev_priv->topaz_sync_offset;
2534+ sync_cmd[2] = 0x3c;
2535+
2536+ TOPAZ_BEGIN_CCB(dev_priv);
2537+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
2538+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
2539+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
2540+ TOPAZ_END_CCB(dev_priv, 1);
2541+
2542+ while (count && *sync_p != 0x3c) {
2543+ DRM_UDELAY(1000);
2544+ --count;
2545+ }
2546+ if ((count == 0) && (*sync_p != 0x3c)) {
2547+ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
2548+ "actual 0x%08x\n",
2549+ 0x3c, *sync_p);
2550+ }
2551+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
2552+ }
2553+#endif
2554+#if 0
2555+ topaz_mmu_flush(dev);
2556+
2557+ topaz_test_null(dev, 0xe1e1);
2558+ topaz_test_null(dev, 0xe2e2);
2559+ topaz_mmu_test(dev, 0x12345678);
2560+ topaz_test_null(dev, 0xe3e3);
2561+ topaz_mmu_test(dev, 0x8764321);
2562+
2563+ topaz_test_null(dev, 0xe4e4);
2564+ topaz_test_null(dev, 0xf3f3);
2565+#endif
2566+
2567+ return 0;
2568+}
2569+
2570+#if UPLOAD_FW_BY_DMA
2571+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
2572+{
2573+ struct drm_psb_private *dev_priv = dev->dev_private;
2574+ const struct topaz_codec_fw *cur_codec_fw;
2575+ uint32_t text_size, data_size;
2576+ uint32_t data_location;
2577+ uint32_t cur_mtx_data_size;
2578+
2579+ /* # refer HLD document */
2580+
2581+ /* # MTX reset */
2582+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
2583+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
2584+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
2585+
2586+ DRM_UDELAY(6000);
2587+
2588+ /* # upload the firmware by DMA */
2589+ cur_codec_fw = &topaz_fw[codec];
2590+
2591+ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d"
2592+ " data location(%d)\n", codec_to_string(codec), codec,
2593+ cur_codec_fw->text_size, cur_codec_fw->data_size,
2594+ cur_codec_fw->data_location);
2595+
2596+ /* # upload text */
2597+ text_size = cur_codec_fw->text_size / 4;
2598+
2599+ /* setup the MTX to start receiving data:
2600+ use a register for the transfer which will point to the source
2601+ (MTX_CR_MTX_SYSC_CDMAT) */
2602+ /* #.# fill the dst addr */
2603+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
2604+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
2605+ F_ENCODE(2, MTX_BURSTSIZE) |
2606+ F_ENCODE(0, MTX_RNW) |
2607+ F_ENCODE(1, MTX_ENABLE) |
2608+ F_ENCODE(text_size, MTX_LENGTH));
2609+
2610+ /* #.# set DMAC access to host memory via BIF */
2611+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
2612+
2613+ /* #.# transfer the codec */
2614+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
2615+ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
2616+
2617+ /* #.# wait dma finish */
2618+ topaz_wait_for_register(dev_priv,
2619+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
2620+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
2621+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
2622+
2623+ /* #.# clear interrupt */
2624+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
2625+
2626+ /* # return access to topaz core */
2627+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
2628+
2629+ /* # upload data */
2630+ data_size = cur_codec_fw->data_size / 4;
2631+ data_location = cur_codec_fw->data_location;
2632+
2633+ /* #.# fill the dst addr */
2634+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
2635+ 0x80900000 + data_location - 0x82880000);
2636+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
2637+ F_ENCODE(2, MTX_BURSTSIZE) |
2638+ F_ENCODE(0, MTX_RNW) |
2639+ F_ENCODE(1, MTX_ENABLE) |
2640+ F_ENCODE(data_size, MTX_LENGTH));
2641+
2642+ /* #.# set DMAC access to host memory via BIF */
2643+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
2644+
2645+ /* #.# transfer the codec */
2646+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
2647+ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
2648+
2649+ /* #.# wait dma finish */
2650+ topaz_wait_for_register(dev_priv,
2651+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
2652+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
2653+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
2654+
2655+ /* #.# clear interrupt */
2656+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
2657+
2658+ /* # return access to topaz core */
2659+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
2660+
2661+ /* record this codec's mtx data size for
2662+ * context save & restore */
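+ /*
+ * data_location is an address in the MTX's own view of its data
+ * memory (base 0x82880000 here, by the look of the arithmetic), so
+ * this yields the byte count from the data section to the end of RAM.
+ */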
2663+ cur_mtx_data_size = RAM_SIZE - (data_location - 0x82880000);
2664+ if (dev_priv->cur_mtx_data_size != cur_mtx_data_size) {
2665+ dev_priv->cur_mtx_data_size = cur_mtx_data_size;
2666+ if (dev_priv->topaz_mtx_data_mem)
2667+ ttm_bo_unref(&dev_priv->topaz_mtx_data_mem);
2668+ dev_priv->topaz_mtx_data_mem = NULL;
2669+ }
2670+
2671+ return 0;
2672+}
2673+
2674+#else
2675+
2676+void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
2677+ uint32_t addr, uint32_t size,
2678+ struct ttm_buffer_object *buf)
2679+{
2680+ struct drm_psb_private *dev_priv = dev->dev_private;
2681+ uint32_t *buf_p;
2682+ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
2683+ uint32_t cur_ram_id, ram_addr, ram_id;
2684+ int map_ret, lp;
2685+ struct ttm_bo_kmap_obj bo_kmap;
2686+ bool is_iomem;
2687+ uint32_t cur_addr;
2688+
2689+ get_mtx_control_from_dash(dev_priv);
2690+
2691+ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
2692+ if (map_ret) {
2693+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
2694+ return;
2695+ }
2696+ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
2697+
2698+
2699+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
2700+ debug_reg = 0x0a0a0606;
2701+ bank_size = (debug_reg & 0xf0000) >> 16;
2702+ bank_ram_size = 1 << (bank_size + 2);
2703+
2704+ bank_count = (debug_reg & 0xf00) >> 8;
2705+
2706+ topaz_wait_for_register(dev_priv,
2707+ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
2708+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
2709+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
2710+
2711+ cur_ram_id = -1;
2712+ cur_addr = addr;
2713+ for (lp = 0; lp < size / 4; ++lp) {
2714+ ram_id = mtx_mem + (cur_addr / bank_ram_size);
2715+
2716+ if (cur_ram_id != ram_id) {
2717+ ram_addr = cur_addr >> 2;
2718+
2719+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
2720+ F_ENCODE(ram_id, MTX_MTX_MCMID) |
2721+ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
2722+ F_ENCODE(1, MTX_MTX_MCMAI));
2723+
2724+ cur_ram_id = ram_id;
2725+ }
2726+ cur_addr += 4;
2727+
2728+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
2729+ *(buf_p + lp));
2730+
2731+ topaz_wait_for_register(dev_priv,
2732+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
2733+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
2734+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
2735+ }
2736+
2737+ ttm_bo_kunmap(&bo_kmap);
2738+
2739+ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
2740+ return;
2741+}
2742+
2743+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
2744+{
2745+ struct drm_psb_private *dev_priv = dev->dev_private;
2746+ const struct topaz_codec_fw *cur_codec_fw;
2747+ uint32_t text_size, data_size;
2748+ uint32_t data_location;
2749+
2750+ /* # refer HLD document */
2751+ /* # MTX reset */
2752+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
2753+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
2754+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
2755+
2756+ DRM_UDELAY(6000);
2757+
2758+ /* # upload the firmware by DMA */
2759+ cur_codec_fw = &topaz_fw[codec];
2760+
2761+ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
2762+ " data location(0x%08x)\n", codec_to_string(codec),
2763+ cur_codec_fw->text_size, cur_codec_fw->data_size,
2764+ cur_codec_fw->data_location);
2765+
2766+ /* # upload text */
2767+ text_size = cur_codec_fw->text_size;
2768+
2769+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
2770+ PC_START_ADDRESS - MTX_MEMORY_BASE,
2771+ text_size, cur_codec_fw->text);
2772+
2773+ /* # upload data */
2774+ data_size = cur_codec_fw->data_size;
2775+ data_location = cur_codec_fw->data_location;
2776+
2777+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
2778+ data_location - 0x82880000, data_size,
2779+ cur_codec_fw->data);
2780+
2781+ return 0;
2782+}
2783+
2784+#endif /* UPLOAD_FW_BY_DMA */
2785+
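+/*
+ * DMAC channel programming sequence used below, condensed from the
+ * function body:
+ *   1. COUNT checked: no transfer or list may already be enabled
+ *   2. PER_HOLD <- 0 (no hold-off period)
+ *   3. IRQ_STAT <- 0 (clear stale interrupts)
+ *   4. SETUP <- host address (src_phy_addr + offset)
+ *   5. COUNT <- width/direction/length with TRANSFER_IEN set
+ *   6. PERIPH <- access delay, increment mode, burst size
+ *   7. PERIPHERAL_ADDR <- soc_addr
+ *   8. COUNT rewritten with EN set, kicking off the transfer
+ */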
2786+void
2787+topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
2788+ uint32_t src_phy_addr, uint32_t offset,
2789+ uint32_t soc_addr, uint32_t byte_num,
2790+ uint32_t is_increment, uint32_t is_write)
2791+{
2792+ uint32_t dmac_count;
2793+ uint32_t irq_stat;
2794+ uint32_t count;
2795+
2796+ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
2797+ /* # check that no transfer is currently in progress and no
2798+ interrupts are outstanding ?? (why do we care about interrupts?) */
2799+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
2800+ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
2801+ DRM_ERROR("TOPAZ: there is tranfer in progress\n");
2802+
2803+ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
2804+
2805+ /* no hold off period */
2806+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
2807+ /* clear previous interrupts */
2808+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
2809+ /* check irq status */
2810+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
2811+ /* assert(0 == irq_stat); */
2812+ if (0 != irq_stat)
2813+ DRM_ERROR("TOPAZ: there is hold up\n");
2814+
2815+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
2816+ (src_phy_addr + offset));
2817+ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
2818+ is_write, DMAC_PWIDTH_32_BIT, byte_num);
2819+ /* generate an interrupt at the end of transfer */
2820+ count |= MASK_IMG_SOC_TRANSFER_IEN;
2821+ count |= F_ENCODE(is_write, IMG_SOC_DIR);
2822+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
2823+
2824+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
2825+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
2826+ is_increment, DMAC_BURST_2));
2827+
2828+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
2829+
2830+ /* Finally, rewrite the count register with
2831+ * the enable bit set to kick off the transfer
2832+ */
2833+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
2834+
2835+ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
2836+
2837+ return;
2838+}
2839+
2840+void topaz_set_default_regs(struct drm_psb_private *dev_priv)
2841+{
2842+ int n;
2843+ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
2844+
2845+ for (n = 0; n < count; n++)
2846+ MM_WRITE32(topaz_default_regs[n][0],
2847+ topaz_default_regs[n][1],
2848+ topaz_default_regs[n][2]);
2849+
2850+}
2851+
2852+void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
2853+ const uint32_t val)
2854+{
2855+ uint32_t tmp;
2856+ get_mtx_control_from_dash(dev_priv);
2857+
2858+ /* put data into MTX_RW_DATA */
2859+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
2860+
2861+ /* request a write */
2862+ tmp = reg &
2863+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
2864+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
2865+
2866+ /* wait for operation finished */
2867+ topaz_wait_for_register(dev_priv,
2868+ MTX_START +
2869+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
2870+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
2871+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
2872+
2873+ release_mtx_control_from_dash(dev_priv);
2874+}
2875+
2876+void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
2877+ uint32_t *ret_val)
2878+{
2879+ uint32_t tmp;
2880+
2881+ get_mtx_control_from_dash(dev_priv);
2882+
2883+ /* request a write */
2884+ tmp = (reg &
2885+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
2886+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
2887+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
2888+
2889+ /* wait for operation finished */
2890+ topaz_wait_for_register(dev_priv,
2891+ MTX_START +
2892+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
2893+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
2894+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
2895+
2896+ /* read */
2897+ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
2898+ ret_val);
2899+
2900+ release_mtx_control_from_dash(dev_priv);
2901+}
2902+
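+/*
+ * Dash/host arbitration for the MTX RAM access port: the host requests
+ * it by writing IS_SLAVE plus GPIO_OUT = 2 to the debug register, then
+ * spins until the two busy bits (mask 0x18) clear, and saves the RAM
+ * access control register so release_mtx_control_from_dash() can
+ * restore it when done.
+ */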
2903+void get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
2904+{
2905+ int debug_reg_slave_val;
2906+
2907+ /* GetMTXControlFromDash */
2908+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
2909+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
2910+ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT));
2911+ do {
2912+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
2913+ &debug_reg_slave_val);
2914+ } while ((debug_reg_slave_val & 0x18) != 0);
2915+
2916+ /* save access control */
2917+ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
2918+ &dev_priv->topaz_dash_access_ctrl);
2919+}
2920+
2921+void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
2922+{
2923+ /* restore access control */
2924+ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
2925+ dev_priv->topaz_dash_access_ctrl);
2926+
2927+ /* release bus */
2928+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
2929+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE));
2930+}
2931+
2932+void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
2933+{
2934+ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
2935+
2936+ /* bypass all request while MMU is being configured */
2937+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
2938+ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS));
2939+
2940+ /* set MMU hardware at the page table directory */
2941+ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
2942+ "into MMU_DIR_LIST0/1\n", pd_addr);
2943+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr);
2944+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0);
2945+
2946+ /* setup index register, all pointing to directory bank 0 */
2947+ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0);
2948+
2949+ /* now enable MMU access for all requestors */
2950+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0);
2951+}
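+
+/*
+ * The ordering above matters: all requestors are bypassed while the
+ * directory list and bank index are programmed, and translation is
+ * only enabled once both are valid (CONTROL0 written back to 0).
+ */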
2952+
2953+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
2954+{
2955+ uint32_t mmu_control;
2956+
2957+#if 0
2958+ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
2959+ " so flush using the master core\n");
2960+#endif
2961+ /* XXX: disable interrupt */
2962+
2963+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control);
2964+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
2965+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);
2966+
2967+#if 0
2968+ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
2969+ "still operating afterwards even if not cleared,\n"
2970+ "but may want to replace with MMU_FLUSH?\n");
2971+#endif
2972+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
2973+
2974+ /* clear it */
2975+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
2976+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
2977+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
2978+}
2979+
2980+#if 0 /* DEBUG_FUNCTION */
2981+struct reg_pair {
2982+ uint32_t base;
2983+ uint32_t offset;
2984+};
2985+
2986+
2987+static int ccb_offset;
2988+
2989+static int topaz_test_null(struct drm_device *dev, uint32_t seq)
2990+{
2991+ struct drm_psb_private *dev_priv = dev->dev_private;
2992+
2993+ /* XXX: firmware setup is finished here;
2994+ * use a NULL command to verify that
2995+ * the firmware behaves correctly
2996+ */
2997+ uint32_t null_cmd;
2998+ uint32_t cmd_seq;
2999+
3000+ null_cmd = 0 | (1 << 8) | (seq) << 16;
3001+ topaz_write_mtx_mem(dev_priv,
3002+ dev_priv->topaz_ccb_buffer_addr + ccb_offset,
3003+ null_cmd);
3004+
3005+ topaz_mtx_kick(dev_priv, 1);
3006+
3007+ DRM_UDELAY(1000); /* wait to finish */
3008+
3009+ cmd_seq = topaz_read_mtx_mem(dev_priv,
3010+ dev_priv->topaz_ccb_ctrl_addr + 4);
3011+
3012+ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
3013+ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
3014+ seq, cmd_seq, WB_SEQ, WB_ROFF);
3015+
3016+ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
3017+
3018+ topaz_test_queryirq(dev);
3019+ topaz_test_clearirq(dev);
3020+
3021+ ccb_offset += 4;
3022+
3023+ return 0;
3024+}
3025+
3026+void topaz_mmu_flush(struct drm_psb_private *dev_priv)
3027+{
3028+ uint32_t val;
3029+
3030+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &val);
3031+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
3032+ val | F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
3033+ wmb();
3034+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
3035+ val & ~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC)); /* encode 1 so the mask clears INVALDC */
3036+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &val);
3037+}
3038+
3039+/*
3040+ * this function tests whether the mmu is correct:
3041+ * it gets a drm_buffer_object and uses CMD_SYNC to write
3042+ * a certain value into this buffer.
3043+ */
3044+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
3045+{
3046+ struct drm_psb_private *dev_priv = dev->dev_private;
3047+ uint32_t sync_cmd;
3048+ unsigned long real_pfn;
3049+ int ret;
3050+ uint32_t cmd_seq;
3051+
3052+ *((uint32_t *)dev_priv->topaz_sync_addr) = 0xeeeeeeee;
3053+
3054+ /* topaz_mmu_flush(dev); */
3055+
3056+ sync_cmd = MTX_CMDID_SYNC | (3 << 8) | (0xeeee) << 16;
3057+
3058+ topaz_write_mtx_mem_multiple_setup(dev_priv,
3059+ dev_priv->topaz_ccb_buffer_addr + ccb_offset);
3060+
3061+ topaz_write_mtx_mem_multiple(dev_priv, sync_cmd);
3062+ topaz_write_mtx_mem_multiple(dev_priv, dev_priv->topaz_sync_offset);
3063+ topaz_write_mtx_mem_multiple(dev_priv, sync_value);
3064+
3065+ topaz_mtx_kick(dev_priv, 1);
3066+
3067+ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
3068+ dev_priv->topaz_sync_offset, &real_pfn);
3069+ if (ret != 0) {
3070+ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n");
3071+ return;
3072+ }
3073+ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
3074+ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
3075+ dev_priv->topaz_sync_offset, real_pfn, sync_value);
3076+
3077+ /* XXX: if we can use interrupt, we can wait this command finish */
3078+ /* topaz_wait_for_register (dev_priv,
3079+ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT, 0xf, 0xf); */
3080+ DRM_UDELAY(1000);
3081+
3082+ cmd_seq = topaz_read_mtx_mem(dev_priv,
3083+ dev_priv->topaz_ccb_ctrl_addr + 4);
3084+ PSB_DEBUG_GENERAL("Topaz: cmd_seq equals 0x%x, and expected 0x%x "
3085+ "(WB_seq=0x%08x,WB_roff=%d),synch value is 0x%x,"
3086+ "expected 0x%08x\n",
3087+ cmd_seq, 0xeeee, WB_SEQ, WB_ROFF,
3088+ *((uint32_t *)dev_priv->topaz_sync_addr), sync_value);
3089+
3090+ PSB_DEBUG_GENERAL("Topaz: after MMU test, query IRQ and clear it\n");
3091+ topaz_test_queryirq(dev);
3092+ topaz_test_clearirq(dev);
3093+
3094+ ccb_offset += 3*4; /* shift 3DWs */
3095+}
3096+
3097+#endif
3098+
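+/*
+ * Core-register addressing used by the save/restore loops below:
+ * the low nibble selects the register unit (0x1 = D0 bank, 0x2 = D1
+ * bank, 0x3 = A0 bank, 0x4 = A1 bank, 0x5 = PC/PCX, 0x7 = control
+ * registers) and bits 4..7 the index within it, i.e.
+ * reg = unit | (index << 4).
+ */
+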
3099+int lnc_topaz_restore_mtx_state(struct drm_device *dev)
3100+{
3101+ struct drm_psb_private *dev_priv =
3102+ (struct drm_psb_private *)dev->dev_private;
3103+ uint32_t reg_val;
3104+ uint32_t *mtx_reg_state;
3105+ int i;
3106+
3107+ if (dev_priv->topaz_mtx_data_mem == NULL) {
3108+ DRM_ERROR("TOPAZ: try to restore context without "
3109+ "space allocated\n");
3110+ return -1;
3111+ }
3112+
3113+ /* turn on mtx clocks */
3114+ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
3115+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
3116+ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
3117+
3118+ /* reset mtx */
3119+ /* FIXME: should use core_write??? */
3120+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
3121+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
3122+ DRM_UDELAY(6000);
3123+
3124+ topaz_mmu_hwsetup(dev_priv);
3125+ /* upload code, restore mtx data */
3126+ mtx_dma_write(dev);
3127+
3128+ mtx_reg_state = dev_priv->topaz_mtx_reg_state;
3129+ /* restore registers */
3130+ /* FIXME: consider putting read/write into one function */
3131+ /* Restores 8 Registers of D0 Bank */
3132+ /* D0Re0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
3133+ for (i = 0; i < 8; i++) {
3134+ topaz_write_core_reg(dev_priv, 0x1 | (i<<4),
3135+ *mtx_reg_state);
3136+ mtx_reg_state++;
3137+ }
3138+ /* Restores 8 Registers of D1 Bank */
3139+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
3140+ for (i = 0; i < 8; i++) {
3141+ topaz_write_core_reg(dev_priv, 0x2 | (i<<4),
3142+ *mtx_reg_state);
3143+ mtx_reg_state++;
3144+ }
3145+ /* Restores 4 Registers of A0 Bank */
3146+ /* A0StP, A0FrP, A0.2 and A0.3 */
3147+ for (i = 0; i < 4; i++) {
3148+ topaz_write_core_reg(dev_priv, 0x3 | (i<<4),
3149+ *mtx_reg_state);
3150+ mtx_reg_state++;
3151+ }
3152+ /* Restores 4 Registers of A1 Bank */
3153+ /* A1GbP, A1LbP, A1.2 and A1.3 */
3154+ for (i = 0; i < 4; i++) {
3155+ topaz_write_core_reg(dev_priv, 0x4 | (i<<4),
3156+ *mtx_reg_state);
3157+ mtx_reg_state++;
3158+ }
3159+ /* Restores PC and PCX */
3160+ for (i = 0; i < 2; i++) {
3161+ topaz_write_core_reg(dev_priv, 0x5 | (i<<4),
3162+ *mtx_reg_state);
3163+ mtx_reg_state++;
3164+ }
3165+ /* Restores 8 Control Registers */
3166+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
3167+ * TXGPIOO */
3168+ for (i = 0; i < 8; i++) {
3169+ topaz_write_core_reg(dev_priv, 0x7 | (i<<4),
3170+ *mtx_reg_state);
3171+ mtx_reg_state++;
3172+ }
3173+
3174+ /* turn on MTX */
3175+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
3176+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
3177+
3178+ return 0;
3179+}
3180+
3181+int lnc_topaz_save_mtx_state(struct drm_device *dev)
3182+{
3183+ struct drm_psb_private *dev_priv =
3184+ (struct drm_psb_private *)dev->dev_private;
3185+ uint32_t *mtx_reg_state;
3186+ int i;
3187+ struct topaz_codec_fw *cur_codec_fw;
3188+
3189+ /* FIXME: make sure the topaz_mtx_data_mem is allocated */
3190+ if (dev_priv->topaz_mtx_data_mem == NULL) {
3191+ DRM_ERROR("TOPAZ: try to save context without space "
3192+ "allocated\n");
3193+ return -1;
3194+ }
3195+
3196+ topaz_wait_for_register(dev_priv,
3197+ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET,
3198+ TXRPT_WAITONKICK_VALUE,
3199+ 0xffffffff);
3200+
3201+ /* stop mtx */
3202+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
3203+ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK);
3204+
3205+ mtx_reg_state = dev_priv->topaz_mtx_reg_state;
3206+
3207+ /* FIXME: consider putting read/write into one function */
3208+ /* Saves 8 Registers of D0 Bank */
3209+ /* D0Re0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
3210+ for (i = 0; i < 8; i++) {
3211+ topaz_read_core_reg(dev_priv, 0x1 | (i<<4),
3212+ mtx_reg_state);
3213+ mtx_reg_state++;
3214+ }
3215+ /* Saves 8 Registers of D1 Bank */
3216+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
3217+ for (i = 0; i < 8; i++) {
3218+ topaz_read_core_reg(dev_priv, 0x2 | (i<<4),
3219+ mtx_reg_state);
3220+ mtx_reg_state++;
3221+ }
3222+ /* Saves 4 Registers of A0 Bank */
3223+ /* A0StP, A0FrP, A0.2 and A0.3 */
3224+ for (i = 0; i < 4; i++) {
3225+ topaz_read_core_reg(dev_priv, 0x3 | (i<<4),
3226+ mtx_reg_state);
3227+ mtx_reg_state++;
3228+ }
3229+ /* Saves 4 Registers of A1 Bank */
3230+ /* A1GbP, A1LbP, A1.2 and A1.3 */
3231+ for (i = 0; i < 4; i++) {
3232+ topaz_read_core_reg(dev_priv, 0x4 | (i<<4),
3233+ mtx_reg_state);
3234+ mtx_reg_state++;
3235+ }
3236+ /* Saves PC and PCX */
3237+ for (i = 0; i < 2; i++) {
3238+ topaz_read_core_reg(dev_priv, 0x5 | (i<<4),
3239+ mtx_reg_state);
3240+ mtx_reg_state++;
3241+ }
3242+ /* Saves 8 Control Registers */
3243+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
3244+ * TXGPIOO */
3245+ for (i = 0; i < 8; i++) {
3246+ topaz_read_core_reg(dev_priv, 0x7 | (i<<4),
3247+ mtx_reg_state);
3248+ mtx_reg_state++;
3249+ }
3250+
3251+ /* save mtx data memory */
3252+ cur_codec_fw = &topaz_fw[dev_priv->topaz_cur_codec];
3253+
3254+ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000,
3255+ dev_priv->cur_mtx_data_size);
3256+
3257+ /* turn off mtx clocks */
3258+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
3259+ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE);
3260+
3261+ return 0;
3262+}
3263+
3264+void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size)
3265+{
3266+ struct drm_psb_private *dev_priv =
3267+ (struct drm_psb_private *)dev->dev_private;
3268+ struct ttm_buffer_object *target;
3269+
3270+ /* setup mtx DMAC registers to do transfer */
3271+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr);
3272+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3273+ F_ENCODE(2, MTX_BURSTSIZE) |
3274+ F_ENCODE(1, MTX_RNW) |
3275+ F_ENCODE(1, MTX_ENABLE) |
3276+ F_ENCODE(size, MTX_LENGTH));
3277+
3278+ /* give the DMAC access to the host memory via BIF */
3279+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3280+
3281+ target = dev_priv->topaz_mtx_data_mem;
3282+ /* transfer the data */
3283+ /* FIXME: is size measured in bytes? */
3284+ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
3285+ MTX_CR_MTX_SYSC_CDMAT,
3286+ size, 0, 1);
3287+
3288+ /* wait for the transfer to finish */
3289+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
3290+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3291+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3292+ /* clear interrupt */
3293+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3294+ /* give access back to topaz core */
3295+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3296+}
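+
+/*
+ * Unit note for the FIXME above: cur_mtx_data_size is treated as a
+ * count of 32-bit words elsewhere -- topaz_setup_fw() allocates
+ * cur_mtx_data_size * 4 bytes and mtx_dma_write() feeds it straight
+ * into MTX_LENGTH next to text_size / 4 -- so the size passed in here
+ * is presumably in words as well.
+ */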
3297+
3298+void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
3299+ uint32_t soc_addr, uint32_t bytes_num,
3300+ int increment, int rnw)
3301+{
3302+ struct drm_psb_private *dev_priv =
3303+ (struct drm_psb_private *)dev->dev_private;
3304+ uint32_t count_reg;
3305+ uint32_t irq_state;
3306+
3307+ /* check no transfer is in progress */
3308+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
3309+ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
3310+ DRM_ERROR("TOPAZ: there's transfer in progress when wanna "
3311+ "save mtx data\n");
3312+ /* FIXME: how to handle this error */
3313+ return;
3314+ }
3315+
3316+ /* no hold off period */
3317+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
3318+ /* clear irq state */
3319+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
3320+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
3321+ if (0 != irq_state) {
3322+ DRM_ERROR("TOPAZ: there's irq cann't clear\n");
3323+ return;
3324+ }
3325+
3326+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
3327+ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
3328+ DMAC_PWIDTH_32_BIT, rnw,
3329+ DMAC_PWIDTH_32_BIT, bytes_num);
3330+ /* generate an interrupt at end of transfer */
3331+ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
3332+ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
3333+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
3334+
3335+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
3336+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
3337+ DMAC_BURST_2));
3338+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
3339+
3340+ /* Finally, rewrite the count register with the enable
3341+ * bit set to kick off the transfer */
3342+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
3343+ count_reg | MASK_IMG_SOC_EN);
3344+}
3345+
3346+void mtx_dma_write(struct drm_device *dev)
3347+{
3348+ struct topaz_codec_fw *cur_codec_fw;
3349+ struct drm_psb_private *dev_priv =
3350+ (struct drm_psb_private *)dev->dev_private;
3351+
3352+ cur_codec_fw = &topaz_fw[dev_priv->topaz_cur_codec];
3353+
3354+ /* upload code */
3355+ /* setup mtx DMAC registers to receive the transfer */
3356+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
3357+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3358+ F_ENCODE(2, MTX_BURSTSIZE) |
3359+ F_ENCODE(0, MTX_RNW) |
3360+ F_ENCODE(1, MTX_ENABLE) |
3361+ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
3362+
3363+ /* give DMAC access to host memory */
3364+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3365+
3366+ /* transfer code */
3367+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
3368+ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
3369+ 0, 0);
3370+ /* wait finished */
3371+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
3372+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3373+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3374+ /* clear interrupt */
3375+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3376+
3377+ /* setup mtx to start receiving data */
3378+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
3379+ (cur_codec_fw->data_location) - 0x82880000);
3380+
3381+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
3382+ F_ENCODE(2, MTX_BURSTSIZE) |
3383+ F_ENCODE(0, MTX_RNW) |
3384+ F_ENCODE(1, MTX_ENABLE) |
3385+ F_ENCODE(dev_priv->cur_mtx_data_size, MTX_LENGTH));
3386+
3387+ /* give DMAC access to host memory */
3388+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
3389+
3390+ /* transfer data */
3391+ topaz_dma_transfer(dev_priv, 0, dev_priv->topaz_mtx_data_mem->offset,
3392+ 0, MTX_CR_MTX_SYSC_CDMAT,
3393+ dev_priv->cur_mtx_data_size,
3394+ 0, 0);
3395+ /* wait finished */
3396+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
3397+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
3398+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
3399+ /* clear interrupt */
3400+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
3401+
3402+ /* give access back to Topaz Core */
3403+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
3404+}
3405+
3406+#if 0
3407+void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
3408+{
3409+ int n;
3410+ int count;
3411+
3412+ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3413+ for (n = 0; n < count; n++, ++data)
3414+ MM_READ32(topaz_default_regs[n][0],
3415+ topaz_default_regs[n][1],
3416+ data);
3417+
3418+}
3419+
3420+void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
3421+ uint32_t *data)
3422+{
3423+ int n;
3424+ int count;
3425+
3426+ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
3427+ for (n = 0; n < count; n++, ++data)
3428+ MM_WRITE32(topaz_default_regs[n][0],
3429+ topaz_default_regs[n][1],
3430+ *data);
3431+
3432+}
3433+#endif
3434diff -uNr a/drivers/gpu/drm/psb/Makefile b/drivers/gpu/drm/psb/Makefile
3435--- a/drivers/gpu/drm/psb/Makefile 1969-12-31 16:00:00.000000000 -0800
3436+++ b/drivers/gpu/drm/psb/Makefile 2009-04-07 13:28:38.000000000 -0700
3437@@ -0,0 +1,18 @@
3438+#
3439+# Makefile for the drm device driver. This driver provides support for the
3440+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
3441+
3442+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/psb
3443+
3444+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o \
3445+ psb_buffer.o psb_gtt.o psb_schedule.o psb_scene.o \
3446+ psb_reset.o psb_xhw.o psb_msvdx.o \
3447+ lnc_topaz.o lnc_topazinit.o \
3448+ psb_msvdxinit.o psb_ttm_glue.o psb_fb.o psb_setup.o \
3449+ ttm/ttm_object.o ttm/ttm_lock.o ttm/ttm_fence_user.o \
3450+ ttm/ttm_fence.o ttm/ttm_tt.o ttm/ttm_execbuf_util.o \
3451+ ttm/ttm_bo.o ttm/ttm_bo_util.o ttm/ttm_placement_user.o \
3452+ ttm/ttm_bo_vm.o ttm/ttm_pat_compat.o ttm/ttm_memory.o
3453+
3454+obj-$(CONFIG_DRM_PSB) += psb.o
3455+
3456diff -uNr a/drivers/gpu/drm/psb/psb_buffer.c b/drivers/gpu/drm/psb/psb_buffer.c
3457--- a/drivers/gpu/drm/psb/psb_buffer.c 1969-12-31 16:00:00.000000000 -0800
3458+++ b/drivers/gpu/drm/psb/psb_buffer.c 2009-04-07 13:28:38.000000000 -0700
3459@@ -0,0 +1,504 @@
3460+/**************************************************************************
3461+ * Copyright (c) 2007, Intel Corporation.
3462+ * All Rights Reserved.
3463+ *
3464+ * This program is free software; you can redistribute it and/or modify it
3465+ * under the terms and conditions of the GNU General Public License,
3466+ * version 2, as published by the Free Software Foundation.
3467+ *
3468+ * This program is distributed in the hope it will be useful, but WITHOUT
3469+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3470+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3471+ * more details.
3472+ *
3473+ * You should have received a copy of the GNU General Public License along with
3474+ * this program; if not, write to the Free Software Foundation, Inc.,
3475+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3476+ *
3477+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
3478+ * develop this driver.
3479+ *
3480+ **************************************************************************/
3481+/*
3482+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
3483+ */
3484+#include "ttm/ttm_placement_common.h"
3485+#include "ttm/ttm_execbuf_util.h"
3486+#include "ttm/ttm_fence_api.h"
3487+#include <drm/drmP.h>
3488+#include "psb_drv.h"
3489+#include "psb_schedule.h"
3490+
3491+#define DRM_MEM_TTM 26
3492+
3493+struct drm_psb_ttm_backend {
3494+ struct ttm_backend base;
3495+ struct page **pages;
3496+ unsigned int desired_tile_stride;
3497+ unsigned int hw_tile_stride;
3498+ int mem_type;
3499+ unsigned long offset;
3500+ unsigned long num_pages;
3501+};
3502+
3503+/*
3504+ * Poulsbo GPU virtual space looks like this
3505+ * (We currently use only one MMU context).
3506+ *
3507+ * gatt_start = Start of GATT aperture in bus space.
3508+ * stolen_end = End of GATT populated by stolen memory in bus space.
3509+ * gatt_end = End of GATT
3510+ * twod_end = MIN(gatt_start + 256MB, gatt_end)
3511+ *
3512+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling-
3513+ * and copy operations.
3514+ * This space is not managed and is protected by the
3515+ * temp_mem mutex.
3516+ *
3517+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
3518+ *
3519+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
3520+ *
3521+ * gatt_start -> stolen_end TTM_PL_VRAM Pre-populated GATT pages.
3522+ *
3523+ * stolen_end -> twod_end TTM_PL_TT GATT memory usable by 2D engine.
3524+ *
3525+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not
3526+ * usable by 2D engine.
3527+ *
3528+ * gatt_end -> 0xffffffff Currently unused.
3529+ */
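/*
 * Illustrative sketch only (not part of the submitted patch): given the
 * map above, the MMU-only region can be recognized like this, using the
 * PSB_MEM_MMU_START base that psb_init_mem_type() below relies on.
 */
static inline int psb_gpu_offset_is_mmu_only(uint32_t gpu_offset,
					     uint32_t gatt_start)
{
	return gpu_offset >= PSB_MEM_MMU_START && gpu_offset < gatt_start;
}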
3530+
3531+static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
3532+ struct ttm_mem_type_manager *man)
3533+{
3534+
3535+ struct drm_psb_private *dev_priv =
3536+ container_of(bdev, struct drm_psb_private, bdev);
3537+ struct psb_gtt *pg = dev_priv->pg;
3538+
3539+ switch (type) {
3540+ case TTM_PL_SYSTEM:
3541+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
3542+ man->available_caching = TTM_PL_FLAG_CACHED |
3543+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3544+ man->default_caching = TTM_PL_FLAG_CACHED;
3545+ break;
3546+ case DRM_PSB_MEM_KERNEL:
3547+ man->io_offset = 0x00000000;
3548+ man->io_size = 0x00000000;
3549+ man->io_addr = NULL;
3550+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3551+ TTM_MEMTYPE_FLAG_CMA;
3552+ man->gpu_offset = PSB_MEM_KERNEL_START;
3553+ man->available_caching = TTM_PL_FLAG_CACHED |
3554+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3555+ man->default_caching = TTM_PL_FLAG_WC;
3556+ break;
3557+ case DRM_PSB_MEM_MMU:
3558+ man->io_offset = 0x00000000;
3559+ man->io_size = 0x00000000;
3560+ man->io_addr = NULL;
3561+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3562+ TTM_MEMTYPE_FLAG_CMA;
3563+ man->gpu_offset = PSB_MEM_MMU_START;
3564+ man->available_caching = TTM_PL_FLAG_CACHED |
3565+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3566+ man->default_caching = TTM_PL_FLAG_WC;
3567+ break;
3568+ case DRM_PSB_MEM_PDS:
3569+ man->io_offset = 0x00000000;
3570+ man->io_size = 0x00000000;
3571+ man->io_addr = NULL;
3572+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3573+ TTM_MEMTYPE_FLAG_CMA;
3574+ man->gpu_offset = PSB_MEM_PDS_START;
3575+ man->available_caching = TTM_PL_FLAG_CACHED |
3576+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3577+ man->default_caching = TTM_PL_FLAG_WC;
3578+ break;
3579+ case DRM_PSB_MEM_RASTGEOM:
3580+ man->io_offset = 0x00000000;
3581+ man->io_size = 0x00000000;
3582+ man->io_addr = NULL;
3583+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3584+ TTM_MEMTYPE_FLAG_CMA;
3585+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
3586+ man->available_caching = TTM_PL_FLAG_CACHED |
3587+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3588+ man->default_caching = TTM_PL_FLAG_WC;
3589+ break;
3590+ case TTM_PL_VRAM:
3591+ man->io_addr = NULL;
3592+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3593+ TTM_MEMTYPE_FLAG_FIXED |
3594+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
3595+#ifdef PSB_WORKING_HOST_MMU_ACCESS
3596+ man->io_offset = pg->gatt_start;
3597+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
3598+#else
3599+ man->io_offset = pg->stolen_base;
3600+ man->io_size = pg->vram_stolen_size;
3601+#endif
3602+ man->gpu_offset = pg->gatt_start;
3603+ man->available_caching = TTM_PL_FLAG_UNCACHED |
3604+ TTM_PL_FLAG_WC;
3605+ man->default_caching = TTM_PL_FLAG_WC;
3606+ break;
3607+ case TTM_PL_CI:
3608+ man->io_addr = NULL;
3609+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3610+ TTM_MEMTYPE_FLAG_FIXED |
3611+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
3612+ man->io_offset = dev_priv->ci_region_start;
3613+ man->io_size = pg->ci_stolen_size;
3614+ man->gpu_offset = pg->gatt_start - pg->ci_stolen_size;
3615+ man->available_caching = TTM_PL_FLAG_UNCACHED;
3616+ man->default_caching = TTM_PL_FLAG_UNCACHED;
3617+ break;
3618+ case TTM_PL_TT: /* Mappable GATT memory */
3619+ man->io_offset = pg->gatt_start;
3620+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
3621+ man->io_addr = NULL;
3622+#ifdef PSB_WORKING_HOST_MMU_ACCESS
3623+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3624+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
3625+#else
3626+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3627+ TTM_MEMTYPE_FLAG_CMA;
3628+#endif
3629+ man->available_caching = TTM_PL_FLAG_CACHED |
3630+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3631+ man->default_caching = TTM_PL_FLAG_WC;
3632+ man->gpu_offset = pg->gatt_start;
3633+ break;
3634+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
3635+ man->io_offset = pg->gatt_start;
3636+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
3637+ man->io_addr = NULL;
3638+#ifdef PSB_WORKING_HOST_MMU_ACCESS
3639+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3640+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
3641+#else
3642+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
3643+ TTM_MEMTYPE_FLAG_CMA;
3644+#endif
3645+ man->gpu_offset = pg->gatt_start;
3646+ man->available_caching = TTM_PL_FLAG_CACHED |
3647+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
3648+ man->default_caching = TTM_PL_FLAG_WC;
3649+ break;
3650+ default:
3651+ DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
3652+ return -EINVAL;
3653+ }
3654+ return 0;
3655+}
3656+
3657+static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
3658+{
3659+ uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
3660+
3661+
3662+ switch (bo->mem.mem_type) {
3663+ case TTM_PL_VRAM:
3664+ if (bo->mem.proposed_flags & TTM_PL_FLAG_TT)
3665+ return cur_placement | TTM_PL_FLAG_TT;
3666+ else
3667+ return cur_placement | TTM_PL_FLAG_SYSTEM;
3668+ default:
3669+ return cur_placement | TTM_PL_FLAG_SYSTEM;
3670+ }
3671+}
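/*
 * Note: TTM consults the mask returned above when it must evict a
 * buffer. A VRAM buffer whose proposed placement allows TT is pushed
 * out to GATT memory; every other placement falls back to system RAM.
 */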
3672+
3673+static int psb_invalidate_caches(struct ttm_bo_device *bdev,
3674+ uint32_t placement)
3675+{
3676+ return 0;
3677+}
3678+
3679+static int psb_move_blit(struct ttm_buffer_object *bo,
3680+ bool evict, bool no_wait,
3681+ struct ttm_mem_reg *new_mem)
3682+{
3683+ struct drm_psb_private *dev_priv =
3684+ container_of(bo->bdev, struct drm_psb_private, bdev);
3685+ struct drm_device *dev = dev_priv->dev;
3686+ struct ttm_mem_reg *old_mem = &bo->mem;
3687+	struct ttm_fence_object *fence = NULL;
3688+ int dir = 0;
3689+ int ret;
3690+
3691+ if ((old_mem->mem_type == new_mem->mem_type) &&
3692+ (new_mem->mm_node->start <
3693+ old_mem->mm_node->start + old_mem->mm_node->size)) {
3694+ dir = 1;
3695+ }
3696+
3697+ psb_emit_2d_copy_blit(dev,
3698+ old_mem->mm_node->start << PAGE_SHIFT,
3699+ new_mem->mm_node->start << PAGE_SHIFT,
3700+ new_mem->num_pages, dir);
3701+
3702+ ret = ttm_fence_object_create(&dev_priv->fdev, 0,
3703+ _PSB_FENCE_TYPE_EXE,
3704+ TTM_FENCE_FLAG_EMIT,
3705+ &fence);
3706+ if (unlikely(ret != 0)) {
3707+ psb_idle_2d(dev);
3708+ if (fence)
3709+ ttm_fence_object_unref(&fence);
3710+ }
3711+
3712+ ret = ttm_bo_move_accel_cleanup(bo, (void *) fence,
3713+ (void *) (unsigned long)
3714+ _PSB_FENCE_TYPE_EXE,
3715+ evict, no_wait, new_mem);
3716+ if (fence)
3717+ ttm_fence_object_unref(&fence);
3718+ return ret;
3719+}
3720+
3721+/*
3722+ * Flip destination ttm into GATT,
3723+ * then blit and subsequently move out again.
3724+ */
3725+
3726+static int psb_move_flip(struct ttm_buffer_object *bo,
3727+ bool evict, bool interruptible, bool no_wait,
3728+ struct ttm_mem_reg *new_mem)
3729+{
3730+ struct ttm_bo_device *bdev = bo->bdev;
3731+ struct ttm_mem_reg tmp_mem;
3732+ int ret;
3733+
3734+ tmp_mem = *new_mem;
3735+ tmp_mem.mm_node = NULL;
3736+ tmp_mem.proposed_flags = TTM_PL_FLAG_TT;
3737+
3738+ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait);
3739+ if (ret)
3740+ return ret;
3741+ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
3742+ if (ret)
3743+ goto out_cleanup;
3744+ ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
3745+ if (ret)
3746+ goto out_cleanup;
3747+
3748+ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
3749+out_cleanup:
3750+ if (tmp_mem.mm_node) {
3751+ spin_lock(&bdev->lru_lock);
3752+ drm_mm_put_block(tmp_mem.mm_node);
3753+ tmp_mem.mm_node = NULL;
3754+ spin_unlock(&bdev->lru_lock);
3755+ }
3756+ return ret;
3757+}
3758+
3759+static int psb_move(struct ttm_buffer_object *bo,
3760+ bool evict, bool interruptible,
3761+ bool no_wait, struct ttm_mem_reg *new_mem)
3762+{
3763+ struct ttm_mem_reg *old_mem = &bo->mem;
3764+
3765+ if (old_mem->mem_type == TTM_PL_SYSTEM) {
3766+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
3767+ } else if (new_mem->mem_type == TTM_PL_SYSTEM) {
3768+ int ret = psb_move_flip(bo, evict, interruptible,
3769+ no_wait, new_mem);
3770+ if (unlikely(ret != 0)) {
3771+ if (ret == -ERESTART)
3772+ return ret;
3773+ else
3774+ return ttm_bo_move_memcpy(bo, evict, no_wait,
3775+ new_mem);
3776+ }
3777+ } else {
3778+ if (psb_move_blit(bo, evict, no_wait, new_mem))
3779+ return ttm_bo_move_memcpy(bo, evict, no_wait,
3780+ new_mem);
3781+ }
3782+ return 0;
3783+}
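/*
 * Move policy summary (derived from psb_move() above):
 *   system -> anywhere : plain CPU memcpy
 *   anywhere -> system : 2D blit through a temporary GATT binding
 *                        (psb_move_flip), memcpy fallback on failure
 *   device -> device   : direct 2D blit, memcpy fallback on failure
 */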
3784+
3785+static int drm_psb_tbe_populate(struct ttm_backend *backend,
3786+ unsigned long num_pages,
3787+ struct page **pages,
3788+ struct page *dummy_read_page)
3789+{
3790+ struct drm_psb_ttm_backend *psb_be =
3791+ container_of(backend, struct drm_psb_ttm_backend, base);
3792+
3793+ psb_be->pages = pages;
3794+ return 0;
3795+}
3796+
3797+static int drm_psb_tbe_unbind(struct ttm_backend *backend)
3798+{
3799+ struct ttm_bo_device *bdev = backend->bdev;
3800+ struct drm_psb_private *dev_priv =
3801+ container_of(bdev, struct drm_psb_private, bdev);
3802+ struct drm_psb_ttm_backend *psb_be =
3803+ container_of(backend, struct drm_psb_ttm_backend, base);
3804+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
3805+ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type];
3806+
3807+ PSB_DEBUG_RENDER("MMU unbind.\n");
3808+
3809+ if (psb_be->mem_type == TTM_PL_TT) {
3810+ uint32_t gatt_p_offset =
3811+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
3812+
3813+ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
3814+ psb_be->num_pages,
3815+ psb_be->desired_tile_stride,
3816+ psb_be->hw_tile_stride);
3817+ }
3818+
3819+ psb_mmu_remove_pages(pd, psb_be->offset,
3820+ psb_be->num_pages,
3821+ psb_be->desired_tile_stride,
3822+ psb_be->hw_tile_stride);
3823+
3824+ return 0;
3825+}
3826+
3827+static int drm_psb_tbe_bind(struct ttm_backend *backend,
3828+ struct ttm_mem_reg *bo_mem)
3829+{
3830+ struct ttm_bo_device *bdev = backend->bdev;
3831+ struct drm_psb_private *dev_priv =
3832+ container_of(bdev, struct drm_psb_private, bdev);
3833+ struct drm_psb_ttm_backend *psb_be =
3834+ container_of(backend, struct drm_psb_ttm_backend, base);
3835+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
3836+ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
3837+ int type;
3838+ int ret = 0;
3839+
3840+ psb_be->mem_type = bo_mem->mem_type;
3841+ psb_be->num_pages = bo_mem->num_pages;
3842+ psb_be->desired_tile_stride = 0;
3843+ psb_be->hw_tile_stride = 0;
3844+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
3845+ man->gpu_offset;
3846+
3847+	type = (bo_mem->flags & TTM_PL_FLAG_CACHED) ?
3848+	    PSB_MMU_CACHED_MEMORY : 0;
3850+
3851+ PSB_DEBUG_RENDER("MMU bind.\n");
3852+ if (psb_be->mem_type == TTM_PL_TT) {
3853+ uint32_t gatt_p_offset =
3854+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
3855+
3856+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
3857+ gatt_p_offset,
3858+ psb_be->num_pages,
3859+ psb_be->desired_tile_stride,
3860+ psb_be->hw_tile_stride, type);
3861+ }
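	/*
	 * Note: a failure from psb_gtt_insert_pages() above is not acted
	 * upon here; 'ret' is overwritten by psb_mmu_insert_pages() below,
	 * so a GTT insertion error would be silently dropped.
	 */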
3862+
3863+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
3864+ psb_be->offset, psb_be->num_pages,
3865+ psb_be->desired_tile_stride,
3866+ psb_be->hw_tile_stride, type);
3867+ if (ret)
3868+ goto out_err;
3869+
3870+ return 0;
3871+out_err:
3872+ drm_psb_tbe_unbind(backend);
3873+ return ret;
3874+
3875+}
3876+
3877+static void drm_psb_tbe_clear(struct ttm_backend *backend)
3878+{
3879+ struct drm_psb_ttm_backend *psb_be =
3880+ container_of(backend, struct drm_psb_ttm_backend, base);
3881+
3882+ psb_be->pages = NULL;
3883+ return;
3884+}
3885+
3886+static void drm_psb_tbe_destroy(struct ttm_backend *backend)
3887+{
3888+ struct drm_psb_ttm_backend *psb_be =
3889+ container_of(backend, struct drm_psb_ttm_backend, base);
3890+
3891+ if (backend)
3892+ drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
3893+}
3894+
3895+static struct ttm_backend_func psb_ttm_backend = {
3896+ .populate = drm_psb_tbe_populate,
3897+ .clear = drm_psb_tbe_clear,
3898+ .bind = drm_psb_tbe_bind,
3899+ .unbind = drm_psb_tbe_unbind,
3900+ .destroy = drm_psb_tbe_destroy,
3901+};
3902+
3903+static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
3904+{
3905+ struct drm_psb_ttm_backend *psb_be;
3906+
3907+ psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
3908+ if (!psb_be)
3909+ return NULL;
3910+ psb_be->pages = NULL;
3911+ psb_be->base.func = &psb_ttm_backend;
3912+ psb_be->base.bdev = bdev;
3913+ return &psb_be->base;
3914+}
3915+
3916+/*
3917+ * Use this memory type priority if no eviction is needed.
3918+ */
3919+static uint32_t psb_mem_prios[] = {
3920+ TTM_PL_CI,
3921+ TTM_PL_VRAM,
3922+ TTM_PL_TT,
3923+ DRM_PSB_MEM_KERNEL,
3924+ DRM_PSB_MEM_MMU,
3925+ DRM_PSB_MEM_RASTGEOM,
3926+ DRM_PSB_MEM_PDS,
3927+ DRM_PSB_MEM_APER,
3928+ TTM_PL_SYSTEM
3929+};
3930+
3931+/*
3932+ * Use this memory type priority if eviction is needed.
3933+ */
3934+static uint32_t psb_busy_prios[] = {
3935+ TTM_PL_TT,
3936+ TTM_PL_VRAM,
3937+ TTM_PL_CI,
3938+ DRM_PSB_MEM_KERNEL,
3939+ DRM_PSB_MEM_MMU,
3940+ DRM_PSB_MEM_RASTGEOM,
3941+ DRM_PSB_MEM_PDS,
3942+ DRM_PSB_MEM_APER,
3943+ TTM_PL_SYSTEM
3944+};
3945+
3946+
3947+struct ttm_bo_driver psb_ttm_bo_driver = {
3948+ .mem_type_prio = psb_mem_prios,
3949+ .mem_busy_prio = psb_busy_prios,
3950+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
3951+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
3952+ .create_ttm_backend_entry = &drm_psb_tbe_init,
3953+ .invalidate_caches = &psb_invalidate_caches,
3954+ .init_mem_type = &psb_init_mem_type,
3955+ .evict_flags = &psb_evict_mask,
3956+ .move = &psb_move,
3957+ .verify_access = &psb_verify_access,
3958+ .sync_obj_signaled = &ttm_fence_sync_obj_signaled,
3959+ .sync_obj_wait = &ttm_fence_sync_obj_wait,
3960+ .sync_obj_flush = &ttm_fence_sync_obj_flush,
3961+ .sync_obj_unref = &ttm_fence_sync_obj_unref,
3962+ .sync_obj_ref = &ttm_fence_sync_obj_ref
3963+};
3964diff -uNr a/drivers/gpu/drm/psb/psb_drm.h b/drivers/gpu/drm/psb/psb_drm.h
3965--- a/drivers/gpu/drm/psb/psb_drm.h 1969-12-31 16:00:00.000000000 -0800
3966+++ b/drivers/gpu/drm/psb/psb_drm.h 2009-04-07 13:28:38.000000000 -0700
3967@@ -0,0 +1,444 @@
3968+/**************************************************************************
3969+ * Copyright (c) 2007, Intel Corporation.
3970+ * All Rights Reserved.
3971+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
3972+ * All Rights Reserved.
3973+ *
3974+ * This program is free software; you can redistribute it and/or modify it
3975+ * under the terms and conditions of the GNU General Public License,
3976+ * version 2, as published by the Free Software Foundation.
3977+ *
3978+ * This program is distributed in the hope it will be useful, but WITHOUT
3979+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3980+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3981+ * more details.
3982+ *
3983+ * You should have received a copy of the GNU General Public License along with
3984+ * this program; if not, write to the Free Software Foundation, Inc.,
3985+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3986+ *
3987+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
3988+ * develop this driver.
3989+ *
3990+ **************************************************************************/
3991+/*
3992+ */
3993+
3994+#ifndef _PSB_DRM_H_
3995+#define _PSB_DRM_H_
3996+
3997+#if defined(__linux__) && !defined(__KERNEL__)
3998+#include <stdint.h>
3999+#endif
4000+
4001+#include "ttm/ttm_fence_user.h"
4002+#include "ttm/ttm_placement_user.h"
4003+
4004+#define DRM_PSB_SAREA_MAJOR 0
4005+#define DRM_PSB_SAREA_MINOR 2
4006+#define PSB_FIXED_SHIFT 16
4007+
4008+#define DRM_PSB_FIRST_TA_USE_REG 3
4009+#define DRM_PSB_NUM_TA_USE_REG 6
4010+#define DRM_PSB_FIRST_RASTER_USE_REG 8
4011+#define DRM_PSB_NUM_RASTER_USE_REG 7
4012+
4013+/*
4014+ * Public memory types.
4015+ */
4016+
4017+#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
4018+#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
4019+#define DRM_PSB_MEM_PDS TTM_PL_PRIV2
4020+#define DRM_PSB_FLAG_MEM_PDS TTM_PL_FLAG_PRIV2
4021+#define DRM_PSB_MEM_APER TTM_PL_PRIV3
4022+#define DRM_PSB_FLAG_MEM_APER TTM_PL_FLAG_PRIV3
4023+#define DRM_PSB_MEM_RASTGEOM TTM_PL_PRIV4
4024+#define DRM_PSB_FLAG_MEM_RASTGEOM TTM_PL_FLAG_PRIV4
4025+#define PSB_MEM_RASTGEOM_START 0x30000000
4026+
4027+typedef int32_t psb_fixed;
4028+typedef uint32_t psb_ufixed;
4029+
4030+static inline int32_t psb_int_to_fixed(int a)
4031+{
4032+ return a * (1 << PSB_FIXED_SHIFT);
4033+}
4034+
4035+static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
4036+{
4037+ return a << PSB_FIXED_SHIFT;
4038+}
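/*
 * Worked example: with PSB_FIXED_SHIFT == 16 these helpers produce
 * 16.16 fixed point, so psb_int_to_fixed(3) == 0x00030000, and the
 * fractional value 1.5 would be encoded as 0x00018000 (3 << 15).
 */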
4039+
4040+/*Status of the command sent to the gfx device.*/
4041+typedef enum {
4042+ DRM_CMD_SUCCESS,
4043+ DRM_CMD_FAILED,
4044+ DRM_CMD_HANG
4045+} drm_cmd_status_t;
4046+
4047+struct drm_psb_scanout {
4048+ uint32_t buffer_id; /* DRM buffer object ID */
4049+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
4050+ uint32_t stride; /* Buffer stride in bytes */
4051+	uint32_t depth;	/* Buffer depth in bits (NOT bpp) */
4052+ uint32_t width; /* Buffer width in pixels */
4053+ uint32_t height; /* Buffer height in lines */
4054+ int32_t transform[3][3]; /* Buffer composite transform */
4055+ /* (scaling, rot, reflect) */
4056+};
4057+
4058+#define DRM_PSB_SAREA_OWNERS 16
4059+#define DRM_PSB_SAREA_OWNER_2D 0
4060+#define DRM_PSB_SAREA_OWNER_3D 1
4061+
4062+#define DRM_PSB_SAREA_SCANOUTS 3
4063+
4064+struct drm_psb_sarea {
4065+ /* Track changes of this data structure */
4066+
4067+ uint32_t major;
4068+ uint32_t minor;
4069+
4070+ /* Last context to touch part of hw */
4071+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
4072+
4073+ /* Definition of front- and rotated buffers */
4074+ uint32_t num_scanouts;
4075+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
4076+
4077+ int planeA_x;
4078+ int planeA_y;
4079+ int planeA_w;
4080+ int planeA_h;
4081+ int planeB_x;
4082+ int planeB_y;
4083+ int planeB_w;
4084+ int planeB_h;
4085+ /* Number of active scanouts */
4086+ uint32_t num_active_scanouts;
4087+};
4088+
4089+#define PSB_RELOC_MAGIC 0x67676767
4090+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
4091+#define PSB_RELOC_SHIFT_SHIFT 0
4092+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
4093+#define PSB_RELOC_ALSHIFT_SHIFT 16
4094+
4095+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
4096+ * buffer
4097+ */
4098+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
4099+ * buffer, relative to 2D
4100+ * base address
4101+ */
4102+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
4103+ * relative to PDS base address
4104+ */
4105+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
4106+ * buffer (for tiling)
4107+ */
4108+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
4109+ * relative to base reg
4110+ */
4111+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
4112+
4113+struct drm_psb_reloc {
4114+ uint32_t reloc_op;
4115+ uint32_t where; /* offset in destination buffer */
4116+ uint32_t buffer; /* Buffer reloc applies to */
4117+ uint32_t mask; /* Destination format: */
4118+ uint32_t shift; /* Destination format: */
4119+ uint32_t pre_add; /* Destination format: */
4120+ uint32_t background; /* Destination add */
4121+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
4122+	uint32_t arg0;	/* Reloc-op dependent */
4123+ uint32_t arg1;
4124+};
4125+
4126+
4127+#define PSB_GPU_ACCESS_READ (1ULL << 32)
4128+#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
4129+#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
4130+
4131+#define PSB_BO_FLAG_TA (1ULL << 48)
4132+#define PSB_BO_FLAG_SCENE (1ULL << 49)
4133+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
4134+#define PSB_BO_FLAG_USSE (1ULL << 51)
4135+#define PSB_BO_FLAG_COMMAND (1ULL << 52)
4136+
4137+#define PSB_ENGINE_2D 0
4138+#define PSB_ENGINE_VIDEO 1
4139+#define PSB_ENGINE_RASTERIZER 2
4140+#define PSB_ENGINE_TA 3
4141+#define PSB_ENGINE_HPRAST 4
4142+#define LNC_ENGINE_ENCODE 5
4143+
4144+#define PSB_DEVICE_SGX 0x1
4145+#define PSB_DEVICE_DISLAY 0x2
4146+#define PSB_DEVICE_MSVDX 0x4
4147+#define PSB_DEVICE_TOPAZ 0x8
4148+
4149+/*
4150+ * For this fence class we have a couple of
4151+ * fence types.
4152+ */
4153+
4154+#define _PSB_FENCE_EXE_SHIFT 0
4155+#define _PSB_FENCE_TA_DONE_SHIFT 1
4156+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
4157+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
4158+#define _PSB_FENCE_FEEDBACK_SHIFT 4
4159+
4160+#define _PSB_ENGINE_TA_FENCE_TYPES 5
4161+#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
4162+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
4163+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
4164+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
4165+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
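/*
 * Example composition: a TA submission that also requests feedback
 * would be fenced with (_PSB_FENCE_TYPE_EXE | _PSB_FENCE_TYPE_TA_DONE |
 * _PSB_FENCE_TYPE_FEEDBACK), while plain 2D work (see psb_move_blit()
 * in psb_buffer.c) uses _PSB_FENCE_TYPE_EXE alone.
 */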
4166+
4168+#define PSB_NUM_ENGINES 6
4169+
4170+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
4171+#define PSB_TA_FLAG_LASTPASS (1 << 1)
4172+
4173+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
4174+
4175+struct drm_psb_extension_rep {
4176+ int32_t exists;
4177+ uint32_t driver_ioctl_offset;
4178+ uint32_t sarea_offset;
4179+ uint32_t major;
4180+ uint32_t minor;
4181+ uint32_t pl;
4182+};
4183+
4184+#define DRM_PSB_EXT_NAME_LEN 128
4185+
4186+union drm_psb_extension_arg {
4187+ char extension[DRM_PSB_EXT_NAME_LEN];
4188+ struct drm_psb_extension_rep rep;
4189+};
4190+
4191+struct psb_validate_req {
4192+ uint64_t set_flags;
4193+ uint64_t clear_flags;
4194+ uint64_t next;
4195+ uint64_t presumed_gpu_offset;
4196+ uint32_t buffer_handle;
4197+ uint32_t presumed_flags;
4198+ uint32_t group;
4199+ uint32_t pad64;
4200+};
4201+
4202+struct psb_validate_rep {
4203+ uint64_t gpu_offset;
4204+ uint32_t placement;
4205+ uint32_t fence_type_mask;
4206+};
4207+
4208+#define PSB_USE_PRESUMED (1 << 0)
4209+
4210+struct psb_validate_arg {
4211+ int handled;
4212+ int ret;
4213+ union {
4214+ struct psb_validate_req req;
4215+ struct psb_validate_rep rep;
4216+ } d;
4217+};
4218+
4219+struct drm_psb_scene {
4220+ int handle_valid;
4221+ uint32_t handle;
4222+ uint32_t w; /* also contains msaa info */
4223+ uint32_t h;
4224+ uint32_t num_buffers;
4225+};
4226+
4227+#define DRM_PSB_FENCE_NO_USER (1 << 0)
4228+
4229+struct psb_ttm_fence_rep {
4230+ uint32_t handle;
4231+ uint32_t fence_class;
4232+ uint32_t fence_type;
4233+ uint32_t signaled_types;
4234+ uint32_t error;
4235+};
4236+
4237+typedef struct drm_psb_cmdbuf_arg {
4238+ uint64_t buffer_list; /* List of buffers to validate */
4239+ uint64_t clip_rects; /* See i915 counterpart */
4240+ uint64_t scene_arg;
4241+ uint64_t fence_arg;
4242+
4243+ uint32_t ta_flags;
4244+
4245+ uint32_t ta_handle; /* TA reg-value pairs */
4246+ uint32_t ta_offset;
4247+ uint32_t ta_size;
4248+
4249+ uint32_t oom_handle;
4250+ uint32_t oom_offset;
4251+ uint32_t oom_size;
4252+
4253+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
4254+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
4255+ uint32_t cmdbuf_size;
4256+
4257+ uint32_t reloc_handle; /* Reloc buffer object */
4258+ uint32_t reloc_offset;
4259+ uint32_t num_relocs;
4260+
4261+ int32_t damage; /* Damage front buffer with cliprects */
4262+ /* Not implemented yet */
4263+ uint32_t fence_flags;
4264+ uint32_t engine;
4265+
4266+ /*
4267+ * Feedback;
4268+ */
4269+
4270+ uint32_t feedback_ops;
4271+ uint32_t feedback_handle;
4272+ uint32_t feedback_offset;
4273+ uint32_t feedback_breakpoints;
4274+ uint32_t feedback_size;
4275+} drm_psb_cmdbuf_arg_t;
4276+
4277+struct drm_psb_xhw_init_arg {
4278+ uint32_t operation;
4279+ uint32_t buffer_handle;
4280+};
4281+
4282+/*
4283+ * Feedback components:
4284+ */
4285+
4286+/*
4287+ * Vistest component. The number of these in the feedback buffer
4288+ * equals the number of vistest breakpoints + 1.
4289+ * This is currently the only feedback component.
4290+ */
4291+
4292+struct drm_psb_vistest {
4293+ uint32_t vt[8];
4294+};
4295+
4296+#define PSB_HW_COOKIE_SIZE 16
4297+#define PSB_HW_FEEDBACK_SIZE 8
4298+#define PSB_HW_OOM_CMD_SIZE (6 + DRM_PSB_NUM_RASTER_USE_REG * 2)
4299+
4300+struct drm_psb_xhw_arg {
4301+ uint32_t op;
4302+ int ret;
4303+ uint32_t irq_op;
4304+ uint32_t issue_irq;
4305+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
4306+ union {
4307+ struct {
4308+ uint32_t w; /* also contains msaa info */
4309+ uint32_t h;
4310+ uint32_t size;
4311+ uint32_t clear_p_start;
4312+ uint32_t clear_num_pages;
4313+ } si;
4314+ struct {
4315+ uint32_t fire_flags;
4316+ uint32_t hw_context;
4317+ uint32_t offset;
4318+ uint32_t engine;
4319+ uint32_t flags;
4320+ uint32_t rca;
4321+ uint32_t num_oom_cmds;
4322+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
4323+ } sb;
4324+ struct {
4325+ uint32_t pages;
4326+ uint32_t size;
4327+ uint32_t ta_min_size;
4328+ } bi;
4329+ struct {
4330+ uint32_t bca;
4331+ uint32_t rca;
4332+ uint32_t flags;
4333+ } oom;
4334+ struct {
4335+ uint32_t pt_offset;
4336+ uint32_t param_offset;
4337+ uint32_t flags;
4338+ } bl;
4339+ struct {
4340+ uint32_t value;
4341+ } cl;
4342+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
4343+ } arg;
4344+};
4345+
4346+/* Controlling the kernel modesetting buffers */
4347+
4348+#define DRM_PSB_KMS_OFF 0x00
4349+#define DRM_PSB_KMS_ON 0x01
4350+#define DRM_PSB_VT_LEAVE 0x02
4351+#define DRM_PSB_VT_ENTER 0x03
4352+#define DRM_PSB_XHW_INIT 0x04
4353+#define DRM_PSB_XHW 0x05
4354+#define DRM_PSB_EXTENSION 0x06
4355+
4356+/*
4357+ * Xhw commands.
4358+ */
4359+
4360+#define PSB_XHW_INIT 0x00
4361+#define PSB_XHW_TAKEDOWN 0x01
4362+
4363+#define PSB_XHW_FIRE_RASTER 0x00
4364+#define PSB_XHW_SCENE_INFO 0x01
4365+#define PSB_XHW_SCENE_BIND_FIRE 0x02
4366+#define PSB_XHW_TA_MEM_INFO 0x03
4367+#define PSB_XHW_RESET_DPM 0x04
4368+#define PSB_XHW_OOM 0x05
4369+#define PSB_XHW_TERMINATE 0x06
4370+#define PSB_XHW_VISTEST 0x07
4371+#define PSB_XHW_RESUME 0x08
4372+#define PSB_XHW_TA_MEM_LOAD 0x09
4373+#define PSB_XHW_CHECK_LOCKUP 0x0a
4374+
4375+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
4376+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
4377+#define PSB_SCENE_FLAG_SETUP (1 << 2)
4378+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
4379+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
4380+
4381+#define PSB_TA_MEM_FLAG_TA (1 << 0)
4382+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
4383+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
4384+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
4385+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
4386+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
4387+
4388+/*Raster fire will deallocate memory */
4389+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
4390+/*Isp reset needed due to change in ZLS format */
4391+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
4392+/*These are set by Xpsb. */
4393+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
4394+/*The task has had at least one OOM and Xpsb will
4395+ send back messages on each fire. */
4396+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
4397+
4398+#define PSB_SCENE_ENGINE_TA 0
4399+#define PSB_SCENE_ENGINE_RASTER 1
4400+#define PSB_SCENE_NUM_ENGINES 2
4401+
4402+#define PSB_LOCKUP_RASTER (1 << 0)
4403+#define PSB_LOCKUP_TA (1 << 1)
4404+
4405+struct drm_psb_dev_info_arg {
4406+ uint32_t num_use_attribute_registers;
4407+};
4408+#define DRM_PSB_DEVINFO 0x01
4409+
4410+
4411+#endif
4412diff -uNr a/drivers/gpu/drm/psb/psb_drv.c b/drivers/gpu/drm/psb/psb_drv.c
4413--- a/drivers/gpu/drm/psb/psb_drv.c 1969-12-31 16:00:00.000000000 -0800
4414+++ b/drivers/gpu/drm/psb/psb_drv.c 2009-04-07 13:31:58.000000000 -0700
4415@@ -0,0 +1,1465 @@
4416+/**************************************************************************
4417+ * Copyright (c) 2007, Intel Corporation.
4418+ * All Rights Reserved.
4419+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
4420+ * All Rights Reserved.
4421+ *
4422+ * This program is free software; you can redistribute it and/or modify it
4423+ * under the terms and conditions of the GNU General Public License,
4424+ * version 2, as published by the Free Software Foundation.
4425+ *
4426+ * This program is distributed in the hope it will be useful, but WITHOUT
4427+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
4428+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
4429+ * more details.
4430+ *
4431+ * You should have received a copy of the GNU General Public License along with
4432+ * this program; if not, write to the Free Software Foundation, Inc.,
4433+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4434+ *
4435+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4436+ * develop this driver.
4437+ *
4438+ **************************************************************************/
4439+/*
4440+ */
4441+
4442+#include <drm/drmP.h>
4443+#include <drm/drm.h>
4444+#include "psb_drm.h"
4445+#include "psb_drv.h"
4446+#include "psb_reg.h"
4447+#include "psb_intel_reg.h"
4448+#include "psb_msvdx.h"
4449+#include "lnc_topaz.h"
4450+#include <drm/drm_pciids.h>
4451+#include "psb_scene.h"
4452+
4453+#include <linux/cpu.h>
4454+#include <linux/notifier.h>
4455+#include <linux/spinlock.h>
4456+
4457+int drm_psb_debug;
4458+EXPORT_SYMBOL(drm_psb_debug);
4459+static int drm_psb_trap_pagefaults;
4460+static int drm_psb_clock_gating;
4461+static int drm_psb_ta_mem_size = 32 * 1024;
4462+
4463+int drm_psb_disable_vsync;
4464+int drm_psb_no_fb;
4465+int drm_psb_force_pipeb;
4466+int drm_idle_check_interval = 5;
4467+int drm_psb_ospm;
4468+
4469+MODULE_PARM_DESC(debug, "Enable debug output");
4470+MODULE_PARM_DESC(clock_gating, "clock gating");
4471+MODULE_PARM_DESC(no_fb, "Disable FBdev");
4472+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
4473+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
4474+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
4475+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
4476+MODULE_PARM_DESC(ospm, "switch for ospm support");
4477+module_param_named(debug, drm_psb_debug, int, 0600);
4478+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
4479+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
4480+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
4481+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
4482+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
4483+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
4484+module_param_named(ospm, drm_psb_ospm, int, 0600);
4485+
4486+#ifndef CONFIG_X86_PAT
4487+#warning "Don't build this driver without PAT support!!!"
4488+#endif
4489+
4490+#define psb_PCI_IDS \
4491+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
4492+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
4493+ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4494+ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4495+ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4496+ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4497+ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4498+ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4499+ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4500+ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
4501+ {0, 0, 0}
4502+
4503+static struct pci_device_id pciidlist[] = {
4504+ psb_PCI_IDS
4505+};
4506+
4507+/*
4508+ * Standard IOCTLs.
4509+ */
4510+
4511+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
4512+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
4513+#define DRM_IOCTL_PSB_VT_LEAVE DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
4514+#define DRM_IOCTL_PSB_VT_ENTER DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
4515+#define DRM_IOCTL_PSB_XHW_INIT DRM_IOW(DRM_PSB_XHW_INIT + DRM_COMMAND_BASE, \
4516+ struct drm_psb_xhw_init_arg)
4517+#define DRM_IOCTL_PSB_XHW DRM_IO(DRM_PSB_XHW + DRM_COMMAND_BASE)
4518+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
4519+ union drm_psb_extension_arg)
4520+/*
4521+ * TTM execbuf extension.
4522+ */
4523+
4524+#define DRM_PSB_CMDBUF (DRM_PSB_EXTENSION + 1)
4525+#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1)
4526+#define DRM_IOCTL_PSB_CMDBUF DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \
4527+ struct drm_psb_cmdbuf_arg)
4528+#define DRM_IOCTL_PSB_SCENE_UNREF DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
4529+ struct drm_psb_scene)
4534+/*
4535+ * TTM placement user extension.
4536+ */
4537+
4538+#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1)
4539+
4540+#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
4541+#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
4542+#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
4543+#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
4544+#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
4545+#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
4546+
4547+/*
4548+ * TTM fence extension.
4549+ */
4550+
4551+#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1)
4552+#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
4553+#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
4554+#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
4555+
4556+#define DRM_IOCTL_PSB_TTM_PL_CREATE \
4557+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
4558+ union ttm_pl_create_arg)
4559+#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
4560+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
4561+ union ttm_pl_reference_arg)
4562+#define DRM_IOCTL_PSB_TTM_PL_UNREF \
4563+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
4564+ struct ttm_pl_reference_req)
4565+#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \
4566+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
4567+ struct ttm_pl_synccpu_arg)
4568+#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \
4569+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
4570+ struct ttm_pl_waitidle_arg)
4571+#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
4572+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
4573+ union ttm_pl_setstatus_arg)
4574+#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
4575+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \
4576+ union ttm_fence_signaled_arg)
4577+#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
4578+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \
4579+ union ttm_fence_finish_arg)
4580+#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
4581+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \
4582+ struct ttm_fence_unref_arg)
4583+
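/*
 * Resulting ioctl numbering, relative to DRM_COMMAND_BASE (a sketch,
 * assuming TTM_PL_CREATE..TTM_PL_SETSTATUS and TTM_FENCE_SIGNALED..
 * TTM_FENCE_UNREF enumerate from 0 in ttm_placement_user.h and
 * ttm_fence_user.h):
 *   0x00 - 0x06  core PSB ioctls (KMS_OFF .. EXTENSION)
 *   0x07         CMDBUF
 *   0x08         SCENE_UNREF
 *   0x09 - 0x0e  TTM placement ioctls
 *   0x0f - 0x11  TTM fence ioctls
 */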
4584+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
4585+ struct drm_file *file_priv);
4586+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
4587+ struct drm_file *file_priv);
4588+
4589+#define PSB_IOCTL_DEF(ioctl, func, flags) \
4590+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, func, flags}
4591+
4592+static struct drm_ioctl_desc psb_ioctls[] = {
4593+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
4594+ DRM_ROOT_ONLY),
4595+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
4596+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
4597+ DRM_ROOT_ONLY),
4598+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER, psb_vt_enter_ioctl, DRM_ROOT_ONLY),
4599+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW_INIT, psb_xhw_init_ioctl,
4600+ DRM_ROOT_ONLY),
4601+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW, psb_xhw_ioctl, DRM_ROOT_ONLY),
4602+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
4603+
4604+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
4605+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
4606+ DRM_AUTH),
4607+
4608+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
4609+ DRM_AUTH),
4610+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
4611+ DRM_AUTH),
4612+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
4613+ DRM_AUTH),
4614+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
4615+ DRM_AUTH),
4616+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
4617+ DRM_AUTH),
4618+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
4619+ DRM_AUTH),
4620+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
4621+ psb_fence_signaled_ioctl, DRM_AUTH),
4622+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
4623+ DRM_AUTH),
4624+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
4625+ DRM_AUTH)
4626+};
4627+
4628+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
4629+
4630+static void get_ci_info(struct drm_psb_private *dev_priv)
4631+{
4632+ struct pci_dev *pdev;
4633+
4634+ pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
4635+ if (pdev == NULL) {
4636+		/* If no PCI device is found, set size & addr to 0; no CI
4637+		 * share buffer can be created. */
4638+ dev_priv->ci_region_start = 0;
4639+ dev_priv->ci_region_size = 0;
4640+		printk(KERN_ERR "cannot find CI device, no CI share buffer\n");
4641+ return;
4642+ }
4643+
4644+ dev_priv->ci_region_start = pci_resource_start(pdev, 1);
4645+ dev_priv->ci_region_size = pci_resource_len(pdev, 1);
4646+
4647+	printk(KERN_INFO "ci_region_start 0x%08x ci_region_size %u\n",
4648+ dev_priv->ci_region_start, dev_priv->ci_region_size);
4649+
4650+ pci_dev_put(pdev);
4651+
4652+ return;
4653+}
4654+
4655+static int dri_library_name(struct drm_device *dev, char *buf)
4656+{
4657+ return snprintf(buf, PAGE_SIZE, "psb\n");
4658+}
4659+
4660+static void psb_set_uopt(struct drm_psb_uopt *uopt)
4661+{
4662+ uopt->clock_gating = drm_psb_clock_gating;
4663+}
4664+
4665+static void psb_lastclose(struct drm_device *dev)
4666+{
4667+ struct drm_psb_private *dev_priv =
4668+ (struct drm_psb_private *) dev->dev_private;
4669+
4670+ if (!dev->dev_private)
4671+ return;
4672+
4673+ if (dev_priv->ta_mem)
4674+ psb_ta_mem_unref(&dev_priv->ta_mem);
4675+ mutex_lock(&dev_priv->cmdbuf_mutex);
4676+ if (dev_priv->context.buffers) {
4677+ vfree(dev_priv->context.buffers);
4678+ dev_priv->context.buffers = NULL;
4679+ }
4680+ mutex_unlock(&dev_priv->cmdbuf_mutex);
4681+}
4682+
4683+static void psb_do_takedown(struct drm_device *dev)
4684+{
4685+ struct drm_psb_private *dev_priv =
4686+ (struct drm_psb_private *) dev->dev_private;
4687+ struct ttm_bo_device *bdev = &dev_priv->bdev;
4688+
4689+
4690+ if (dev_priv->have_mem_rastgeom) {
4691+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_RASTGEOM);
4692+ dev_priv->have_mem_rastgeom = 0;
4693+ }
4694+ if (dev_priv->have_mem_mmu) {
4695+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
4696+ dev_priv->have_mem_mmu = 0;
4697+ }
4698+ if (dev_priv->have_mem_aper) {
4699+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_APER);
4700+ dev_priv->have_mem_aper = 0;
4701+ }
4702+ if (dev_priv->have_tt) {
4703+ ttm_bo_clean_mm(bdev, TTM_PL_TT);
4704+ dev_priv->have_tt = 0;
4705+ }
4706+ if (dev_priv->have_vram) {
4707+ ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
4708+ dev_priv->have_vram = 0;
4709+ }
4710+ if (dev_priv->have_camera) {
4711+ ttm_bo_clean_mm(bdev, TTM_PL_CI);
4712+ dev_priv->have_camera = 0;
4713+ }
4714+
4715+ if (dev_priv->has_msvdx)
4716+ psb_msvdx_uninit(dev);
4717+
4718+ if (IS_MRST(dev)) {
4719+ if (dev_priv->has_topaz)
4720+ lnc_topaz_uninit(dev);
4721+ }
4722+
4723+ if (dev_priv->comm) {
4724+ kunmap(dev_priv->comm_page);
4725+ dev_priv->comm = NULL;
4726+ }
4727+ if (dev_priv->comm_page) {
4728+ __free_page(dev_priv->comm_page);
4729+ dev_priv->comm_page = NULL;
4730+ }
4731+}
4732+
4733+void psb_clockgating(struct drm_psb_private *dev_priv)
4734+{
4735+ uint32_t clock_gating;
4736+
4737+ if (dev_priv->uopt.clock_gating == 1) {
4738+ PSB_DEBUG_INIT("Disabling clock gating.\n");
4739+
4740+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4741+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
4742+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4743+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
4744+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4745+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
4746+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4747+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
4748+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4749+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
4750+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4751+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
4752+
4753+ } else if (dev_priv->uopt.clock_gating == 2) {
4754+ PSB_DEBUG_INIT("Enabling clock gating.\n");
4755+
4756+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4757+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
4758+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4759+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
4760+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4761+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
4762+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4763+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
4764+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4765+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
4766+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
4767+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
4768+ } else
4769+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
4770+
4771+#ifdef FIX_TG_2D_CLOCKGATE
4772+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
4773+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
4774+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
4775+#endif
4776+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
4777+ (void) PSB_RSGX32(PSB_CR_CLKGATECTL);
4778+}
4779+
4780+#define FB_REG06 0xD0810600
4781+#define FB_MIPI_DISABLE BIT11
4782+#define FB_REG09 0xD0810900
4783+#define FB_SKU_MASK (BIT12|BIT13|BIT14)
4784+#define FB_SKU_SHIFT 12
4785+#define FB_SKU_100 0
4786+#define FB_SKU_100L 1
4787+#define FB_SKU_83 2
4788+#if 1 /* FIXME remove it after PO */
4789+#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
4790+#define FB_GFX_CLK_DIVIDE_SHIFT 20
4791+#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
4792+#define FB_VED_CLK_DIVIDE_SHIFT 23
4793+#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
4794+#define FB_VEC_CLK_DIVIDE_SHIFT 25
4795+#endif /* FIXME remove it after PO */
4796+
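/*
 * Worked example of the decode below: FB_SKU_MASK covers bits 12-14,
 * so a fuse word of 0x00001000 yields (0x1000 & 0x7000) >> 12 == 1,
 * i.e. FB_SKU_100L (100 MHz LNC core clock).
 */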
4797+
4798+void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
4799+{
4800+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
4801+ uint32_t fuse_value = 0;
4802+ uint32_t fuse_value_tmp = 0;
4803+
4804+ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
4805+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
4806+
4807+ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
4808+
4809+ DRM_INFO("internal display is %s\n",
4810+		 dev_priv->iLVDS_enable ? "LVDS" : "MIPI");
4811+
4812+ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
4813+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
4814+
4815+ DRM_INFO("SKU values is 0x%x. \n", fuse_value);
4816+ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
4817+
4818+ switch (fuse_value_tmp) {
4819+ case FB_SKU_100:
4820+ DRM_INFO("SKU values is SKU_100. LNC core clock is 200MHz. \n");
4821+ dev_priv->sku_100 = true;
4822+ break;
4823+ case FB_SKU_100L:
4824+ DRM_INFO("SKU values is SKU_100L. LNC core clock is 100MHz. \n");
4825+ dev_priv->sku_100L = true;
4826+ break;
4827+ case FB_SKU_83:
4828+ DRM_INFO("SKU values is SKU_83. LNC core clock is 166MHz. \n");
4829+ dev_priv->sku_83 = true;
4830+ break;
4831+ default:
4832+ DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
4833+ fuse_value_tmp);
4834+ }
4835+
4836+#if 1 /* FIXME remove it after PO */
4837+ fuse_value_tmp = (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
4838+
4839+ switch (fuse_value_tmp) {
4840+ case 0:
4841+ DRM_INFO("Gfx clk : core clk = 1:1. \n");
4842+ break;
4843+ case 1:
4844+ DRM_INFO("Gfx clk : core clk = 4:3. \n");
4845+ break;
4846+ case 2:
4847+ DRM_INFO("Gfx clk : core clk = 8:5. \n");
4848+ break;
4849+ case 3:
4850+ DRM_INFO("Gfx clk : core clk = 2:1. \n");
4851+ break;
4852+ case 5:
4853+ DRM_INFO("Gfx clk : core clk = 8:3. \n");
4854+ break;
4855+ case 6:
4856+ DRM_INFO("Gfx clk : core clk = 16:5. \n");
4857+ break;
4858+ default:
4859+ DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
4860+ fuse_value_tmp);
4861+ }
4862+
4863+ fuse_value_tmp = (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
4864+
4865+ switch (fuse_value_tmp) {
4866+ case 0:
4867+ DRM_INFO("Ved clk : core clk = 1:1. \n");
4868+ break;
4869+ case 1:
4870+ DRM_INFO("Ved clk : core clk = 4:3. \n");
4871+ break;
4872+ case 2:
4873+ DRM_INFO("Ved clk : core clk = 8:5. \n");
4874+ break;
4875+ case 3:
4876+ DRM_INFO("Ved clk : core clk = 2:1. \n");
4877+ break;
4878+ default:
4879+ DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
4880+ fuse_value_tmp);
4881+ }
4882+
4883+ fuse_value_tmp = (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
4884+
4885+ switch (fuse_value_tmp) {
4886+ case 0:
4887+ DRM_INFO("Vec clk : core clk = 1:1. \n");
4888+ break;
4889+ case 1:
4890+ DRM_INFO("Vec clk : core clk = 4:3. \n");
4891+ break;
4892+ case 2:
4893+ DRM_INFO("Vec clk : core clk = 8:5. \n");
4894+ break;
4895+ case 3:
4896+ DRM_INFO("Vec clk : core clk = 2:1. \n");
4897+ break;
4898+ default:
4899+ DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
4900+ fuse_value_tmp);
4901+ }
4902+#endif /* FIXME remove it after PO */
4903+
4904+ return;
4905+}
4906+
4907+static int psb_do_init(struct drm_device *dev)
4908+{
4909+ struct drm_psb_private *dev_priv =
4910+ (struct drm_psb_private *) dev->dev_private;
4911+ struct ttm_bo_device *bdev = &dev_priv->bdev;
4912+ struct psb_gtt *pg = dev_priv->pg;
4913+
4914+ uint32_t stolen_gtt;
4915+ uint32_t tt_start;
4916+ uint32_t tt_pages;
4917+
4918+ int ret = -ENOMEM;
4919+
4920+ dev_priv->ta_mem_pages =
4921+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024,
4922+ PAGE_SIZE) >> PAGE_SHIFT;
4923+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
4924+ if (!dev_priv->comm_page)
4925+ goto out_err;
4926+
4927+ dev_priv->comm = kmap(dev_priv->comm_page);
4928+ memset((void *) dev_priv->comm, 0, PAGE_SIZE);
4929+
4930+ set_pages_uc(dev_priv->comm_page, 1);
4931+
4932+ /*
4933+ * Initialize sequence numbers for the different command
4934+ * submission mechanisms.
4935+ */
4936+
4937+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
4938+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
4939+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
4940+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
4941+
4942+ if (pg->gatt_start & 0x0FFFFFFF) {
4943+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
4944+ ret = -EINVAL;
4945+ goto out_err;
4946+ }
4947+
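	/*
	 * stolen_gtt below is the number of GTT pages of PTEs (4 bytes per
	 * PTE, rounded up to whole pages) needed to map the stolen area.
	 * Each such page holds 1024 PTEs and thus maps 1024 * PAGE_SIZE
	 * bytes, which is what the '* 1024' in the gatt_free_offset
	 * computation accounts for.
	 */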
4948+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
4949+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
4950+ stolen_gtt =
4951+ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
4952+
4953+ dev_priv->gatt_free_offset = pg->gatt_start +
4954+ (stolen_gtt << PAGE_SHIFT) * 1024;
4955+
4956+ /*
4957+ * Insert a cache-coherent communications page in mmu space
4958+ * just after the stolen area. Will be used for fencing etc.
4959+ */
4960+
4961+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
4962+ dev_priv->gatt_free_offset += PAGE_SIZE;
4963+
4964+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
4965+ &dev_priv->comm_page,
4966+ dev_priv->comm_mmu_offset, 1, 0, 0, 0);
4967+
4968+ if (ret)
4969+ goto out_err;
4970+
4971+ if (1 || drm_debug) {
4972+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
4973+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
4974+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
4975+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
4976+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
4977+ _PSB_CC_REVISION_MAJOR_SHIFT,
4978+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
4979+ _PSB_CC_REVISION_MINOR_SHIFT);
4980+ DRM_INFO
4981+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
4982+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
4983+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
4984+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
4985+ _PSB_CC_REVISION_DESIGNER_SHIFT);
4986+ }
4987+
4988+ spin_lock_init(&dev_priv->irqmask_lock);
4989+ dev_priv->fence0_irq_on = 0;
4990+
4991+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
4992+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
4993+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
4994+ tt_pages -= tt_start >> PAGE_SHIFT;
4995+
4996+ if (!ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0,
4997+ pg->vram_stolen_size >> PAGE_SHIFT)) {
4998+ dev_priv->have_vram = 1;
4999+ }
5000+
5001+ if (!ttm_bo_init_mm(bdev, TTM_PL_CI, 0,
5002+ dev_priv->ci_region_size >> PAGE_SHIFT)) {
5003+ dev_priv->have_camera = 1;
5004+ }
5005+
5006+ if (!ttm_bo_init_mm(bdev, TTM_PL_TT, tt_start >> PAGE_SHIFT,
5007+ tt_pages)) {
5008+ dev_priv->have_tt = 1;
5009+ }
5010+
5011+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU, 0x00000000,
5012+ (pg->gatt_start - PSB_MEM_MMU_START -
5013+ pg->ci_stolen_size) >> PAGE_SHIFT)) {
5014+ dev_priv->have_mem_mmu = 1;
5015+ }
5016+
5017+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
5018+ (PSB_MEM_MMU_START -
5019+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
5020+ dev_priv->have_mem_rastgeom = 1;
5021+ }
5022+#if 0
5023+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
5024+ if (!ttm_bo_init_mm
5025+ (bdev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
5026+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT, 1)) {
5027+ dev_priv->have_mem_aper = 1;
5028+ }
5029+ }
5030+#endif
5031+
5032+ PSB_DEBUG_INIT("Init MSVDX\n");
5033+ dev_priv->has_msvdx = 1;
5034+ if (psb_msvdx_init(dev))
5035+ dev_priv->has_msvdx = 0;
5036+
5037+ if (IS_MRST(dev)) {
5038+ PSB_DEBUG_INIT("Init Topaz\n");
5039+ dev_priv->has_topaz = 1;
5040+ if (lnc_topaz_init(dev))
5041+ dev_priv->has_topaz = 0;
5042+ }
5043+ return 0;
5044+out_err:
5045+ psb_do_takedown(dev);
5046+ return ret;
5047+}
5048+
5049+static int psb_driver_unload(struct drm_device *dev)
5050+{
5051+ struct drm_psb_private *dev_priv =
5052+ (struct drm_psb_private *) dev->dev_private;
5053+
5054+ if (drm_psb_no_fb == 0)
5055+ psb_modeset_cleanup(dev);
5056+
5057+ if (dev_priv) {
5058+ struct ttm_bo_device *bdev = &dev_priv->bdev;
5059+
5060+ psb_watchdog_takedown(dev_priv);
5061+ psb_do_takedown(dev);
5062+ psb_xhw_takedown(dev_priv);
5063+ psb_scheduler_takedown(&dev_priv->scheduler);
5064+
5065+ if (dev_priv->have_mem_pds) {
5066+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_PDS);
5067+ dev_priv->have_mem_pds = 0;
5068+ }
5069+ if (dev_priv->have_mem_kernel) {
5070+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_KERNEL);
5071+ dev_priv->have_mem_kernel = 0;
5072+ }
5073+
5074+ if (dev_priv->pf_pd) {
5075+ psb_mmu_free_pagedir(dev_priv->pf_pd);
5076+ dev_priv->pf_pd = NULL;
5077+ }
5078+ if (dev_priv->mmu) {
5079+ struct psb_gtt *pg = dev_priv->pg;
5080+
5081+ down_read(&pg->sem);
5082+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
5083+ (dev_priv->mmu),
5084+ pg->gatt_start,
5085+ pg->vram_stolen_size >>
5086+ PAGE_SHIFT);
5087+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
5088+ (dev_priv->mmu),
5089+ pg->gatt_start - pg->ci_stolen_size,
5090+ pg->ci_stolen_size >>
5091+ PAGE_SHIFT);
5092+ up_read(&pg->sem);
5093+ psb_mmu_driver_takedown(dev_priv->mmu);
5094+ dev_priv->mmu = NULL;
5095+ }
5096+ psb_gtt_takedown(dev_priv->pg, 1);
5097+ if (dev_priv->scratch_page) {
5098+ __free_page(dev_priv->scratch_page);
5099+ dev_priv->scratch_page = NULL;
5100+ }
5101+ if (dev_priv->has_bo_device) {
5102+ ttm_bo_device_release(&dev_priv->bdev);
5103+ dev_priv->has_bo_device = 0;
5104+ }
5105+ if (dev_priv->has_fence_device) {
5106+ ttm_fence_device_release(&dev_priv->fdev);
5107+ dev_priv->has_fence_device = 0;
5108+ }
5109+ if (dev_priv->vdc_reg) {
5110+ iounmap(dev_priv->vdc_reg);
5111+ dev_priv->vdc_reg = NULL;
5112+ }
5113+ if (dev_priv->sgx_reg) {
5114+ iounmap(dev_priv->sgx_reg);
5115+ dev_priv->sgx_reg = NULL;
5116+ }
5117+ if (dev_priv->msvdx_reg) {
5118+ iounmap(dev_priv->msvdx_reg);
5119+ dev_priv->msvdx_reg = NULL;
5120+ }
5121+
5122+ if (IS_MRST(dev)) {
5123+ if (dev_priv->topaz_reg) {
5124+ iounmap(dev_priv->topaz_reg);
5125+ dev_priv->topaz_reg = NULL;
5126+ }
5127+ }
5128+
5129+ if (dev_priv->tdev)
5130+ ttm_object_device_release(&dev_priv->tdev);
5131+
5132+ if (dev_priv->has_global)
5133+ psb_ttm_global_release(dev_priv);
5134+
5135+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
5136+ dev->dev_private = NULL;
5137+ }
5138+ return 0;
5139+}
5140+
5141+
5142+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
5143+{
5144+ struct drm_psb_private *dev_priv;
5145+ struct ttm_bo_device *bdev;
5146+ unsigned long resource_start;
5147+ struct psb_gtt *pg;
5148+ int ret = -ENOMEM;
5149+
5150+ if (IS_MRST(dev))
5151+ DRM_INFO("Run drivers on Moorestown platform!\n");
5152+ else
5153+ DRM_INFO("Run drivers on Poulsbo platform!\n");
5154+
5155+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
5156+ if (dev_priv == NULL)
5157+ return -ENOMEM;
5158+
5159+ dev_priv->dev = dev;
5160+ bdev = &dev_priv->bdev;
5161+
5162+ ret = psb_ttm_global_init(dev_priv);
5163+ if (unlikely(ret != 0))
5164+ goto out_err;
5165+ dev_priv->has_global = 1;
5166+
5167+ dev_priv->tdev = ttm_object_device_init
5168+ (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
5169+ if (unlikely(dev_priv->tdev == NULL))
5170+ goto out_err;
5171+
5172+ mutex_init(&dev_priv->temp_mem);
5173+ mutex_init(&dev_priv->cmdbuf_mutex);
5174+ mutex_init(&dev_priv->reset_mutex);
5175+ INIT_LIST_HEAD(&dev_priv->context.validate_list);
5176+ INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
5177+ psb_init_disallowed();
5178+
5179+#ifdef FIX_TG_16
5180+ atomic_set(&dev_priv->lock_2d, 0);
5181+ atomic_set(&dev_priv->ta_wait_2d, 0);
5182+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
5183+	atomic_set(&dev_priv->waiters_2d, 0);
5184+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
5185+#else
5186+ mutex_init(&dev_priv->mutex_2d);
5187+#endif
5188+
5189+ spin_lock_init(&dev_priv->reloc_lock);
5190+
5191+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
5192+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
5193+
5194+ dev->dev_private = (void *) dev_priv;
5195+ dev_priv->chipset = chipset;
5196+ psb_set_uopt(&dev_priv->uopt);
5197+
5198+ PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
5199+ psb_watchdog_init(dev_priv);
5200+ psb_scheduler_init(dev, &dev_priv->scheduler);
5201+
5202+
5203+ PSB_DEBUG_INIT("Mapping MMIO\n");
5204+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
5205+
5206+ if (IS_MRST(dev))
5207+ dev_priv->msvdx_reg =
5208+ ioremap(resource_start + MRST_MSVDX_OFFSET,
5209+ PSB_MSVDX_SIZE);
5210+ else
5211+ dev_priv->msvdx_reg =
5212+ ioremap(resource_start + PSB_MSVDX_OFFSET,
5213+ PSB_MSVDX_SIZE);
5214+
5215+ if (!dev_priv->msvdx_reg)
5216+ goto out_err;
5217+
5218+ if (IS_MRST(dev)) {
5219+ dev_priv->topaz_reg =
5220+ ioremap(resource_start + LNC_TOPAZ_OFFSET,
5221+ LNC_TOPAZ_SIZE);
5222+ if (!dev_priv->topaz_reg)
5223+ goto out_err;
5224+ }
5225+
5226+ dev_priv->vdc_reg =
5227+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
5228+ if (!dev_priv->vdc_reg)
5229+ goto out_err;
5230+
5231+ if (IS_MRST(dev))
5232+ dev_priv->sgx_reg =
5233+ ioremap(resource_start + MRST_SGX_OFFSET,
5234+ PSB_SGX_SIZE);
5235+ else
5236+ dev_priv->sgx_reg =
5237+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
5238+
5239+ if (!dev_priv->sgx_reg)
5240+ goto out_err;
5241+
5242+ if (IS_MRST(dev))
5243+ mrst_get_fuse_settings(dev_priv);
5244+
5245+ PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
5246+
5247+ get_ci_info(dev_priv);
5248+
5249+ psb_clockgating(dev_priv);
5250+
5251+ ret = psb_ttm_fence_device_init(&dev_priv->fdev);
5252+ if (unlikely(ret != 0))
5253+ goto out_err;
5254+
5255+ dev_priv->has_fence_device = 1;
5256+ ret = ttm_bo_device_init(bdev,
5257+ dev_priv->mem_global_ref.object,
5258+ &psb_ttm_bo_driver,
5259+ DRM_PSB_FILE_PAGE_OFFSET);
5260+ if (unlikely(ret != 0))
5261+ goto out_err;
5262+ dev_priv->has_bo_device = 1;
5263+ ttm_lock_init(&dev_priv->ttm_lock);
5264+
5265+ ret = -ENOMEM;
5266+
5267+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
5268+ if (!dev_priv->scratch_page)
5269+ goto out_err;
5270+
5271+ set_pages_uc(dev_priv->scratch_page, 1);
5272+
5273+ dev_priv->pg = psb_gtt_alloc(dev);
5274+ if (!dev_priv->pg)
5275+ goto out_err;
5276+
5277+ ret = psb_gtt_init(dev_priv->pg, 0);
5278+ if (ret)
5279+ goto out_err;
5280+
5281+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
5282+ drm_psb_trap_pagefaults, 0,
5283+ dev_priv);
5284+ if (!dev_priv->mmu)
5285+ goto out_err;
5286+
5287+ pg = dev_priv->pg;
5288+
5289+ /*
5290+ * Make sgx MMU aware of the stolen memory area we call VRAM.
5291+ */
5292+
5293+ down_read(&pg->sem);
5294+ ret =
5295+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
5296+ (dev_priv->mmu),
5297+ pg->stolen_base >> PAGE_SHIFT,
5298+ pg->gatt_start,
5299+ pg->vram_stolen_size >> PAGE_SHIFT, 0);
5300+ up_read(&pg->sem);
5301+ if (ret)
5302+ goto out_err;
5303+
5304+ /*
5305+ * Make sgx MMU aware of the CI stolen memory area.
5306+ */
5307+
5308+ down_read(&pg->sem);
5309+ ret =
5310+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
5311+ (dev_priv->mmu),
5312+ dev_priv->ci_region_start >> PAGE_SHIFT,
5313+ pg->gatt_start - pg->ci_stolen_size,
5314+ pg->ci_stolen_size >> PAGE_SHIFT, 0);
5315+ up_read(&pg->sem);
5316+ if (ret)
5317+ goto out_err;
5318+
5319+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
5320+ if (!dev_priv->pf_pd)
5321+ goto out_err;
5322+
5323+ /*
5324+ * Make all presumably unused requestors page-fault by making them
5325+ * use context 1 which does not have any valid mappings.
5326+ */
5327+
5328+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
5329+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
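+ /* Read back to flush the posted bank writes before enabling requestors. */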
5330+ PSB_RSGX32(PSB_CR_BIF_BANK1);
5331+
5332+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
5333+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
5334+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
5335+
5336+ psb_init_2d(dev_priv);
5337+
5338+ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_KERNEL, 0x00000000,
5339+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
5340+ >> PAGE_SHIFT);
5341+ if (ret)
5342+ goto out_err;
5343+ dev_priv->have_mem_kernel = 1;
5344+
5345+ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_PDS, 0x00000000,
5346+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
5347+ >> PAGE_SHIFT);
5348+ if (ret)
5349+ goto out_err;
5350+ dev_priv->have_mem_pds = 1;
5351+
5352+ PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
5353+
5354+ ret = psb_do_init(dev);
5355+ if (ret)
5356+ return ret;
5357+
5358+ ret = psb_xhw_init(dev);
5359+ if (ret)
5360+ return ret;
5361+
5362+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
5363+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
5364+
5365+ psb_init_ospm(dev_priv);
5366+
5367+ if (drm_psb_no_fb == 0) {
5368+ psb_modeset_init(dev);
5369+ drm_helper_initial_config(dev, false);
5370+ }
5371+
5372+ /* Initialize MSI for MRST */
5373+ if (IS_MRST(dev)) {
5374+ if (pci_enable_msi(dev->pdev)) {
5375+ DRM_ERROR("Enable MSI for MRST failed!\n");
5376+ } else {
5377+ PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
5378+ dev->pdev->irq);
5379+ /* pci_write_config_word(pdev, 0x04, 0x07); */
5380+ }
5381+ }
5382+
5383+ /* Put SGX into low power mode */
5384+ if (drm_psb_ospm && IS_MRST(dev))
5385+  if (psb_try_power_down_sgx(dev))
5386+   PSB_DEBUG_PM("failed to put SGX into low power mode\n");
5387+ return 0;
5388+out_err:
5389+ psb_driver_unload(dev);
5390+ return ret;
5391+}
5392+
5393+int psb_driver_device_is_agp(struct drm_device *dev)
5394+{
5395+ return 0;
5396+}
5397+
5398+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
5399+{
5400+#ifdef PSB_FIXME
5401+ struct drm_psb_private *dev_priv =
5402+ (struct drm_psb_private *) dev->dev_private;
5403+ struct ttm_fence_device *fdev = &dev_priv->fdev;
5404+ struct ttm_fence_class_manager *fc =
5405+ &fdev->fence_class[PSB_ENGINE_VIDEO];
5406+ struct ttm_fence_object *fence;
5407+ int ret = 0;
5408+ int signaled = 0;
5409+ int count = 0;
5410+ unsigned long _end = jiffies + 3 * DRM_HZ;
5411+
5412+ PSB_DEBUG_GENERAL
5413+ ("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
5414+
5415+ /* Set the msvdx-reset flag here. */
5416+ dev_priv->msvdx_needs_reset = 1;
5417+
5418+ /* Ensure that all pending IRQs are serviced. */
5419+
5420+ /*
5421+ * Save the last MSVDX fence in dev_priv instead!!!
5422+ * fc->write_locked must be held while accessing a fence from the ring.
5423+ */
5424+
5425+ list_for_each_entry(fence, &fc->ring, ring) {
5426+ count++;
5427+ do {
5428+ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
5429+ (signaled =
5430+ ttm_fence_object_signaled(fence,
5431+ DRM_FENCE_TYPE_EXE)));
5432+ if (signaled)
5433+ break;
5434+ if (time_after_eq(jiffies, _end))
5435+ PSB_DEBUG_GENERAL
5436+ ("MSVDXACPI: fence 0x%x didn't get"
5437+ " signaled for 3 secs; "
5438+ "we will suspend anyways\n",
5439+ (unsigned int) fence);
5440+ } while (ret == -EINTR);
5441+
5442+ }
5443+ PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
5444+ count);
5445+#endif
5446+ return 0;
5447+}
5448+
5449+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
5450+{
5451+ struct drm_device *dev = pci_get_drvdata(pdev);
5452+ struct drm_psb_private *dev_priv =
5453+ (struct drm_psb_private *) dev->dev_private;
5454+
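+ /* Refuse to suspend while another thread holds the SGX semaphore. */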
5455+ if (!down_write_trylock(&dev_priv->sgx_sem))
5456+ return -EBUSY;
5457+ if (dev_priv->graphics_state != PSB_PWR_STATE_D0i0)
5458+  PSB_DEBUG_PM("Not suspending from D0i0\n");
5459+ if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
5460+ goto exit;
5461+ if (drm_psb_no_fb == 0) {
5462+ psbfb_suspend(dev);
5463+ psb_modeset_cleanup(dev);
5464+ }
5465+
5466+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
5467+ (void) psb_idle_3d(dev);
5468+ (void) psb_idle_2d(dev);
5469+ flush_scheduled_work();
5470+
5471+ if (dev_priv->has_msvdx)
5472+ psb_prepare_msvdx_suspend(dev);
5473+
5474+ if (dev_priv->has_topaz)
5475+ lnc_prepare_topaz_suspend(dev);
5476+
5477+#ifdef OSPM_STAT
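+ /* Charge the elapsed time to the power state we are leaving. */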
5478+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
5479+ dev_priv->gfx_d0i0_time += jiffies - dev_priv->gfx_last_mode_change;
5480+ else if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
5481+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
5482+ else
5483+ PSB_DEBUG_PM("suspend: illegal previous power state\n");
5484+ dev_priv->gfx_last_mode_change = jiffies;
5485+ dev_priv->gfx_d3_cnt++;
5486+#endif
5487+
5488+ dev_priv->graphics_state = PSB_PWR_STATE_D3;
5489+ dev_priv->msvdx_state = PSB_PWR_STATE_D3;
5490+ dev_priv->topaz_power_state = LNC_TOPAZ_POWEROFF;
5491+ pci_save_state(pdev);
5492+ pci_disable_device(pdev);
5493+ pci_set_power_state(pdev, PCI_D3hot);
5494+ psb_down_island_power(dev, PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND
5495+ | PSB_VIDEO_DEC_ISLAND);
5496+exit:
5497+ up_write(&dev_priv->sgx_sem);
5498+ return 0;
5499+}
5500+
5501+static int psb_resume(struct pci_dev *pdev)
5502+{
5503+ struct drm_device *dev = pci_get_drvdata(pdev);
5504+ struct drm_psb_private *dev_priv =
5505+ (struct drm_psb_private *) dev->dev_private;
5506+ struct psb_gtt *pg = dev_priv->pg;
5507+ int ret;
5508+ if (dev_priv->graphics_state != PSB_PWR_STATE_D3)
5509+ return 0;
5510+
5511+ psb_up_island_power(dev, PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND
5512+ | PSB_VIDEO_DEC_ISLAND);
5513+ pci_set_power_state(pdev, PCI_D0);
5514+ pci_restore_state(pdev);
5515+ ret = pci_enable_device(pdev);
5516+ if (ret)
5517+ return ret;
5518+
5519+ DRM_ERROR("FIXME: topaz's resume is not ready..\n");
5520+#ifdef OSPM_STAT
5521+ if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
5522+ dev_priv->gfx_d3_time += jiffies - dev_priv->gfx_last_mode_change;
5523+ else
5524+ PSB_DEBUG_PM("resume :illegal previous power state\n");
5525+ dev_priv->gfx_last_mode_change = jiffies;
5526+ dev_priv->gfx_d0i0_cnt++;
5527+#endif
5528+ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
5529+ dev_priv->msvdx_state = PSB_PWR_STATE_D0i0;
5530+ dev_priv->topaz_power_state = LNC_TOPAZ_POWERON;
5531+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
5532+ dev_priv->msvdx_needs_reset = 1;
5533+
5534+ lnc_prepare_topaz_resume(dev);
5535+
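+ /* Re-enable the GTT page table and GMCH; the GTT contents survived in memory. */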
5536+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
5537+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
5538+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
5539+
5540+ /*
5541+ * Don't reinitialize the GTT as it is unnecessary. The gtt is
5542+ * stored in memory so it will automatically be restored. All
5543+ * we need to do is restore the PGETBL_CTL which we already do
5544+ * above.
5545+ */
5546+
5547+ /* psb_gtt_init(dev_priv->pg, 1); */
5548+
5549+ /*
5550+ * The SGX loses its register contents.
5551+ * Restore BIF registers. The MMU page tables are
5552+ * "normal" pages, so their contents should be kept.
5553+ */
5554+
5555+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
5556+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
5557+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
5558+ PSB_RSGX32(PSB_CR_BIF_BANK1);
5559+
5560+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
5561+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
5562+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
5563+
5564+ /*
5565+ * 2D base registers.
5566+ */
5567+ psb_init_2d(dev_priv);
5568+
5569+ /*
5570+ * Persistent 3D base registers and USSE base registers.
5571+ */
5572+
5573+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
5574+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
5575+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
5576+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
5577+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
5578+
5579+ /*
5580+ * Now, re-initialize the 3D engine.
5581+ */
5582+
5583+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
5584+
5585+ psb_scheduler_ta_mem_check(dev_priv);
5586+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
5587+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
5588+ PSB_TA_MEM_FLAG_TA |
5589+ PSB_TA_MEM_FLAG_RASTER |
5590+ PSB_TA_MEM_FLAG_HOSTA |
5591+ PSB_TA_MEM_FLAG_HOSTD |
5592+ PSB_TA_MEM_FLAG_INIT,
5593+ dev_priv->ta_mem->ta_memory->offset,
5594+ dev_priv->ta_mem->hw_data->offset,
5595+ dev_priv->ta_mem->hw_cookie);
5596+ }
5597+
5598+ if (drm_psb_no_fb == 0) {
5599+ psb_modeset_init(dev);
5600+ drm_helper_initial_config(dev, false);
5601+ psbfb_resume(dev);
5602+ }
5603+ return 0;
5604+}
5605+
5606+int psb_extension_ioctl(struct drm_device *dev, void *data,
5607+ struct drm_file *file_priv)
5608+{
5609+ union drm_psb_extension_arg *arg = data;
5610+ struct drm_psb_extension_rep *rep = &arg->rep;
5611+
5612+ if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
5613+ rep->exists = 1;
5614+ rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
5615+ rep->sarea_offset = 0;
5616+ rep->major = 1;
5617+ rep->minor = 0;
5618+ rep->pl = 0;
5619+ return 0;
5620+ }
5621+ if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
5622+ rep->exists = 1;
5623+ rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
5624+ rep->sarea_offset = 0;
5625+ rep->major = 1;
5626+ rep->minor = 0;
5627+ rep->pl = 0;
5628+ return 0;
5629+ }
5630+ if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
5631+ rep->exists = 1;
5632+ rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
5633+ rep->sarea_offset = 0;
5634+ rep->major = 1;
5635+ rep->minor = 0;
5636+ rep->pl = 0;
5637+ return 0;
5638+ }
5639+
5640+ rep->exists = 0;
5641+ return 0;
5642+}
5643+
5644+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
5645+ struct drm_file *file_priv)
5646+{
5647+ struct drm_psb_private *dev_priv = psb_priv(dev);
5648+ struct ttm_bo_device *bdev = &dev_priv->bdev;
5649+ struct ttm_mem_type_manager *man;
5650+ int clean;
5651+ int ret;
5652+
5653+ ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
5654+ psb_fpriv(file_priv)->tfile);
5655+ if (unlikely(ret != 0))
5656+ return ret;
5657+
5658+ /*
5659+ * Clean VRAM and TT for fbdev.
5660+ */
5661+
5662+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
5663+ if (unlikely(ret != 0))
5664+ goto out_unlock;
5665+
5666+ man = &bdev->man[TTM_PL_VRAM];
5667+ spin_lock(&bdev->lru_lock);
5668+ clean = drm_mm_clean(&man->manager);
5669+ spin_unlock(&bdev->lru_lock);
5670+ if (unlikely(!clean))
5671+ DRM_INFO("Notice: VRAM was not clean after VT switch, if you are running fbdev please ignore.\n");
5672+
5673+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
5674+ if (unlikely(ret != 0))
5675+ goto out_unlock;
5676+
5677+ man = &bdev->man[TTM_PL_TT];
5678+ spin_lock(&bdev->lru_lock);
5679+ clean = drm_mm_clean(&man->manager);
5680+ spin_unlock(&bdev->lru_lock);
5681+ if (unlikely(!clean))
5682+ DRM_INFO("Warning: GATT was not clean after VT switch.\n");
5683+
5684+ ttm_bo_swapout_all(&dev_priv->bdev);
5685+
5686+ return 0;
5687+out_unlock:
5688+ (void) ttm_write_unlock(&dev_priv->ttm_lock,
5689+ psb_fpriv(file_priv)->tfile);
5690+ return ret;
5691+}
5692+
5693+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
5694+ struct drm_file *file_priv)
5695+{
5696+ struct drm_psb_private *dev_priv = psb_priv(dev);
5697+ return ttm_write_unlock(&dev_priv->ttm_lock,
5698+ psb_fpriv(file_priv)->tfile);
5699+}
5700+
5701+/* always available as we are SIGIO'd */
5702+static unsigned int psb_poll(struct file *filp,
5703+ struct poll_table_struct *wait)
5704+{
5705+ return POLLIN | POLLRDNORM;
5706+}
5707+
5708+int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
5709+{
5710+ /*psb_check_power_state(dev, PSB_DEVICE_SGX);*/
5711+ return 0;
5712+}
5713+
5714+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
5715+ unsigned long arg)
5716+{
5717+ struct drm_file *file_priv = filp->private_data;
5718+ struct drm_device *dev = file_priv->minor->dev;
5719+ unsigned int nr = DRM_IOCTL_NR(cmd);
5720+ long ret;
5721+
5722+ /*
5723+ * The driver private ioctls and TTM ioctls should be
5724+ * thread-safe.
5725+ */
5726+
5727+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
5728+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
5729+ struct drm_ioctl_desc *ioctl = &psb_ioctls[nr - DRM_COMMAND_BASE];
5730+
5731+ if (unlikely(ioctl->cmd != cmd)) {
5732+ DRM_ERROR("Invalid drm command %d\n",
5733+ nr - DRM_COMMAND_BASE);
5734+ return -EINVAL;
5735+ }
5736+
5737+ return drm_unlocked_ioctl(filp, cmd, arg);
5738+ }
5739+ /*
5740+ * Not all old drm ioctls are thread-safe.
5741+ */
5742+
5743+ lock_kernel();
5744+ ret = drm_unlocked_ioctl(filp, cmd, arg);
5745+ unlock_kernel();
5746+ return ret;
5747+}
5748+
5749+static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
5750+ int *eof, void *data)
5751+{
5752+ struct drm_minor *minor = (struct drm_minor *) data;
5753+ struct drm_device *dev = minor->dev;
5754+ struct drm_psb_private *dev_priv =
5755+ (struct drm_psb_private *) dev->dev_private;
5756+ int len = 0;
5757+ unsigned long d0i0 = 0;
5758+ unsigned long d0i3 = 0;
5759+ unsigned long d3 = 0;
5760+ *start = &buf[offset];
5761+ *eof = 0;
5762+ DRM_PROC_PRINT("D0i3:%s ", drm_psb_ospm ? "enabled" : "disabled");
5763+ switch (dev_priv->graphics_state) {
5764+ case PSB_PWR_STATE_D0i0:
5765+ DRM_PROC_PRINT("GFX:%s\n", "D0i0");
5766+ break;
5767+ case PSB_PWR_STATE_D0i3:
5768+ DRM_PROC_PRINT("GFX:%s\n", "D0i3");
5769+ break;
5770+ case PSB_PWR_STATE_D3:
5771+ DRM_PROC_PRINT("GFX:%s\n", "D3");
5772+ break;
5773+ default:
5774+ DRM_PROC_PRINT("GFX:%s\n", "unkown");
5775+ }
5776+#ifdef OSPM_STAT
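+ /* Convert accumulated jiffies to milliseconds for reporting. */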
5777+ d0i0 = dev_priv->gfx_d0i0_time * 1000 / HZ;
5778+ d0i3 = dev_priv->gfx_d0i3_time * 1000 / HZ;
5779+ d3 = dev_priv->gfx_d3_time * 1000 / HZ;
5780+ switch (dev_priv->graphics_state) {
5781+ case PSB_PWR_STATE_D0i0:
5782+ d0i0 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
5783+ break;
5784+ case PSB_PWR_STATE_D0i3:
5785+ d0i3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
5786+ break;
5787+ case PSB_PWR_STATE_D3:
5788+ d3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
5789+ break;
5790+ }
5791+ DRM_PROC_PRINT("GFX(cnt/ms):\n");
5792+ DRM_PROC_PRINT("D0i0:%lu/%lu, D0i3:%lu/%lu, D3:%lu/%lu \n",
5793+ dev_priv->gfx_d0i0_cnt, d0i0, dev_priv->gfx_d0i3_cnt, d0i3,
5794+ dev_priv->gfx_d3_cnt, d3);
5795+#endif
5796+ if (len > request + offset)
5797+ return request;
5798+ *eof = 1;
5799+ return len - offset;
5800+}
5801+
5802+static int psb_proc_init(struct drm_minor *minor)
5803+{
5804+ struct proc_dir_entry *ent;
5805+ if (!minor->dev_root)
5806+ return 0;
5807+ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->dev_root,
5808+ psb_ospm_read, minor);
5809+ if (ent)
5810+ return 0;
5811+ else
5812+ return -1;
5813+}
5814+
5815+static void psb_proc_cleanup(struct drm_minor *minor)
5816+{
5817+ if (!minor->dev_root)
5818+ return;
5819+ remove_proc_entry(OSPM_PROC_ENTRY, minor->dev_root);
5820+ return;
5821+}
5822+
5823+static struct drm_driver driver = {
5824+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
5825+ .load = psb_driver_load,
5826+ .unload = psb_driver_unload,
5827+ .dri_library_name = dri_library_name,
5828+ .get_reg_ofs = drm_core_get_reg_ofs,
5829+ .ioctls = psb_ioctls,
5830+ .device_is_agp = psb_driver_device_is_agp,
5831+ .irq_preinstall = psb_irq_preinstall,
5832+ .irq_postinstall = psb_irq_postinstall,
5833+ .irq_uninstall = psb_irq_uninstall,
5834+ .irq_handler = psb_irq_handler,
5835+ .firstopen = NULL,
5836+ .lastclose = psb_lastclose,
5837+ .open = psb_driver_open,
5838+ .proc_init = psb_proc_init,
5839+ .proc_cleanup = psb_proc_cleanup,
5840+ .fops = {
5841+ .owner = THIS_MODULE,
5842+ .open = psb_open,
5843+ .release = psb_release,
5844+ .unlocked_ioctl = psb_unlocked_ioctl,
5845+ .mmap = psb_mmap,
5846+ .poll = psb_poll,
5847+ .fasync = drm_fasync,
5848+ },
5849+ .pci_driver = {
5850+ .name = DRIVER_NAME,
5851+ .id_table = pciidlist,
5852+ .resume = psb_resume,
5853+ .suspend = psb_suspend,
5854+ },
5855+ .name = DRIVER_NAME,
5856+ .desc = DRIVER_DESC,
5857+ .date = PSB_DRM_DRIVER_DATE,
5858+ .major = PSB_DRM_DRIVER_MAJOR,
5859+ .minor = PSB_DRM_DRIVER_MINOR,
5860+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
5861+};
5862+
5863+static int __init psb_init(void)
5864+{
5865+ driver.num_ioctls = psb_max_ioctl;
5866+
5867+ return drm_init(&driver);
5868+}
5869+
5870+static void __exit psb_exit(void)
5871+{
5872+ drm_exit(&driver);
5873+}
5874+
5875+module_init(psb_init);
5876+module_exit(psb_exit);
5877+
5878+MODULE_AUTHOR(DRIVER_AUTHOR);
5879+MODULE_DESCRIPTION(DRIVER_DESC);
5880+MODULE_LICENSE("GPL");
5881diff -uNr a/drivers/gpu/drm/psb/psb_drv.h b/drivers/gpu/drm/psb/psb_drv.h
5882--- a/drivers/gpu/drm/psb/psb_drv.h 1969-12-31 16:00:00.000000000 -0800
5883+++ b/drivers/gpu/drm/psb/psb_drv.h 2009-04-07 13:28:38.000000000 -0700
5884@@ -0,0 +1,1129 @@
5885+/**************************************************************************
5886+ *Copyright (c) 2007-2008, Intel Corporation.
5887+ *All Rights Reserved.
5888+ *
5889+ *This program is free software; you can redistribute it and/or modify it
5890+ *under the terms and conditions of the GNU General Public License,
5891+ *version 2, as published by the Free Software Foundation.
5892+ *
5893+ *This program is distributed in the hope it will be useful, but WITHOUT
5894+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
5895+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
5896+ *more details.
5897+ *
5898+ *You should have received a copy of the GNU General Public License along with
5899+ *this program; if not, write to the Free Software Foundation, Inc.,
5900+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5901+ *
5902+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
5903+ *develop this driver.
5904+ *
5905+ **************************************************************************/
5906+/*
5907+ */
5908+#ifndef _PSB_DRV_H_
5909+#define _PSB_DRV_H_
5910+
5911+#include <drm/drmP.h>
5912+#include "psb_drm.h"
5913+#include "psb_reg.h"
5914+#include "psb_schedule.h"
5915+#include "psb_intel_drv.h"
5916+#include "ttm/ttm_object.h"
5917+#include "ttm/ttm_fence_driver.h"
5918+#include "ttm/ttm_bo_driver.h"
5919+#include "ttm/ttm_lock.h"
5920+
5921+extern struct ttm_bo_driver psb_ttm_bo_driver;
5922+
5923+enum {
5924+ CHIP_PSB_8108 = 0,
5925+ CHIP_PSB_8109 = 1,
5926+ CHIP_MRST_4100 = 2
5927+};
5928+
5929+/*
5930+ *Hardware bugfixes
5931+ */
5932+
5933+#define FIX_TG_16
5934+#define FIX_TG_2D_CLOCKGATE
5935+#define OSPM_STAT
5936+
5937+#define DRIVER_NAME "psb"
5938+#define DRIVER_DESC "drm driver for the Intel GMA500"
5939+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
5940+#define OSPM_PROC_ENTRY "ospm"
5941+
5942+#define PSB_DRM_DRIVER_DATE "2009-02-09"
5943+#define PSB_DRM_DRIVER_MAJOR 8
5944+#define PSB_DRM_DRIVER_MINOR 0
5945+#define PSB_DRM_DRIVER_PATCHLEVEL 0
5946+
5947+/*
5948+ *TTM driver private offsets.
5949+ */
5950+
5951+#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
5952+
5953+#define PSB_OBJECT_HASH_ORDER 13
5954+#define PSB_FILE_OBJECT_HASH_ORDER 12
5955+#define PSB_BO_HASH_ORDER 12
5956+
5957+#define PSB_VDC_OFFSET 0x00000000
5958+#define PSB_VDC_SIZE 0x000080000
5959+#define MRST_MMIO_SIZE 0x0000C0000
5960+#define PSB_SGX_SIZE 0x8000
5961+#define PSB_SGX_OFFSET 0x00040000
5962+#define MRST_SGX_OFFSET 0x00080000
5963+#define PSB_MMIO_RESOURCE 0
5964+#define PSB_GATT_RESOURCE 2
5965+#define PSB_GTT_RESOURCE 3
5966+#define PSB_GMCH_CTRL 0x52
5967+#define PSB_BSM 0x5C
5968+#define _PSB_GMCH_ENABLED 0x4
5969+#define PSB_PGETBL_CTL 0x2020
5970+#define _PSB_PGETBL_ENABLED 0x00000001
5971+#define PSB_SGX_2D_SLAVE_PORT 0x4000
5972+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
5973+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
5974+#define PSB_NUM_VALIDATE_BUFFERS 2048
5975+#define PSB_MEM_KERNEL_START 0x10000000
5976+#define PSB_MEM_PDS_START 0x20000000
5977+#define PSB_MEM_MMU_START 0x40000000
5978+
5979+#define DRM_PSB_MEM_KERNEL TTM_PL_PRIV0
5980+#define DRM_PSB_FLAG_MEM_KERNEL TTM_PL_FLAG_PRIV0
5981+
5982+/*
5983+ *Flags for external memory type field.
5984+ */
5985+
5986+#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */
5987+#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
5988+/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
5989+#define PSB_MSVDX_SIZE 0x10000
5990+
5991+#define LNC_TOPAZ_OFFSET 0xA0000
5992+#define LNC_TOPAZ_SIZE 0x10000
5993+
5994+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
5995+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
5996+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
5997+
5998+/*
5999+ *PTE's and PDE's
6000+ */
6001+
6002+#define PSB_PDE_MASK 0x003FFFFF
6003+#define PSB_PDE_SHIFT 22
6004+#define PSB_PTE_SHIFT 12
6005+
6006+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
6007+#define PSB_PTE_WO 0x0002 /* Write only */
6008+#define PSB_PTE_RO 0x0004 /* Read only */
6009+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
6010+
6011+/*
6012+ *VDC registers and bits
6013+ */
6014+#define PSB_HWSTAM 0x2098
6015+#define PSB_INSTPM 0x20C0
6016+#define PSB_INT_IDENTITY_R 0x20A4
6017+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
6018+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
6019+#define _PSB_IRQ_SGX_FLAG (1<<18)
6020+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
6021+#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
6022+#define PSB_INT_MASK_R 0x20A8
6023+#define PSB_INT_ENABLE_R 0x20A0
6024+#define PSB_PIPEASTAT 0x70024
6025+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
6026+#define _PSB_VBLANK_CLEAR (1 << 1)
6027+#define PSB_PIPEBSTAT 0x71024
6028+
6029+#define _PSB_MMU_ER_MASK 0x0001FF00
6030+#define _PSB_MMU_ER_HOST (1 << 16)
6031+#define GPIOA 0x5010
6032+#define GPIOB 0x5014
6033+#define GPIOC 0x5018
6034+#define GPIOD 0x501c
6035+#define GPIOE 0x5020
6036+#define GPIOF 0x5024
6037+#define GPIOG 0x5028
6038+#define GPIOH 0x502c
6039+#define GPIO_CLOCK_DIR_MASK (1 << 0)
6040+#define GPIO_CLOCK_DIR_IN (0 << 1)
6041+#define GPIO_CLOCK_DIR_OUT (1 << 1)
6042+#define GPIO_CLOCK_VAL_MASK (1 << 2)
6043+#define GPIO_CLOCK_VAL_OUT (1 << 3)
6044+#define GPIO_CLOCK_VAL_IN (1 << 4)
6045+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
6046+#define GPIO_DATA_DIR_MASK (1 << 8)
6047+#define GPIO_DATA_DIR_IN (0 << 9)
6048+#define GPIO_DATA_DIR_OUT (1 << 9)
6049+#define GPIO_DATA_VAL_MASK (1 << 10)
6050+#define GPIO_DATA_VAL_OUT (1 << 11)
6051+#define GPIO_DATA_VAL_IN (1 << 12)
6052+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
6053+
6054+#define VCLK_DIVISOR_VGA0 0x6000
6055+#define VCLK_DIVISOR_VGA1 0x6004
6056+#define VCLK_POST_DIV 0x6010
6057+
6058+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
6059+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
6060+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
6061+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
6062+#define PSB_COMM_USER_IRQ (1024 >> 2)
6063+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
6064+#define PSB_COMM_FW (2048 >> 2)
6065+
6066+#define PSB_UIRQ_VISTEST 1
6067+#define PSB_UIRQ_OOM_REPLY 2
6068+#define PSB_UIRQ_FIRE_TA_REPLY 3
6069+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
6070+
6071+#define PSB_2D_SIZE (256*1024*1024)
6072+#define PSB_MAX_RELOC_PAGES 1024
6073+
6074+#define PSB_LOW_REG_OFFS 0x0204
6075+#define PSB_HIGH_REG_OFFS 0x0600
6076+
6077+#define PSB_NUM_VBLANKS 2
6078+
6092+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
6093+
6094+#define PSB_PWR_STATE_MASK 0x0F
6095+#define PSB_PWR_ACTION_MASK 0xF0
6096+#define PSB_PWR_STATE_D0i0 0x1
6097+#define PSB_PWR_STATE_D0i3 0x2
6098+#define PSB_PWR_STATE_D3 0x3
6099+#define PSB_PWR_ACTION_DOWN 0x10 /* Need to power down */
6100+#define PSB_PWR_ACTION_UP 0x20 /* Need to power up */
6101+#define PSB_GRAPHICS_ISLAND 0x1
6102+#define PSB_VIDEO_ENC_ISLAND 0x2
6103+#define PSB_VIDEO_DEC_ISLAND 0x4
6104+#define LNC_TOPAZ_POWERON 0x1
6105+#define LNC_TOPAZ_POWEROFF 0x0
6106+
6107+/*
6108+ *User options.
6109+ */
6110+
6111+struct drm_psb_uopt {
6112+ int clock_gating;
6113+};
6114+
6115+/**
6116+ *struct psb_context
6117+ *
6118+ *@buffers: array of pre-allocated validate buffers.
6119+ *@used_buffers: number of buffers in @buffers array currently in use.
6120+ *@validate_buffer: buffers validated from user-space.
6121+ *@kern_validate_buffers : buffers validated from kernel-space.
6122+ *@fence_flags : Fence flags to be used for fence creation.
6123+ *
6124+ *This structure is used during execbuf validation.
6125+ */
6126+
6127+struct psb_context {
6128+ struct psb_validate_buffer *buffers;
6129+ uint32_t used_buffers;
6130+ struct list_head validate_list;
6131+ struct list_head kern_validate_list;
6132+ uint32_t fence_types;
6133+ uint32_t val_seq;
6134+};
6135+
6136+struct psb_gtt {
6137+ struct drm_device *dev;
6138+ int initialized;
6139+ uint32_t gatt_start;
6140+ uint32_t gtt_start;
6141+ uint32_t gtt_phys_start;
6142+ unsigned gtt_pages;
6143+ unsigned gatt_pages;
6144+ uint32_t stolen_base;
6145+ uint32_t pge_ctl;
6146+ u16 gmch_ctrl;
6147+ unsigned long stolen_size;
6148+ unsigned long vram_stolen_size;
6149+ unsigned long ci_stolen_size;
6150+ unsigned long rar_stolen_size;
6151+ uint32_t *gtt_map;
6152+ struct rw_semaphore sem;
6153+};
6154+
6155+struct psb_use_base {
6156+ struct list_head head;
6157+ struct ttm_fence_object *fence;
6158+ unsigned int reg;
6159+ unsigned long offset;
6160+ unsigned int dm;
6161+};
6162+
6163+struct psb_validate_buffer;
6164+
6165+struct psb_msvdx_cmd_queue {
6166+ struct list_head head;
6167+ void *cmd;
6168+ unsigned long cmd_size;
6169+ uint32_t sequence;
6170+};
6171+
6172+
6173+struct drm_psb_private {
6174+
6175+ /*
6176+ *TTM Glue.
6177+ */
6178+
6179+ struct drm_global_reference mem_global_ref;
6180+ int has_global;
6181+
6182+ struct drm_device *dev;
6183+ struct ttm_object_device *tdev;
6184+ struct ttm_fence_device fdev;
6185+ struct ttm_bo_device bdev;
6186+ struct ttm_lock ttm_lock;
6187+ struct vm_operations_struct *ttm_vm_ops;
6188+ int has_fence_device;
6189+ int has_bo_device;
6190+
6191+ unsigned long chipset;
6192+
6193+ struct psb_xhw_buf resume_buf;
6194+ struct drm_psb_dev_info_arg dev_info;
6195+ struct drm_psb_uopt uopt;
6196+
6197+ struct psb_gtt *pg;
6198+
6199+ struct page *scratch_page;
6200+ struct page *comm_page;
6201+ /* The volatile qualifier was dropped; its use here is discouraged. */
6202+ uint32_t *comm;
6203+ uint32_t comm_mmu_offset;
6204+ uint32_t mmu_2d_offset;
6205+ uint32_t sequence[PSB_NUM_ENGINES];
6206+ uint32_t last_sequence[PSB_NUM_ENGINES];
6207+ int idle[PSB_NUM_ENGINES];
6208+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
6209+ int engine_lockup_2d;
6210+
6211+ struct psb_mmu_driver *mmu;
6212+ struct psb_mmu_pd *pf_pd;
6213+
6214+ uint8_t *sgx_reg;
6215+ uint8_t *vdc_reg;
6216+ uint32_t gatt_free_offset;
6217+
6218+ /*
6219+ *MSVDX
6220+ */
6221+ int has_msvdx;
6222+ uint8_t *msvdx_reg;
6223+ int msvdx_needs_reset;
6224+ atomic_t msvdx_mmu_invaldc;
6225+
6226+ /*
6227+ *TOPAZ
6228+ */
6229+ uint8_t *topaz_reg;
6230+
6231+ void *topaz_mtx_reg_state;
6232+ struct ttm_buffer_object *topaz_mtx_data_mem;
6233+ uint32_t topaz_cur_codec;
6234+ uint32_t cur_mtx_data_size;
6235+ int topaz_needs_reset;
6236+ int has_topaz;
6237+#define TOPAZ_MAX_IDELTIME (HZ*30)
6238+ int topaz_start_idle;
6239+ unsigned long topaz_idle_start_jiffies;
6240+ /* used by lnc_topaz_lockup */
6241+ uint32_t topaz_current_sequence;
6242+ uint32_t topaz_last_sequence;
6243+ uint32_t topaz_finished_sequence;
6244+
6245+ /*
6246+ *Fencing / irq.
6247+ */
6248+
6249+ uint32_t sgx_irq_mask;
6250+ uint32_t sgx2_irq_mask;
6251+ uint32_t vdc_irq_mask;
6252+
6253+ spinlock_t irqmask_lock;
6254+ spinlock_t sequence_lock;
6255+ int fence0_irq_on;
6256+ int irq_enabled;
6257+ unsigned int irqen_count_2d;
6258+ wait_queue_head_t event_2d_queue;
6259+
6260+#ifdef FIX_TG_16
6261+ wait_queue_head_t queue_2d;
6262+ atomic_t lock_2d;
6263+ atomic_t ta_wait_2d;
6264+ atomic_t ta_wait_2d_irq;
6265+ atomic_t waiters_2d;
6266+#else
6267+ struct mutex mutex_2d;
6268+#endif
6269+ uint32_t msvdx_current_sequence;
6270+ uint32_t msvdx_last_sequence;
6271+ int fence2_irq_on;
6272+
6273+ /*
6274+ *Modesetting
6275+ */
6276+ struct psb_intel_mode_device mode_dev;
6277+
6278+ /*
6279+ *MSVDX Rendec Memory
6280+ */
6281+ struct ttm_buffer_object *ccb0;
6282+ uint32_t base_addr0;
6283+ struct ttm_buffer_object *ccb1;
6284+ uint32_t base_addr1;
6285+
6286+ /*
6287+ * CI share buffer
6288+ */
6289+ unsigned int ci_region_start;
6290+ unsigned int ci_region_size;
6291+
6292+ /*
6293+ *Memory managers
6294+ */
6295+
6296+ int have_vram;
6297+ int have_camera;
6298+ int have_tt;
6299+ int have_mem_mmu;
6300+ int have_mem_aper;
6301+ int have_mem_kernel;
6302+ int have_mem_pds;
6303+ int have_mem_rastgeom;
6304+ struct mutex temp_mem;
6305+
6306+ /*
6307+ *Relocation buffer mapping.
6308+ */
6309+
6310+ spinlock_t reloc_lock;
6311+ unsigned int rel_mapped_pages;
6312+ wait_queue_head_t rel_mapped_queue;
6313+
6314+ /*
6315+ *SAREA
6316+ */
6317+ struct drm_psb_sarea *sarea_priv;
6318+
6319+ /*
6320+ *LVDS info
6321+ */
6322+ int backlight_duty_cycle; /* restore backlight to this value */
6323+ bool panel_wants_dither;
6324+ struct drm_display_mode *panel_fixed_mode;
6325+
6326+/* MRST private data start */
6327+/*FIXME JLIU7 need to revisit */
6328+ bool sku_83;
6329+ bool sku_100;
6330+ bool sku_100L;
6331+ bool sku_bypass;
6332+ uint32_t iLVDS_enable;
6333+
6334+ /* pipe config register value */
6335+ uint32_t pipeconf;
6336+
6337+ /* plane control register value */
6338+ uint32_t dspcntr;
6339+
6340+/* MRST_DSI private data start */
6341+ /*
6342+ *MRST DSI info
6343+ */
6344+ /* The DSI device ready */
6345+ bool dsi_device_ready;
6346+
6347+ /* The DPI panel power on */
6348+ bool dpi_panel_on;
6349+
6350+ /* The DBI panel power on */
6351+ bool dbi_panel_on;
6352+
6353+ /* The DPI display */
6354+ bool dpi;
6355+
6356+ /* status */
6357+ uint32_t videoModeFormat:2;
6358+ uint32_t laneCount:3;
6359+ uint32_t status_reserved:27;
6360+
6361+ /* dual display - DPI & DBI */
6362+ bool dual_display;
6363+
6364+ /* HS or LP transmission */
6365+ bool lp_transmission;
6366+
6367+ /* configuration phase */
6368+ bool config_phase;
6369+
6370+ /* DSI clock */
6371+ uint32_t RRate;
6372+ uint32_t DDR_Clock;
6373+ uint32_t DDR_Clock_Calculated;
6374+ uint32_t ClockBits;
6375+
6376+ /* DBI Buffer pointer */
6377+ u8 *p_DBI_commandBuffer_orig;
6378+ u8 *p_DBI_commandBuffer;
6379+ uint32_t DBI_CB_pointer;
6380+ u8 *p_DBI_dataBuffer_orig;
6381+ u8 *p_DBI_dataBuffer;
6382+ uint32_t DBI_DB_pointer;
6383+
6384+ /* DPI panel spec */
6385+ uint32_t pixelClock;
6386+ uint32_t HsyncWidth;
6387+ uint32_t HbackPorch;
6388+ uint32_t HfrontPorch;
6389+ uint32_t HactiveArea;
6390+ uint32_t VsyncWidth;
6391+ uint32_t VbackPorch;
6392+ uint32_t VfrontPorch;
6393+ uint32_t VactiveArea;
6394+ uint32_t bpp:5;
6395+ uint32_t Reserved:27;
6396+
6397+ /* DBI panel spec */
6398+ uint32_t dbi_pixelClock;
6399+ uint32_t dbi_HsyncWidth;
6400+ uint32_t dbi_HbackPorch;
6401+ uint32_t dbi_HfrontPorch;
6402+ uint32_t dbi_HactiveArea;
6403+ uint32_t dbi_VsyncWidth;
6404+ uint32_t dbi_VbackPorch;
6405+ uint32_t dbi_VfrontPorch;
6406+ uint32_t dbi_VactiveArea;
6407+ uint32_t dbi_bpp:5;
6408+ uint32_t dbi_Reserved:27;
6409+
6410+/* MRST_DSI private data end */
6411+
6412+ /*
6413+ *Register state
6414+ */
6415+ uint32_t saveDSPACNTR;
6416+ uint32_t saveDSPBCNTR;
6417+ uint32_t savePIPEACONF;
6418+ uint32_t savePIPEBCONF;
6419+ uint32_t savePIPEASRC;
6420+ uint32_t savePIPEBSRC;
6421+ uint32_t saveFPA0;
6422+ uint32_t saveFPA1;
6423+ uint32_t saveDPLL_A;
6424+ uint32_t saveDPLL_A_MD;
6425+ uint32_t saveHTOTAL_A;
6426+ uint32_t saveHBLANK_A;
6427+ uint32_t saveHSYNC_A;
6428+ uint32_t saveVTOTAL_A;
6429+ uint32_t saveVBLANK_A;
6430+ uint32_t saveVSYNC_A;
6431+ uint32_t saveDSPASTRIDE;
6432+ uint32_t saveDSPASIZE;
6433+ uint32_t saveDSPAPOS;
6434+ uint32_t saveDSPABASE;
6435+ uint32_t saveDSPASURF;
6436+ uint32_t saveFPB0;
6437+ uint32_t saveFPB1;
6438+ uint32_t saveDPLL_B;
6439+ uint32_t saveDPLL_B_MD;
6440+ uint32_t saveHTOTAL_B;
6441+ uint32_t saveHBLANK_B;
6442+ uint32_t saveHSYNC_B;
6443+ uint32_t saveVTOTAL_B;
6444+ uint32_t saveVBLANK_B;
6445+ uint32_t saveVSYNC_B;
6446+ uint32_t saveDSPBSTRIDE;
6447+ uint32_t saveDSPBSIZE;
6448+ uint32_t saveDSPBPOS;
6449+ uint32_t saveDSPBBASE;
6450+ uint32_t saveDSPBSURF;
6451+ uint32_t saveVCLK_DIVISOR_VGA0;
6452+ uint32_t saveVCLK_DIVISOR_VGA1;
6453+ uint32_t saveVCLK_POST_DIV;
6454+ uint32_t saveVGACNTRL;
6455+ uint32_t saveADPA;
6456+ uint32_t saveLVDS;
6457+ uint32_t saveDVOA;
6458+ uint32_t saveDVOB;
6459+ uint32_t saveDVOC;
6460+ uint32_t savePP_ON;
6461+ uint32_t savePP_OFF;
6462+ uint32_t savePP_CONTROL;
6463+ uint32_t savePP_CYCLE;
6464+ uint32_t savePFIT_CONTROL;
6465+ uint32_t savePaletteA[256];
6466+ uint32_t savePaletteB[256];
6467+ uint32_t saveBLC_PWM_CTL;
6468+ uint32_t saveCLOCKGATING;
6469+
6470+ /*
6471+ *Xhw
6472+ */
6473+
6474+ uint32_t *xhw;
6475+ struct ttm_buffer_object *xhw_bo;
6476+ struct ttm_bo_kmap_obj xhw_kmap;
6477+ struct list_head xhw_in;
6478+ spinlock_t xhw_lock;
6479+ atomic_t xhw_client;
6480+ struct drm_file *xhw_file;
6481+ wait_queue_head_t xhw_queue;
6482+ wait_queue_head_t xhw_caller_queue;
6483+ struct mutex xhw_mutex;
6484+ struct psb_xhw_buf *xhw_cur_buf;
6485+ int xhw_submit_ok;
6486+ int xhw_on;
6487+
6488+ /*
6489+ *Scheduling.
6490+ */
6491+
6492+ struct mutex reset_mutex;
6493+ struct psb_scheduler scheduler;
6494+ struct mutex cmdbuf_mutex;
6495+ uint32_t ta_mem_pages;
6496+ struct psb_ta_mem *ta_mem;
6497+ int force_ta_mem_load;
6498+ atomic_t val_seq;
6499+
6500+ /*
6501+ *TODO: change this to be per drm-context.
6502+ */
6503+
6504+ struct psb_context context;
6505+
6506+ /*
6507+ *Watchdog
6508+ */
6509+
6510+ spinlock_t watchdog_lock;
6511+ struct timer_list watchdog_timer;
6512+ struct work_struct watchdog_wq;
6513+ struct work_struct msvdx_watchdog_wq;
6514+ struct work_struct topaz_watchdog_wq;
6515+ int timer_available;
6516+
6517+ /*
6518+ *msvdx command queue
6519+ */
6520+ spinlock_t msvdx_lock;
6521+ struct mutex msvdx_mutex;
6522+ struct list_head msvdx_queue;
6523+ int msvdx_busy;
6524+ int msvdx_fw_loaded;
6525+ void *msvdx_fw;
6526+ int msvdx_fw_size;
6527+
6528+ /*
6529+ *topaz command queue
6530+ */
6531+ spinlock_t topaz_lock;
6532+ struct mutex topaz_mutex;
6533+ struct list_head topaz_queue;
6534+ int topaz_busy; /* 0 means topaz is free */
6535+ int topaz_fw_loaded;
6536+
6537+ /* topaz ccb data */
6538+ /* XXX: should the address be stored in 32 bits? Is there a more portable way? */
6539+ uint32_t topaz_ccb_buffer_addr;
6540+ uint32_t topaz_ccb_ctrl_addr;
6541+ uint32_t topaz_ccb_size;
6542+ uint32_t topaz_cmd_windex;
6543+ uint16_t topaz_cmd_seq;
6544+
6545+ uint32_t stored_initial_qp;
6546+ uint32_t topaz_dash_access_ctrl;
6547+
6548+ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
6549+ struct ttm_bo_kmap_obj topaz_bo_kmap;
6550+ void *topaz_ccb_wb;
6551+ uint32_t topaz_wb_offset;
6552+ uint32_t *topaz_sync_addr;
6553+ uint32_t topaz_sync_offset;
6554+ uint32_t topaz_sync_cmd_seq;
6555+
6556+ struct rw_semaphore sgx_sem; /* SGX is in use */
6557+ struct semaphore pm_sem; /* PM action in progress */
6558+ unsigned char graphics_state;
6559+#ifdef OSPM_STAT
6560+ unsigned long gfx_d0i3_time;
6561+ unsigned long gfx_d0i0_time;
6562+ unsigned long gfx_d3_time;
6563+ unsigned long gfx_last_mode_change;
6564+ unsigned long gfx_d0i0_cnt;
6565+ unsigned long gfx_d0i3_cnt;
6566+ unsigned long gfx_d3_cnt;
6567+#endif
6568+
6569+ /* MSVDX OSPM */
6570+ unsigned char msvdx_state;
6571+ unsigned long msvdx_last_action;
6572+ uint32_t msvdx_clk_state;
6573+
6574+ /* TOPAZ OSPM */
6575+ unsigned char topaz_power_state;
6576+ unsigned long topaz_last_action;
6577+ uint32_t topaz_clk_state;
6578+};
6579+
6580+struct psb_fpriv {
6581+ struct ttm_object_file *tfile;
6582+};
6583+
6584+struct psb_mmu_driver;
6585+
6586+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
6587+extern int drm_pick_crtcs(struct drm_device *dev);
6588+
6589+
6590+static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
6591+{
6592+ return (struct psb_fpriv *) file_priv->driver_priv;
6593+}
6594+
6595+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
6596+{
6597+ return (struct drm_psb_private *) dev->dev_private;
6598+}
6599+
6600+/*
6601+ *TTM glue. psb_ttm_glue.c
6602+ */
6603+
6604+extern int psb_open(struct inode *inode, struct file *filp);
6605+extern int psb_release(struct inode *inode, struct file *filp);
6606+extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);
6607+
6608+extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
6609+ struct drm_file *file_priv);
6610+extern int psb_verify_access(struct ttm_buffer_object *bo,
6611+ struct file *filp);
6612+extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
6613+ size_t count, loff_t *f_pos);
6614+extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
6615+ size_t count, loff_t *f_pos);
6616+extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
6617+ struct drm_file *file_priv);
6618+extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
6619+ struct drm_file *file_priv);
6620+extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
6621+ struct drm_file *file_priv);
6622+extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
6623+ struct drm_file *file_priv);
6624+extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
6625+ struct drm_file *file_priv);
6626+extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
6627+ struct drm_file *file_priv);
6628+extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
6629+ struct drm_file *file_priv);
6630+extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
6631+ struct drm_file *file_priv);
6632+extern int psb_extension_ioctl(struct drm_device *dev, void *data,
6633+ struct drm_file *file_priv);
6634+extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
6635+extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
6636+/*
6637+ *MMU stuff.
6638+ */
6639+
6640+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
6641+ int trap_pagefaults,
6642+ int invalid_type,
6643+ struct drm_psb_private *dev_priv);
6644+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
6645+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
6646+ *driver);
6647+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
6648+ uint32_t gtt_start, uint32_t gtt_pages);
6649+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
6650+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
6651+ int trap_pagefaults,
6652+ int invalid_type);
6653+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
6654+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
6655+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
6656+ unsigned long address,
6657+ uint32_t num_pages);
6658+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
6659+ uint32_t start_pfn,
6660+ unsigned long address,
6661+ uint32_t num_pages, int type);
6662+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
6663+ unsigned long *pfn);
6664+
6665+/*
6666+ *Enable / disable MMU for different requestors.
6667+ */
6668+
6669+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
6670+ uint32_t mask);
6671+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
6672+ uint32_t mask);
6673+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
6674+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
6675+ unsigned long address, uint32_t num_pages,
6676+ uint32_t desired_tile_stride,
6677+ uint32_t hw_tile_stride, int type);
6678+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
6679+ unsigned long address, uint32_t num_pages,
6680+ uint32_t desired_tile_stride,
6681+ uint32_t hw_tile_stride);
6682+/*
6683+ *psb_sgx.c
6684+ */
6685+
6686+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
6687+ uint32_t sequence);
6688+extern void psb_init_2d(struct drm_psb_private *dev_priv);
6689+extern int psb_idle_2d(struct drm_device *dev);
6690+extern int psb_idle_3d(struct drm_device *dev);
6691+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
6692+ uint32_t src_offset,
6693+ uint32_t dst_offset, uint32_t pages,
6694+ int direction);
6695+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
6696+ struct drm_file *file_priv);
6697+extern int psb_reg_submit(struct drm_psb_private *dev_priv,
6698+ uint32_t *regs, unsigned int cmds);
6699+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
6700+ struct ttm_buffer_object *cmd_buffer,
6701+ unsigned long cmd_offset,
6702+ unsigned long cmd_size, int engine,
6703+ uint32_t *copy_buffer);
6704+
6705+extern void psb_init_disallowed(void);
6706+extern void psb_fence_or_sync(struct drm_file *file_priv,
6707+ uint32_t engine,
6708+ uint32_t fence_types,
6709+ uint32_t fence_flags,
6710+ struct list_head *list,
6711+ struct psb_ttm_fence_rep *fence_arg,
6712+ struct ttm_fence_object **fence_p);
6713+extern int psb_validate_kernel_buffer(struct psb_context *context,
6714+ struct ttm_buffer_object *bo,
6715+ uint32_t fence_class,
6716+ uint64_t set_flags,
6717+ uint64_t clr_flags);
6718+extern void psb_init_ospm(struct drm_psb_private *dev_priv);
6719+extern void psb_check_power_state(struct drm_device *dev, int devices);
6720+extern void psb_down_island_power(struct drm_device *dev, int islands);
6721+extern void psb_up_island_power(struct drm_device *dev, int islands);
6722+extern int psb_try_power_down_sgx(struct drm_device *dev);
6723+
6724+/*
6725+ *psb_irq.c
6726+ */
6727+
6728+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
6729+extern void psb_irq_preinstall(struct drm_device *dev);
6730+extern int psb_irq_postinstall(struct drm_device *dev);
6731+extern void psb_irq_uninstall(struct drm_device *dev);
6732+extern int psb_vblank_wait2(struct drm_device *dev,
6733+ unsigned int *sequence);
6734+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
6735+
6736+/*
6737+ *psb_fence.c
6738+ */
6739+
6740+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
6741+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
6742+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
6743+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
6744+ uint32_t class);
6745+extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
6746+ uint32_t fence_class,
6747+ uint32_t flags, uint32_t *sequence,
6748+ unsigned long *timeout_jiffies);
6749+extern void psb_fence_error(struct drm_device *dev,
6750+ uint32_t class,
6751+ uint32_t sequence, uint32_t type, int error);
6752+extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
6753+
6754+/*MSVDX stuff*/
6755+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
6756+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
6757+
6758+/*
6759+ *psb_gtt.c
6760+ */
6761+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
6762+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
6763+ unsigned offset_pages, unsigned num_pages,
6764+ unsigned desired_tile_stride,
6765+ unsigned hw_tile_stride, int type);
6766+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
6767+ unsigned num_pages,
6768+ unsigned desired_tile_stride,
6769+ unsigned hw_tile_stride);
6770+
6771+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
6772+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
6773+
6774+/*
6775+ *psb_fb.c
6776+ */
6777+extern int psbfb_probed(struct drm_device *dev);
6778+extern int psbfb_remove(struct drm_device *dev,
6779+ struct drm_framebuffer *fb);
6780+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
6781+ struct drm_file *file_priv);
6782+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
6783+ struct drm_file *file_priv);
6784+extern void psbfb_suspend(struct drm_device *dev);
6785+extern void psbfb_resume(struct drm_device *dev);
6786+
6787+/*
6788+ *psb_reset.c
6789+ */
6790+
6791+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
6792+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
6793+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
6794+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
6795+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
6796+
6797+/*
6798+ *psb_xhw.c
6799+ */
6800+
6801+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
6802+ struct drm_file *file_priv);
6803+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
6804+ struct drm_file *file_priv);
6805+extern int psb_xhw_init(struct drm_device *dev);
6806+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
6807+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
6808+ struct drm_file *file_priv, int closing);
6809+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
6810+ struct psb_xhw_buf *buf,
6811+ uint32_t fire_flags,
6812+ uint32_t hw_context,
6813+ uint32_t *cookie,
6814+ uint32_t *oom_cmds,
6815+ uint32_t num_oom_cmds,
6816+ uint32_t offset,
6817+ uint32_t engine, uint32_t flags);
6818+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
6819+ struct psb_xhw_buf *buf,
6820+ uint32_t fire_flags);
6821+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
6822+ struct psb_xhw_buf *buf, uint32_t w,
6823+ uint32_t h, uint32_t *hw_cookie,
6824+ uint32_t *bo_size, uint32_t *clear_p_start,
6825+ uint32_t *clear_num_pages);
6826+
6827+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
6828+ struct psb_xhw_buf *buf);
6829+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
6830+ struct psb_xhw_buf *buf, uint32_t *value);
6831+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
6832+ struct psb_xhw_buf *buf,
6833+ uint32_t pages,
6834+ uint32_t *hw_cookie,
6835+ uint32_t *size,
6836+ uint32_t *ta_min_size);
6837+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
6838+ struct psb_xhw_buf *buf, uint32_t *cookie);
6839+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
6840+ struct psb_xhw_buf *buf,
6841+ uint32_t *cookie,
6842+ uint32_t *bca,
6843+ uint32_t *rca, uint32_t *flags);
6844+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
6845+ struct psb_xhw_buf *buf);
6846+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
6847+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
6848+ struct psb_xhw_buf *buf);
6849+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
6850+ struct psb_xhw_buf *buf, uint32_t *cookie);
6851+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
6852+ struct psb_xhw_buf *buf,
6853+ uint32_t flags,
6854+ uint32_t param_offset,
6855+ uint32_t pt_offset, uint32_t *hw_cookie);
6856+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
6857+ struct psb_xhw_buf *buf);
6858+
6859+/*
6860+ *psb_schedule.c: HW bug fixing.
6861+ */
6862+
6863+#ifdef FIX_TG_16
6864+
6865+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
6866+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
6867+extern int psb_2d_trylock(struct drm_psb_private *dev_priv);
6868+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
6870+extern void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
6871+#else
6872+
6873+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
6874+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
6875+
6876+#endif
6877+
6878+/* modesetting */
6879+extern void psb_modeset_init(struct drm_device *dev);
6880+extern void psb_modeset_cleanup(struct drm_device *dev);
6881+
6882+
6883+/*
6884+ *Utilities
6885+ */
6886+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
6887+
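+/* Side-band message bus access via the PCI config mechanism (ports 0xCF8/0xCFC). */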
6888+static inline u32 MSG_READ32(uint port, uint offset)
6889+{
6890+ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
6891+ outl(0x800000D0, 0xCF8);
6892+ outl(mcr, 0xCFC);
6893+ outl(0x800000D4, 0xCF8);
6894+ return inl(0xCFC);
6895+}
6896+static inline void MSG_WRITE32(uint port, uint offset, u32 value)
6897+{
6898+ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
6899+ outl(0x800000D4, 0xCF8);
6900+ outl(value, 0xCFC);
6901+ outl(0x800000D0, 0xCF8);
6902+ outl(mcr, 0xCFC);
6903+}
6904+
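+/* MMIO accessors for the VDC register range; 'reg' is a byte offset into vdc_reg. */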
6905+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
6906+{
6907+ struct drm_psb_private *dev_priv = dev->dev_private;
6908+
6909+ return ioread32(dev_priv->vdc_reg + (reg));
6910+}
6911+
6912+#define REG_READ(reg) REGISTER_READ(dev, (reg))
6913+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
6914+ uint32_t val)
6915+{
6916+ struct drm_psb_private *dev_priv = dev->dev_private;
6917+
6918+ iowrite32((val), dev_priv->vdc_reg + (reg));
6919+}
6920+
6921+#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
6922+
6923+static inline void REGISTER_WRITE16(struct drm_device *dev,
6924+ uint32_t reg, uint32_t val)
6925+{
6926+ struct drm_psb_private *dev_priv = dev->dev_private;
6927+
6928+ iowrite16((val), dev_priv->vdc_reg + (reg));
6929+}
6930+
6931+#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
6932+
6933+static inline void REGISTER_WRITE8(struct drm_device *dev,
6934+ uint32_t reg, uint32_t val)
6935+{
6936+ struct drm_psb_private *dev_priv = dev->dev_private;
6937+
6938+ iowrite8((val), dev_priv->vdc_reg + (reg));
6939+}
6940+
6941+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
6942+
6943+#define PSB_ALIGN_TO(_val, _align) \
6944+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
6945+#define PSB_WVDC32(_val, _offs) \
6946+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
6947+#define PSB_RVDC32(_offs) \
6948+ ioread32(dev_priv->vdc_reg + (_offs))
6949+#define PSB_WSGX32(_val, _offs) \
6950+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
6951+#define PSB_RSGX32(_offs) \
6952+ ioread32(dev_priv->sgx_reg + (_offs))
6953+#define PSB_WMSVDX32(_val, _offs) \
6954+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
6955+#define PSB_RMSVDX32(_offs) \
6956+ ioread32(dev_priv->msvdx_reg + (_offs))
6957+
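+/* Shift an alignment-encoded value into its register field; PSB_ALPLM also applies the field mask. */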
6958+#define PSB_ALPL(_val, _base) \
6959+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
6960+#define PSB_ALPLM(_val, _base) \
6961+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
6962+
6963+#define PSB_D_RENDER (1 << 16)
6964+
6965+#define PSB_D_GENERAL (1 << 0)
6966+#define PSB_D_INIT (1 << 1)
6967+#define PSB_D_IRQ (1 << 2)
6968+#define PSB_D_FW (1 << 3)
6969+#define PSB_D_PERF (1 << 4)
6970+#define PSB_D_TMP (1 << 5)
6971+#define PSB_D_PM (1 << 6)
6972+
6973+extern int drm_psb_debug;
6974+extern int drm_psb_no_fb;
6975+extern int drm_psb_disable_vsync;
6976+extern int drm_idle_check_interval;
6977+extern int drm_psb_ospm;
6978+
6979+#define PSB_DEBUG_FW(_fmt, _arg...) \
6980+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
6981+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
6982+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
6983+#define PSB_DEBUG_INIT(_fmt, _arg...) \
6984+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
6985+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
6986+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
6987+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
6988+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
6989+#define PSB_DEBUG_PERF(_fmt, _arg...) \
6990+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
6991+#define PSB_DEBUG_TMP(_fmt, _arg...) \
6992+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
6993+#define PSB_DEBUG_PM(_fmt, _arg...) \
6994+ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
6995+
6996+#if DRM_DEBUG_CODE
6997+#define PSB_DEBUG(_flag, _fmt, _arg...) \
6998+ do { \
6999+ if (unlikely((_flag) & drm_psb_debug)) \
7000+ printk(KERN_DEBUG \
7001+ "[psb:0x%02x:%s] " _fmt , _flag, \
7002+ __func__ , ##_arg); \
7003+ } while (0)
7004+#else
7005+#define PSB_DEBUG(_flag, _fmt, _arg...) do { } while (0)
7006+#endif
7007+
7008+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
7009+ ((dev)->pci_device == 0x8109))
7010+
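+/* Moorestown device IDs fall in the range 0x4100-0x4103. */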
7011+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
7012+
7013+#endif
7014diff -uNr a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c
7015--- a/drivers/gpu/drm/psb/psb_fb.c 1969-12-31 16:00:00.000000000 -0800
7016+++ b/drivers/gpu/drm/psb/psb_fb.c 2009-04-07 13:28:38.000000000 -0700
7017@@ -0,0 +1,1687 @@
7018+/**************************************************************************
7019+ * Copyright (c) 2007, Intel Corporation.
7020+ * All Rights Reserved.
7021+ *
7022+ * This program is free software; you can redistribute it and/or modify it
7023+ * under the terms and conditions of the GNU General Public License,
7024+ * version 2, as published by the Free Software Foundation.
7025+ *
7026+ * This program is distributed in the hope it will be useful, but WITHOUT
7027+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7028+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7029+ * more details.
7030+ *
7031+ * You should have received a copy of the GNU General Public License along with
7032+ * this program; if not, write to the Free Software Foundation, Inc.,
7033+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7034+ *
7035+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
7036+ * develop this driver.
7037+ *
7038+ **************************************************************************/
7039+
7040+#include <linux/module.h>
7041+#include <linux/kernel.h>
7042+#include <linux/errno.h>
7043+#include <linux/string.h>
7044+#include <linux/mm.h>
7045+#include <linux/tty.h>
7046+#include <linux/slab.h>
7047+#include <linux/delay.h>
7048+#include <linux/fb.h>
7049+#include <linux/init.h>
7050+#include <linux/console.h>
7051+
7052+#include <drm/drmP.h>
7053+#include <drm/drm.h>
7054+#include <drm/drm_crtc.h>
7055+
7056+#include "psb_drv.h"
7057+#include "psb_intel_reg.h"
7058+#include "psb_intel_drv.h"
7059+#include "ttm/ttm_userobj_api.h"
7060+#include "psb_fb.h"
7061+#include "psb_sgx.h"
7062+
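+/* Fill in the fbdev RGBA bitfield layout for a given color depth. */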
7063+static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth)
7064+{
7065+ switch (depth) {
7066+ case 8:
7067+ var->red.offset = 0;
7068+ var->green.offset = 0;
7069+ var->blue.offset = 0;
7070+ var->red.length = 8;
7071+ var->green.length = 8;
7072+ var->blue.length = 8;
7073+ var->transp.length = 0;
7074+ var->transp.offset = 0;
7075+ break;
7076+ case 15:
7077+ var->red.offset = 10;
7078+ var->green.offset = 5;
7079+ var->blue.offset = 0;
7080+ var->red.length = 5;
7081+ var->green.length = 5;
7082+ var->blue.length = 5;
7083+ var->transp.length = 1;
7084+ var->transp.offset = 15;
7085+ break;
7086+ case 16:
7087+ var->red.offset = 11;
7088+ var->green.offset = 5;
7089+ var->blue.offset = 0;
7090+ var->red.length = 5;
7091+ var->green.length = 6;
7092+ var->blue.length = 5;
7093+ var->transp.length = 0;
7094+ var->transp.offset = 0;
7095+ break;
7096+ case 24:
7097+ var->red.offset = 16;
7098+ var->green.offset = 8;
7099+ var->blue.offset = 0;
7100+ var->red.length = 8;
7101+ var->green.length = 8;
7102+ var->blue.length = 8;
7103+ var->transp.length = 0;
7104+ var->transp.offset = 0;
7105+ break;
7106+ case 32:
7107+ var->red.offset = 16;
7108+ var->green.offset = 8;
7109+ var->blue.offset = 0;
7110+ var->red.length = 8;
7111+ var->green.length = 8;
7112+ var->blue.length = 8;
7113+ var->transp.length = 8;
7114+ var->transp.offset = 24;
7115+ break;
7116+ default:
7117+ return -EINVAL;
7118+ }
7119+
7120+ return 0;
7121+}
7122+
7123+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
7124+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7125+ struct drm_file *file_priv,
7126+ unsigned int *handle);
7127+
7128+static const struct drm_framebuffer_funcs psb_fb_funcs = {
7129+ .destroy = psb_user_framebuffer_destroy,
7130+ .create_handle = psb_user_framebuffer_create_handle,
7131+};
7132+
7133+struct psbfb_par {
7134+ struct drm_device *dev;
7135+ struct psb_framebuffer *psbfb;
7136+
7137+ int dpms_state;
7138+
7139+ int crtc_count;
7140+ /* crtc currently bound to this */
7141+ uint32_t crtc_ids[2];
7142+};
7143+
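+/*
+ * Scale a 16-bit colour component to a _width-bit hardware value with
+ * rounding: roughly (_val * ((1 << _width) - 1)) / 0xFFFF. For example,
+ * CMAP_TOHW(0xFFFF, 5) is 0x1f and CMAP_TOHW(0x8000, 5) is 0x0f.
+ */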
7144+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
7145+
7146+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
7147+ unsigned blue, unsigned transp,
7148+ struct fb_info *info)
7149+{
7150+ struct psbfb_par *par = info->par;
7151+ struct drm_framebuffer *fb = &par->psbfb->base;
7152+ uint32_t v;
7153+
7154+ if (!fb)
7155+ return -ENOMEM;
7156+
7157+ if (regno > 255)
7158+ return 1;
7159+
7160+#if 0 /* JB: not drop, check that this works */
7161+ if (fb->bits_per_pixel == 8) {
7162+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7163+ head) {
7164+ for (i = 0; i < par->crtc_count; i++)
7165+ if (crtc->base.id == par->crtc_ids[i])
7166+ break;
7167+
7168+ if (i == par->crtc_count)
7169+ continue;
7170+
7171+ if (crtc->funcs->gamma_set)
7172+ crtc->funcs->gamma_set(crtc, red, green,
7173+ blue, regno);
7174+ }
7175+ return 0;
7176+ }
7177+#endif
7178+
7179+ red = CMAP_TOHW(red, info->var.red.length);
7180+ blue = CMAP_TOHW(blue, info->var.blue.length);
7181+ green = CMAP_TOHW(green, info->var.green.length);
7182+ transp = CMAP_TOHW(transp, info->var.transp.length);
7183+
7184+ v = (red << info->var.red.offset) |
7185+ (green << info->var.green.offset) |
7186+ (blue << info->var.blue.offset) |
7187+ (transp << info->var.transp.offset);
7188+
7189+ if (regno < 16) {
7190+ switch (fb->bits_per_pixel) {
7191+ case 16:
7192+ ((uint32_t *) info->pseudo_palette)[regno] = v;
7193+ break;
7194+ case 24:
7195+ case 32:
7196+ ((uint32_t *) info->pseudo_palette)[regno] = v;
7197+ break;
7198+ }
7199+ }
7200+
7201+ return 0;
7202+}
7203+
7204+static struct drm_display_mode *
7205+psbfb_find_first_mode(struct fb_var_screeninfo *var,
7206+ struct fb_info *info,
7207+ struct drm_crtc *crtc)
7210+{
7211+ struct psbfb_par *par = info->par;
7212+ struct drm_device *dev = par->dev;
7213+ struct drm_display_mode *drm_mode;
7214+ struct drm_display_mode *last_mode = NULL;
7215+ struct drm_connector *connector;
7216+ int found;
7217+
7218+ found = 0;
7219+ list_for_each_entry(connector, &dev->mode_config.connector_list,
7220+ head) {
7221+ if (connector->encoder && connector->encoder->crtc == crtc) {
7222+ found = 1;
7223+ break;
7224+ }
7225+ }
7226+
7227+ /* found no connector, bail */
7228+ if (!found)
7229+ return NULL;
7230+
7231+ found = 0;
7232+ list_for_each_entry(drm_mode, &connector->modes, head) {
7233+ if (drm_mode->hdisplay == var->xres &&
7234+ drm_mode->vdisplay == var->yres
7235+ && drm_mode->clock != 0) {
7236+ found = 1;
7237+ last_mode = drm_mode;
7238+ }
7239+ }
7240+
7241+ /* No matching mode found */
7242+ if (!found)
7243+ return NULL;
7244+
7245+ return last_mode;
7246+}
7247+
7248+static int psbfb_check_var(struct fb_var_screeninfo *var,
7249+ struct fb_info *info)
7250+{
7251+ struct psbfb_par *par = info->par;
7252+ struct psb_framebuffer *psbfb = par->psbfb;
7253+ struct drm_device *dev = par->dev;
7254+ int ret;
7255+ int depth;
7256+ int pitch;
7257+ int bpp = var->bits_per_pixel;
7258+
7259+ if (!psbfb)
7260+ return -ENOMEM;
7261+
7262+ if (!var->pixclock)
7263+ return -EINVAL;
7264+
7265+ /* don't support virtuals for now */
7266+ if (var->xres_virtual > var->xres)
7267+ return -EINVAL;
7268+
7269+ if (var->yres_virtual > var->yres)
7270+ return -EINVAL;
7271+
7272+ switch (bpp) {
7273+#if 0 /* JB: for now only support true color */
7274+ case 8:
7275+ depth = 8;
7276+ break;
7277+#endif
7278+ case 16:
7279+ depth = (var->green.length == 6) ? 16 : 15;
7280+ break;
7281+ case 24: /* assume this is 32bpp / depth 24 */
7282+ bpp = 32;
7283+ /* fallthrough */
7284+ case 32:
7285+ depth = (var->transp.length > 0) ? 32 : 24;
7286+ break;
7287+ default:
7288+ return -EINVAL;
7289+ }
7290+
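+ /* Bytes per scanline, rounded up to a 64-byte boundary. */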
7291+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
7292+
7293+ /* Check that we can resize */
7294+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
7295+#if 1
7296+ /* Need to resize the fb object.
7297+ * But the generic fbdev code doesn't really understand
7298+ * that we can do this. So disable for now.
7299+ */
7300+ DRM_INFO("Can't support requested size, too big!\n");
7301+ return -EINVAL;
7302+#else
7303+ struct drm_psb_private *dev_priv = psb_priv(dev);
7304+ struct ttm_bo_device *bdev = &dev_priv->bdev;
7305+ struct ttm_buffer_object *fbo = NULL;
7306+ struct ttm_bo_kmap_obj tmp_kmap;
7307+
7308+ /* a temporary BO to check if we could resize in setpar.
7309+ * Therefore no need to set NO_EVICT.
7310+ */
7311+ ret = ttm_buffer_object_create(bdev,
7312+ pitch * var->yres,
7313+ ttm_bo_type_kernel,
7314+ TTM_PL_FLAG_TT |
7315+ TTM_PL_FLAG_VRAM |
7316+ TTM_PL_FLAG_NO_EVICT,
7317+ 0, 0, &fbo);
7318+ if (ret || !fbo)
7319+ return -ENOMEM;
7320+
7321+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
7322+ if (ret) {
7323+ ttm_bo_usage_deref_unlocked(&fbo);
7324+ return -EINVAL;
7325+ }
7326+
7327+ ttm_bo_kunmap(&tmp_kmap);
7328+ /* destroy our current fbo! */
7329+ ttm_bo_usage_deref_unlocked(&fbo);
7330+#endif
7331+ }
7332+
7333+ ret = fill_fb_bitfield(var, depth);
7334+ if (ret)
7335+ return ret;
7336+
7337+#if 1
7338+ /* Here we walk the output mode list and look for a matching mode.
7339+ * If we haven't got one, then bail.
7340+ * In the set_par code, we create our mode based on the incoming
7341+ * parameters instead. Nicer, but may not be desired by some.
7342+ */
7343+ {
7344+ struct drm_crtc *crtc;
7345+ int i;
7346+
7347+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7348+ head) {
7349+ struct psb_intel_crtc *psb_intel_crtc =
7350+ to_psb_intel_crtc(crtc);
7351+
7352+ for (i = 0; i < par->crtc_count; i++)
7353+ if (crtc->base.id == par->crtc_ids[i])
7354+ break;
7355+
7356+ if (i == par->crtc_count)
7357+ continue;
7358+
7359+ if (psb_intel_crtc->mode_set.num_connectors == 0)
7360+ continue;
7361+
7362+ if (!psbfb_find_first_mode(&info->var, info, crtc))
7363+ return -EINVAL;
7364+ }
7365+ }
7366+#else
7367+ (void) i;
7368+ (void) dev; /* silence warnings */
7369+ (void) crtc;
7370+ (void) drm_mode;
7371+ (void) connector;
7372+#endif
7373+
7374+ return 0;
7375+}
7376+
7377+/* this will let fbcon do the mode init */
7378+static int psbfb_set_par(struct fb_info *info)
7379+{
7380+ struct psbfb_par *par = info->par;
7381+ struct psb_framebuffer *psbfb = par->psbfb;
7382+ struct drm_framebuffer *fb = &psbfb->base;
7383+ struct drm_device *dev = par->dev;
7384+ struct fb_var_screeninfo *var = &info->var;
7385+ struct drm_psb_private *dev_priv = dev->dev_private;
7386+ struct drm_display_mode *drm_mode;
7387+ int pitch;
7388+ int depth;
7389+ int bpp = var->bits_per_pixel;
7390+
7391+ if (!fb)
7392+ return -ENOMEM;
7393+
7394+ switch (bpp) {
7395+ case 8:
7396+ depth = 8;
7397+ break;
7398+ case 16:
7399+ depth = (var->green.length == 6) ? 16 : 15;
7400+ break;
7401+ case 24: /* assume this is 32bpp / depth 24 */
7402+ bpp = 32;
7403+ /* fallthrough */
7404+ case 32:
7405+ depth = (var->transp.length > 0) ? 32 : 24;
7406+ break;
7407+ default:
7408+ DRM_ERROR("Illegal BPP\n");
7409+ return -EINVAL;
7410+ }
7411+
7412+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
7413+
7414+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
7415+#if 1
7416+ /* Need to resize the fb object.
7417+ * But the generic fbdev code doesn't really understand
7418+ * that we can do this. So disable for now.
7419+ */
7420+ DRM_INFO("Can't support requested size, too big!\n");
7421+ return -EINVAL;
7422+#else
7423+ int ret;
7424+ struct ttm_buffer_object *fbo = NULL, *tfbo;
7425+ struct ttm_bo_kmap_obj tmp_kmap, tkmap;
7426+
7427+ ret = ttm_buffer_object_create(bdev,
7428+ pitch * var->yres,
7429+ ttm_bo_type_kernel,
7430+ TTM_PL_FLAG_MEM_TT |
7431+ TTM_PL_FLAG_MEM_VRAM |
7432+ TTM_PL_FLAG_NO_EVICT,
7433+ 0, 0, &fbo);
7434+ if (ret || !fbo) {
7435+ DRM_ERROR
7436+ ("failed to allocate new resized framebuffer\n");
7437+ return -ENOMEM;
7438+ }
7439+
7440+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
7441+ if (ret) {
7442+ DRM_ERROR("failed to kmap framebuffer.\n");
7443+ ttm_bo_usage_deref_unlocked(&fbo);
7444+ return -EINVAL;
7445+ }
7446+
7447+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n",
7448+ fb->width, fb->height, fb->offset, fbo);
7449+
7450+ /* set new screen base */
7451+ info->screen_base = tmp_kmap.virtual;
7452+
7453+ tkmap = fb->kmap;
7454+ fb->kmap = tmp_kmap;
7455+ ttm_bo_kunmap(&tkmap);
7456+
7457+ tfbo = fb->bo;
7458+ fb->bo = fbo;
7459+ ttm_bo_usage_deref_unlocked(&tfbo);
7460+#endif
7461+ }
7462+
7463+ psbfb->offset = psbfb->bo->offset - dev_priv->pg->gatt_start;
7464+ fb->width = var->xres;
7465+ fb->height = var->yres;
7466+ fb->bits_per_pixel = bpp;
7467+ fb->pitch = pitch;
7468+ fb->depth = depth;
7469+
7470+ info->fix.line_length = psbfb->base.pitch;
7471+ info->fix.visual = (psbfb->base.depth == 8) ?
7472+ FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
7474+
7475+ /* some fbdev apps don't want these to change */
7476+ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset;
7477+
7478+#if 0
7479+ /* relates to resize - disable */
7480+ info->fix.smem_len = info->fix.line_length * var->yres;
7481+ info->screen_size = info->fix.smem_len; /* ??? */
7482+#endif
7483+
7484+ /* Should we walk the output's modelist or just create our own ???
7485+ * For now, we create and destroy a mode based on the incoming
7486+ * parameters. But there's commented out code below which scans
7487+ * the output list too.
7488+ */
7489+ /* This code is now in the for loop further down. */
7492+
7493+ {
7494+ struct drm_crtc *crtc;
7495+ int ret;
7496+ int i;
7497+
7498+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7499+ head) {
7500+ struct psb_intel_crtc *psb_intel_crtc =
7501+ to_psb_intel_crtc(crtc);
7502+
7503+ for (i = 0; i < par->crtc_count; i++)
7504+ if (crtc->base.id == par->crtc_ids[i])
7505+ break;
7506+
7507+ if (i == par->crtc_count)
7508+ continue;
7509+
7510+ if (psb_intel_crtc->mode_set.num_connectors == 0)
7511+ continue;
7512+
7513+#if 1
7514+ drm_mode =
7515+ psbfb_find_first_mode(&info->var, info, crtc);
7516+ if (!drm_mode)
7517+ DRM_ERROR("No matching mode found\n");
7518+ psb_intel_crtc->mode_set.mode = drm_mode;
7519+#endif
7520+
7521+#if 0 /* FIXME: TH */
7522+ if (crtc->fb == psb_intel_crtc->mode_set.fb) {
7523+#endif
7524+ DRM_DEBUG
7525+ ("setting mode on crtc %p with id %u\n",
7526+ crtc, crtc->base.id);
7527+ ret =
7528+ crtc->funcs->
7529+ set_config(&psb_intel_crtc->mode_set);
7530+ if (ret) {
7531+ DRM_ERROR("Failed setting mode\n");
7532+ return ret;
7533+ }
7534+#if 0
7535+ }
7536+#endif
7537+ }
7538+ DRM_DEBUG("Set par returned OK.\n");
7539+ return 0;
7540+ }
7543+}
7544+
7545+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
7546+ unsigned size)
7547+{
7548+ int ret = 0;
7549+ int i;
7550+ unsigned submit_size;
7551+
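+ /*
+ * Feed the command words to the 2D slave port in bursts of at most
+ * 0x60 dwords, waiting for the hardware to have room for each burst.
+ */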
7552+ while (size > 0) {
7553+ submit_size = (size < 0x60) ? size : 0x60;
7554+ size -= submit_size;
7555+ ret = psb_2d_wait_available(dev_priv, submit_size);
7556+ if (ret)
7557+ return ret;
7558+
7559+ submit_size <<= 2;
7560+ for (i = 0; i < submit_size; i += 4) {
7561+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
7562+ }
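+ /* Read back once per burst so the slave-port writes get flushed. */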
7563+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
7564+ }
7565+ return 0;
7566+}
7567+
7568+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
7569+ uint32_t dst_offset, uint32_t dst_stride,
7570+ uint32_t dst_format, uint16_t dst_x,
7571+ uint16_t dst_y, uint16_t size_x,
7572+ uint16_t size_y, uint32_t fill)
7573+{
7574+ uint32_t buffer[10];
7575+ uint32_t *buf;
7576+
7577+ buf = buffer;
7578+
7579+ *buf++ = PSB_2D_FENCE_BH;
7580+
7581+ *buf++ =
7582+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
7583+ PSB_2D_DST_STRIDE_SHIFT);
7584+ *buf++ = dst_offset;
7585+
7586+ *buf++ =
7587+ PSB_2D_BLIT_BH |
7588+ PSB_2D_ROT_NONE |
7589+ PSB_2D_COPYORDER_TL2BR |
7590+ PSB_2D_DSTCK_DISABLE |
7591+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
7592+
7593+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
7594+ *buf++ =
7595+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
7596+ PSB_2D_DST_YSTART_SHIFT);
7597+ *buf++ =
7598+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
7599+ PSB_2D_DST_YSIZE_SHIFT);
7600+ *buf++ = PSB_2D_FLUSH_BH;
7601+
7602+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
7603+}
7604+
7605+static void psbfb_fillrect_accel(struct fb_info *info,
7606+ const struct fb_fillrect *r)
7607+{
7608+ struct psbfb_par *par = info->par;
7609+ struct psb_framebuffer *psbfb = par->psbfb;
7610+ struct drm_framebuffer *fb = &psbfb->base;
7611+ struct drm_psb_private *dev_priv = par->dev->dev_private;
7612+ uint32_t offset;
7613+ uint32_t stride;
7614+ uint32_t format;
7615+
7616+ if (!fb)
7617+ return;
7618+
7619+ offset = psbfb->offset;
7620+ stride = fb->pitch;
7621+
7622+ switch (fb->depth) {
7623+ case 8:
7624+ format = PSB_2D_DST_332RGB;
7625+ break;
7626+ case 15:
7627+ format = PSB_2D_DST_555RGB;
7628+ break;
7629+ case 16:
7630+ format = PSB_2D_DST_565RGB;
7631+ break;
7632+ case 24:
7633+ case 32:
7634+ /* this is wrong, but since we don't do blending it's okay */
7635+ format = PSB_2D_DST_8888ARGB;
7636+ break;
7637+ default:
7638+ /* software fallback */
7639+ cfb_fillrect(info, r);
7640+ return;
7641+ }
7642+
7643+ psb_accel_2d_fillrect(dev_priv,
7644+ offset, stride, format,
7645+ r->dx, r->dy, r->width, r->height, r->color);
7646+}
7647+
7648+static void psbfb_fillrect(struct fb_info *info,
7649+ const struct fb_fillrect *rect)
7650+{
7651+ struct psbfb_par *par = info->par;
7652+ struct drm_device *dev = par->dev;
7653+ struct drm_psb_private *dev_priv = dev->dev_private;
7654+
7655+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
7656+ return;
7657+
7658+ if (info->flags & FBINFO_HWACCEL_DISABLED)
7659+ return cfb_fillrect(info, rect);
7660+
7661+ if (psb_2d_trylock(dev_priv)) {
7662+ psb_check_power_state(dev, PSB_DEVICE_SGX);
7663+ psbfb_fillrect_accel(info, rect);
7664+ psb_2d_unlock(dev_priv);
7665+ if (drm_psb_ospm && IS_MRST(dev))
7666+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
7667+ } else
7668+ cfb_fillrect(info, rect);
7669+}
7670+
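+/*
+ * Pick a copy order that is safe for overlapping source and
+ * destination rectangles; the arguments are src - dst deltas. E.g.
+ * copying a region one pixel to the right gives xdir < 0, which
+ * selects a right-to-left order so each source pixel is read before
+ * it is overwritten.
+ */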
7671+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
7672+{
7673+ if (xdir < 0)
7674+ return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
7675+ PSB_2D_COPYORDER_TR2BL;
7676+ else
7677+ return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
7678+ PSB_2D_COPYORDER_TL2BR;
7679+}
7682+
7683+/*
7684+ * @src_offset in bytes
7685+ * @src_stride in bytes
7686+ * @src_format psb 2D format defines
7687+ * @dst_offset in bytes
7688+ * @dst_stride in bytes
7689+ * @dst_format psb 2D format defines
7690+ * @src_x offset in pixels
7691+ * @src_y offset in pixels
7692+ * @dst_x offset in pixels
7693+ * @dst_y offset in pixels
7694+ * @size_x of the copied area
7695+ * @size_y of the copied area
7696+ */
7697+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
7698+ uint32_t src_offset, uint32_t src_stride,
7699+ uint32_t src_format, uint32_t dst_offset,
7700+ uint32_t dst_stride, uint32_t dst_format,
7701+ uint16_t src_x, uint16_t src_y,
7702+ uint16_t dst_x, uint16_t dst_y,
7703+ uint16_t size_x, uint16_t size_y)
7704+{
7705+ uint32_t blit_cmd;
7706+ uint32_t buffer[10];
7707+ uint32_t *buf;
7708+ uint32_t direction;
7709+
7710+ buf = buffer;
7711+
7712+ direction =
7713+ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
7714+
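+ /*
+ * For right-to-left or bottom-to-top copy orders the blit starts at
+ * the far edge, so move the start coordinates to the last pixel or
+ * scanline of the span.
+ */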
7715+ if (direction == PSB_2D_COPYORDER_BR2TL ||
7716+ direction == PSB_2D_COPYORDER_TR2BL) {
7717+ src_x += size_x - 1;
7718+ dst_x += size_x - 1;
7719+ }
7720+ if (direction == PSB_2D_COPYORDER_BR2TL ||
7721+ direction == PSB_2D_COPYORDER_BL2TR) {
7722+ src_y += size_y - 1;
7723+ dst_y += size_y - 1;
7724+ }
7725+
7726+ blit_cmd =
7727+ PSB_2D_BLIT_BH |
7728+ PSB_2D_ROT_NONE |
7729+ PSB_2D_DSTCK_DISABLE |
7730+ PSB_2D_SRCCK_DISABLE |
7731+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
7732+
7733+ *buf++ = PSB_2D_FENCE_BH;
7734+ *buf++ =
7735+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
7736+ PSB_2D_DST_STRIDE_SHIFT);
7737+ *buf++ = dst_offset;
7738+ *buf++ =
7739+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
7740+ PSB_2D_SRC_STRIDE_SHIFT);
7741+ *buf++ = src_offset;
7742+ *buf++ =
7743+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
7744+ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
7745+ *buf++ = blit_cmd;
7746+ *buf++ =
7747+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
7748+ PSB_2D_DST_YSTART_SHIFT);
7749+ *buf++ =
7750+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
7751+ PSB_2D_DST_YSIZE_SHIFT);
7752+ *buf++ = PSB_2D_FLUSH_BH;
7753+
7754+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
7755+}
7756+
7757+static void psbfb_copyarea_accel(struct fb_info *info,
7758+ const struct fb_copyarea *a)
7759+{
7760+ struct psbfb_par *par = info->par;
7761+ struct psb_framebuffer *psbfb = par->psbfb;
7762+ struct drm_framebuffer *fb = &psbfb->base;
7763+ struct drm_psb_private *dev_priv = par->dev->dev_private;
7764+ uint32_t offset;
7765+ uint32_t stride;
7766+ uint32_t src_format;
7767+ uint32_t dst_format;
7768+
7769+ if (!fb)
7770+ return;
7771+
7772+ offset = psbfb->offset;
7773+ stride = fb->pitch;
7774+
7775+ switch (fb->depth) {
7776+ case 8:
7777+ src_format = PSB_2D_SRC_332RGB;
7778+ dst_format = PSB_2D_DST_332RGB;
7779+ break;
7780+ case 15:
7781+ src_format = PSB_2D_SRC_555RGB;
7782+ dst_format = PSB_2D_DST_555RGB;
7783+ break;
7784+ case 16:
7785+ src_format = PSB_2D_SRC_565RGB;
7786+ dst_format = PSB_2D_DST_565RGB;
7787+ break;
7788+ case 24:
7789+ case 32:
7790+ /* this is wrong, but since we don't do blending it's okay */
7791+ src_format = PSB_2D_SRC_8888ARGB;
7792+ dst_format = PSB_2D_DST_8888ARGB;
7793+ break;
7794+ default:
7795+ /* software fallback */
7796+ cfb_copyarea(info, a);
7797+ return;
7798+ }
7799+
7800+ psb_accel_2d_copy(dev_priv,
7801+ offset, stride, src_format,
7802+ offset, stride, dst_format,
7803+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
7804+}
7805+
7806+static void psbfb_copyarea(struct fb_info *info,
7807+ const struct fb_copyarea *region)
7808+{
7809+ struct psbfb_par *par = info->par;
7810+ struct drm_device *dev = par->dev;
7811+ struct drm_psb_private *dev_priv = dev->dev_private;
7812+
7813+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
7814+ return;
7815+
7816+ if (info->flags & FBINFO_HWACCEL_DISABLED)
7817+ return cfb_copyarea(info, region);
7818+
7819+ if (psb_2d_trylock(dev_priv)) {
7820+ psb_check_power_state(dev, PSB_DEVICE_SGX);
7821+ psbfb_copyarea_accel(info, region);
7822+ psb_2d_unlock(dev_priv);
7823+ if (drm_psb_ospm && IS_MRST(dev))
7824+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
7825+ } else
7826+ cfb_copyarea(info, region);
7827+}
7828+
7829+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
7830+{
7831+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
7832+ return;
7833+
7834+ cfb_imageblit(info, image);
7835+}
7836+
7837+static void psbfb_onoff(struct fb_info *info, int dpms_mode)
7838+{
7839+ struct psbfb_par *par = info->par;
7840+ struct drm_device *dev = par->dev;
7841+ struct drm_crtc *crtc;
7842+ struct drm_encoder *encoder;
7843+ int i;
7844+
7845+ /*
7846+ * For each CRTC bound to this fb, set the DPMS state of the CRTC
7847+ * and its encoders: up before the encoders, down after them.
7848+ */
7849+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7850+ struct drm_crtc_helper_funcs *crtc_funcs =
7851+ crtc->helper_private;
7852+
7853+ for (i = 0; i < par->crtc_count; i++)
7854+ if (crtc->base.id == par->crtc_ids[i])
7855+ break;
7856+
7857+ if (i == par->crtc_count)
7858+ continue;
7859+
7860+ if (dpms_mode == DRM_MODE_DPMS_ON)
7861+ crtc_funcs->dpms(crtc, dpms_mode);
7862+
7863+ /* Found a CRTC on this fb, now find encoders */
7864+ list_for_each_entry(encoder,
7865+ &dev->mode_config.encoder_list, head) {
7866+ if (encoder->crtc == crtc) {
7867+ struct drm_encoder_helper_funcs *encoder_funcs;
7868+
7869+ encoder_funcs = encoder->helper_private;
7870+ encoder_funcs->dpms(encoder, dpms_mode);
7871+ }
7872+ }
7873+
7874+ if (dpms_mode == DRM_MODE_DPMS_OFF)
7875+ crtc_funcs->dpms(crtc, dpms_mode);
7876+ }
7877+}
7878+
7879+static int psbfb_blank(int blank_mode, struct fb_info *info)
7880+{
7881+ struct psbfb_par *par = info->par;
7882+
7883+ par->dpms_state = blank_mode;
7884+ PSB_DEBUG_PM("psbfb_blank \n");
7885+ switch (blank_mode) {
7886+ case FB_BLANK_UNBLANK:
7887+ psbfb_onoff(info, DRM_MODE_DPMS_ON);
7888+ break;
7889+ case FB_BLANK_NORMAL:
7890+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
7891+ break;
7892+ case FB_BLANK_HSYNC_SUSPEND:
7893+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
7894+ break;
7895+ case FB_BLANK_VSYNC_SUSPEND:
7896+ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND);
7897+ break;
7898+ case FB_BLANK_POWERDOWN:
7899+ psbfb_onoff(info, DRM_MODE_DPMS_OFF);
7900+ break;
7901+ }
7902+
7903+ return 0;
7904+}
7905+
7906+
7907+static int psbfb_kms_off(struct drm_device *dev, int suspend)
7908+{
7909+ struct drm_framebuffer *fb = NULL;
7910+ DRM_DEBUG("psbfb_kms_off\n");
7911+
7912+ mutex_lock(&dev->mode_config.mutex);
7913+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
7914+ struct fb_info *info = fb->fbdev;
7915+
7916+ if (suspend)
7917+ fb_set_suspend(info, 1);
7918+ }
7919+ mutex_unlock(&dev->mode_config.mutex);
7920+
7921+ return 0;
7922+}
7923+
7924+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
7925+ struct drm_file *file_priv)
7926+{
7927+ int ret;
7928+
7929+ if (drm_psb_no_fb)
7930+ return 0;
7931+ acquire_console_sem();
7932+ ret = psbfb_kms_off(dev, 0);
7933+ release_console_sem();
7934+
7935+ return ret;
7936+}
7937+
7938+static int psbfb_kms_on(struct drm_device *dev, int resume)
7939+{
7940+ struct drm_framebuffer *fb = NULL;
7941+
7942+ DRM_DEBUG("psbfb_kms_on\n");
7943+
7944+ mutex_lock(&dev->mode_config.mutex);
7945+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
7946+ struct fb_info *info = fb->fbdev;
7947+
7948+ if (resume)
7949+ fb_set_suspend(info, 0);
7950+
7951+ }
7952+ mutex_unlock(&dev->mode_config.mutex);
7953+
7954+ return 0;
7955+}
7956+
7957+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
7958+ struct drm_file *file_priv)
7959+{
7960+ int ret;
7961+
7962+ if (drm_psb_no_fb)
7963+ return 0;
7964+ acquire_console_sem();
7965+ ret = psbfb_kms_on(dev, 0);
7966+ release_console_sem();
7967+ drm_helper_disable_unused_functions(dev);
7968+ return ret;
7969+}
7970+
7971+void psbfb_suspend(struct drm_device *dev)
7972+{
7973+ acquire_console_sem();
7974+ psbfb_kms_off(dev, 1);
7975+ release_console_sem();
7976+}
7977+
7978+void psbfb_resume(struct drm_device *dev)
7979+{
7980+ acquire_console_sem();
7981+ psbfb_kms_on(dev, 1);
7982+ release_console_sem();
7983+ drm_helper_disable_unused_functions(dev);
7984+}
7985+
7986+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
7987+{
7988+ struct psbfb_par *par = info->par;
7989+ struct psb_framebuffer *psbfb = par->psbfb;
7990+ struct ttm_buffer_object *bo = psbfb->bo;
7991+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
7992+ unsigned long offset = vma->vm_pgoff;
7993+
7994+ /* Only allow mappings that start at the beginning of the buffer. */
7995+ if (vma->vm_pgoff != 0)
7996+ return -EINVAL;
7998+ if (offset + size > bo->num_pages)
7999+ return -EINVAL;
8000+
8001+ mutex_lock(&bo->mutex);
8002+ if (!psbfb->addr_space)
8003+ psbfb->addr_space = vma->vm_file->f_mapping;
8004+ mutex_unlock(&bo->mutex);
8005+
8006+ return ttm_fbdev_mmap(vma, bo);
8007+}
8008+
8009+int psbfb_sync(struct fb_info *info)
8010+{
8011+ struct psbfb_par *par = info->par;
8012+ struct drm_psb_private *dev_priv = par->dev->dev_private;
8013+
8014+ if (psb_2d_trylock(dev_priv)) {
8015+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
8016+ psb_idle_2d(par->dev);
8017+ psb_2d_unlock(dev_priv);
8018+ } else
8019+ udelay(5);
8020+
8021+ return 0;
8022+}
8023+
8024+static struct fb_ops psbfb_ops = {
8025+ .owner = THIS_MODULE,
8026+ .fb_check_var = psbfb_check_var,
8027+ .fb_set_par = psbfb_set_par,
8028+ .fb_setcolreg = psbfb_setcolreg,
8029+ .fb_fillrect = psbfb_fillrect,
8030+ .fb_copyarea = psbfb_copyarea,
8031+ .fb_imageblit = psbfb_imageblit,
8032+ .fb_mmap = psbfb_mmap,
8033+ .fb_sync = psbfb_sync,
8034+ .fb_blank = psbfb_blank,
8035+};
8036+
8037+static struct drm_mode_set panic_mode;
8038+
8039+int psbfb_panic(struct notifier_block *n, unsigned long unused,
8040+ void *panic_str)
8041+{
8042+ DRM_ERROR("panic occurred, switching back to text console\n");
8043+ drm_crtc_helper_set_config(&panic_mode);
8044+
8045+ return 0;
8046+}
8047+EXPORT_SYMBOL(psbfb_panic);
8048+
8049+static struct notifier_block paniced = {
8050+ .notifier_call = psbfb_panic,
8051+};
8052+
8053+
8054+static struct drm_framebuffer *
8055+psb_framebuffer_create(struct drm_device *dev,
8056+ struct drm_mode_fb_cmd *r, void *mm_private)
8057+{
8058+ struct psb_framebuffer *fb;
8059+ int ret;
8060+
8061+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
8062+ if (!fb)
8063+ return NULL;
8064+
8065+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
8066+
8067+ if (ret)
8068+ goto err;
8069+
8070+ drm_helper_mode_fill_fb_struct(&fb->base, r);
8071+
8072+ fb->bo = mm_private;
8073+
8074+ return &fb->base;
8075+
8076+err:
8077+ kfree(fb);
8078+ return NULL;
8079+}
8080+
8081+static struct drm_framebuffer *
8082+psb_user_framebuffer_create(struct drm_device *dev,
8083+ struct drm_file *filp, struct drm_mode_fb_cmd *r)
8084+{
8085+ struct ttm_buffer_object *bo = NULL;
8086+ uint64_t size;
8087+
8088+ bo = ttm_buffer_object_lookup(psb_fpriv(filp)->tfile, r->handle);
8089+ if (!bo)
8090+ return NULL;
8091+
8092+ /* JB: TODO not drop, make smarter */
8093+ size = ((uint64_t) bo->num_pages) << PAGE_SHIFT;
8094+ if (size < r->width * r->height * 4)
8095+ return NULL;
8096+
8097+ /* JB: TODO not drop, refcount buffer */
8098+ return psb_framebuffer_create(dev, r, bo);
8099+}
8100+
8101+int psbfb_create(struct drm_device *dev, uint32_t fb_width,
8102+ uint32_t fb_height, uint32_t surface_width,
8103+ uint32_t surface_height, struct psb_framebuffer **psbfb_p)
8104+{
8105+ struct fb_info *info;
8106+ struct psbfb_par *par;
8107+ struct drm_framebuffer *fb;
8108+ struct psb_framebuffer *psbfb;
8109+ struct ttm_bo_kmap_obj tmp_kmap;
8110+ struct drm_mode_fb_cmd mode_cmd;
8111+ struct device *device = &dev->pdev->dev;
8112+ struct ttm_bo_device *bdev = &psb_priv(dev)->bdev;
8113+ int size, aligned_size, ret;
8114+ struct ttm_buffer_object *fbo = NULL;
8115+ bool is_iomem;
8116+
8117+ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */
8118+ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */
8119+
8120+ mode_cmd.bpp = 32;
8121+ mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8);
8122+ mode_cmd.depth = 24;
8123+
8124+ size = mode_cmd.pitch * mode_cmd.height;
8125+ aligned_size = ALIGN(size, PAGE_SIZE);
8126+ ret = ttm_buffer_object_create(bdev,
8127+ aligned_size,
8128+ ttm_bo_type_kernel,
8129+ TTM_PL_FLAG_TT |
8130+ TTM_PL_FLAG_VRAM |
8131+ TTM_PL_FLAG_NO_EVICT,
8132+ 0, 0, 0, NULL, &fbo);
8133+
8134+ if (unlikely(ret != 0)) {
8135+ DRM_ERROR("failed to allocate framebuffer.\n");
8136+ return -ENOMEM;
8137+ }
8138+
8139+ mutex_lock(&dev->struct_mutex);
8140+ fb = psb_framebuffer_create(dev, &mode_cmd, fbo);
8141+ if (!fb) {
8142+ DRM_ERROR("failed to allocate fb.\n");
8143+ ret = -ENOMEM;
8144+ goto out_err0;
8145+ }
8146+ psbfb = to_psb_fb(fb);
8147+ psbfb->bo = fbo;
8148+
8149+ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
8150+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
8151+ if (!info) {
8152+ ret = -ENOMEM;
8153+ goto out_err1;
8154+ }
8155+
8156+ par = info->par;
8157+ par->psbfb = psbfb;
8158+
8159+ strcpy(info->fix.id, "psbfb");
8160+ info->fix.type = FB_TYPE_PACKED_PIXELS;
8161+ info->fix.visual = FB_VISUAL_TRUECOLOR;
8162+ info->fix.type_aux = 0;
8163+ info->fix.xpanstep = 1; /* doing it in hw */
8164+ info->fix.ypanstep = 1; /* doing it in hw */
8165+ info->fix.ywrapstep = 0;
8166+ info->fix.accel = FB_ACCEL_I830;
8168+
8169+ info->flags = FBINFO_DEFAULT;
8170+
8171+ info->fbops = &psbfb_ops;
8172+
8173+ info->fix.line_length = fb->pitch;
8174+ info->fix.smem_start =
8175+ dev->mode_config.fb_base + psbfb->bo->offset;
8176+ info->fix.smem_len = size;
8177+
8180+ ret = ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap);
8181+ if (ret) {
8182+ DRM_ERROR("error mapping fb: %d\n", ret);
8183+ goto out_err2;
8184+ }
8185+
8186+
8187+ info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
8188+ info->screen_size = size;
8189+
8190+ if (is_iomem)
8191+ memset_io(info->screen_base, 0, size);
8192+ else
8193+ memset(info->screen_base, 0, size);
8194+
8195+ info->pseudo_palette = fb->pseudo_palette;
8196+ info->var.xres_virtual = fb->width;
8197+ info->var.yres_virtual = fb->height;
8198+ info->var.bits_per_pixel = fb->bits_per_pixel;
8199+ info->var.xoffset = 0;
8200+ info->var.yoffset = 0;
8201+ info->var.activate = FB_ACTIVATE_NOW;
8202+ info->var.height = -1;
8203+ info->var.width = -1;
8204+
8205+ info->var.xres = fb_width;
8206+ info->var.yres = fb_height;
8207+
8208+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
8209+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
8210+
8211+ info->pixmap.size = 64 * 1024;
8212+ info->pixmap.buf_align = 8;
8213+ info->pixmap.access_align = 32;
8214+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
8215+ info->pixmap.scan_align = 1;
8216+
8217+ DRM_DEBUG("fb depth is %d\n", fb->depth);
8218+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
8219+ fill_fb_bitfield(&info->var, fb->depth);
8220+
8221+ fb->fbdev = info;
8222+
8223+ par->dev = dev;
8224+
8225+ /* To allow resizing without swapping buffers */
8226+ printk(KERN_INFO"allocated %dx%d fb: 0x%08lx, bo %p\n",
8227+ psbfb->base.width,
8228+ psbfb->base.height, psbfb->bo->offset, psbfb->bo);
8229+
8230+ if (psbfb_p)
8231+ *psbfb_p = psbfb;
8232+
8233+ mutex_unlock(&dev->struct_mutex);
8234+
8235+ return 0;
8236+out_err2:
8237+ framebuffer_release(info);
8238+out_err1:
8239+ fb->funcs->destroy(fb);
8240+out_err0:
8241+ mutex_unlock(&dev->struct_mutex);
8242+ ttm_bo_unref(&fbo);
8243+ return ret;
8244+}
8245+
8246+static int psbfb_multi_fb_probe_crtc(struct drm_device *dev,
8247+ struct drm_crtc *crtc)
8248+{
8249+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
8250+ struct drm_framebuffer *fb = crtc->fb;
8251+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
8252+ struct drm_connector *connector;
8253+ struct fb_info *info;
8254+ struct psbfb_par *par;
8255+ struct drm_mode_set *modeset;
8256+ unsigned int width, height;
8257+ int new_fb = 0;
8258+ int ret, i, conn_count;
8259+
8260+ if (!drm_helper_crtc_in_use(crtc))
8261+ return 0;
8262+
8263+ if (!crtc->desired_mode)
8264+ return 0;
8265+
8266+ width = crtc->desired_mode->hdisplay;
8267+ height = crtc->desired_mode->vdisplay;
8268+
8269+ /* is there an fb bound to this crtc already */
8270+ if (!psb_intel_crtc->mode_set.fb) {
8271+ ret =
8272+ psbfb_create(dev, width, height, width, height,
8273+ &psbfb);
8274+ if (ret)
8275+ return -EINVAL;
8276+ new_fb = 1;
8277+ } else {
8278+ fb = psb_intel_crtc->mode_set.fb;
8279+ if ((fb->width < width) || (fb->height < height))
8280+ return -EINVAL;
8281+ }
8282+
8283+ info = fb->fbdev;
8284+ par = info->par;
8285+
8286+ modeset = &psb_intel_crtc->mode_set;
8287+ modeset->fb = fb;
8288+ conn_count = 0;
8289+ list_for_each_entry(connector, &dev->mode_config.connector_list,
8290+ head) {
8291+ if (connector->encoder)
8292+ if (connector->encoder->crtc == modeset->crtc) {
8293+ modeset->connectors[conn_count] =
8294+ connector;
8295+ conn_count++;
8296+ if (conn_count > INTELFB_CONN_LIMIT)
8297+ BUG();
8298+ }
8299+ }
8300+
8301+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
8302+ modeset->connectors[i] = NULL;
8303+
8304+ par->crtc_ids[0] = crtc->base.id;
8305+
8306+ modeset->num_connectors = conn_count;
8307+ if (modeset->mode != modeset->crtc->desired_mode)
8308+ modeset->mode = modeset->crtc->desired_mode;
8309+
8310+ par->crtc_count = 1;
8311+
8312+ if (new_fb) {
8313+ info->var.pixclock = -1;
8314+ if (register_framebuffer(info) < 0)
8315+ return -EINVAL;
8316+ } else
8317+ psbfb_set_par(info);
8318+
8319+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
8320+ info->fix.id);
8321+
8322+ /* Switch back to kernel console on panic */
8323+ panic_mode = *modeset;
8324+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
8325+ printk(KERN_INFO "registered panic notifier\n");
8326+
8327+ return 0;
8328+}
8329+
8330+static int psbfb_multi_fb_probe(struct drm_device *dev)
8331+{
8332+
8333+ struct drm_crtc *crtc;
8334+ int ret = 0;
8335+
8336+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8337+ ret = psbfb_multi_fb_probe_crtc(dev, crtc);
8338+ if (ret)
8339+ return ret;
8340+ }
8341+ return ret;
8342+}
8343+
8344+static int psbfb_single_fb_probe(struct drm_device *dev)
8345+{
8346+ struct drm_crtc *crtc;
8347+ struct drm_connector *connector;
8348+ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1;
8349+ unsigned int surface_width = 0, surface_height = 0;
8350+ int new_fb = 0;
8351+ int crtc_count = 0;
8352+ int ret, i, conn_count = 0;
8353+ struct fb_info *info;
8354+ struct psbfb_par *par;
8355+ struct drm_mode_set *modeset = NULL;
8356+ struct drm_framebuffer *fb = NULL;
8357+ struct psb_framebuffer *psbfb = NULL;
8358+
8359+ /* first up get a count of crtcs now in use and
8360+ * new min/maxes width/heights */
8361+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8362+ if (drm_helper_crtc_in_use(crtc)) {
8363+ if (crtc->desired_mode) {
8364+ fb = crtc->fb;
8365+ if (crtc->desired_mode->hdisplay <
8366+ fb_width)
8367+ fb_width =
8368+ crtc->desired_mode->hdisplay;
8369+
8370+ if (crtc->desired_mode->vdisplay <
8371+ fb_height)
8372+ fb_height =
8373+ crtc->desired_mode->vdisplay;
8374+
8375+ if (crtc->desired_mode->hdisplay >
8376+ surface_width)
8377+ surface_width =
8378+ crtc->desired_mode->hdisplay;
8379+
8380+ if (crtc->desired_mode->vdisplay >
8381+ surface_height)
8382+ surface_height =
8383+ crtc->desired_mode->vdisplay;
8384+
8385+ }
8386+ crtc_count++;
8387+ }
8388+ }
8389+
8390+ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
8391+ /* hmm everyone went away - assume VGA cable just fell out
8392+ and will come back later. */
8393+ return 0;
8394+ }
8395+
8396+ /* do we have an fb already? */
8397+ if (list_empty(&dev->mode_config.fb_kernel_list)) {
8398+ /* create an fb if we don't have one */
8399+ ret =
8400+ psbfb_create(dev, fb_width, fb_height, surface_width,
8401+ surface_height, &psbfb);
8402+ if (ret)
8403+ return -EINVAL;
8404+ new_fb = 1;
8405+ fb = &psbfb->base;
8406+ } else {
8407+ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
8408+ struct drm_framebuffer, filp_head);
8409+
8410+ /* if someone hotplugs something bigger than we have already
8411+ * allocated, we are pwned. As really we can't resize an
8412+ * fbdev that is in the wild currently due to fbdev not really
8413+ * being designed for the lower layers moving stuff around
8414+ * under it - so, in the grand style of things, punt. */
8415+ if ((fb->width < surface_width)
8416+ || (fb->height < surface_height)) {
8417+ DRM_ERROR
8418+ ("Framebuffer not large enough to scale"
8419+ " console onto.\n");
8420+ return -EINVAL;
8421+ }
8422+ }
8423+
8424+ info = fb->fbdev;
8425+ par = info->par;
8426+
8427+ crtc_count = 0;
8428+ /* okay we need to setup new connector sets in the crtcs */
8429+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8430+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
8431+ modeset = &psb_intel_crtc->mode_set;
8432+ modeset->fb = fb;
8433+ conn_count = 0;
8434+ list_for_each_entry(connector,
8435+ &dev->mode_config.connector_list,
8436+ head) {
8437+ if (connector->encoder)
8438+ if (connector->encoder->crtc ==
8439+ modeset->crtc) {
8440+ modeset->connectors[conn_count] =
8441+ connector;
8442+ conn_count++;
8443+ if (conn_count >
8444+ INTELFB_CONN_LIMIT)
8445+ BUG();
8446+ }
8447+ }
8448+
8449+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
8450+ modeset->connectors[i] = NULL;
8451+
8452+ par->crtc_ids[crtc_count++] = crtc->base.id;
8453+
8454+ modeset->num_connectors = conn_count;
8455+ if (modeset->mode != modeset->crtc->desired_mode)
8456+ modeset->mode = modeset->crtc->desired_mode;
8457+ }
8458+ par->crtc_count = crtc_count;
8459+
8460+ if (new_fb) {
8461+ info->var.pixclock = -1;
8462+ if (register_framebuffer(info) < 0)
8463+ return -EINVAL;
8464+ } else
8465+ psbfb_set_par(info);
8466+
8467+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
8468+ info->fix.id);
8469+
8470+ /* Switch back to kernel console on panic */
8471+ panic_mode = *modeset;
8472+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
8473+ printk(KERN_INFO "registered panic notifier\n");
8474+
8475+ return 0;
8476+}
8477+
8478+int psbfb_probe(struct drm_device *dev)
8479+{
8480+ int ret = 0;
8481+
8482+ DRM_DEBUG("\n");
8483+
8484+ /* something has changed in the lower levels of hell - deal with it
8485+ here */
8486+
8487+ /* two modes : a) 1 fb to rule all crtcs.
8488+ b) one fb per crtc.
8489+ two actions 1) new connected device
8490+ 2) device removed.
8491+ case a/1 : if the fb surface isn't big enough -
8492+ resize the surface fb.
8493+ if the fb size isn't big enough - resize fb into surface.
8494+ if everything big enough configure the new crtc/etc.
8495+ case a/2 : undo the configuration
8496+ possibly resize down the fb to fit the new configuration.
8497+ case b/1 : see if it is on a new crtc - setup a new fb and add it.
8498+ case b/2 : teardown the new fb.
8499+ */
8500+
8501+ /* mode a first */
8502+ /* search for an fb */
8503+ if (0 /*i915_fbpercrtc == 1 */)
8504+ ret = psbfb_multi_fb_probe(dev);
8505+ else
8506+ ret = psbfb_single_fb_probe(dev);
8507+
8508+ return ret;
8509+}
8510+EXPORT_SYMBOL(psbfb_probe);
8511+
8512+int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
8513+{
8514+ struct fb_info *info;
8515+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
8516+
8517+ if (drm_psb_no_fb)
8518+ return 0;
8519+
8520+ info = fb->fbdev;
8521+
8522+ if (info) {
8523+ unregister_framebuffer(info);
8524+ ttm_bo_kunmap(&psbfb->kmap);
8525+ ttm_bo_unref(&psbfb->bo);
8526+ framebuffer_release(info);
8527+ }
8528+
8529+ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
8530+ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
8531+ return 0;
8532+}
8533+EXPORT_SYMBOL(psbfb_remove);
8534+
8535+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
8536+ struct drm_file *file_priv,
8537+ unsigned int *handle)
8538+{
8539+ /* JB: TODO currently we can't go from a bo to a handle with ttm */
8540+ (void) file_priv;
8541+ *handle = 0;
8542+ return 0;
8543+}
8544+
8545+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
8546+{
8547+ struct drm_device *dev = fb->dev;
8548+ if (fb->fbdev)
8549+ psbfb_remove(dev, fb);
8550+
8551+ /* JB: TODO not drop, refcount buffer */
8552+ drm_framebuffer_cleanup(fb);
8553+
8554+ kfree(fb);
8555+}
8556+
8557+static const struct drm_mode_config_funcs psb_mode_funcs = {
8558+ .fb_create = psb_user_framebuffer_create,
8559+ .fb_changed = psbfb_probe,
8560+};
8561+
8562+static void psb_setup_outputs(struct drm_device *dev)
8563+{
8564+ struct drm_psb_private *dev_priv =
8565+ (struct drm_psb_private *) dev->dev_private;
8566+ struct drm_connector *connector;
8567+
8568+ if (IS_MRST(dev)) {
8569+ if (dev_priv->iLVDS_enable)
8570+ /* Set up integrated LVDS for MRST */
8571+ mrst_lvds_init(dev, &dev_priv->mode_dev);
8572+ else {
8573+ /* Set up integrated MIPI for MRST */
8574+ mrst_dsi_init(dev, &dev_priv->mode_dev);
8575+ }
8576+ } else {
8577+ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
8578+ /* psb_intel_sdvo_init(dev, SDVOB); */
8579+ }
8580+
8581+ list_for_each_entry(connector, &dev->mode_config.connector_list,
8582+ head) {
8583+ struct psb_intel_output *psb_intel_output =
8584+ to_psb_intel_output(connector);
8585+ struct drm_encoder *encoder = &psb_intel_output->enc;
8586+ int crtc_mask = 0, clone_mask = 0;
8587+
8588+ /* valid crtcs */
8589+ switch (psb_intel_output->type) {
8590+ case INTEL_OUTPUT_SDVO:
8591+ crtc_mask = ((1 << 0) | (1 << 1));
8592+ clone_mask = (1 << INTEL_OUTPUT_SDVO);
8593+ break;
8594+ case INTEL_OUTPUT_LVDS:
8595+ if (IS_MRST(dev))
8596+ crtc_mask = (1 << 0);
8597+ else
8598+ crtc_mask = (1 << 1);
8599+
8600+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
8601+ break;
8602+ case INTEL_OUTPUT_MIPI:
8603+ crtc_mask = (1 << 0);
8604+ clone_mask = (1 << INTEL_OUTPUT_MIPI);
8605+ break;
8606+ }
8607+ encoder->possible_crtcs = crtc_mask;
8608+ encoder->possible_clones =
8609+ psb_intel_connector_clones(dev, clone_mask);
8610+ }
8611+}
8612+
8613+static void *psb_bo_from_handle(struct drm_device *dev,
8614+ struct drm_file *file_priv,
8615+ unsigned int handle)
8616+{
8617+ return ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
8618+ handle);
8619+}
8620+
8621+static size_t psb_bo_size(struct drm_device *dev, void *bof)
8622+{
8623+ struct ttm_buffer_object *bo = bof;
8624+ return bo->num_pages << PAGE_SHIFT;
8625+}
8626+
8627+static size_t psb_bo_offset(struct drm_device *dev, void *bof)
8628+{
8629+ struct drm_psb_private *dev_priv =
8630+ (struct drm_psb_private *) dev->dev_private;
8631+ struct ttm_buffer_object *bo = bof;
8632+
8633+ size_t offset = bo->offset - dev_priv->pg->gatt_start;
8634+ DRM_DEBUG("Offset %u\n", offset);
8635+ return offset;
8636+}
8637+
8638+static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
8639+{
8640+#if 0 /* JB: Not used for the drop */
8641+ struct ttm_buffer_object *fbo = bo;
8642+ /* We should do things like check that the buffer is in a
8643+ * scanout-capable placement, and make sure that it is pinned. */
8644+#endif
8645+ return 0;
8646+}
8647+
8648+static int psb_bo_unpin_for_scanout(struct drm_device *dev,
8649+ void *bo)
8650+{
8651+#if 0 /* JB: Not used for the drop */
8652+ struct ttm_buffer_object *fbo = bo;
8653+#endif
8654+ return 0;
8655+}
8656+
8657+void psb_modeset_init(struct drm_device *dev)
8658+{
8659+ struct drm_psb_private *dev_priv =
8660+ (struct drm_psb_private *) dev->dev_private;
8661+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
8662+ int i;
8663+ int num_pipe;
8664+
8665+ /* Init mm functions */
8666+ mode_dev->bo_from_handle = psb_bo_from_handle;
8667+ mode_dev->bo_size = psb_bo_size;
8668+ mode_dev->bo_offset = psb_bo_offset;
8669+ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
8670+ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
8671+
8672+ drm_mode_config_init(dev);
8673+
8674+ dev->mode_config.min_width = 0;
8675+ dev->mode_config.min_height = 0;
8676+
8677+ dev->mode_config.funcs = (void *) &psb_mode_funcs;
8678+
8679+ dev->mode_config.max_width = 2048;
8680+ dev->mode_config.max_height = 2048;
8681+
8682+ /* set memory base */
8683+ dev->mode_config.fb_base =
8684+ pci_resource_start(dev->pdev, 0);
8685+
8686+ if (IS_MRST(dev))
8687+ num_pipe = 1;
8688+ else
8689+ num_pipe = 2;
8690+
8691+
8692+ for (i = 0; i < num_pipe; i++)
8693+ psb_intel_crtc_init(dev, i, mode_dev);
8694+
8695+ psb_setup_outputs(dev);
8696+
8697+ /* setup fbs */
8698+ /* drm_initial_config(dev, false); */
8699+}
8700+
8701+void psb_modeset_cleanup(struct drm_device *dev)
8702+{
8703+ drm_mode_config_cleanup(dev);
8704+}
8705diff -uNr a/drivers/gpu/drm/psb/psb_fb.h b/drivers/gpu/drm/psb/psb_fb.h
8706--- a/drivers/gpu/drm/psb/psb_fb.h 1969-12-31 16:00:00.000000000 -0800
8707+++ b/drivers/gpu/drm/psb/psb_fb.h 2009-04-07 13:28:38.000000000 -0700
8708@@ -0,0 +1,47 @@
8709+/*
8710+ * Copyright (c) 2008, Intel Corporation
8711+ *
8712+ * Permission is hereby granted, free of charge, to any person obtaining a
8713+ * copy of this software and associated documentation files (the "Software"),
8714+ * to deal in the Software without restriction, including without limitation
8715+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8716+ * and/or sell copies of the Software, and to permit persons to whom the
8717+ * Software is furnished to do so, subject to the following conditions:
8718+ *
8719+ * The above copyright notice and this permission notice (including the next
8720+ * paragraph) shall be included in all copies or substantial portions of the
8721+ * Software.
8722+ *
8723+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
8724+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
8725+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
8726+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
8727+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
8728+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
8729+ * SOFTWARE.
8730+ *
8731+ * Authors:
8732+ * Eric Anholt <eric@anholt.net>
8733+ *
8734+ **/
8735+
8736+#ifndef _PSB_FB_H_
8737+#define _PSB_FB_H_
8738+
8739+struct psb_framebuffer {
8740+ struct drm_framebuffer base;
8741+ struct address_space *addr_space;
8742+ struct ttm_buffer_object *bo;
8743+ struct ttm_bo_kmap_obj kmap;
8744+ uint64_t offset;
8745+};
8746+
8747+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
8748+
8749+
8750+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
8751+
8752+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);
8753+
8754+#endif
8755+
8756diff -uNr a/drivers/gpu/drm/psb/psb_fence.c b/drivers/gpu/drm/psb/psb_fence.c
8757--- a/drivers/gpu/drm/psb/psb_fence.c 1969-12-31 16:00:00.000000000 -0800
8758+++ b/drivers/gpu/drm/psb/psb_fence.c 2009-04-07 13:28:38.000000000 -0700
8759@@ -0,0 +1,343 @@
8760+/**************************************************************************
8761+ * Copyright (c) 2007, Intel Corporation.
8762+ * All Rights Reserved.
8763+ *
8764+ * This program is free software; you can redistribute it and/or modify it
8765+ * under the terms and conditions of the GNU General Public License,
8766+ * version 2, as published by the Free Software Foundation.
8767+ *
8768+ * This program is distributed in the hope it will be useful, but WITHOUT
8769+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8770+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8771+ * more details.
8772+ *
8773+ * You should have received a copy of the GNU General Public License along with
8774+ * this program; if not, write to the Free Software Foundation, Inc.,
8775+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8776+ *
8777+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
8778+ * develop this driver.
8779+ *
8780+ **************************************************************************/
8781+/*
8782+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
8783+ */
8784+
8785+#include <drm/drmP.h>
8786+#include "psb_drv.h"
8787+
8788+static void psb_print_ta_fence_status(struct ttm_fence_device *fdev)
8789+{
8790+ struct drm_psb_private *dev_priv =
8791+ container_of(fdev, struct drm_psb_private, fdev);
8792+ struct psb_scheduler_seq *seq = dev_priv->scheduler.seq;
8793+ int i;
8794+
8795+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
8796+ DRM_INFO("Type 0x%02x, sequence %lu, reported %d\n",
8797+ (1 << i),
8798+ (unsigned long) seq->sequence,
8799+ seq->reported);
8800+ seq++;
8801+ }
8802+}
8803+
8804+static void psb_poll_ta(struct ttm_fence_device *fdev,
8805+ uint32_t waiting_types)
8806+{
8807+ struct drm_psb_private *dev_priv =
8808+ container_of(fdev, struct drm_psb_private, fdev);
8809+ uint32_t cur_flag = 1;
8810+ uint32_t flags = 0;
8811+ uint32_t sequence = 0;
8812+ uint32_t remaining = 0xFFFFFFFF;
8813+ uint32_t diff;
8814+
8815+ struct psb_scheduler *scheduler;
8816+ struct psb_scheduler_seq *seq;
8817+ struct ttm_fence_class_manager *fc =
8818+ &fdev->fence_class[PSB_ENGINE_TA];
8819+
8820+ scheduler = &dev_priv->scheduler;
8821+ seq = scheduler->seq;
8822+
8823+ while (likely(waiting_types & remaining)) {
8824+ if (!(waiting_types & cur_flag))
8825+ goto skip;
8826+ if (seq->reported)
8827+ goto skip;
8828+ if (flags == 0)
8829+ sequence = seq->sequence;
8830+ else if (sequence != seq->sequence) {
8831+ ttm_fence_handler(fdev, PSB_ENGINE_TA,
8832+ sequence, flags, 0);
8833+ sequence = seq->sequence;
8834+ flags = 0;
8835+ }
8836+ flags |= cur_flag;
8837+
8838+ /*
8839+ * Sequence may not have ended up on the ring yet.
8840+ * In that case, report it but don't mark it as
8841+ * reported. A subsequent poll will report it again.
8842+ */
8843+
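+ /*
+ * diff < wrap_diff means "sequence" is at or behind the latest
+ * queued sequence modulo wrap, i.e. it has actually reached the
+ * ring.
+ */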
8844+ diff = (fc->latest_queued_sequence - sequence) &
8845+ fc->sequence_mask;
8846+ if (diff < fc->wrap_diff)
8847+ seq->reported = 1;
8848+
8849+skip:
8850+ cur_flag <<= 1;
8851+ remaining <<= 1;
8852+ seq++;
8853+ }
8854+
8855+ if (flags)
8856+ ttm_fence_handler(fdev, PSB_ENGINE_TA, sequence, flags, 0);
8857+
8858+}
8859+
8860+static void psb_poll_other(struct ttm_fence_device *fdev,
8861+ uint32_t fence_class, uint32_t waiting_types)
8862+{
8863+ struct drm_psb_private *dev_priv =
8864+ container_of(fdev, struct drm_psb_private, fdev);
8865+ struct ttm_fence_class_manager *fc =
8866+ &fdev->fence_class[fence_class];
8867+ uint32_t sequence;
8868+
8869+ if (unlikely(!dev_priv))
8870+ return;
8871+
8872+ if (waiting_types) {
8873+ switch (fence_class) {
8874+ case PSB_ENGINE_VIDEO:
8875+ sequence = dev_priv->msvdx_current_sequence;
8876+ break;
8877+ case LNC_ENGINE_ENCODE:
8878+ sequence = dev_priv->topaz_current_sequence;
8879+ break;
8880+ default:
8881+ sequence = dev_priv->comm[fence_class << 4];
8882+ break;
8883+ }
8884+
8885+ ttm_fence_handler(fdev, fence_class, sequence,
8886+ _PSB_FENCE_TYPE_EXE, 0);
8887+
8888+ switch (fence_class) {
8889+ case PSB_ENGINE_2D:
8890+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
8891+ psb_2D_irq_off(dev_priv);
8892+ dev_priv->fence0_irq_on = 0;
8893+ } else if (!dev_priv->fence0_irq_on
8894+ && fc->waiting_types) {
8895+ psb_2D_irq_on(dev_priv);
8896+ dev_priv->fence0_irq_on = 1;
8897+ }
8898+ break;
8899+#if 0
8900+ /*
8901+ * FIXME: MSVDX irq switching
8902+ */
8903+
8904+ case PSB_ENGINE_VIDEO:
8905+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
8906+ psb_msvdx_irq_off(dev_priv);
8907+ dev_priv->fence2_irq_on = 0;
8908+ } else if (!dev_priv->fence2_irq_on
8909+ && fc->pending_exe_flush) {
8910+ psb_msvdx_irq_on(dev_priv);
8911+ dev_priv->fence2_irq_on = 1;
8912+ }
8913+ break;
8914+#endif
8915+ default:
8916+ return;
8917+ }
8918+ }
8919+}
8920+
8921+static void psb_fence_poll(struct ttm_fence_device *fdev,
8922+ uint32_t fence_class, uint32_t waiting_types)
8923+{
8924+ if (unlikely((PSB_D_PM & drm_psb_debug) && (fence_class == 0)))
8925+ PSB_DEBUG_PM("psb_fence_poll: %d\n", fence_class);
8926+ switch (fence_class) {
8927+ case PSB_ENGINE_TA:
8928+ psb_poll_ta(fdev, waiting_types);
8929+ break;
8930+ default:
8931+ psb_poll_other(fdev, fence_class, waiting_types);
8932+ break;
8933+ }
8934+}
8935+
8936+void psb_fence_error(struct drm_device *dev,
8937+ uint32_t fence_class,
8938+ uint32_t sequence, uint32_t type, int error)
8939+{
8940+ struct drm_psb_private *dev_priv = psb_priv(dev);
8941+ struct ttm_fence_device *fdev = &dev_priv->fdev;
8942+ unsigned long irq_flags;
8943+ struct ttm_fence_class_manager *fc =
8944+ &fdev->fence_class[fence_class];
8945+
8946+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
8947+ write_lock_irqsave(&fc->lock, irq_flags);
8948+ ttm_fence_handler(fdev, fence_class, sequence, type, error);
8949+ write_unlock_irqrestore(&fc->lock, irq_flags);
8950+}
8951+
8952+int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
8953+ uint32_t fence_class,
8954+ uint32_t flags, uint32_t *sequence,
8955+ unsigned long *timeout_jiffies)
8956+{
8957+ struct drm_psb_private *dev_priv =
8958+ container_of(fdev, struct drm_psb_private, fdev);
8959+ uint32_t seq = 0;
8960+ int ret;
8961+
8962+ if (!dev_priv)
8963+ return -EINVAL;
8964+
8965+ if (fence_class >= PSB_NUM_ENGINES)
8966+ return -EINVAL;
8967+
8968+ switch (fence_class) {
8969+ case PSB_ENGINE_2D:
8970+ spin_lock(&dev_priv->sequence_lock);
8971+ seq = ++dev_priv->sequence[fence_class];
8972+ spin_unlock(&dev_priv->sequence_lock);
8973+ ret = psb_blit_sequence(dev_priv, seq);
8974+ if (ret)
8975+ return ret;
8976+ break;
8977+ case PSB_ENGINE_VIDEO:
8978+ spin_lock(&dev_priv->sequence_lock);
8979+ seq = dev_priv->sequence[fence_class]++;
8980+ spin_unlock(&dev_priv->sequence_lock);
8981+ break;
8982+ case LNC_ENGINE_ENCODE:
8983+ spin_lock(&dev_priv->sequence_lock);
8984+ seq = dev_priv->sequence[fence_class]++;
8985+ spin_unlock(&dev_priv->sequence_lock);
8986+ break;
8987+ default:
8988+ spin_lock(&dev_priv->sequence_lock);
8989+ seq = dev_priv->sequence[fence_class];
8990+ spin_unlock(&dev_priv->sequence_lock);
8991+ }
8992+
8993+ *sequence = seq;
8994+
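+ /* TA fences time out after half a second, everything else after 3s. */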
8995+ if (fence_class == PSB_ENGINE_TA)
8996+ *timeout_jiffies = jiffies + DRM_HZ / 2;
8997+ else
8998+ *timeout_jiffies = jiffies + DRM_HZ * 3;
8999+
9000+ return 0;
9001+}
9002+
9003+uint32_t psb_fence_advance_sequence(struct drm_device *dev,
9004+ uint32_t fence_class)
9005+{
9006+ struct drm_psb_private *dev_priv =
9007+ (struct drm_psb_private *) dev->dev_private;
9008+ uint32_t sequence;
9009+
9010+ spin_lock(&dev_priv->sequence_lock);
9011+ sequence = ++dev_priv->sequence[fence_class];
9012+ spin_unlock(&dev_priv->sequence_lock);
9013+
9014+ return sequence;
9015+}
9016+
9017+static void psb_fence_lockup(struct ttm_fence_object *fence,
9018+ uint32_t fence_types)
9019+{
9020+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
9021+
9022+ if (fence->fence_class == PSB_ENGINE_TA) {
9023+
9024+ /*
9025+ * The 3D engine has its own lockup detection.
9026+ * Just extend the fence expiry time.
9027+ */
9028+
9029+ DRM_INFO("Extending 3D fence timeout.\n");
9030+ write_lock(&fc->lock);
9031+
9032+ DRM_INFO("Sequence %lu, types 0x%08x signaled 0x%08x\n",
9033+ (unsigned long) fence->sequence, fence_types,
9034+ fence->info.signaled_types);
9035+
9036+ if (time_after_eq(jiffies, fence->timeout_jiffies))
9037+ fence->timeout_jiffies = jiffies + DRM_HZ / 2;
9038+
9039+ psb_print_ta_fence_status(fence->fdev);
9040+ write_unlock(&fc->lock);
9041+ } else {
9042+ DRM_ERROR
9043+ ("GPU timeout (probable lockup) detected on engine %u "
9044+ "fence type 0x%08x\n",
9045+ (unsigned int) fence->fence_class,
9046+ (unsigned int) fence_types);
9047+ write_lock(&fc->lock);
9048+ ttm_fence_handler(fence->fdev, fence->fence_class,
9049+ fence->sequence, fence_types, -EBUSY);
9050+ write_unlock(&fc->lock);
9051+ }
9052+}
9053+
9054+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
9055+{
9056+ struct drm_psb_private *dev_priv = psb_priv(dev);
9057+ struct ttm_fence_device *fdev = &dev_priv->fdev;
9058+ struct ttm_fence_class_manager *fc =
9059+ &fdev->fence_class[fence_class];
9060+ unsigned long irq_flags;
9061+
9062+#ifdef FIX_TG_16
9063+ if (fence_class == PSB_ENGINE_2D) {
9064+
9065+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
9066+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
9067+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
9068+ _PSB_C2B_STATUS_BUSY) == 0))
9069+ psb_resume_ta_2d_idle(dev_priv);
9070+ }
9071+#endif
9072+ write_lock_irqsave(&fc->lock, irq_flags);
9073+ psb_fence_poll(fdev, fence_class, fc->waiting_types);
9074+ write_unlock_irqrestore(&fc->lock, irq_flags);
9075+}
9076+
9077+
9078+static struct ttm_fence_driver psb_ttm_fence_driver = {
9079+ .has_irq = NULL,
9080+ .emit = psb_fence_emit_sequence,
9081+ .flush = NULL,
9082+ .poll = psb_fence_poll,
9083+ .needed_flush = NULL,
9084+ .wait = NULL,
9085+ .signaled = NULL,
9086+ .lockup = psb_fence_lockup,
9087+};
9088+
9089+int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
9090+{
9091+ struct drm_psb_private *dev_priv =
9092+ container_of(fdev, struct drm_psb_private, fdev);
9093+ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
9094+ .flush_diff = (1 << 29),
9095+ .sequence_mask = 0xFFFFFFFF
9096+ };
9097+
9098+ return ttm_fence_device_init(PSB_NUM_ENGINES,
9099+ dev_priv->mem_global_ref.object,
9100+ fdev, &fci, 1,
9101+ &psb_ttm_fence_driver);
9102+}
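The wrap_diff of 1 << 30 above is what lets the 32-bit fence sequence counter wrap safely: ordering is decided by the unsigned distance between two sequence numbers, not by their magnitudes. A minimal standalone sketch of that comparison (the helper name seq_passed and the sample values are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define WRAP_DIFF (1u << 30)	/* mirrors .wrap_diff above */

/* "a has passed b" iff the unsigned distance from b to a is small. */
static int seq_passed(uint32_t a, uint32_t b)
{
	return (a - b) < WRAP_DIFF;
}

int main(void)
{
	printf("%d\n", seq_passed(105, 100));		/* 1: ordinary case */
	printf("%d\n", seq_passed(5, 0xFFFFFFF0u));	/* 1: across the wrap */
	printf("%d\n", seq_passed(100, 105));		/* 0: not yet reached */
	return 0;
}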
9103diff -uNr a/drivers/gpu/drm/psb/psb_gtt.c b/drivers/gpu/drm/psb/psb_gtt.c
9104--- a/drivers/gpu/drm/psb/psb_gtt.c 1969-12-31 16:00:00.000000000 -0800
9105+++ b/drivers/gpu/drm/psb/psb_gtt.c 2009-04-07 13:28:38.000000000 -0700
9106@@ -0,0 +1,257 @@
9107+/**************************************************************************
9108+ * Copyright (c) 2007, Intel Corporation.
9109+ * All Rights Reserved.
9110+ *
9111+ * This program is free software; you can redistribute it and/or modify it
9112+ * under the terms and conditions of the GNU General Public License,
9113+ * version 2, as published by the Free Software Foundation.
9114+ *
9115+ * This program is distributed in the hope it will be useful, but WITHOUT
9116+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9117+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
9118+ * more details.
9119+ *
9120+ * You should have received a copy of the GNU General Public License along with
9121+ * this program; if not, write to the Free Software Foundation, Inc.,
9122+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9123+ *
9124+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
9125+ * develop this driver.
9126+ *
9127+ **************************************************************************/
9128+/*
9129+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
9130+ */
9131+#include <drm/drmP.h>
9132+#include "psb_drv.h"
9133+
9134+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
9135+{
9136+ uint32_t mask = PSB_PTE_VALID;
9137+
9138+ if (type & PSB_MMU_CACHED_MEMORY)
9139+ mask |= PSB_PTE_CACHED;
9140+ if (type & PSB_MMU_RO_MEMORY)
9141+ mask |= PSB_PTE_RO;
9142+ if (type & PSB_MMU_WO_MEMORY)
9143+ mask |= PSB_PTE_WO;
9144+
9145+ return (pfn << PAGE_SHIFT) | mask;
9146+}
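psb_gtt_mask_pte packs the page-frame number into the high bits of a 32-bit GTT entry and ORs the access flags into the low bits freed up by PAGE_SHIFT. A self-contained sketch of the round trip (the DEMO_* bit values are stand-ins for illustration; the real PSB_PTE_* definitions live in psb_drv.h):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PTE_VALID  0x1	/* assumed bit positions, demo only */
#define DEMO_PTE_CACHED 0x2

int main(void)
{
	uint32_t pfn = 0x1234;
	uint32_t pte = (pfn << DEMO_PAGE_SHIFT) | DEMO_PTE_VALID | DEMO_PTE_CACHED;

	/* pfn occupies bits 31:12; the flags sit in the low 12 bits. */
	printf("pte=0x%08x pfn=0x%05x flags=0x%03x\n", pte,
	       pte >> DEMO_PAGE_SHIFT, pte & ((1u << DEMO_PAGE_SHIFT) - 1));
	return 0;
}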
9147+
9148+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
9149+{
9150+ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
9151+
9152+ if (!tmp)
9153+ return NULL;
9154+
9155+ init_rwsem(&tmp->sem);
9156+ tmp->dev = dev;
9157+
9158+ return tmp;
9159+}
9160+
9161+void psb_gtt_takedown(struct psb_gtt *pg, int free)
9162+{
9163+	struct drm_psb_private *dev_priv;
9164+
9165+	if (!pg)
9166+		return;
9167+	dev_priv = pg->dev->dev_private;
+
9168+ if (pg->gtt_map) {
9169+ iounmap(pg->gtt_map);
9170+ pg->gtt_map = NULL;
9171+ }
9172+ if (pg->initialized) {
9173+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
9174+ pg->gmch_ctrl);
9175+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
9176+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
9177+ }
9178+ if (free)
9179+ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
9180+}
9181+
9182+int psb_gtt_init(struct psb_gtt *pg, int resume)
9183+{
9184+ struct drm_device *dev = pg->dev;
9185+ struct drm_psb_private *dev_priv = dev->dev_private;
9186+ unsigned gtt_pages;
9187+ unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
9188+ unsigned i, num_pages;
9189+ unsigned pfn_base;
9190+
9191+ int ret = 0;
9192+ uint32_t pte;
9193+
9194+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
9195+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
9196+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
9197+
9198+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
9199+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
9200+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
9201+
9202+ pg->initialized = 1;
9203+
9204+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
9205+
9206+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
9207+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
9208+ gtt_pages =
9209+ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
9210+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
9211+ >> PAGE_SHIFT;
9212+
9213+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
9214+ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
9215+
9216+ ci_stolen_size = dev_priv->ci_region_size;
9217+ /* add CI & RAR share buffer space to stolen_size */
9218+ /* stolen_size = vram_stolen_size + ci_stolen_size; */
9219+ stolen_size = vram_stolen_size;
9220+
9221+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
9222+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
9223+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
9224+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
9225+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
9226+
9227+	if (resume && ((gtt_pages != pg->gtt_pages) ||
9228+	    (stolen_size != pg->stolen_size))) {
9229+ DRM_ERROR("GTT resume error.\n");
9230+ ret = -EINVAL;
9231+ goto out_err;
9232+ }
9233+
9234+ pg->gtt_pages = gtt_pages;
9235+ pg->stolen_size = stolen_size;
9236+ pg->vram_stolen_size = vram_stolen_size;
9237+ pg->ci_stolen_size = ci_stolen_size;
9238+ pg->gtt_map =
9239+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
9240+ if (!pg->gtt_map) {
9241+		DRM_ERROR("Failed to map the GTT.\n");
9242+ ret = -ENOMEM;
9243+ goto out_err;
9244+ }
9245+
9246+ /*
9247+ * insert vram stolen pages.
9248+ */
9249+
9250+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
9251+ num_pages = vram_stolen_size >> PAGE_SHIFT;
9252+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
9253+ num_pages, pfn_base);
9254+ for (i = 0; i < num_pages; ++i) {
9255+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
9256+ iowrite32(pte, pg->gtt_map + i);
9257+ }
9258+#if 0
9259+ /*
9260+ * insert CI stolen pages
9261+ */
9262+
9263+ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
9264+ num_pages = ci_stolen_size >> PAGE_SHIFT;
9265+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
9266+ num_pages, pfn_base);
9267+ for (; i < num_pages; ++i) {
9268+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
9269+ iowrite32(pte, pg->gtt_map + i);
9270+ }
9271+#endif
9272+ /*
9273+ * Init rest of gtt.
9274+ */
9275+
9276+ pfn_base = page_to_pfn(dev_priv->scratch_page);
9277+ pte = psb_gtt_mask_pte(pfn_base, 0);
9278+ PSB_DEBUG_INIT("Initializing the rest of a total "
9279+ "of %d gtt pages.\n", pg->gatt_pages);
9280+
9281+ for (; i < pg->gatt_pages; ++i)
9282+ iowrite32(pte, pg->gtt_map + i);
9283+ (void) ioread32(pg->gtt_map + i - 1);
9284+
9285+ return 0;
9286+
9287+out_err:
9288+ psb_gtt_takedown(pg, 0);
9289+ return ret;
9290+}
9291+
9292+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
9293+ unsigned offset_pages, unsigned num_pages,
9294+ unsigned desired_tile_stride,
9295+ unsigned hw_tile_stride, int type)
9296+{
9297+ unsigned rows = 1;
9298+ unsigned add;
9299+ unsigned row_add;
9300+ unsigned i;
9301+ unsigned j;
9302+ uint32_t *cur_page = NULL;
9303+ uint32_t pte;
9304+
9305+ if (hw_tile_stride)
9306+ rows = num_pages / desired_tile_stride;
9307+ else
9308+ desired_tile_stride = num_pages;
9309+
9310+ add = desired_tile_stride;
9311+ row_add = hw_tile_stride;
9312+
9313+ down_read(&pg->sem);
9314+ for (i = 0; i < rows; ++i) {
9315+ cur_page = pg->gtt_map + offset_pages;
9316+ for (j = 0; j < desired_tile_stride; ++j) {
9317+ pte =
9318+ psb_gtt_mask_pte(page_to_pfn(*pages++), type);
9319+ iowrite32(pte, cur_page++);
9320+ }
9321+ offset_pages += add;
9322+ }
9323+ (void) ioread32(cur_page - 1);
9324+ up_read(&pg->sem);
9325+
9326+ return 0;
9327+}
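The insert loop above writes num_pages PTEs in rows of desired_tile_stride entries, advancing the starting GTT slot by the same stride per row; when hw_tile_stride is zero the whole request collapses into a single row. A host-side sketch of the identical index arithmetic (arbitrary values, no hardware access):

#include <stdio.h>

int main(void)
{
	unsigned offset_pages = 16;		/* first GTT entry to fill */
	unsigned num_pages = 8;
	unsigned desired_tile_stride = 4;	/* entries written per row */
	unsigned hw_tile_stride = 4;		/* nonzero selects the tiled path */
	unsigned rows, i, j, page = 0;

	rows = hw_tile_stride ? num_pages / desired_tile_stride : 1;
	if (!hw_tile_stride)
		desired_tile_stride = num_pages;

	for (i = 0; i < rows; ++i) {
		for (j = 0; j < desired_tile_stride; ++j)
			printf("page %u -> gtt slot %u\n", page++,
			       offset_pages + j);
		offset_pages += desired_tile_stride;	/* the 'add' above */
	}
	return 0;
}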
9328+
9329+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
9330+ unsigned num_pages, unsigned desired_tile_stride,
9331+ unsigned hw_tile_stride)
9332+{
9333+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
9334+ unsigned rows = 1;
9335+ unsigned add;
9336+ unsigned row_add;
9337+ unsigned i;
9338+ unsigned j;
9339+ uint32_t *cur_page = NULL;
9340+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
9341+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
9342+
9343+ if (hw_tile_stride)
9344+ rows = num_pages / desired_tile_stride;
9345+ else
9346+ desired_tile_stride = num_pages;
9347+
9348+ add = desired_tile_stride;
9349+ row_add = hw_tile_stride;
9350+
9351+ down_read(&pg->sem);
9352+ for (i = 0; i < rows; ++i) {
9353+ cur_page = pg->gtt_map + offset_pages;
9354+ for (j = 0; j < desired_tile_stride; ++j)
9355+ iowrite32(pte, cur_page++);
9356+
9357+ offset_pages += add;
9358+ }
9359+ (void) ioread32(cur_page - 1);
9360+ up_read(&pg->sem);
9361+
9362+ return 0;
9363+}
9364diff -uNr a/drivers/gpu/drm/psb/psb_intel_display.c b/drivers/gpu/drm/psb/psb_intel_display.c
9365--- a/drivers/gpu/drm/psb/psb_intel_display.c 1969-12-31 16:00:00.000000000 -0800
9366+++ b/drivers/gpu/drm/psb/psb_intel_display.c 2009-04-07 13:28:38.000000000 -0700
9367@@ -0,0 +1,2435 @@
9368+/*
9369+ * Copyright © 2006-2007 Intel Corporation
9370+ *
9371+ * Permission is hereby granted, free of charge, to any person obtaining a
9372+ * copy of this software and associated documentation files (the "Software"),
9373+ * to deal in the Software without restriction, including without limitation
9374+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9375+ * and/or sell copies of the Software, and to permit persons to whom the
9376+ * Software is furnished to do so, subject to the following conditions:
9377+ *
9378+ * The above copyright notice and this permission notice (including the next
9379+ * paragraph) shall be included in all copies or substantial portions of the
9380+ * Software.
9381+ *
9382+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
9383+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9384+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
9385+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
9386+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
9387+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
9388+ * DEALINGS IN THE SOFTWARE.
9389+ *
9390+ * Authors:
9391+ * Eric Anholt <eric@anholt.net>
9392+ */
9393+
9394+#include <linux/i2c.h>
9395+
9396+#include <drm/drm_crtc_helper.h>
9397+#include "psb_fb.h"
9398+#include "psb_intel_display.h"
9399+
9400+
9401+struct psb_intel_clock_t {
9402+ /* given values */
9403+ int n;
9404+ int m1, m2;
9405+ int p1, p2;
9406+ /* derived values */
9407+ int dot;
9408+ int vco;
9409+ int m;
9410+ int p;
9411+};
9412+
9413+struct psb_intel_range_t {
9414+ int min, max;
9415+};
9416+
9417+struct psb_intel_p2_t {
9418+ int dot_limit;
9419+ int p2_slow, p2_fast;
9420+};
9421+
9422+#define INTEL_P2_NUM 2
9423+
9424+struct psb_intel_limit_t {
9425+ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
9426+ struct psb_intel_p2_t p2;
9427+};
9428+
9429+#define I8XX_DOT_MIN 25000
9430+#define I8XX_DOT_MAX 350000
9431+#define I8XX_VCO_MIN 930000
9432+#define I8XX_VCO_MAX 1400000
9433+#define I8XX_N_MIN 3
9434+#define I8XX_N_MAX 16
9435+#define I8XX_M_MIN 96
9436+#define I8XX_M_MAX 140
9437+#define I8XX_M1_MIN 18
9438+#define I8XX_M1_MAX 26
9439+#define I8XX_M2_MIN 6
9440+#define I8XX_M2_MAX 16
9441+#define I8XX_P_MIN 4
9442+#define I8XX_P_MAX 128
9443+#define I8XX_P1_MIN 2
9444+#define I8XX_P1_MAX 33
9445+#define I8XX_P1_LVDS_MIN 1
9446+#define I8XX_P1_LVDS_MAX 6
9447+#define I8XX_P2_SLOW 4
9448+#define I8XX_P2_FAST 2
9449+#define I8XX_P2_LVDS_SLOW 14
9450+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
9451+#define I8XX_P2_SLOW_LIMIT 165000
9452+
9453+#define I9XX_DOT_MIN 20000
9454+#define I9XX_DOT_MAX 400000
9455+#define I9XX_VCO_MIN 1400000
9456+#define I9XX_VCO_MAX 2800000
9457+#define I9XX_N_MIN 3
9458+#define I9XX_N_MAX 8
9459+#define I9XX_M_MIN 70
9460+#define I9XX_M_MAX 120
9461+#define I9XX_M1_MIN 10
9462+#define I9XX_M1_MAX 20
9463+#define I9XX_M2_MIN 5
9464+#define I9XX_M2_MAX 9
9465+#define I9XX_P_SDVO_DAC_MIN 5
9466+#define I9XX_P_SDVO_DAC_MAX 80
9467+#define I9XX_P_LVDS_MIN 7
9468+#define I9XX_P_LVDS_MAX 98
9469+#define I9XX_P1_MIN 1
9470+#define I9XX_P1_MAX 8
9471+#define I9XX_P2_SDVO_DAC_SLOW 10
9472+#define I9XX_P2_SDVO_DAC_FAST 5
9473+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
9474+#define I9XX_P2_LVDS_SLOW 14
9475+#define I9XX_P2_LVDS_FAST 7
9476+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
9477+
9478+#define INTEL_LIMIT_I8XX_DVO_DAC 0
9479+#define INTEL_LIMIT_I8XX_LVDS 1
9480+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
9481+#define INTEL_LIMIT_I9XX_LVDS 3
9482+
9483+static const struct psb_intel_limit_t psb_intel_limits[] = {
9484+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
9485+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
9486+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
9487+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
9488+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
9489+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
9490+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
9491+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
9492+ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
9493+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
9494+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
9495+ },
9496+ { /* INTEL_LIMIT_I8XX_LVDS */
9497+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
9498+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
9499+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
9500+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
9501+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
9502+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
9503+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
9504+ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
9505+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
9506+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
9507+ },
9508+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
9509+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
9510+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
9511+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
9512+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
9513+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
9514+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
9515+ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
9516+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
9517+ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
9518+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
9519+ I9XX_P2_SDVO_DAC_FAST},
9520+ },
9521+ { /* INTEL_LIMIT_I9XX_LVDS */
9522+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
9523+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
9524+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
9525+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
9526+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
9527+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
9528+ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
9529+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
9530+	/* The single-channel range is 25-112 MHz, and dual-channel
9531+	 * is 80-224 MHz. Prefer single channel as much as possible.
9532+ */
9533+ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
9534+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
9535+ },
9536+};
9537+
9538+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
9539+{
9540+ struct drm_device *dev = crtc->dev;
9541+ const struct psb_intel_limit_t *limit;
9542+
9543+ if (IS_I9XX(dev)) {
9544+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
9545+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
9546+ else
9547+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
9548+ } else {
9549+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
9550+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS];
9551+ else
9552+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
9553+ }
9554+ return limit;
9555+}
9556+
9557+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
9558+
9559+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
9560+{
9561+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
9562+ clock->p = clock->p1 * clock->p2;
9563+ clock->vco = refclk * clock->m / (clock->n + 2);
9564+ clock->dot = clock->vco / clock->p;
9565+}
9566+
9567+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
9568+
9569+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
9570+{
9571+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
9572+ clock->p = clock->p1 * clock->p2;
9573+ clock->vco = refclk * clock->m / (clock->n + 2);
9574+ clock->dot = clock->vco / clock->p;
9575+}
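Both helpers evaluate dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / (p1 * p2); only the limit tables they are checked against differ. A worked example in plain C, with divisor values picked from inside the I9XX_* windows above purely for illustration:

#include <stdio.h>

int main(void)
{
	/* All frequencies are in kHz, matching the driver's convention. */
	int refclk = 96000, m1 = 12, m2 = 7, n = 3, p1 = 2, p2 = 10;
	int m = 5 * (m1 + 2) + (m2 + 2);	/* 79, inside 70..120 */
	int p = p1 * p2;			/* 20 */
	int vco = refclk * m / (n + 2);		/* 1516800, inside the VCO window */
	int dot = vco / p;			/* 75840 */

	printf("m=%d p=%d vco=%d kHz dot=%d kHz\n", m, p, vco, dot);
	return 0;
}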
9576+
9577+static void psb_intel_clock(struct drm_device *dev, int refclk,
9578+ struct psb_intel_clock_t *clock)
9579+{
9580+ if (IS_I9XX(dev))
9581+ return i9xx_clock(refclk, clock);
9582+ else
9583+ return i8xx_clock(refclk, clock);
9584+}
9585+
9586+/**
9587+ * Returns whether any output on the specified pipe is of the specified type
9588+ */
9589+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
9590+{
9591+ struct drm_device *dev = crtc->dev;
9592+ struct drm_mode_config *mode_config = &dev->mode_config;
9593+ struct drm_connector *l_entry;
9594+
9595+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
9596+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
9597+ struct psb_intel_output *psb_intel_output =
9598+ to_psb_intel_output(l_entry);
9599+ if (psb_intel_output->type == type)
9600+ return true;
9601+ }
9602+ }
9603+ return false;
9604+}
9605+
9606+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
9607+/**
9608+ * Returns whether the given set of divisors are valid for a given refclk with
9609+ * the given connectors.
9610+ */
9611+
9612+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
9613+ struct psb_intel_clock_t *clock)
9614+{
9615+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
9616+
9617+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
9618+ INTELPllInvalid("p1 out of range\n");
9619+ if (clock->p < limit->p.min || limit->p.max < clock->p)
9620+ INTELPllInvalid("p out of range\n");
9621+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
9622+ INTELPllInvalid("m2 out of range\n");
9623+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
9624+ INTELPllInvalid("m1 out of range\n");
9625+ if (clock->m1 <= clock->m2)
9626+ INTELPllInvalid("m1 <= m2\n");
9627+ if (clock->m < limit->m.min || limit->m.max < clock->m)
9628+ INTELPllInvalid("m out of range\n");
9629+ if (clock->n < limit->n.min || limit->n.max < clock->n)
9630+ INTELPllInvalid("n out of range\n");
9631+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
9632+ INTELPllInvalid("vco out of range\n");
9633+ /* XXX: We may need to be checking "Dot clock"
9634+ * depending on the multiplier, connector, etc.,
9635+ * rather than just a single range.
9636+ */
9637+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
9638+ INTELPllInvalid("dot out of range\n");
9639+
9640+ return true;
9641+}
9642+
9643+/**
9644+ * Returns a set of divisors for the desired target clock with the given
9645+ * refclk, or false if no combination fits. The returned values represent
9646+ * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ * (A standalone sketch of this search appears after the function below.)
9647+ */
9648+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
9649+ int refclk,
9650+ struct psb_intel_clock_t *best_clock)
9651+{
9652+ struct drm_device *dev = crtc->dev;
9653+ struct psb_intel_clock_t clock;
9654+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
9655+ int err = target;
9656+
9657+ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
9658+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
9659+ /*
9660+ * For LVDS, if the panel is on, just rely on its current
9661+ * settings for dual-channel. We haven't figured out how to
9662+ * reliably set up different single/dual channel state, if we
9663+ * even can.
9664+ */
9665+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
9666+ LVDS_CLKB_POWER_UP)
9667+ clock.p2 = limit->p2.p2_fast;
9668+ else
9669+ clock.p2 = limit->p2.p2_slow;
9670+ } else {
9671+ if (target < limit->p2.dot_limit)
9672+ clock.p2 = limit->p2.p2_slow;
9673+ else
9674+ clock.p2 = limit->p2.p2_fast;
9675+ }
9676+
9677+ memset(best_clock, 0, sizeof(*best_clock));
9678+
9679+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
9680+ clock.m1++) {
9681+ for (clock.m2 = limit->m2.min;
9682+ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
9683+ clock.m2++) {
9684+ for (clock.n = limit->n.min;
9685+ clock.n <= limit->n.max; clock.n++) {
9686+ for (clock.p1 = limit->p1.min;
9687+ clock.p1 <= limit->p1.max;
9688+ clock.p1++) {
9689+ int this_err;
9690+
9691+ psb_intel_clock(dev, refclk, &clock);
9692+
9693+ if (!psb_intel_PLL_is_valid
9694+ (crtc, &clock))
9695+ continue;
9696+
9697+ this_err = abs(clock.dot - target);
9698+ if (this_err < err) {
9699+ *best_clock = clock;
9700+ err = this_err;
9701+ }
9702+ }
9703+ }
9704+ }
9705+ }
9706+
9707+ return err != target;
9708+}
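A standalone sketch of the same exhaustive search, reduced to the INTEL_LIMIT_I9XX_SDVO_DAC ranges from the table above and a hypothetical 65000 kHz target; the m/vco/dot window checks stand in for the full psb_intel_PLL_is_valid():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int refclk = 96000, target = 65000;	/* kHz, illustrative */
	int p2 = target < 200000 ? 10 : 5;	/* dot_limit rule from the table */
	int best_err = target, bm1 = 0, bm2 = 0, bn = 0, bp1 = 0;
	int m1, m2, n, p1;

	for (m1 = 10; m1 <= 20; m1++)
	for (m2 = 5; m2 < m1 && m2 <= 9; m2++)
	for (n = 3; n <= 8; n++)
	for (p1 = 1; p1 <= 8; p1++) {
		int m = 5 * (m1 + 2) + (m2 + 2);
		int vco = refclk * m / (n + 2);
		int dot = vco / (p1 * p2);
		int err = abs(dot - target);

		if (m < 70 || m > 120 || vco < 1400000 || vco > 2800000 ||
		    dot < 20000 || dot > 400000)
			continue;
		if (err < best_err) {
			best_err = err;
			bm1 = m1; bm2 = m2; bn = n; bp1 = p1;
		}
	}
	printf("m1=%d m2=%d n=%d p1=%d p2=%d err=%d kHz\n",
	       bm1, bm2, bn, bp1, p2, best_err);
	return 0;
}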
9709+
9710+void psb_intel_wait_for_vblank(struct drm_device *dev)
9711+{
9712+	/* Wait for 20 ms, i.e. one cycle at 50 Hz. */
9713+	mdelay(20);
9714+}
9715+
9716+int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
9717+{
9718+ struct drm_device *dev = crtc->dev;
9719+ /* struct drm_i915_master_private *master_priv; */
9720+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
9721+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
9722+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
9723+ int pipe = psb_intel_crtc->pipe;
9724+ unsigned long Start, Offset;
9725+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
9726+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
9727+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
9728+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
9729+ u32 dspcntr;
9730+
9731+ /* no fb bound */
9732+ if (!crtc->fb) {
9733+ DRM_DEBUG("No FB bound\n");
9734+ return 0;
9735+ }
9736+
9737+ if (IS_MRST(dev) && (pipe == 0))
9738+ dspbase = MRST_DSPABASE;
9739+
9740+ Start = mode_dev->bo_offset(dev, psbfb->bo);
9741+ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
9742+
9743+ REG_WRITE(dspstride, crtc->fb->pitch);
9744+
9745+ dspcntr = REG_READ(dspcntr_reg);
9746+ switch (crtc->fb->bits_per_pixel) {
9747+ case 8:
9748+ dspcntr |= DISPPLANE_8BPP;
9749+ break;
9750+ case 16:
9751+ if (crtc->fb->depth == 15)
9752+ dspcntr |= DISPPLANE_15_16BPP;
9753+ else
9754+ dspcntr |= DISPPLANE_16BPP;
9755+ break;
9756+ case 24:
9757+ case 32:
9758+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
9759+ break;
9760+ default:
9761+ DRM_ERROR("Unknown color depth\n");
9762+ return -EINVAL;
9763+ }
9764+ REG_WRITE(dspcntr_reg, dspcntr);
9765+
9766+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
9767+ if (IS_I965G(dev) || IS_MRST(dev)) {
9768+ REG_WRITE(dspbase, Offset);
9769+ REG_READ(dspbase);
9770+ REG_WRITE(dspsurf, Start);
9771+ REG_READ(dspsurf);
9772+ } else {
9773+ REG_WRITE(dspbase, Start + Offset);
9774+ REG_READ(dspbase);
9775+ }
9776+
9777+ if (!dev->primary->master)
9778+ return 0;
9779+
9780+#if 0 /* JB: Enable sarea later */
9781+ master_priv = dev->primary->master->driver_priv;
9782+ if (!master_priv->sarea_priv)
9783+ return 0;
9784+
9785+ switch (pipe) {
9786+ case 0:
9787+ master_priv->sarea_priv->planeA_x = x;
9788+ master_priv->sarea_priv->planeA_y = y;
9789+ break;
9790+ case 1:
9791+ master_priv->sarea_priv->planeB_x = x;
9792+ master_priv->sarea_priv->planeB_y = y;
9793+ break;
9794+ default:
9795+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
9796+ break;
9797+ }
9798+#endif
+	return 0;
9799+}
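Start is the buffer object's base while Offset is the byte position of pixel (x, y) within it: Offset = y * pitch + x * bytes-per-pixel. Pre-965 hardware only takes the combined value, which is why the else branch writes Start + Offset. A quick numeric illustration with hypothetical values (a 1024-pixel-wide, 32 bpp framebuffer):

#include <stdio.h>

int main(void)
{
	unsigned long pitch = 4096;	/* 1024 pixels * 4 bytes each */
	unsigned long x = 10, y = 20, bits_per_pixel = 32;
	unsigned long offset = y * pitch + x * (bits_per_pixel / 8);

	/* 20 * 4096 + 10 * 4 = 81960 bytes into the framebuffer */
	printf("offset = %lu\n", offset);
	return 0;
}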
9800+
9801+
9802+
9803+/**
9804+ * Sets the power management mode of the pipe and plane.
9805+ *
9806+ * This code should probably grow support for turning the cursor off and back
9807+ * on appropriately at the same time as we're turning the pipe off/on.
9808+ */
9809+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
9810+{
9811+ struct drm_device *dev = crtc->dev;
9812+ /* struct drm_i915_master_private *master_priv; */
9813+ /* struct drm_i915_private *dev_priv = dev->dev_private; */
9814+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
9815+ int pipe = psb_intel_crtc->pipe;
9816+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
9817+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
9818+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
9819+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
9820+ u32 temp;
9821+ bool enabled;
9822+
9823+ /* XXX: When our outputs are all unaware of DPMS modes other than off
9824+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
9825+ */
9826+ switch (mode) {
9827+ case DRM_MODE_DPMS_ON:
9828+ case DRM_MODE_DPMS_STANDBY:
9829+ case DRM_MODE_DPMS_SUSPEND:
9830+ /* Enable the DPLL */
9831+ temp = REG_READ(dpll_reg);
9832+ if ((temp & DPLL_VCO_ENABLE) == 0) {
9833+ REG_WRITE(dpll_reg, temp);
9834+ REG_READ(dpll_reg);
9835+ /* Wait for the clocks to stabilize. */
9836+ udelay(150);
9837+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
9838+ REG_READ(dpll_reg);
9839+ /* Wait for the clocks to stabilize. */
9840+ udelay(150);
9841+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
9842+ REG_READ(dpll_reg);
9843+ /* Wait for the clocks to stabilize. */
9844+ udelay(150);
9845+ }
9846+
9847+ /* Enable the pipe */
9848+ temp = REG_READ(pipeconf_reg);
9849+ if ((temp & PIPEACONF_ENABLE) == 0)
9850+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
9851+
9852+ /* Enable the plane */
9853+ temp = REG_READ(dspcntr_reg);
9854+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
9855+ REG_WRITE(dspcntr_reg,
9856+ temp | DISPLAY_PLANE_ENABLE);
9857+ /* Flush the plane changes */
9858+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
9859+ }
9860+
9861+ psb_intel_crtc_load_lut(crtc);
9862+
9863+ /* Give the overlay scaler a chance to enable
9864+ * if it's on this pipe */
9865+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
9866+ break;
9867+ case DRM_MODE_DPMS_OFF:
9868+ /* Give the overlay scaler a chance to disable
9869+ * if it's on this pipe */
9870+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
9871+
9872+ /* Disable the VGA plane that we never use */
9873+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
9874+
9875+ /* Disable display plane */
9876+ temp = REG_READ(dspcntr_reg);
9877+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
9878+ REG_WRITE(dspcntr_reg,
9879+ temp & ~DISPLAY_PLANE_ENABLE);
9880+ /* Flush the plane changes */
9881+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
9882+ REG_READ(dspbase_reg);
9883+ }
9884+
9885+ if (!IS_I9XX(dev)) {
9886+ /* Wait for vblank for the disable to take effect */
9887+ psb_intel_wait_for_vblank(dev);
9888+ }
9889+
9890+ /* Next, disable display pipes */
9891+ temp = REG_READ(pipeconf_reg);
9892+ if ((temp & PIPEACONF_ENABLE) != 0) {
9893+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
9894+ REG_READ(pipeconf_reg);
9895+ }
9896+
9897+ /* Wait for vblank for the disable to take effect. */
9898+ psb_intel_wait_for_vblank(dev);
9899+
9900+ temp = REG_READ(dpll_reg);
9901+ if ((temp & DPLL_VCO_ENABLE) != 0) {
9902+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
9903+ REG_READ(dpll_reg);
9904+ }
9905+
9906+ /* Wait for the clocks to turn off. */
9907+ udelay(150);
9908+ break;
9909+ }
9910+
9911+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
9912+
9913+#if 0 /* JB: Add vblank support later */
9914+ if (enabled)
9915+ dev_priv->vblank_pipe |= (1 << pipe);
9916+ else
9917+ dev_priv->vblank_pipe &= ~(1 << pipe);
9918+#endif
9919+
9920+ psb_intel_crtc->dpms_mode = mode;
9921+
9922+#if 0 /* JB: Add sarea support later */
9923+ if (!dev->primary->master)
9924+ return 0;
9925+
9926+ master_priv = dev->primary->master->driver_priv;
9927+ if (!master_priv->sarea_priv)
9928+ return 0;
9929+
9930+ switch (pipe) {
9931+ case 0:
9932+ master_priv->sarea_priv->planeA_w =
9933+ enabled ? crtc->mode.hdisplay : 0;
9934+ master_priv->sarea_priv->planeA_h =
9935+ enabled ? crtc->mode.vdisplay : 0;
9936+ break;
9937+ case 1:
9938+ master_priv->sarea_priv->planeB_w =
9939+ enabled ? crtc->mode.hdisplay : 0;
9940+ master_priv->sarea_priv->planeB_h =
9941+ enabled ? crtc->mode.vdisplay : 0;
9942+ break;
9943+ default:
9944+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
9945+ break;
9946+ }
9947+#endif
9948+}
9949+
9950+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
9951+{
9952+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
9953+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
9954+}
9955+
9956+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
9957+{
9958+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
9959+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
9960+}
9961+
9962+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
9963+{
9964+ struct drm_encoder_helper_funcs *encoder_funcs =
9965+ encoder->helper_private;
9966+ /* lvds has its own version of prepare see psb_intel_lvds_prepare */
9967+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
9968+}
9969+
9970+void psb_intel_encoder_commit(struct drm_encoder *encoder)
9971+{
9972+ struct drm_encoder_helper_funcs *encoder_funcs =
9973+ encoder->helper_private;
9974+ /* lvds has its own version of commit see psb_intel_lvds_commit */
9975+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
9976+}
9977+
9978+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
9979+ struct drm_display_mode *mode,
9980+ struct drm_display_mode *adjusted_mode)
9981+{
9982+ return true;
9983+}
9984+
9985+
9986+/** Returns the core display clock speed for i830 - i945 */
9987+static int psb_intel_get_core_clock_speed(struct drm_device *dev)
9988+{
9989+#if 0 /* JB: Look into this more */
9990+ /* Core clock values taken from the published datasheets.
9991+	 * The 830 may go up to 166 MHz, which we should check.
9992+ */
9993+ if (IS_I945G(dev))
9994+ return 400000;
9995+ else if (IS_I915G(dev))
9996+ return 333000;
9997+ else if (IS_I945GM(dev) || IS_845G(dev))
9998+ return 200000;
9999+ else if (IS_I915GM(dev)) {
10000+ u16 gcfgc = 0;
10001+
10002+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
10003+
10004+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
10005+ return 133000;
10006+ else {
10007+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
10008+ case GC_DISPLAY_CLOCK_333_MHZ:
10009+ return 333000;
10010+ default:
10011+ case GC_DISPLAY_CLOCK_190_200_MHZ:
10012+ return 190000;
10013+ }
10014+ }
10015+ } else if (IS_I865G(dev))
10016+ return 266000;
10017+ else if (IS_I855(dev)) {
10018+#if 0
10019+ PCITAG bridge = pciTag(0, 0, 0);
10020+ /* This is always the host bridge */
10021+ u16 hpllcc = pciReadWord(bridge, HPLLCC);
10022+
10023+#endif
10024+ u16 hpllcc = 0;
10025+ /* Assume that the hardware is in the high speed state. This
10026+ * should be the default.
10027+ */
10028+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
10029+ case GC_CLOCK_133_200:
10030+ case GC_CLOCK_100_200:
10031+ return 200000;
10032+ case GC_CLOCK_166_250:
10033+ return 250000;
10034+ case GC_CLOCK_100_133:
10035+ return 133000;
10036+ }
10037+ } else /* 852, 830 */
10038+ return 133000;
10039+#endif
10040+ return 0; /* Silence gcc warning */
10041+}
10042+
10043+
10044+/**
10045+ * Return the pipe currently connected to the panel fitter,
10046+ * or -1 if the panel fitter is not present or not in use
10047+ */
10048+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
10049+{
10050+ u32 pfit_control;
10051+
10052+ /* i830 doesn't have a panel fitter */
10053+ if (IS_I830(dev))
10054+ return -1;
10055+
10056+ pfit_control = REG_READ(PFIT_CONTROL);
10057+
10058+ /* See if the panel fitter is in use */
10059+ if ((pfit_control & PFIT_ENABLE) == 0)
10060+ return -1;
10061+
10062+ /* 965 can place panel fitter on either pipe */
10063+ if (IS_I965G(dev) || IS_MRST(dev))
10064+ return (pfit_control >> 29) & 0x3;
10065+
10066+ /* older chips can only use pipe 1 */
10067+ return 1;
10068+}
10069+
10070+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
10071+ struct drm_display_mode *mode,
10072+ struct drm_display_mode *adjusted_mode,
10073+ int x, int y,
10074+ struct drm_framebuffer *old_fb)
10075+{
10076+ struct drm_device *dev = crtc->dev;
10077+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10078+ int pipe = psb_intel_crtc->pipe;
10079+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
10080+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
10081+ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
10082+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
10083+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
10084+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
10085+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
10086+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
10087+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
10088+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
10089+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
10090+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
10091+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
10092+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
10093+ int refclk;
10094+ struct psb_intel_clock_t clock;
10095+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
10096+ bool ok, is_sdvo = false, is_dvo = false;
10097+ bool is_crt = false, is_lvds = false, is_tv = false;
10098+ struct drm_mode_config *mode_config = &dev->mode_config;
10099+ struct drm_connector *connector;
10100+
10101+ list_for_each_entry(connector, &mode_config->connector_list, head) {
10102+ struct psb_intel_output *psb_intel_output =
10103+ to_psb_intel_output(connector);
10104+
10105+ if (!connector->encoder
10106+ || connector->encoder->crtc != crtc)
10107+ continue;
10108+
10109+ switch (psb_intel_output->type) {
10110+ case INTEL_OUTPUT_LVDS:
10111+ is_lvds = true;
10112+ break;
10113+ case INTEL_OUTPUT_SDVO:
10114+ is_sdvo = true;
10115+ break;
10116+ case INTEL_OUTPUT_DVO:
10117+ is_dvo = true;
10118+ break;
10119+ case INTEL_OUTPUT_TVOUT:
10120+ is_tv = true;
10121+ break;
10122+ case INTEL_OUTPUT_ANALOG:
10123+ is_crt = true;
10124+ break;
10125+ }
10126+ }
10127+
10128+ if (IS_I9XX(dev))
10129+ refclk = 96000;
10130+ else
10131+ refclk = 48000;
10132+
10133+ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
10134+ &clock);
10135+ if (!ok) {
10136+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
10137+		return -EINVAL;
10138+ }
10139+
10140+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
10141+
10142+ dpll = DPLL_VGA_MODE_DIS;
10143+ if (IS_I9XX(dev)) {
10144+ if (is_lvds) {
10145+ dpll |= DPLLB_MODE_LVDS;
10146+ if (IS_POULSBO(dev))
10147+ dpll |= DPLL_DVO_HIGH_SPEED;
10148+ } else
10149+ dpll |= DPLLB_MODE_DAC_SERIAL;
10150+ if (is_sdvo) {
10151+ dpll |= DPLL_DVO_HIGH_SPEED;
10152+ if (IS_I945G(dev) || IS_I945GM(dev)) {
10153+ int sdvo_pixel_multiply =
10154+ adjusted_mode->clock / mode->clock;
10155+ dpll |=
10156+ (sdvo_pixel_multiply -
10157+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
10158+ }
10159+ }
10160+
10161+ /* compute bitmask from p1 value */
10162+ dpll |= (1 << (clock.p1 - 1)) << 16;
10163+ switch (clock.p2) {
10164+ case 5:
10165+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
10166+ break;
10167+ case 7:
10168+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
10169+ break;
10170+ case 10:
10171+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
10172+ break;
10173+ case 14:
10174+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
10175+ break;
10176+ }
10177+ if (IS_I965G(dev))
10178+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
10179+ } else {
10180+ if (is_lvds) {
10181+ dpll |=
10182+ (1 << (clock.p1 - 1)) <<
10183+ DPLL_FPA01_P1_POST_DIV_SHIFT;
10184+ } else {
10185+ if (clock.p1 == 2)
10186+ dpll |= PLL_P1_DIVIDE_BY_TWO;
10187+ else
10188+ dpll |=
10189+ (clock.p1 -
10190+ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
10191+ if (clock.p2 == 4)
10192+ dpll |= PLL_P2_DIVIDE_BY_4;
10193+ }
10194+ }
10195+
10196+ if (is_tv) {
10197+ /* XXX: just matching BIOS for now */
10198+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
10199+ dpll |= 3;
10200+ }
10201+#if 0
10202+ else if (is_lvds)
10203+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
10204+#endif
10205+ else
10206+ dpll |= PLL_REF_INPUT_DREFCLK;
10207+
10208+ /* setup pipeconf */
10209+ pipeconf = REG_READ(pipeconf_reg);
10210+
10211+ /* Set up the display plane register */
10212+ dspcntr = DISPPLANE_GAMMA_ENABLE;
10213+
10214+ if (pipe == 0)
10215+ dspcntr |= DISPPLANE_SEL_PIPE_A;
10216+ else
10217+ dspcntr |= DISPPLANE_SEL_PIPE_B;
10218+
10219+ if (pipe == 0 && !IS_I965G(dev)) {
10220+ /* Enable pixel doubling when the dot clock is > 90%
10221+ * of the (display) core speed.
10222+ *
10223+ * XXX: No double-wide on 915GM pipe B.
10224+ * Is that the only reason for the
10225+ * pipe == 0 check?
10226+ */
10227+ if (mode->clock > psb_intel_get_core_clock_speed(dev) * 9 / 10)
10228+ pipeconf |= PIPEACONF_DOUBLE_WIDE;
10229+ else
10230+ pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
10231+ }
10232+
10233+ dspcntr |= DISPLAY_PLANE_ENABLE;
10234+ pipeconf |= PIPEACONF_ENABLE;
10235+ dpll |= DPLL_VCO_ENABLE;
10236+
10237+
10238+ /* Disable the panel fitter if it was on our pipe */
10239+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
10240+ REG_WRITE(PFIT_CONTROL, 0);
10241+
10242+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
10243+ drm_mode_debug_printmodeline(mode);
10244+
10245+#if 0
10246+ if (!xf86ModesEqual(mode, adjusted_mode)) {
10247+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
10248+ "Adjusted mode for pipe %c:\n",
10249+ pipe == 0 ? 'A' : 'B');
10250+ xf86PrintModeline(pScrn->scrnIndex, mode);
10251+ }
10252+ i830PrintPll("chosen", &clock);
10253+#endif
10254+
10255+ if (dpll & DPLL_VCO_ENABLE) {
10256+ REG_WRITE(fp_reg, fp);
10257+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
10258+ REG_READ(dpll_reg);
10259+ udelay(150);
10260+ }
10261+
10262+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
10263+ * This is an exception to the general rule that mode_set doesn't turn
10264+ * things on.
10265+ */
10266+ if (is_lvds) {
10267+ u32 lvds = REG_READ(LVDS);
10268+
10269+ lvds |=
10270+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
10271+ LVDS_PIPEB_SELECT;
10272+ /* Set the B0-B3 data pairs corresponding to
10273+ * whether we're going to
10274+ * set the DPLLs for dual-channel mode or not.
10275+ */
10276+ if (clock.p2 == 7)
10277+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
10278+ else
10279+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
10280+
10281+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
10282+ * appropriately here, but we need to look more
10283+ * thoroughly into how panels behave in the two modes.
10284+ */
10285+
10286+ REG_WRITE(LVDS, lvds);
10287+ REG_READ(LVDS);
10288+ }
10289+
10290+ REG_WRITE(fp_reg, fp);
10291+ REG_WRITE(dpll_reg, dpll);
10292+ REG_READ(dpll_reg);
10293+ /* Wait for the clocks to stabilize. */
10294+ udelay(150);
10295+
10296+ if (IS_I965G(dev)) {
10297+ int sdvo_pixel_multiply =
10298+ adjusted_mode->clock / mode->clock;
10299+ REG_WRITE(dpll_md_reg,
10300+ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
10301+ ((sdvo_pixel_multiply -
10302+ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
10303+ } else {
10304+ /* write it again -- the BIOS does, after all */
10305+ REG_WRITE(dpll_reg, dpll);
10306+ }
10307+ REG_READ(dpll_reg);
10308+ /* Wait for the clocks to stabilize. */
10309+ udelay(150);
10310+
10311+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
10312+ ((adjusted_mode->crtc_htotal - 1) << 16));
10313+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
10314+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
10315+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
10316+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
10317+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
10318+ ((adjusted_mode->crtc_vtotal - 1) << 16));
10319+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
10320+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
10321+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
10322+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
10323+ /* pipesrc and dspsize control the size that is scaled from,
10324+ * which should always be the user's requested size.
10325+ */
10326+ REG_WRITE(dspsize_reg,
10327+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
10328+ REG_WRITE(dsppos_reg, 0);
10329+ REG_WRITE(pipesrc_reg,
10330+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
10331+ REG_WRITE(pipeconf_reg, pipeconf);
10332+ REG_READ(pipeconf_reg);
10333+
10334+ psb_intel_wait_for_vblank(dev);
10335+
10336+ REG_WRITE(dspcntr_reg, dspcntr);
10337+
10338+ /* Flush the plane changes */
10339+ {
10340+ struct drm_crtc_helper_funcs *crtc_funcs =
10341+ crtc->helper_private;
10342+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
10343+ }
10344+
10345+ psb_intel_wait_for_vblank(dev);
10346+
10347+ return 0;
10348+}
10349+
10350+/** Loads the palette/gamma unit for the CRTC with the prepared values */
10351+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
10352+{
10353+ struct drm_device *dev = crtc->dev;
10354+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10355+ int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
10356+ int i;
10357+
10358+ /* The clocks have to be on to load the palette. */
10359+ if (!crtc->enabled)
10360+ return;
10361+
10362+ for (i = 0; i < 256; i++) {
10363+ REG_WRITE(palreg + 4 * i,
10364+ (psb_intel_crtc->lut_r[i] << 16) |
10365+ (psb_intel_crtc->lut_g[i] << 8) |
10366+ psb_intel_crtc->lut_b[i]);
10367+ }
10368+}
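Each palette write above packs one 8-bit value per channel into a single register word: red in bits 23:16, green in 15:8, blue in 7:0. A one-line check of that packing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t r = 0x12, g = 0x34, b = 0x56;
	uint32_t entry = ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;

	printf("palette word = 0x%06x\n", entry);	/* 0x123456 */
	return 0;
}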
10369+
10370+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
10371+ struct drm_file *file_priv,
10372+ uint32_t handle,
10373+ uint32_t width, uint32_t height)
10374+{
10375+ struct drm_device *dev = crtc->dev;
10376+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10377+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
10378+ int pipe = psb_intel_crtc->pipe;
10379+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
10380+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
10381+ uint32_t temp;
10382+ size_t addr = 0;
10383+ size_t size;
10384+ void *bo;
10385+ int ret;
10386+
10387+ DRM_DEBUG("\n");
10388+
10389+	/* if we want to turn off the cursor, ignore width and height */
10390+ if (!handle) {
10391+ DRM_DEBUG("cursor off\n");
10392+		/* turn off the cursor */
10393+ temp = 0;
10394+ temp |= CURSOR_MODE_DISABLE;
10395+
10396+ REG_WRITE(control, temp);
10397+ REG_WRITE(base, 0);
10398+
10399+ /* unpin the old bo */
10400+ if (psb_intel_crtc->cursor_bo) {
10401+ mode_dev->bo_unpin_for_scanout(dev,
10402+ psb_intel_crtc->
10403+ cursor_bo);
10404+ psb_intel_crtc->cursor_bo = NULL;
10405+ }
10406+
10407+ return 0;
10408+ }
10409+
10410+ /* Currently we only support 64x64 cursors */
10411+ if (width != 64 || height != 64) {
10412+ DRM_ERROR("we currently only support 64x64 cursors\n");
10413+ return -EINVAL;
10414+ }
10415+
10416+ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
10417+ if (!bo)
10418+ return -ENOENT;
10419+
10420+ ret = mode_dev->bo_pin_for_scanout(dev, bo);
10421+ if (ret)
10422+ return ret;
10423+
10424+ size = mode_dev->bo_size(dev, bo);
10425+	if (size < width * height * 4) {
10426+		DRM_ERROR("buffer is too small\n");
+		mode_dev->bo_unpin_for_scanout(dev, bo);
10427+		return -ENOMEM;
10428+	}
10429+
10430+	addr = mode_dev->bo_offset(dev, bo);
10431+ if (mode_dev->cursor_needs_physical)
10432+ addr = dev->agp->base + addr;
10433+
10434+ psb_intel_crtc->cursor_addr = addr;
10435+ temp = 0;
10436+ /* set the pipe for the cursor */
10437+ temp |= (pipe << 28);
10438+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
10439+
10440+ REG_WRITE(control, temp);
10441+ REG_WRITE(base, addr);
10442+
10443+ /* unpin the old bo */
10444+	if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo)
10445+		mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
10446+	psb_intel_crtc->cursor_bo = bo;
10448+
10449+ return 0;
10450+}
10451+
10452+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
10453+{
10454+ struct drm_device *dev = crtc->dev;
10455+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10456+ int pipe = psb_intel_crtc->pipe;
10457+ uint32_t temp = 0;
10458+ uint32_t adder;
10459+
10460+ if (x < 0) {
10461+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
10462+ x = -x;
10463+ }
10464+ if (y < 0) {
10465+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
10466+ y = -y;
10467+ }
10468+
10469+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
10470+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
10471+
10472+ adder = psb_intel_crtc->cursor_addr;
10473+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
10474+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
10475+
10476+ return 0;
10477+}
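CURxPOS stores each coordinate as sign-and-magnitude rather than two's complement: the magnitude is masked into its half of the register and a separate sign bit is set for negative values, exactly as the branches above do. A sketch of the encoding with stand-in bit positions (the real CURSOR_* constants are in the register headers):

#include <stdint.h>
#include <stdio.h>

#define DEMO_POS_SIGN (1u << 15)	/* stand-in for CURSOR_POS_SIGN */
#define DEMO_POS_MASK 0x7ffu		/* stand-in for CURSOR_POS_MASK */
#define DEMO_X_SHIFT  0
#define DEMO_Y_SHIFT  16

static uint32_t encode_pos(int x, int y)
{
	uint32_t v = 0;

	if (x < 0) { v |= DEMO_POS_SIGN << DEMO_X_SHIFT; x = -x; }
	if (y < 0) { v |= DEMO_POS_SIGN << DEMO_Y_SHIFT; y = -y; }
	v |= ((uint32_t)x & DEMO_POS_MASK) << DEMO_X_SHIFT;
	v |= ((uint32_t)y & DEMO_POS_MASK) << DEMO_Y_SHIFT;
	return v;
}

int main(void)
{
	/* x = -5 becomes sign bit + magnitude 5 in the low half. */
	printf("0x%08x\n", encode_pos(-5, 100));
	return 0;
}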
10478+
10479+/** Sets the color ramps on behalf of RandR */
10480+void psb_intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
10481+ u16 blue, int regno)
10482+{
10483+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10484+
10485+ psb_intel_crtc->lut_r[regno] = red >> 8;
10486+ psb_intel_crtc->lut_g[regno] = green >> 8;
10487+ psb_intel_crtc->lut_b[regno] = blue >> 8;
10488+}
10489+
10490+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
10491+ u16 *green, u16 *blue, uint32_t size)
10492+{
10493+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10494+ int i;
10495+
10496+ if (size != 256)
10497+ return;
10498+
10499+ for (i = 0; i < 256; i++) {
10500+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
10501+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
10502+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
10503+ }
10504+
10505+ psb_intel_crtc_load_lut(crtc);
10506+}
10507+
10508+/**
10509+ * Get a pipe with a simple mode set on it for doing load-based monitor
10510+ * detection.
10511+ *
10512+ * It will be up to the load-detect code to adjust the pipe as appropriate for
10513+ * its requirements. The pipe will be connected to no other outputs.
10514+ *
10515+ * Currently this code will only succeed if there is a pipe with no outputs
10516+ * configured for it. In the future, it could choose to temporarily disable
10517+ * some outputs to free up a pipe for its use.
10518+ *
10519+ * \return crtc, or NULL if no pipes are available.
10520+ */
10521+
10522+/* VESA 640x480x72Hz mode to set on the pipe */
10523+static struct drm_display_mode load_detect_mode = {
10524+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10525+ 704, 832, 0, 480, 489, 491, 520, 0,
10526+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10527+};
10528+
10529+struct drm_crtc *psb_intel_get_load_detect_pipe(struct psb_intel_output
10530+ *psb_intel_output,
10531+ struct drm_display_mode *mode,
10532+ int *dpms_mode)
10533+{
10534+ struct psb_intel_crtc *psb_intel_crtc;
10535+ struct drm_crtc *possible_crtc;
10536+ struct drm_crtc *supported_crtc = NULL;
10537+ struct drm_encoder *encoder = &psb_intel_output->enc;
10538+ struct drm_crtc *crtc = NULL;
10539+ struct drm_device *dev = encoder->dev;
10540+ struct drm_encoder_helper_funcs *encoder_funcs =
10541+ encoder->helper_private;
10542+ struct drm_crtc_helper_funcs *crtc_funcs;
10543+ int i = -1;
10544+
10545+ /*
10546+ * Algorithm gets a little messy:
10547+ * - if the connector already has an assigned crtc, use it (but make
10548+ * sure it's on first)
10549+ * - try to find the first unused crtc that can drive this connector,
10550+ * and use that if we find one
10551+ * - if there are no unused crtcs available, try to use the first
10552+ * one we found that supports the connector
10553+ */
10554+
10555+ /* See if we already have a CRTC for this connector */
10556+ if (encoder->crtc) {
10557+ crtc = encoder->crtc;
10558+ /* Make sure the crtc and connector are running */
10559+ psb_intel_crtc = to_psb_intel_crtc(crtc);
10560+ *dpms_mode = psb_intel_crtc->dpms_mode;
10561+ if (psb_intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
10562+ crtc_funcs = crtc->helper_private;
10563+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
10564+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
10565+ }
10566+ return crtc;
10567+ }
10568+
10569+ /* Find an unused one (if possible) */
10570+ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list,
10571+ head) {
10572+ i++;
10573+ if (!(encoder->possible_crtcs & (1 << i)))
10574+ continue;
10575+ if (!possible_crtc->enabled) {
10576+ crtc = possible_crtc;
10577+ break;
10578+ }
10579+ if (!supported_crtc)
10580+ supported_crtc = possible_crtc;
10581+ }
10582+
10583+	/*
10584+	 * If we didn't find an unused CRTC, don't use any; falling back
+	 * to the first supported_crtc found above is not implemented yet.
10585+	 */
10586+ if (!crtc)
10587+ return NULL;
10588+
10589+ encoder->crtc = crtc;
10590+ psb_intel_output->load_detect_temp = true;
10591+
10592+ psb_intel_crtc = to_psb_intel_crtc(crtc);
10593+ *dpms_mode = psb_intel_crtc->dpms_mode;
10594+
10595+ if (!crtc->enabled) {
10596+ if (!mode)
10597+ mode = &load_detect_mode;
10598+ drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
10599+ } else {
10600+ if (psb_intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
10601+ crtc_funcs = crtc->helper_private;
10602+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
10603+ }
10604+
10605+ /* Add this connector to the crtc */
10606+ encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
10607+ encoder_funcs->commit(encoder);
10608+ }
10609+ /* let the connector get through one full cycle before testing */
10610+ psb_intel_wait_for_vblank(dev);
10611+
10612+ return crtc;
10613+}
10614+
10615+void psb_intel_release_load_detect_pipe(struct psb_intel_output *psb_intel_output,
10616+ int dpms_mode)
10617+{
10618+ struct drm_encoder *encoder = &psb_intel_output->enc;
10619+ struct drm_device *dev = encoder->dev;
10620+ struct drm_crtc *crtc = encoder->crtc;
10621+ struct drm_encoder_helper_funcs *encoder_funcs =
10622+ encoder->helper_private;
10623+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
10624+
10625+ if (psb_intel_output->load_detect_temp) {
10626+ encoder->crtc = NULL;
10627+ psb_intel_output->load_detect_temp = false;
10628+ crtc->enabled = drm_helper_crtc_in_use(crtc);
10629+ drm_helper_disable_unused_functions(dev);
10630+ }
10631+
10632+ /* Switch crtc and output back off if necessary */
10633+ if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
10634+ if (encoder->crtc == crtc)
10635+ encoder_funcs->dpms(encoder, dpms_mode);
10636+ crtc_funcs->dpms(crtc, dpms_mode);
10637+ }
10638+}
10639+
10640+/* Returns the clock of the currently programmed mode of the given pipe. */
10641+static int psb_intel_crtc_clock_get(struct drm_device *dev,
10642+ struct drm_crtc *crtc)
10643+{
10644+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10645+ int pipe = psb_intel_crtc->pipe;
10646+ u32 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
10647+ u32 fp;
10648+ struct psb_intel_clock_t clock;
10649+
10650+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10651+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
10652+ else
10653+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
10654+
10655+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10656+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10657+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10658+ if (IS_I9XX(dev)) {
10659+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10660+ DPLL_FPA01_P1_POST_DIV_SHIFT);
10661+
10662+ switch (dpll & DPLL_MODE_MASK) {
10663+ case DPLLB_MODE_DAC_SERIAL:
10664+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10665+ 5 : 10;
10666+ break;
10667+ case DPLLB_MODE_LVDS:
10668+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10669+ 7 : 14;
10670+ break;
10671+ default:
10672+ DRM_DEBUG("Unknown DPLL mode %08x in programmed "
10673+ "mode\n", (int) (dpll & DPLL_MODE_MASK));
10674+ return 0;
10675+ }
10676+
10677+		/* XXX: Handle the 100 MHz refclk */
10678+ i9xx_clock(96000, &clock);
10679+ } else {
10680+ bool is_lvds = (pipe == 1)
10681+ && (REG_READ(LVDS) & LVDS_PORT_EN);
10682+
10683+ if (is_lvds) {
10684+ clock.p1 =
10685+ ffs((dpll &
10686+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10687+ DPLL_FPA01_P1_POST_DIV_SHIFT);
10688+ clock.p2 = 14;
10689+
10690+ if ((dpll & PLL_REF_INPUT_MASK) ==
10691+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
10692+ /* XXX: might not be 66MHz */
10693+ i8xx_clock(66000, &clock);
10694+ } else
10695+ i8xx_clock(48000, &clock);
10696+ } else {
10697+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
10698+ clock.p1 = 2;
10699+ else {
10700+ clock.p1 =
10701+ ((dpll &
10702+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10703+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10704+ }
10705+ if (dpll & PLL_P2_DIVIDE_BY_4)
10706+ clock.p2 = 4;
10707+ else
10708+ clock.p2 = 2;
10709+
10710+ i8xx_clock(48000, &clock);
10711+ }
10712+ }
10713+
10714+ /* XXX: It would be nice to validate the clocks, but we can't reuse
10715+ * i830PllIsValid() because it relies on the xf86_config connector
10716+ * configuration being accurate, which it isn't necessarily.
10717+ */
10718+
10719+ return clock.dot;
10720+}
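The function above reverses mode-set programming: it pulls m1/m2/n back out of the FP register fields, decodes p1/p2 from the DPLL word, and re-runs the clock equation. A sketch of the field extraction with an assumed register layout (n in bits 21:16, m1 in 13:8, m2 in 5:0; treat these shifts as illustrative, the real FP_* masks live in the register headers):

#include <stdint.h>
#include <stdio.h>

#define DEMO_N_SHIFT  16
#define DEMO_M1_SHIFT 8
#define DEMO_M2_SHIFT 0
#define DEMO_FIELD    0x3fu

int main(void)
{
	/* An FP word encoding n=3, m1=12, m2=7. */
	uint32_t fp = (3u << DEMO_N_SHIFT) | (12u << DEMO_M1_SHIFT) | 7u;
	int n  = (fp >> DEMO_N_SHIFT) & DEMO_FIELD;
	int m1 = (fp >> DEMO_M1_SHIFT) & DEMO_FIELD;
	int m2 = (fp >> DEMO_M2_SHIFT) & DEMO_FIELD;
	int m = 5 * (m1 + 2) + (m2 + 2);
	int dot = 96000 * m / (n + 2) / (2 * 10);	/* p1=2, p2=10 assumed */

	printf("n=%d m1=%d m2=%d dot=%d kHz\n", n, m1, m2, dot);
	return 0;
}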
10721+
10722+/** Returns the currently programmed mode of the given pipe. */
10723+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
10724+ struct drm_crtc *crtc)
10725+{
10726+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10727+ int pipe = psb_intel_crtc->pipe;
10728+ struct drm_display_mode *mode;
10729+ int htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
10730+ int hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
10731+ int vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
10732+ int vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
10733+
10734+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10735+ if (!mode)
10736+ return NULL;
10737+
10738+ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
10739+ mode->hdisplay = (htot & 0xffff) + 1;
10740+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10741+ mode->hsync_start = (hsync & 0xffff) + 1;
10742+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10743+ mode->vdisplay = (vtot & 0xffff) + 1;
10744+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10745+ mode->vsync_start = (vsync & 0xffff) + 1;
10746+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10747+
10748+ drm_mode_set_name(mode);
10749+ drm_mode_set_crtcinfo(mode, 0);
10750+
10751+ return mode;
10752+}
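The timing registers hold "value - 1" with the active/start count in the low 16 bits and the total/end count in the high 16 bits, which is why every field above is unpacked with a trailing + 1. For example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* An HTOTAL-style word for 1024 active / 1344 total pixels. */
	uint32_t htot = ((1344u - 1) << 16) | (1024u - 1);

	printf("hdisplay=%u htotal=%u\n",
	       (htot & 0xffff) + 1, ((htot >> 16) & 0xffff) + 1);
	return 0;
}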
10753+
10754+static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
10755+{
10756+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10757+
10758+ drm_crtc_cleanup(crtc);
10759+ kfree(psb_intel_crtc);
10760+}
10761+
10762+static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
10763+ .dpms = psb_intel_crtc_dpms,
10764+ .mode_fixup = psb_intel_crtc_mode_fixup,
10765+ .mode_set = psb_intel_crtc_mode_set,
10766+ .mode_set_base = psb_intel_pipe_set_base,
10767+ .prepare = psb_intel_crtc_prepare,
10768+ .commit = psb_intel_crtc_commit,
10769+};
10770+
10771+static const struct drm_crtc_helper_funcs mrst_helper_funcs;
10772+
10773+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
10774+ .cursor_set = psb_intel_crtc_cursor_set,
10775+ .cursor_move = psb_intel_crtc_cursor_move,
10776+ .gamma_set = psb_intel_crtc_gamma_set,
10777+ .set_config = drm_crtc_helper_set_config,
10778+ .destroy = psb_intel_crtc_destroy,
10779+};
10780+
10781+
10782+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
10783+ struct psb_intel_mode_device *mode_dev)
10784+{
10785+ struct psb_intel_crtc *psb_intel_crtc;
10786+ int i;
10787+
10788+#if PRINT_JLIU7
10789+	DRM_INFO("JLIU7 enter psb_intel_crtc_init\n");
10790+#endif /* PRINT_JLIU7 */
10791+
10792+	/* We allocate an extra array of drm_connector pointers
10793+ * for fbdev after the crtc */
10794+ psb_intel_crtc =
10795+ kzalloc(sizeof(struct psb_intel_crtc) +
10796+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
10797+ GFP_KERNEL);
10798+ if (psb_intel_crtc == NULL)
10799+ return;
10800+
10801+ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
10802+
10803+ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
10804+ psb_intel_crtc->pipe = pipe;
10805+ for (i = 0; i < 256; i++) {
10806+ psb_intel_crtc->lut_r[i] = i;
10807+ psb_intel_crtc->lut_g[i] = i;
10808+ psb_intel_crtc->lut_b[i] = i;
10809+ }
10810+
10811+ psb_intel_crtc->mode_dev = mode_dev;
10812+ psb_intel_crtc->cursor_addr = 0;
10813+ psb_intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
10814+
10815+ if (IS_MRST(dev)) {
10816+ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs);
10817+ } else {
10818+ drm_crtc_helper_add(&psb_intel_crtc->base,
10819+ &psb_intel_helper_funcs);
10820+ }
10821+
10822+	/* Set up the array of drm_connector pointers */
10823+ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
10824+ psb_intel_crtc->mode_set.connectors =
10825+ (struct drm_connector **) (psb_intel_crtc + 1);
10826+ psb_intel_crtc->mode_set.num_connectors = 0;
10827+
10828+#if 0 /* JB: not drop. What should go in here? */
10829+ if (i915_fbpercrtc)
10830+#endif
10831+}
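
The allocation above uses the common "struct with a trailing pointer array" idiom: one kzalloc() covers both the crtc and its fbdev connector array, and the array is then addressed as (psb_intel_crtc + 1). A standalone sketch of the same idiom, with invented stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct conn;			/* opaque stand-in for drm_connector */
struct crtc { int pipe; };	/* stand-in for psb_intel_crtc */
#define CONN_LIMIT 4		/* stand-in for INTELFB_CONN_LIMIT */

int main(void)
{
	/* one allocation: the struct, then CONN_LIMIT pointers right after it */
	struct crtc *c = calloc(1, sizeof(*c) +
				CONN_LIMIT * sizeof(struct conn *));
	struct conn **conns;

	if (!c)
		return 1;
	conns = (struct conn **) (c + 1);	/* same cast as the driver */
	printf("crtc at %p, connector array at %p\n",
	       (void *) c, (void *) conns);
	free(c);
	return 0;
}
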
10832+
10833+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
10834+{
10835+ struct drm_crtc *crtc = NULL;
10836+
10837+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10838+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
10839+ if (psb_intel_crtc->pipe == pipe)
10840+ break;
10841+ }
10842+ return crtc;
10843+}
10844+
10845+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
10846+{
10847+ int index_mask = 0;
10848+ struct drm_connector *connector;
10849+ int entry = 0;
10850+
10851+ list_for_each_entry(connector, &dev->mode_config.connector_list,
10852+ head) {
10853+ struct psb_intel_output *psb_intel_output =
10854+ to_psb_intel_output(connector);
10855+ if (type_mask & (1 << psb_intel_output->type))
10856+ index_mask |= (1 << entry);
10857+ entry++;
10858+ }
10859+ return index_mask;
10860+}
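
psb_intel_connector_clones() turns a mask keyed by output *type* into a mask keyed by connector *position* in the list. A small self-contained rendering of that transformation, with invented type numbers:

#include <stdio.h>

int main(void)
{
	/* connector types in list order: ANALOG (1), LVDS (4), SDVO (3) */
	int types[] = { 1, 4, 3 };
	int type_mask = (1 << 1) | (1 << 3);	/* ANALOG and SDVO clone each other */
	int index_mask = 0, entry;

	for (entry = 0; entry < 3; entry++)
		if (type_mask & (1 << types[entry]))
			index_mask |= 1 << entry;

	printf("index_mask = 0x%x\n", index_mask);	/* 0x5: entries 0 and 2 */
	return 0;
}
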
10861+
10862+#if 0 /* JB: Should be per device */
10863+static void psb_intel_setup_outputs(struct drm_device *dev)
10864+{
10865+ struct drm_connector *connector;
10866+
10867+ psb_intel_crt_init(dev);
10868+
10869+ /* Set up integrated LVDS */
10870+ if (IS_MOBILE(dev) && !IS_I830(dev))
10871+ psb_intel_lvds_init(dev);
10872+
10873+ if (IS_I9XX(dev)) {
10874+ psb_intel_sdvo_init(dev, SDVOB);
10875+ psb_intel_sdvo_init(dev, SDVOC);
10876+ } else
10877+ psb_intel_dvo_init(dev);
10878+
10879+ if (IS_I9XX(dev) && !IS_I915G(dev))
10880+ psb_intel_tv_init(dev);
10881+
10882+ list_for_each_entry(connector, &dev->mode_config.connector_list,
10883+ head) {
10884+ struct psb_intel_output *psb_intel_output =
10885+ to_psb_intel_output(connector);
10886+ struct drm_encoder *encoder = &psb_intel_output->enc;
10887+ int crtc_mask = 0, clone_mask = 0;
10888+
10889+ /* valid crtcs */
10890+ switch (psb_intel_output->type) {
10891+ case INTEL_OUTPUT_DVO:
10892+ case INTEL_OUTPUT_SDVO:
10893+ crtc_mask = ((1 << 0) | (1 << 1));
10894+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
10895+ (1 << INTEL_OUTPUT_DVO) |
10896+ (1 << INTEL_OUTPUT_SDVO));
10897+ break;
10898+ case INTEL_OUTPUT_ANALOG:
10899+ crtc_mask = ((1 << 0) | (1 << 1));
10900+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
10901+ (1 << INTEL_OUTPUT_DVO) |
10902+ (1 << INTEL_OUTPUT_SDVO));
10903+ break;
10904+ case INTEL_OUTPUT_LVDS:
10905+ crtc_mask = (1 << 1);
10906+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
10907+ break;
10908+ case INTEL_OUTPUT_TVOUT:
10909+ crtc_mask = ((1 << 0) | (1 << 1));
10910+ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
10911+ break;
10912+ }
10913+ encoder->possible_crtcs = crtc_mask;
10914+ encoder->possible_clones =
10915+ psb_intel_connector_clones(dev, clone_mask);
10916+ }
10917+}
10918+#endif
10919+
10920+#if 0 /* JB: Rework framebuffer code into something non-device-specific */
10921+static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
10922+{
10923+ struct psb_intel_framebuffer *psb_intel_fb = to_psb_intel_framebuffer(fb);
10924+ struct drm_device *dev = fb->dev;
10925+
10926+ if (fb->fbdev)
10927+ intelfb_remove(dev, fb);
10928+
10929+ drm_framebuffer_cleanup(fb);
10930+ drm_gem_object_unreference(fb->mm_private);
10931+
10932+ kfree(psb_intel_fb);
10933+}
10934+
10935+static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
10936+ struct drm_file *file_priv,
10937+ unsigned int *handle)
10938+{
10939+ struct drm_gem_object *object = fb->mm_private;
10940+
10941+ return drm_gem_handle_create(file_priv, object, handle);
10942+}
10943+
10944+static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
10945+ .destroy = psb_intel_user_framebuffer_destroy,
10946+ .create_handle = psb_intel_user_framebuffer_create_handle,
10947+};
10948+
10949+struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev,
10950+ struct drm_mode_fb_cmd
10951+ *mode_cmd,
10952+ void *mm_private)
10953+{
10954+ struct psb_intel_framebuffer *psb_intel_fb;
10955+
10956+ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
10957+ if (!psb_intel_fb)
10958+ return NULL;
10959+
10960+ if (!drm_framebuffer_init(dev, &psb_intel_fb->base, &psb_intel_fb_funcs))
10961+ return NULL;
10962+
10963+ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
10964+
10965+ return &psb_intel_fb->base;
10966+}
10967+
10968+
10969+static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
10970+ drm_device
10971+ *dev,
10972+ struct
10973+ drm_file
10974+ *filp,
10975+ struct
10976+ drm_mode_fb_cmd
10977+ *mode_cmd)
10978+{
10979+ struct drm_gem_object *obj;
10980+
10981+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
10982+ if (!obj)
10983+ return NULL;
10984+
10985+ return psb_intel_framebuffer_create(dev, mode_cmd, obj);
10986+}
10987+
10988+static int psb_intel_insert_new_fb(struct drm_device *dev,
10989+ struct drm_file *file_priv,
10990+ struct drm_framebuffer *fb,
10991+ struct drm_mode_fb_cmd *mode_cmd)
10992+{
10993+ struct psb_intel_framebuffer *psb_intel_fb;
10994+ struct drm_gem_object *obj;
10995+ struct drm_crtc *crtc;
10996+
10997+ psb_intel_fb = to_psb_intel_framebuffer(fb);
10998+
10999+ mutex_lock(&dev->struct_mutex);
11000+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
11001+
11002+ if (!obj) {
11003+ mutex_unlock(&dev->struct_mutex);
11004+ return -EINVAL;
11005+ }
11006+ drm_gem_object_unreference(psb_intel_fb->base.mm_private);
11007+ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
11008+ mutex_unlock(&dev->struct_mutex);
11009+
11010+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
11011+ if (crtc->fb == fb) {
11012+ struct drm_crtc_helper_funcs *crtc_funcs =
11013+ crtc->helper_private;
11014+ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
11015+ }
11016+ }
11017+ return 0;
11018+}
11019+
11020+static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
11021+ .resize_fb = psb_intel_insert_new_fb,
11022+ .fb_create = psb_intel_user_framebuffer_create,
11023+ .fb_changed = intelfb_probe,
11024+};
11025+#endif
11026+
11027+#if 0 /* Should be per device */
11028+void psb_intel_modeset_init(struct drm_device *dev)
11029+{
11030+ int num_pipe;
11031+ int i;
11032+
11033+ drm_mode_config_init(dev);
11034+
11035+ dev->mode_config.min_width = 0;
11036+ dev->mode_config.min_height = 0;
11037+
11038+ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs;
11039+
11040+ if (IS_I965G(dev)) {
11041+ dev->mode_config.max_width = 8192;
11042+ dev->mode_config.max_height = 8192;
11043+ } else {
11044+ dev->mode_config.max_width = 2048;
11045+ dev->mode_config.max_height = 2048;
11046+ }
11047+
11048+ /* set memory base */
11049+ if (IS_I9XX(dev))
11050+ dev->mode_config.fb_base =
11051+ pci_resource_start(dev->pdev, 2);
11052+ else
11053+ dev->mode_config.fb_base =
11054+ pci_resource_start(dev->pdev, 0);
11055+
11056+ if (IS_MOBILE(dev) || IS_I9XX(dev))
11057+ num_pipe = 2;
11058+ else
11059+ num_pipe = 1;
11060+ DRM_DEBUG("%d display pipe%s available.\n",
11061+ num_pipe, num_pipe > 1 ? "s" : "");
11062+
11063+ for (i = 0; i < num_pipe; i++)
11064+ psb_intel_crtc_init(dev, i);
11065+
11066+ psb_intel_setup_outputs(dev);
11067+
11068+ /* setup fbs */
11069+ /* drm_initial_config(dev, false); */
11070+}
11071+#endif
11072+
11073+void psb_intel_modeset_cleanup(struct drm_device *dev)
11074+{
11075+ drm_mode_config_cleanup(dev);
11076+}
11077+
11078+
11079+/* The current Intel driver doesn't take advantage of encoders;
11080+ * always give back the encoder for the connector.
11081+ */
11082+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
11083+{
11084+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
11085+
11086+ return &psb_intel_output->enc;
11087+}
11088+
11089+/* MRST_PLATFORM start */
11090+
11091+#if DUMP_REGISTER
11092+void dump_dc_registers(struct drm_device *dev)
11093+{
11094+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11095+ unsigned int i = 0;
11096+
11097+ DRM_INFO("jliu7 dump_dc_registers\n");
11098+
11099+
11100+ if (0x80000000 & REG_READ(0x70008)) {
11101+ for (i = 0x20a0; i < 0x20af; i += 4) {
11102+ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n", i, (unsigned int) REG_READ(i));
11103+ }
11104+
11105+ for (i = 0xf014; i < 0xf047; i += 4) {
11106+ DRM_INFO
11107+ ("jliu7 pipe A dpll register=0x%x, value=%x\n",
11108+ i, (unsigned int) REG_READ(i));
11109+ }
11110+
11111+ for (i = 0x60000; i < 0x6005f; i += 4) {
11112+ DRM_INFO
11113+ ("jliu7 pipe A timing register=0x%x, value=%x\n",
11114+ i, (unsigned int) REG_READ(i));
11115+ }
11116+
11117+ for (i = 0x61140; i < 0x61143; i += 4) {
11118+ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n",
11119+ i, (unsigned int) REG_READ(i));
11120+ }
11121+
11122+ for (i = 0x61180; i < 0x6123F; i += 4) {
11123+ DRM_INFO
11124+ ("jliu7 LVDS PORT register=0x%x, value=%x\n",
11125+ i, (unsigned int) REG_READ(i));
11126+ }
11127+
11128+ for (i = 0x61254; i < 0x612AB; i += 4) {
11129+ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n",
11130+ i, (unsigned int) REG_READ(i));
11131+ }
11132+
11133+ for (i = 0x70000; i < 0x70047; i += 4) {
11134+ DRM_INFO
11135+ ("jliu7 PIPE A control register=0x%x, value=%x\n",
11136+ i, (unsigned int) REG_READ(i));
11137+ }
11138+
11139+ for (i = 0x70180; i < 0x7020b; i += 4) {
11140+ DRM_INFO("jliu7 display A control register=0x%x,"
11141+ "value=%x\n", i,
11142+ (unsigned int) REG_READ(i));
11143+ }
11144+
11145+ for (i = 0x71400; i < 0x71403; i += 4) {
11146+ DRM_INFO
11147+ ("jliu7 VGA Display Plane Control register=0x%x,"
11148+ "value=%x\n", i, (unsigned int) REG_READ(i));
11149+ }
11150+ }
11151+
11152+ if (0x80000000 & REG_READ(0x71008)) {
11153+ for (i = 0x61000; i < 0x6105f; i += 4) {
11154+ DRM_INFO
11155+ ("jliu7 pipe B timing register=0x%x, value=%x\n",
11156+ i, (unsigned int) REG_READ(i));
11157+ }
11158+
11159+ for (i = 0x71000; i < 0x71047; i += 4) {
11160+ DRM_INFO
11161+ ("jliu7 PIPE B control register=0x%x, value=%x\n",
11162+ i, (unsigned int) REG_READ(i));
11163+ }
11164+
11165+ for (i = 0x71180; i < 0x7120b; i += 4) {
11166+ DRM_INFO("jliu7 display B control register=0x%x,"
11167+ "value=%x\n", i,
11168+ (unsigned int) REG_READ(i));
11169+ }
11170+ }
11171+#if 0
11172+ for (i = 0x70080; i < 0x700df; i += 4) {
11173+ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n",
11174+ i, (unsigned int) REG_READ(i));
11175+ }
11176+#endif
11177+
11178+}
11179+
11180+void dump_dsi_registers(struct drm_device *dev)
11181+{
11182+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11183+ unsigned int i = 0;
11184+
11185+ DRM_INFO("jliu7 dump_dsi_registers\n");
11186+
11187+ for (i = 0xb000; i < 0xb064; i += 4) {
11188+ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i,
11189+ (unsigned int) REG_READ(i));
11190+ }
11191+
11192+ i = 0xb104;
11193+ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n",
11194+ i, (unsigned int) REG_READ(i));
11195+}
11196+#endif /* DUMP_REGISTER */
11197+
11198+
11199+struct mrst_limit_t {
11200+ struct psb_intel_range_t dot, m, p1;
11201+};
11202+
11203+struct mrst_clock_t {
11204+ /* derived values */
11205+ int dot;
11206+ int m;
11207+ int p1;
11208+};
11209+
11210+#define MRST_LIMIT_LVDS_100L 0
11211+#define MRST_LIMIT_LVDS_83 1
11212+#define MRST_LIMIT_LVDS_100 2
11213+
11214+#define MRST_DOT_MIN 19750
11215+#define MRST_DOT_MAX 120000
11216+#define MRST_M_MIN_100L 20
11217+#define MRST_M_MIN_100 10
11218+#define MRST_M_MIN_83 12
11219+#define MRST_M_MAX_100L 34
11220+#define MRST_M_MAX_100 17
11221+#define MRST_M_MAX_83 20
11222+#define MRST_P1_MIN 2
11223+#define MRST_P1_MAX_0 7
11224+#define MRST_P1_MAX_1 8
11225+
11226+static const struct mrst_limit_t mrst_limits[] = {
11227+ { /* MRST_LIMIT_LVDS_100L */
11228+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
11229+ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
11230+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
11231+ },
11232+	{			/* MRST_LIMIT_LVDS_83 */
11233+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
11234+ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
11235+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
11236+ },
11237+ { /* MRST_LIMIT_LVDS_100 */
11238+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
11239+ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
11240+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
11241+ },
11242+};
11243+
11244+#define MRST_M_MIN 10
11245+static const u32 mrst_m_converts[] = {
11246+ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
11247+ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
11248+ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
11249+};
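
mrst_m_converts[] maps the human-readable M divisor (MRST_M_MIN through MRST_M_MIN + 25) to the encoded value the hardware expects; mrst_crtc_mode_set() below shifts the looked-up value left by 8 into the FP register. A worked lookup, where the divisor value is just an example:

#include <stdio.h>

static const unsigned int mrst_m_converts[] = {
	0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
	0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
	0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
};
#define MRST_M_MIN 10

int main(void)
{
	int m = 20;	/* example divisor as found by mrstFindBestPLL() */
	unsigned int fp = mrst_m_converts[m - MRST_M_MIN] << 8;

	printf("fp = 0x%x\n", fp);	/* 0x36 << 8 = 0x3600 */
	return 0;
}
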
11250+
11251+#define COUNT_MAX 0x10000000
11252+void mrstWaitForPipeDisable(struct drm_device *dev)
11253+{
11254+ int count, temp;
11255+
11256+ /* FIXME JLIU7_PO */
11257+ psb_intel_wait_for_vblank(dev);
11258+ return;
11259+
11260+	/* Wait for the pipe disable to take effect. */
11261+ for (count = 0; count < COUNT_MAX; count++) {
11262+ temp = REG_READ(PIPEACONF);
11263+ if ((temp & PIPEACONF_PIPE_STATE) == 0)
11264+ break;
11265+ }
11266+
11267+ if (count == COUNT_MAX) {
11268+#if PRINT_JLIU7
11269+ DRM_INFO("JLIU7 mrstWaitForPipeDisable time out. \n");
11270+#endif /* PRINT_JLIU7 */
11271+ } else {
11272+#if PRINT_JLIU7
11273+ DRM_INFO("JLIU7 mrstWaitForPipeDisable cout = %d. \n",
11274+ count);
11275+#endif /* PRINT_JLIU7 */
11276+ }
11277+}
11278+
11279+void mrstWaitForPipeEnable(struct drm_device *dev)
11280+{
11281+ int count, temp;
11282+
11283+ /* FIXME JLIU7_PO */
11284+ psb_intel_wait_for_vblank(dev);
11285+ return;
11286+
11287+	/* Wait for the pipe enable to take effect. */
11288+ for (count = 0; count < COUNT_MAX; count++) {
11289+ temp = REG_READ(PIPEACONF);
11290+ if ((temp & PIPEACONF_PIPE_STATE) == 1)
11291+ break;
11292+ }
11293+
11294+ if (count == COUNT_MAX) {
11295+#if PRINT_JLIU7
11296+ DRM_INFO("JLIU7 mrstWaitForPipeEnable time out. \n");
11297+#endif /* PRINT_JLIU7 */
11298+ } else {
11299+#if PRINT_JLIU7
11300+		DRM_INFO("JLIU7 mrstWaitForPipeEnable count = %d.\n",
11301+			 count);
11302+#endif /* PRINT_JLIU7 */
11303+ }
11304+}
11305+
11306+static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
11307+{
11308+ const struct mrst_limit_t *limit;
11309+ struct drm_device *dev = crtc->dev;
11310+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11311+
11312+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
11313+ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
11314+ if (dev_priv->sku_100L)
11315+ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
11316+ if (dev_priv->sku_83)
11317+ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
11318+ if (dev_priv->sku_100)
11319+ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
11320+ } else {
11321+ limit = NULL;
11322+#if PRINT_JLIU7
11323+ DRM_INFO("JLIU7 jliu7 mrst_limit Wrong display type. \n");
11324+#endif /* PRINT_JLIU7 */
11325+ }
11326+
11327+ return limit;
11328+}
11329+
11330+/** Derive the pixel clock for the given refclk and divisors on MRST. */
11331+static void mrst_clock(int refclk, struct mrst_clock_t *clock)
11332+{
11333+ clock->dot = (refclk * clock->m) / (14 * clock->p1);
11334+}
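
As a worked example of the formula above: with refclk = 200000 (the sku_100 case configured later in mrst_crtc_mode_set(), in the same kHz units the driver uses), m = 14 and p1 = 2 give dot = (200000 * 14) / (14 * 2) = 100000 kHz, that is, a 100 MHz pixel clock, comfortably inside the MRST_DOT_MIN..MRST_DOT_MAX range. The divisor values here are illustrative, not taken from real hardware.
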
11335+
11336+void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
11337+{
11338+#if PRINT_JLIU7
11339+ DRM_INFO
11340+ ("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d. \n",
11341+ prefix, clock->dot, clock->m, clock->p1);
11342+#endif /* PRINT_JLIU7 */
11343+}
11344+
11345+/**
11346+ * Returns a set of divisors for the desired target clock with the given
11347+ * refclk, or FALSE.  Divisor values are the actual divisors for the clock.
11348+ */
11349+static bool
11350+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
11351+ struct mrst_clock_t *best_clock)
11352+{
11353+ struct mrst_clock_t clock;
11354+ const struct mrst_limit_t *limit = mrst_limit(crtc);
11355+ int err = target;
11356+
11357+ memset(best_clock, 0, sizeof(*best_clock));
11358+
11359+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
11360+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
11361+ clock.p1++) {
11362+ int this_err;
11363+
11364+ mrst_clock(refclk, &clock);
11365+
11366+ this_err = abs(clock.dot - target);
11367+ if (this_err < err) {
11368+ *best_clock = clock;
11369+ err = this_err;
11370+ }
11371+ }
11372+ }
11373+ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
11374+
11375+ return err != target;
11376+}
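
Note that mrst_limit() above returns NULL when the pipe drives neither LVDS nor MIPI, yet mrstFindBestPLL() dereferences the result unchecked. A defensive guard such as

	if (!limit)
		return false;

right after the mrst_limit() call would avoid the NULL dereference; whether false is the right failure value is an assumption here, since the caller only logs when no PLL settings are found.
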
11377+
11378+/**
11379+ * Sets the power management mode of the pipe and plane.
11380+ *
11381+ * This code should probably grow support for turning the cursor off and back
11382+ * on appropriately at the same time as we're turning the pipe off/on.
11383+ */
11384+static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
11385+{
11386+ struct drm_device *dev = crtc->dev;
11387+ /* struct drm_i915_master_private *master_priv; */
11388+ /* struct drm_i915_private *dev_priv = dev->dev_private; */
11389+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
11390+ int pipe = psb_intel_crtc->pipe;
11391+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
11392+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
11393+ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
11394+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
11395+ u32 temp;
11396+ bool enabled;
11397+
11398+#if PRINT_JLIU7
11399+ DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d \n",
11400+ mode, pipe);
11401+#endif /* PRINT_JLIU7 */
11402+
11403+ /* XXX: When our outputs are all unaware of DPMS modes other than off
11404+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
11405+ */
11406+ switch (mode) {
11407+ case DRM_MODE_DPMS_ON:
11408+ case DRM_MODE_DPMS_STANDBY:
11409+ case DRM_MODE_DPMS_SUSPEND:
11410+ /* Enable the DPLL */
11411+ temp = REG_READ(dpll_reg);
11412+ if ((temp & DPLL_VCO_ENABLE) == 0) {
11413+ REG_WRITE(dpll_reg, temp);
11414+ REG_READ(dpll_reg);
11415+ /* Wait for the clocks to stabilize. */
11416+ udelay(150);
11417+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
11418+ REG_READ(dpll_reg);
11419+ /* Wait for the clocks to stabilize. */
11420+ udelay(150);
11421+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
11422+ REG_READ(dpll_reg);
11423+ /* Wait for the clocks to stabilize. */
11424+ udelay(150);
11425+ }
11426+
11427+ /* Enable the pipe */
11428+ temp = REG_READ(pipeconf_reg);
11429+ if ((temp & PIPEACONF_ENABLE) == 0)
11430+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
11431+
11432+ /* Enable the plane */
11433+ temp = REG_READ(dspcntr_reg);
11434+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
11435+ REG_WRITE(dspcntr_reg,
11436+ temp | DISPLAY_PLANE_ENABLE);
11437+ /* Flush the plane changes */
11438+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
11439+ }
11440+
11441+ psb_intel_crtc_load_lut(crtc);
11442+
11443+ /* Give the overlay scaler a chance to enable
11444+ if it's on this pipe */
11445+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
11446+ break;
11447+ case DRM_MODE_DPMS_OFF:
11448+ /* Give the overlay scaler a chance to disable
11449+ * if it's on this pipe */
11450+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
11451+
11452+ /* Disable the VGA plane that we never use */
11453+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
11454+
11455+ /* Disable display plane */
11456+ temp = REG_READ(dspcntr_reg);
11457+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
11458+ REG_WRITE(dspcntr_reg,
11459+ temp & ~DISPLAY_PLANE_ENABLE);
11460+ /* Flush the plane changes */
11461+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
11462+ REG_READ(dspbase_reg);
11463+ }
11464+
11465+ if (!IS_I9XX(dev)) {
11466+ /* Wait for vblank for the disable to take effect */
11467+ psb_intel_wait_for_vblank(dev);
11468+ }
11469+
11470+ /* Next, disable display pipes */
11471+ temp = REG_READ(pipeconf_reg);
11472+ if ((temp & PIPEACONF_ENABLE) != 0) {
11473+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
11474+ REG_READ(pipeconf_reg);
11475+ }
11476+
11477+		/* Wait for the pipe disable to take effect. */
11478+ mrstWaitForPipeDisable(dev);
11479+
11480+ temp = REG_READ(dpll_reg);
11481+ if ((temp & DPLL_VCO_ENABLE) != 0) {
11482+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
11483+ REG_READ(dpll_reg);
11484+ }
11485+
11486+ /* Wait for the clocks to turn off. */
11487+ udelay(150);
11488+ break;
11489+ }
11490+
11491+#if DUMP_REGISTER
11492+ dump_dc_registers(dev);
11493+#endif /* DUMP_REGISTER */
11494+
11495+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
11496+
11497+#if 0 /* JB: Add vblank support later */
11498+ if (enabled)
11499+ dev_priv->vblank_pipe |= (1 << pipe);
11500+ else
11501+ dev_priv->vblank_pipe &= ~(1 << pipe);
11502+#endif
11503+
11504+ psb_intel_crtc->dpms_mode = mode;
11505+
11506+#if 0 /* JB: Add sarea support later */
11507+ if (!dev->primary->master)
11508+ return;
11509+
11510+ master_priv = dev->primary->master->driver_priv;
11511+ if (!master_priv->sarea_priv)
11512+ return;
11513+
11514+ switch (pipe) {
11515+ case 0:
11516+ master_priv->sarea_priv->planeA_w =
11517+ enabled ? crtc->mode.hdisplay : 0;
11518+ master_priv->sarea_priv->planeA_h =
11519+ enabled ? crtc->mode.vdisplay : 0;
11520+ break;
11521+ case 1:
11522+ master_priv->sarea_priv->planeB_w =
11523+ enabled ? crtc->mode.hdisplay : 0;
11524+ master_priv->sarea_priv->planeB_h =
11525+ enabled ? crtc->mode.vdisplay : 0;
11526+ break;
11527+ default:
11528+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
11529+ break;
11530+ }
11531+#endif
11532+}
11533+
11534+static int mrst_crtc_mode_set(struct drm_crtc *crtc,
11535+ struct drm_display_mode *mode,
11536+ struct drm_display_mode *adjusted_mode,
11537+ int x, int y,
11538+ struct drm_framebuffer *old_fb)
11539+{
11540+ struct drm_device *dev = crtc->dev;
11541+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
11542+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11543+ int pipe = psb_intel_crtc->pipe;
11544+ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
11545+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
11546+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
11547+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
11548+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
11549+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
11550+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
11551+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
11552+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
11553+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
11554+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
11555+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
11556+ int refclk = 0;
11557+ struct mrst_clock_t clock;
11558+ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport;
11559+ bool ok, is_sdvo = false;
11560+ bool is_crt = false, is_lvds = false, is_tv = false;
11561+ bool is_mipi = false;
11562+ struct drm_mode_config *mode_config = &dev->mode_config;
11563+ struct drm_connector *connector;
11564+ struct psb_intel_output *psb_intel_output;
11565+
11566+#if PRINT_JLIU7
11567+ DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n");
11568+#endif /* PRINT_JLIU7 */
11569+
11570+ list_for_each_entry(connector, &mode_config->connector_list, head) {
11571+ psb_intel_output = to_psb_intel_output(connector);
11572+
11573+ if (!connector->encoder
11574+ || connector->encoder->crtc != crtc)
11575+ continue;
11576+
11577+ switch (psb_intel_output->type) {
11578+ case INTEL_OUTPUT_LVDS:
11579+ is_lvds = true;
11580+ break;
11581+ case INTEL_OUTPUT_SDVO:
11582+ is_sdvo = true;
11583+ break;
11584+ case INTEL_OUTPUT_TVOUT:
11585+ is_tv = true;
11586+ break;
11587+ case INTEL_OUTPUT_ANALOG:
11588+ is_crt = true;
11589+ break;
11590+ case INTEL_OUTPUT_MIPI:
11591+ is_mipi = true;
11592+ break;
11593+ }
11594+ }
11595+
11596+ if (is_lvds | is_mipi) {
11597+ /*FIXME JLIU7 Get panel power delay parameters from
11598+ config data */
11599+ REG_WRITE(0x61208, 0x25807d0);
11600+ REG_WRITE(0x6120c, 0x1f407d0);
11601+ REG_WRITE(0x61210, 0x270f04);
11602+ }
11603+
11604+ /* Disable the VGA plane that we never use */
11605+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
11606+
11607+ /* Disable the panel fitter if it was on our pipe */
11608+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
11609+ REG_WRITE(PFIT_CONTROL, 0);
11610+
11611+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
11612+ drm_mode_debug_printmodeline(mode);
11613+
11614+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
11615+ ((adjusted_mode->crtc_htotal - 1) << 16));
11616+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
11617+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
11618+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
11619+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
11620+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
11621+ ((adjusted_mode->crtc_vtotal - 1) << 16));
11622+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
11623+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
11624+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
11625+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
11626+ /* pipesrc and dspsize control the size that is scaled from,
11627+ * which should always be the user's requested size.
11628+ */
11629+ REG_WRITE(dspsize_reg,
11630+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
11631+ REG_WRITE(pipesrc_reg,
11632+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
11633+
11634+ /* Flush the plane changes */
11635+ {
11636+ struct drm_crtc_helper_funcs *crtc_funcs =
11637+ crtc->helper_private;
11638+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
11639+ }
11640+
11641+ /* setup pipeconf */
11642+ pipeconf = REG_READ(pipeconf_reg);
11643+
11644+ /* Set up the display plane register */
11645+ dspcntr = REG_READ(dspcntr_reg);
11646+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
11647+
11648+ if (pipe == 0)
11649+ dspcntr |= DISPPLANE_SEL_PIPE_A;
11650+ else
11651+ dspcntr |= DISPPLANE_SEL_PIPE_B;
11652+
11653+ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
11654+ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
11655+
11656+ if (is_mipi)
11657+ return 0;
11658+
11659+ if (dev_priv->sku_100L)
11660+ refclk = 100000;
11661+ else if (dev_priv->sku_83)
11662+ refclk = 166000;
11663+ else if (dev_priv->sku_100)
11664+ refclk = 200000;
11665+
11666+ dpll = 0; /*BIT16 = 0 for 100MHz reference */
11667+
11668+ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
11669+
11670+ if (!ok) {
11671+#if 0 /* FIXME JLIU7 */
11672+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
11673+		return -EINVAL;
11674+#endif /* FIXME JLIU7 */
11675+#if PRINT_JLIU7
11676+ DRM_INFO
11677+ ("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set. \n");
11678+#endif /* PRINT_JLIU7 */
11679+ } else {
11680+#if PRINT_JLIU7
11681+ DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d,"
11682+ "m = %x, p1 = %x. \n", clock.dot, clock.m,
11683+ clock.p1);
11684+#endif /* PRINT_JLIU7 */
11685+ }
11686+
11687+ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
11688+
11689+ dpll |= DPLL_VGA_MODE_DIS;
11690+
11691+
11692+ dpll |= DPLL_VCO_ENABLE;
11693+
11694+ if (is_lvds)
11695+ dpll |= DPLLA_MODE_LVDS;
11696+ else
11697+ dpll |= DPLLB_MODE_DAC_SERIAL;
11698+
11699+ if (is_sdvo) {
11700+ int sdvo_pixel_multiply =
11701+ adjusted_mode->clock / mode->clock;
11702+
11703+ dpll |= DPLL_DVO_HIGH_SPEED;
11704+ dpll |=
11705+ (sdvo_pixel_multiply -
11706+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
11707+ }
11708+
11709+
11710+ /* compute bitmask from p1 value */
11711+ dpll |= (1 << (clock.p1 - 2)) << 17;
11712+
11713+ dpll |= DPLL_VCO_ENABLE;
11714+
11715+#if PRINT_JLIU7
11716+ mrstPrintPll("chosen", &clock);
11717+#endif /* PRINT_JLIU7 */
11718+
11719+#if 0
11720+ if (!xf86ModesEqual(mode, adjusted_mode)) {
11721+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
11722+ "Adjusted mode for pipe %c:\n",
11723+ pipe == 0 ? 'A' : 'B');
11724+ xf86PrintModeline(pScrn->scrnIndex, mode);
11725+ }
11726+ i830PrintPll("chosen", &clock);
11727+#endif
11728+
11729+ if (dpll & DPLL_VCO_ENABLE) {
11730+ REG_WRITE(fp_reg, fp);
11731+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
11732+ REG_READ(dpll_reg);
11733+/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
11734+ udelay(150);
11735+ }
11736+
11737+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
11738+ * This is an exception to the general rule that mode_set doesn't turn
11739+ * things on.
11740+ */
11741+ if (is_lvds) {
11742+
11743+ /* FIXME JLIU7 need to support 24bit panel */
11744+#if MRST_24BIT_LVDS
11745+ lvdsport =
11746+ (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN
11747+ | LVDS_A3_POWER_UP | LVDS_A0A2_CLKA_POWER_UP;
11748+
11749+#if MRST_24BIT_DOT_1
11750+ lvdsport |= MRST_PANEL_24_DOT_1_FORMAT;
11751+#endif /* MRST_24BIT_DOT_1 */
11752+
11753+#else /* MRST_24BIT_LVDS */
11754+ lvdsport =
11755+ (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN;
11756+#endif /* MRST_24BIT_LVDS */
11757+
11758+#if MRST_24BIT_WA
11759+ lvdsport = 0x80300340;
11760+#else /* MRST_24BIT_WA */
11761+		lvdsport = 0x82300300;
11762+#endif /* MRST_24BIT_WA */
11763+
11764+ REG_WRITE(LVDS, lvdsport);
11765+ REG_READ(LVDS);
11766+ }
11767+
11768+ REG_WRITE(fp_reg, fp);
11769+ REG_WRITE(dpll_reg, dpll);
11770+ REG_READ(dpll_reg);
11771+ /* Wait for the clocks to stabilize. */
11772+ udelay(150);
11773+
11774+ /* write it again -- the BIOS does, after all */
11775+ REG_WRITE(dpll_reg, dpll);
11776+ REG_READ(dpll_reg);
11777+ /* Wait for the clocks to stabilize. */
11778+ udelay(150);
11779+
11780+ REG_WRITE(pipeconf_reg, pipeconf);
11781+ REG_READ(pipeconf_reg);
11782+
11783+	/* Wait for the pipe enable to take effect. */
11784+ mrstWaitForPipeEnable(dev);
11785+
11786+ REG_WRITE(dspcntr_reg, dspcntr);
11787+ psb_intel_wait_for_vblank(dev);
11788+
11789+ return 0;
11790+}
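
Worked example of the p1 encoding above: for clock.p1 = 3, the expression (1 << (clock.p1 - 2)) << 17 sets bit 18 of dpll, a one-hot field in which bit (15 + p1) is set. Reading this field as the post-divisor select is an inference from the arithmetic, not something the patch states.
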
11791+
11792+
11793+static const struct drm_crtc_helper_funcs mrst_helper_funcs = {
11794+ .dpms = mrst_crtc_dpms,
11795+ .mode_fixup = psb_intel_crtc_mode_fixup,
11796+ .mode_set = mrst_crtc_mode_set,
11797+ .mode_set_base = psb_intel_pipe_set_base,
11798+ .prepare = psb_intel_crtc_prepare,
11799+ .commit = psb_intel_crtc_commit,
11800+};
11801+
11802+/* MRST_PLATFORM end */
11803diff -uNr a/drivers/gpu/drm/psb/psb_intel_display.h b/drivers/gpu/drm/psb/psb_intel_display.h
11804--- a/drivers/gpu/drm/psb/psb_intel_display.h 1969-12-31 16:00:00.000000000 -0800
11805+++ b/drivers/gpu/drm/psb/psb_intel_display.h 2009-04-07 13:28:38.000000000 -0700
11806@@ -0,0 +1,31 @@
11807+
11808+/* Copyright (c) 2008, Intel Corporation
11809+ * Permission is hereby granted, free of charge, to any person obtaining a
11810+ * copy of this software and associated documentation files (the "Software"),
11811+ * to deal in the Software without restriction, including without limitation
11812+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11813+ * and/or sell copies of the Software, and to permit persons to whom the
11814+ * Software is furnished to do so, subject to the following conditions:
11815+ *
11816+ * The above copyright notice and this permission notice (including the next
11817+ * paragraph) shall be included in all copies or substantial portions of the
11818+ * Software.
11819+ *
11820+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11821+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11822+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11823+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11824+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
11825+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
11826+ * DEALINGS IN THE SOFTWARE.
11827+ *
11828+ * Authors:
11829+ * Eric Anholt <eric@anholt.net>
11830+ */
11831+
11832+#ifndef _INTEL_DISPLAY_H_
11833+#define _INTEL_DISPLAY_H_
11834+
11835+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
11836+
11837+#endif
11838diff -uNr a/drivers/gpu/drm/psb/psb_intel_drv.h b/drivers/gpu/drm/psb/psb_intel_drv.h
11839--- a/drivers/gpu/drm/psb/psb_intel_drv.h 1969-12-31 16:00:00.000000000 -0800
11840+++ b/drivers/gpu/drm/psb/psb_intel_drv.h 2009-04-07 13:28:38.000000000 -0700
11841@@ -0,0 +1,192 @@
11842+/*
11843+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
11844+ * Copyright (c) 2007 Intel Corporation
11845+ * Jesse Barnes <jesse.barnes@intel.com>
11846+ */
11847+#ifndef __INTEL_DRV_H__
11848+#define __INTEL_DRV_H__
11849+
11850+#include <linux/i2c.h>
11851+#include <linux/i2c-id.h>
11852+#include <linux/i2c-algo-bit.h>
11853+#include <drm/drm_crtc.h>
11854+
11855+#include <drm/drm_crtc_helper.h>
11856+
11857+/*
11858+ * MOORESTOWN defines
11859+ */
11860+#define MRST_I2C 0
11861+
11862+#define DUMP_REGISTER 0
11863+#define MRST_24BIT_LVDS 1
11864+#define MRST_24BIT_DOT_1 0
11865+#define MRST_24BIT_WA 0
11866+
11867+#define PRINT_JLIU7 0
11868+#define DELAY_TIME1 80 /* 1000 = 1ms */
11869+
11870+/*
11871+ * Display related stuff
11872+ */
11873+
11874+/* store information about an Ixxx DVO */
11875+/* The i830->i865 use multiple DVOs with multiple i2cs */
11876+/* the i915, i945 have a single sDVO i2c bus - which is different */
11877+#define MAX_OUTPUTS 6
11878+/* maximum connectors per crtcs in the mode set */
11879+#define INTELFB_CONN_LIMIT 4
11880+
11881+#define INTEL_I2C_BUS_DVO 1
11882+#define INTEL_I2C_BUS_SDVO 2
11883+
11884+/* these are outputs from the chip - integrated only
11885+ * external chips are via DVO or SDVO output */
11886+#define INTEL_OUTPUT_UNUSED 0
11887+#define INTEL_OUTPUT_ANALOG 1
11888+#define INTEL_OUTPUT_DVO 2
11889+#define INTEL_OUTPUT_SDVO 3
11890+#define INTEL_OUTPUT_LVDS 4
11891+#define INTEL_OUTPUT_TVOUT 5
11892+#define INTEL_OUTPUT_MIPI 6
11893+
11894+#define INTEL_DVO_CHIP_NONE 0
11895+#define INTEL_DVO_CHIP_LVDS 1
11896+#define INTEL_DVO_CHIP_TMDS 2
11897+#define INTEL_DVO_CHIP_TVOUT 4
11898+
11899+/**
11900+ * Holds information usually kept in the device driver's private data,
11901+ * since it needs to be shared across multiple drivers' private data.
11902+ */
11903+struct psb_intel_mode_device {
11904+
11905+ /*
11906+ * Abstracted memory manager operations
11907+ */
11908+ void *(*bo_from_handle) (struct drm_device *dev,
11909+ struct drm_file *file_priv,
11910+ unsigned int handle);
11911+ size_t(*bo_size) (struct drm_device *dev, void *bo);
11912+ size_t(*bo_offset) (struct drm_device *dev, void *bo);
11913+ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
11914+ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
11915+
11916+ /*
11917+ * Cursor
11918+ */
11919+ int cursor_needs_physical;
11920+
11921+ /*
11922+ * LVDS info
11923+ */
11924+ int backlight_duty_cycle; /* restore backlight to this value */
11925+ bool panel_wants_dither;
11926+ struct drm_display_mode *panel_fixed_mode;
11927+ struct drm_display_mode *vbt_mode; /* if any */
11928+
11929+ uint32_t saveBLC_PWM_CTL;
11930+};
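
The function pointers in psb_intel_mode_device let the mode-setting code stay agnostic of the underlying memory manager. The same callback-table pattern, reduced to a runnable user-space sketch with invented types (not the driver's real hooks):

#include <stdio.h>
#include <stddef.h>

struct bo { size_t size; };		/* stand-in buffer object */

struct mode_ops {			/* stand-in for the bo_* hooks */
	size_t (*bo_size)(struct bo *bo);
};

static size_t demo_bo_size(struct bo *bo)
{
	return bo->size;
}

int main(void)
{
	struct mode_ops ops = { .bo_size = demo_bo_size };
	struct bo b = { .size = 4096 };

	printf("bo size = %zu\n", ops.bo_size(&b));
	return 0;
}
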
11931+
11932+struct psb_intel_i2c_chan {
11933+ /* for getting at dev. private (mmio etc.) */
11934+ struct drm_device *drm_dev;
11935+ u32 reg; /* GPIO reg */
11936+ struct i2c_adapter adapter;
11937+ struct i2c_algo_bit_data algo;
11938+ u8 slave_addr;
11939+};
11940+
11941+struct psb_intel_output {
11942+ struct drm_connector base;
11943+
11944+ struct drm_encoder enc;
11945+ int type;
11946+ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
11947+ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
11948+ bool load_detect_temp;
11949+ void *dev_priv;
11950+
11951+ struct psb_intel_mode_device *mode_dev;
11952+
11953+};
11954+
11955+struct psb_intel_crtc {
11956+ struct drm_crtc base;
11957+ int pipe;
11958+ int plane;
11959+ uint32_t cursor_addr;
11960+ u8 lut_r[256], lut_g[256], lut_b[256];
11961+ int dpms_mode;
11962+ struct psb_intel_framebuffer *fbdev_fb;
11963+ /* a mode_set for fbdev users on this crtc */
11964+ struct drm_mode_set mode_set;
11965+
11966+ /* current bo we scanout from */
11967+ void *scanout_bo;
11968+
11969+ /* current bo we cursor from */
11970+ void *cursor_bo;
11971+
11972+ struct psb_intel_mode_device *mode_dev;
11973+};
11974+
11975+#define to_psb_intel_crtc(x) container_of(x, struct psb_intel_crtc, base)
11976+#define to_psb_intel_output(x) container_of(x, struct psb_intel_output, base)
11977+#define enc_to_psb_intel_output(x) container_of(x, struct psb_intel_output, enc)
11978+#define to_psb_intel_framebuffer(x) container_of(x, struct psb_intel_framebuffer, base)
11979+
11980+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
11981+ const u32 reg, const char *name);
11982+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
11983+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
11984+extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
11985+
11986+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
11987+ struct psb_intel_mode_device *mode_dev);
11988+extern void psb_intel_crt_init(struct drm_device *dev);
11989+extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
11990+extern void psb_intel_dvo_init(struct drm_device *dev);
11991+extern void psb_intel_tv_init(struct drm_device *dev);
11992+extern void psb_intel_lvds_init(struct drm_device *dev,
11993+ struct psb_intel_mode_device *mode_dev);
11994+extern void mrst_lvds_init(struct drm_device *dev,
11995+ struct psb_intel_mode_device *mode_dev);
11996+extern void mrst_dsi_init(struct drm_device *dev,
11997+ struct psb_intel_mode_device *mode_dev);
11998+
11999+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
12000+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
12001+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
12002+
12003+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
12004+ *connector);
12005+
12006+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
12007+ struct drm_crtc *crtc);
12008+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
12009+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
12010+ int pipe);
12011+extern struct drm_crtc *psb_intel_get_load_detect_pipe
12012+ (struct psb_intel_output *psb_intel_output,
12013+ struct drm_display_mode *mode, int *dpms_mode);
12014+extern void psb_intel_release_load_detect_pipe(struct psb_intel_output
12015+ *psb_intel_output, int dpms_mode);
12016+
12017+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
12018+ int sdvoB);
12019+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
12020+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
12021+ int enable);
12022+extern int intelfb_probe(struct drm_device *dev);
12023+extern int intelfb_remove(struct drm_device *dev,
12024+ struct drm_framebuffer *fb);
12025+extern void psb_intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red,
12026+ u16 green, u16 blue, int regno);
12027+
12028+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
12029+ *dev, struct
12030+ drm_mode_fb_cmd
12031+ *mode_cmd,
12032+ void *mm_private);
12033+#endif /* __INTEL_DRV_H__ */
12034diff -uNr a/drivers/gpu/drm/psb/psb_intel_dsi.c b/drivers/gpu/drm/psb/psb_intel_dsi.c
12035--- a/drivers/gpu/drm/psb/psb_intel_dsi.c 1969-12-31 16:00:00.000000000 -0800
12036+++ b/drivers/gpu/drm/psb/psb_intel_dsi.c 2009-04-07 13:28:38.000000000 -0700
12037@@ -0,0 +1,1644 @@
12038+/*
12039+ * Copyright © 2006-2007 Intel Corporation
12040+ *
12041+ * Permission is hereby granted, free of charge, to any person obtaining a
12042+ * copy of this software and associated documentation files (the "Software"),
12043+ * to deal in the Software without restriction, including without limitation
12044+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12045+ * and/or sell copies of the Software, and to permit persons to whom the
12046+ * Software is furnished to do so, subject to the following conditions:
12047+ *
12048+ * The above copyright notice and this permission notice (including the next
12049+ * paragraph) shall be included in all copies or substantial portions of the
12050+ * Software.
12051+ *
12052+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12053+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12054+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12055+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12056+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
12057+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
12058+ * DEALINGS IN THE SOFTWARE.
12059+ *
12060+ * Authors:
12061+ * jim liu <jim.liu@intel.com>
12062+ */
12063+
12064+#include <linux/backlight.h>
12065+#include <drm/drm_crtc.h>
12066+#include <drm/drm_edid.h>
12067+
12068+#define DRM_MODE_ENCODER_MIPI 5
12069+#define DRM_MODE_CONNECTOR_MIPI 13
12070+
12071+#if DUMP_REGISTER
12072+extern void dump_dsi_registers(struct drm_device *dev);
12073+#endif /* DUMP_REGISTER */
12074+
12075+int dsi_backlight; /* restore backlight to this value */
12076+
12077+/**
12078+ * Returns the maximum level of the backlight duty cycle field.
12079+ */
12080+static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
12081+{
12082+#if PRINT_JLIU7
12083+ DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n");
12084+#endif /* PRINT_JLIU7 */
12085+
12086+ return BRIGHTNESS_MAX_LEVEL;
12087+
12088+/* FIXME jliu7 need to revisit */
12089+}
12090+
12091+/**
12092+ * Sets the backlight level.
12093+ *
12094+ * \param level backlight level, from 0 to mrst_dsi_get_max_backlight().
12095+ */
12096+static void mrst_dsi_set_backlight(struct drm_device *dev, int level)
12097+{
12098+ u32 blc_pwm_ctl;
12099+ u32 max_pwm_blc;
12100+
12101+#if PRINT_JLIU7
12102+ DRM_INFO("JLIU7 enter mrst_dsi_set_backlight \n");
12103+#endif /* PRINT_JLIU7 */
12104+
12105+#if 1 /* FIXME JLIU7 */
12106+ return;
12107+#endif /* FIXME JLIU7 */
12108+
12109+	/* Prevent LVDS going to total black */
12110+ if (level < 20)
12111+ level = 20;
12112+
12113+ max_pwm_blc = mrst_lvds_get_PWM_ctrl_freq(dev);
12114+
12115+	if (max_pwm_blc == 0)
12116+ {
12117+ return;
12118+ }
12119+
12120+ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
12121+
12122+ if (blc_pol == BLC_POLARITY_INVERSE) {
12123+ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
12124+ }
12125+
12126+ REG_WRITE(BLC_PWM_CTL,
12127+ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
12128+ blc_pwm_ctl);
12129+}
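
For a worked scaling example: assuming BRIGHTNESS_MAX_LEVEL is 100 (the macro is defined elsewhere in the patch, so this is an assumption) and max_pwm_blc reads back as 0x1000, a requested level of 50 yields blc_pwm_ctl = 50 * 0x1000 / 100 = 0x800, a 50% duty cycle; with inverse polarity the value becomes 0x1000 - 0x800, which is again 0x800 in this symmetric midpoint case.
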
12130+
12131+/**
12132+ * Sets the power state for the panel.
12133+ */
12134+static void mrst_dsi_set_power(struct drm_device *dev,
12135+ struct psb_intel_output *output, bool on)
12136+{
12137+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12138+ u32 pp_status;
12139+
12140+#if PRINT_JLIU7
12141+ DRM_INFO("JLIU7 enter mrst_dsi_set_power \n");
12142+#endif /* PRINT_JLIU7 */
12143+ /*
12144+	 * The DSI device must be ready before we can change the power state.
12145+ */
12146+ if (!dev_priv->dsi_device_ready)
12147+ {
12148+ return;
12149+ }
12150+
12151+ /*
12152+	 * We don't support dual DSI yet. It may be in the POR in the future.
12153+ */
12154+ if (dev_priv->dual_display)
12155+ {
12156+ return;
12157+ }
12158+
12159+ if (on) {
12160+ if (dev_priv->dpi & (!dev_priv->dpi_panel_on))
12161+ {
12162+
12163+#if PRINT_JLIU7
12164+ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = on \n");
12165+#endif /* PRINT_JLIU7 */
12166+ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
12167+#if 0 /*FIXME JLIU7 */
12168+ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_ON_DATA);
12169+ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_ON);
12170+#endif /*FIXME JLIU7 */
12171+
12172+ dev_priv->dpi_panel_on = true;
12173+
12174+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
12175+ POWER_TARGET_ON);
12176+ do {
12177+ pp_status = REG_READ(PP_STATUS);
12178+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
12179+ }
12180+ else if ((!dev_priv->dpi) & (!dev_priv->dbi_panel_on))
12181+ {
12182+#if PRINT_JLIU7
12183+ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = on \n");
12184+#endif /* PRINT_JLIU7 */
12185+
12186+ dev_priv->DBI_CB_pointer = 0;
12187+ /* exit sleep mode */
12188+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = exit_sleep_mode;
12189+
12190+#if 0 /*FIXME JLIU7 */
12191+			/* Check MIPI Adapter command registers */
12192+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
12193+#endif /*FIXME JLIU7 */
12194+
12195+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
12196+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
12197+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
12198+
12199+			/* The host processor must wait five milliseconds after sending the
12200+			 * exit_sleep_mode command; this delay lets the supply voltages and clock circuits stabilize. */
12201+			mdelay(5);	/* udelay() is not meant for delays this long */
12202+
12203+ dev_priv->DBI_CB_pointer = 0;
12204+
12205+ /* set display on */
12206+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = set_display_on ;
12207+
12208+#if 0 /*FIXME JLIU7 */
12209+			/* Check MIPI Adapter command registers */
12210+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
12211+#endif /*FIXME JLIU7 */
12212+
12213+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
12214+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
12215+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
12216+
12217+ dev_priv->dbi_panel_on = true;
12218+ }
12219+/*FIXME JLIU7 */
12220+/* Need to figure out how to control the MIPI panel power on sequence*/
12221+
12222+ mrst_dsi_set_backlight(dev, dsi_backlight);
12223+ }
12224+ else
12225+ {
12226+ mrst_dsi_set_backlight(dev, 0);
12227+/*FIXME JLIU7 */
12228+/* Need to figure out how to control the MIPI panel power down sequence*/
12229+ /*
12230+ * Only save the current backlight value if we're going from
12231+ * on to off.
12232+ */
12233+ if (dev_priv->dpi & dev_priv->dpi_panel_on)
12234+ {
12235+#if PRINT_JLIU7
12236+ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = off \n");
12237+#endif /* PRINT_JLIU7 */
12238+
12239+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
12240+ ~POWER_TARGET_ON);
12241+ do {
12242+ pp_status = REG_READ(PP_STATUS);
12243+ } while (pp_status & PP_ON);
12244+
12245+#if 0 /*FIXME JLIU7 */
12246+ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_OFF_DATA);
12247+ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_OFF);
12248+#endif /*FIXME JLIU7 */
12249+ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
12250+ dev_priv->dpi_panel_on = false;
12251+ }
12252+		else if (!dev_priv->dpi && dev_priv->dbi_panel_on)
12253+ {
12254+#if PRINT_JLIU7
12255+ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = off \n");
12256+#endif /* PRINT_JLIU7 */
12257+ dev_priv->DBI_CB_pointer = 0;
12258+ /* enter sleep mode */
12259+ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = enter_sleep_mode;
12260+
12261+			/* Check MIPI Adapter command registers */
12262+ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
12263+
12264+ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
12265+ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
12266+ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
12267+ dev_priv->dbi_panel_on = false;
12268+ }
12269+ }
12270+}
12271+
12272+static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
12273+{
12274+ struct drm_device *dev = encoder->dev;
12275+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
12276+
12277+#if PRINT_JLIU7
12278+ DRM_INFO("JLIU7 enter mrst_dsi_dpms \n");
12279+#endif /* PRINT_JLIU7 */
12280+
12281+ if (mode == DRM_MODE_DPMS_ON)
12282+ mrst_dsi_set_power(dev, output, true);
12283+ else
12284+ mrst_dsi_set_power(dev, output, false);
12285+
12286+ /* XXX: We never power down the DSI pairs. */
12287+}
12288+
12289+static void mrst_dsi_save(struct drm_connector *connector)
12290+{
12291+#if 0 /* JB: Disable for drop */
12292+ struct drm_device *dev = connector->dev;
12293+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12294+
12295+#if PRINT_JLIU7
12296+ DRM_INFO("JLIU7 enter mrst_dsi_save \n");
12297+#endif /* PRINT_JLIU7 */
12298+
12299+ dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
12300+ dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
12301+ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
12302+ dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
12303+ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
12304+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
12305+ BACKLIGHT_DUTY_CYCLE_MASK);
12306+
12307+ /*
12308+ * make backlight to full brightness
12309+ */
12310+ dsi_backlight = mrst_dsi_get_max_backlight(dev);
12311+#endif
12312+}
12313+
12314+static void mrst_dsi_restore(struct drm_connector *connector)
12315+{
12316+#if 0 /* JB: Disable for drop */
12317+ struct drm_device *dev = connector->dev;
12318+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12319+
12320+#if PRINT_JLIU7
12321+ DRM_INFO("JLIU7 enter mrst_dsi_restore \n");
12322+#endif /* PRINT_JLIU7 */
12323+
12324+ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
12325+ REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
12326+ REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
12327+ REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
12328+ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
12329+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
12330+		mrst_dsi_set_power(dev, to_psb_intel_output(connector), true);
12331+	else
12332+		mrst_dsi_set_power(dev, to_psb_intel_output(connector), false);
12333+#endif
12334+}
12335+
12336+static void mrst_dsi_prepare(struct drm_encoder *encoder)
12337+{
12338+ struct drm_device *dev = encoder->dev;
12339+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
12340+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
12341+
12342+#if PRINT_JLIU7
12343+ DRM_INFO("JLIU7 enter mrst_dsi_prepare \n");
12344+#endif /* PRINT_JLIU7 */
12345+
12346+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
12347+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
12348+ BACKLIGHT_DUTY_CYCLE_MASK);
12349+
12350+ mrst_dsi_set_power(dev, output, false);
12351+}
12352+
12353+static void mrst_dsi_commit( struct drm_encoder *encoder)
12354+{
12355+ struct drm_device *dev = encoder->dev;
12356+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
12357+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
12358+
12359+#if PRINT_JLIU7
12360+ DRM_INFO("JLIU7 enter mrst_dsi_commit \n");
12361+#endif /* PRINT_JLIU7 */
12362+
12363+ if (mode_dev->backlight_duty_cycle == 0)
12364+ mode_dev->backlight_duty_cycle =
12365+ mrst_dsi_get_max_backlight(dev);
12366+
12367+ mrst_dsi_set_power(dev, output, true);
12368+
12369+#if DUMP_REGISTER
12370+ dump_dsi_registers(dev);
12371+#endif /* DUMP_REGISTER */
12372+}
12373+
12374+/* ************************************************************************* *\
12375+FUNCTION: GetHS_TX_timeoutCount
12376+
12377+DESCRIPTION: In burst mode, use a value greater than one DPI line time in byte
12378+	clocks (txbyteclkhs); a timeout of 1 + that value is recommended.
12379+
12380+	In non-burst mode, use a value greater than one DPI frame time in byte
12381+	clocks (txbyteclkhs); a timeout of 1 + that value is recommended.
12382+
12383+\* ************************************************************************* */
12384+static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
12385+{
12386+
12387+ u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
12388+
12389+ /* Total pixels need to be transfer per line*/
12390+ HTotalPixel = (dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch) * dev_priv->laneCount + dev_priv->HactiveArea;
12391+
12392+ /* byte count = (pixel count * bits per pixel) / 8 */
12393+ HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
12394+
12395+ if (dev_priv->videoModeFormat == BURST_MODE)
12396+ {
12397+ timeoutCount = HTOT_count + 1;
12398+#if 1 /*FIXME remove it after power-on */
12399+ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
12400+ + dev_priv->VsyncWidth;
12401+ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
12402+ timeoutCount = (HTOT_count * VTOT_count) + 1;
12403+#endif
12404+ }
12405+ else
12406+ {
12407+ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
12408+ + dev_priv->VsyncWidth;
12409+ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
12410+ timeoutCount = (HTOT_count * VTOT_count) + 1;
12411+ }
12412+
12413+ return timeoutCount & 0xFFFF;
12414+}
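
To make the arithmetic in GetHS_TX_timeoutCount() concrete, here is a runnable re-statement with invented panel timings (the real values come from dev_priv):

#include <stdio.h>

int main(void)
{
	/* invented example timings; the driver reads these from dev_priv */
	int hsync = 10, hbp = 20, hfp = 10, lanes = 2, hactive = 864;
	int vactive = 480, vbp = 8, vfp = 8, vsync = 2, bpp = 24;

	int htotal_pixels = (hsync + hbp + hfp) * lanes + hactive;	/* 944 */
	int htot_count = (htotal_pixels * bpp) / 8;			/* 2832 bytes */
	int vtot_count = vactive + vbp + vfp + vsync;			/* 498 lines */
	unsigned int timeout =
		((unsigned int) (htot_count * vtot_count) + 1) & 0xFFFF;

	printf("HS TX timeout = %u byte clocks\n", timeout);
	return 0;
}
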
12415+
12416+/* ************************************************************************* *\
12417+FUNCTION: GetLP_RX_timeoutCount
12418+
12419+DESCRIPTION: The timeout value is protocol specific. Time out value is calculated
12420+ from txclkesc(50ns).
12421+
12422+ Minimum value =
12423+	Time to send one Trigger message = 4 x txclkesc (escape mode entry sequence)
12424+	 + 8-bit trigger message (2x8xtxclkesc)
12425+	 + 1 txclkesc (stop_state)
12426+	 = 21 x txclkesc (15h)
12427+
12428+	Maximum value =
12429+	Time to send a long packet with maximum payload data
12430+	 = 4 x txclkesc (escape mode entry sequence)
12431+	 + 8-bit low power data transmission command (2x8xtxclkesc)
12432+	 + packet header (4x8x2 txclkesc)
12433+	 + payload (nx8x2 txclkesc)
12434+	 + CRC (2x8x2 txclkesc)
12435+	 + 1 txclkesc (stop_state)
12436+	 = 117 txclkesc + n (payload in bytes) x 16 txclkesc.
12437+
12438+\* ************************************************************************* */
12439+static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
12440+{
12441+
12442+ u32 timeoutCount = 0;
12443+
12444+ if (dev_priv->config_phase)
12445+ {
12446+ /* Assuming 256 byte DDB data.*/
12447+ timeoutCount = 117 + 256 * 16;
12448+ }
12449+ else
12450+ {
12451+ /* For DPI video only mode use the minimum value.*/
12452+ timeoutCount = 0x15;
12453+#if 1 /*FIXME remove it after power-on */
12454+ /* Assuming 256 byte DDB data.*/
12455+ timeoutCount = 117 + 256 * 16;
12456+#endif
12457+ }
12458+
12459+ return timeoutCount;
12460+}
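
Working the two bounds above through: the DPI-only minimum is 21 txclkesc (0x15, the value used in the code), while the assumed 256-byte DDB case gives 117 + 256 * 16 = 4213 txclkesc.
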
12461+
12462+/* ************************************************************************* *\
12463+FUNCTION: GetHSA_Count
12464+
12465+DESCRIPTION: Shows the horizontal sync value in terms of byte clock
12466+ (txbyteclkhs)
12467+	Minimum HSA period should be sufficient to transmit an hsync start short
12468+	packet (4 bytes).
12469+	i) For non-burst mode with sync pulse, min value: 4 in decimal (plus
12470+	   an optional 6 bytes for a zero-payload blanking packet). But if
12471+	   the value is less than 10 but more than 4, then this count will
12472+	   be added to the HBP's count for one lane.
12473+	ii) For non-burst sync event and burst mode, there is no HSA, so you
12474+	   can program this to zero. If you program this register, these
12475+	   byte values will be added to HBP.
12476+	iii) For burst mode of operation, normally the values programmed in
12477+	   terms of byte clock are based on the principle that the time for
12478+	   transferring HSA in burst mode is the same as in non-burst mode.
12479+\* ************************************************************************* */
12480+static u32 GetHSA_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
12481+{
12482+ u32 HSA_count;
12483+ u32 HSA_countX8;
12484+
12485+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
12486+ HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
12487+
12488+ if (dev_priv->videoModeFormat == BURST_MODE)
12489+ {
12490+ HSA_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
12491+ }
12492+
12493+ HSA_count = HSA_countX8 / 8;
12494+
12495+ return HSA_count;
12496+}
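
A quick worked conversion for the non-burst case above: an HsyncWidth of 10 pixels at bpp = 24 gives HSA_countX8 = 240, so HSA_count = 240 / 8 = 30 byte clocks. GetHBP_Count() and GetHFP_Count() below apply exactly the same pixels-to-byte-clock conversion to the back and front porch.
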
12497+
12498+/* ************************************************************************* *\
12499+FUNCTION: GetHBP_Count
12500+
12501+DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
12502+	Minimum HBP period should be sufficient to transmit an "hsync end short
12503+	packet (4 bytes) + blanking packet overhead (6 bytes) + RGB packet header (4 bytes)".
12504+	For burst mode of operation, normally the values programmed in terms of
12505+	byte clock are based on the principle that the time for transferring HBP
12506+	in burst mode is the same as in non-burst mode.
12507+
12508+	Min value: 14 in decimal (accounted with zero payload for the blanking packet) for one lane.
12509+	Max value: any value greater than 14, based on DPI resolution.
12510+\* ************************************************************************* */
12511+static u32 GetHBP_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
12512+{
12513+ u32 HBP_count;
12514+ u32 HBP_countX8;
12515+
12516+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
12517+ HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
12518+
12519+ if (dev_priv->videoModeFormat == BURST_MODE)
12520+ {
12521+ HBP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
12522+ }
12523+
12524+ HBP_count = HBP_countX8 / 8;
12525+
12526+ return HBP_count;
12527+}
12528+
12529+/* ************************************************************************* *\
12530+FUNCTION: GetHFP_Count
12531+
12532+DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
12533+ Minimum HFP period should be sufficient to transmit the “RGB data packet
12534+ footer (2 bytes) + Blanking packet overhead (6 bytes)” for non-burst mode.
12535+
12536+ For burst mode, the minimum HFP period should be sufficient to transmit
12537+ the “Blanking packet overhead (6 bytes)”.
12538+
12539+ For Burst mode of operation, normally the values programmed in terms of
12540+ byte clock are based on the principle that the time for transferring HFP
12541+ in Burst mode is the same as in non-burst mode.
12542+
12543+ Min value – 8 in decimal for non-burst mode [accounting for a zero-payload
12544+ blanking packet] for one lane.
12545+ Min value – 6 in decimal for burst mode for one lane.
12546+
12547+ Max value – any value greater than the minimum value, based on the DPI resolution
12548+\* ************************************************************************* */
12549+static u32 GetHFP_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
12550+{
12551+ u32 HFP_count;
12552+ u32 HFP_countX8;
12553+
12554+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
12555+ HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
12556+
12557+ if (dev_priv->videoModeFormat == BURST_MODE)
12558+ {
12559+ HFP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
12560+ }
12561+
12562+ HFP_count = HFP_countX8 / 8;
12563+
12564+ return HFP_count;
12565+}
12566+
12567+/* ************************************************************************* *\
12568+FUNCTION: GetHAdr_Count
12569+
12570+DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
12571+ In Non-Burst Mode, the count equals the RGB word count value.
12572+
12573+ In Burst Mode, RGB pixel packets are time-compressed, leaving more time
12574+ during a scan line for LP mode (saving power) or for multiplexing
12575+ other transmissions onto the DSI link. Hence, the count equals the
12576+ time in txbyteclkhs for sending time compressed RGB pixels plus
12577+ the time needed for moving to power save mode or the time needed
12578+ for secondary channel to use the DSI link.
12579+
12580+ But if the remaining time for moving to low power mode is less than
12581+ 8 txbyteclkhs [2 txbyteclkhs for the RGB data packet footer and
12582+ 6 txbyteclkhs for a blanking packet with zero payload], then
12583+ this count will be added to the HFP's count for one lane.
12584+
12585+ Min value – 8 in decimal for non-burst mode [accounting for a zero-payload
12586+ blanking packet] for one lane.
12587+ Min value – 6 in decimal for burst mode for one lane.
12588+
12589+ Max value – any value greater than the minimum value, based on the DPI resolution
12590+\* ************************************************************************* */
12591+static u32 GetHAdr_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
12592+{
12593+ u32 HAdr_count;
12594+ u32 HAdr_countX8;
12595+
12596+ /* byte clock count = (pixel clock count * bits per pixel) /8 */
12597+ HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
12598+
12599+ if (dev_priv->videoModeFormat == BURST_MODE)
12600+ {
12601+ HAdr_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
12602+ }
12603+
12604+ HAdr_count = HAdr_countX8 / 8;
12605+
12606+ return HAdr_count;
12607+}
12608+
12609+/* ************************************************************************* *\
12610+FUNCTION: GetHighLowSwitchCount
12611+
12612+DESCRIPTION: High speed to low power or low power to high speed switching time
12613+ in terms of byte clock (txbyteclkhs). This value is based on the
12614+ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
12615+
12616+ Typical value - Number of byte clocks required to switch from low power mode
12617+ to high speed mode after "txrequesths" is asserted.
12618+
12619+ The worst count value among the low to high or high to low switching time
12620+ in terms of txbyteclkhs has to be programmed in this register.
12621+
12622+ Useful formulae:
12623+ DDR clock period = 2 times UI
12624+ txbyteclkhs clock = 8 times UI
12625+ Tlpx = 1 / txclkesc
12626+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
12627+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
12628+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
12629+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
12630+\* ************************************************************************* */
12631+static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
12632+{
12633+ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
12634+
12635+/* ************************************************************************* *\
12636+ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
12637+ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
12638+
12639+ Tlpx = 50 ns, using max txclkesc (20MHz)
12640+
12641+ txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
12642+ UI_period = 500 / dev_priv->DDR_Clock; in ns
12643+
12644+ HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
12645+ = 9000 / dev_priv->DDR_Clock + 200;
12646+
12647+ HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
12648+ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
12649+ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
12650+
12651+\* ************************************************************************* */
12652+ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
12653+
12654+/* ************************************************************************* *\
12655+ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
12656+ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
12657+
12658+ LP_to_HS = 10 * UI_period + 5 * Tlpx
12659+ = 5000 / dev_priv->DDR_Clock + 250;
12660+
12661+ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
12662+ = (5000 / dev_priv->DDR_Clock + 250) / (4000 / dev_priv->DDR_Clock)
12663+ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
12664+
12665+\* ************************************************************************* */
12666+ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
12667+
12668+ if (HighToLowSwitchCount > LowToHighSwitchCount)
12669+ {
12670+ HighLowSwitchCount = HighToLowSwitchCount;
12671+ }
12672+ else
12673+ {
12674+ HighLowSwitchCount = LowToHighSwitchCount;
12675+ }
12676+
12678+ /* FIXME jliu need to fine tune the above formulae and remove the following after power on */
12679+ if (HighLowSwitchCount < 0x1f)
12680+ HighLowSwitchCount = 0x1f;
12681+
12682+ return HighLowSwitchCount;
12683+}
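/*
 * Worked example (plain arithmetic; the derivation above expresses
 * DDR_Clock in MHz): assuming a 200 MHz DDR clock,
 *	HighToLowSwitchCount = (9000 + 200 * 200) / 4000 + 1 = 13
 *	LowToHighSwitchCount = (5000 + 250 * 200) / 4000 + 1 = 14
 * so the worst case, 14, is returned; the power-on workaround above then
 * raises any result below 0x1f up to 0x1f.
 */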
12684+
12685+/* ************************************************************************* *\
12686+FUNCTION: mrst_gen_long_write
12687+
12688+DESCRIPTION: Sends a generic long write packet (header plus payload) over the DSI link through the generic data and control FIFOs.
12689+
12690+\* ************************************************************************* */
12691+static void mrst_gen_long_write(struct drm_device *dev, u32 *data, u16 wc,u8 vc)
12692+{
12693+ u32 gen_data_reg = HS_GEN_DATA_REG;
12694+ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
12695+ u32 data_full_bit = HS_DATA_FIFO_FULL;
12696+ u32 control_full_bit = HS_CTRL_FIFO_FULL;
12697+ u16 wc_saved = wc;
12698+
12699+#if PRINT_JLIU7
12700+ DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
12701+#endif /* PRINT_JLIU7 */
12702+
12703+ /* sanity check */
12704+ if (vc > 3)
12705+ {
12706+ DRM_ERROR("MIPI virtual channel cannot be greater than 3.\n");
12707+ return;
12708+ }
12709+
12711+ if (0) /* FIXME JLIU7 check if it is in LP*/
12712+ {
12713+ gen_data_reg = LP_GEN_DATA_REG;
12714+ gen_ctrl_reg = LP_GEN_CTRL_REG;
12715+ data_full_bit = LP_DATA_FIFO_FULL;
12716+ control_full_bit = LP_CTRL_FIFO_FULL;
12717+ }
12718+
12719+ while (wc >= 4)
12720+ {
12721+ /* Check if MIPI IP generic data fifo is not full */
12722+ while ((REG_READ(GEN_FIFO_STAT_REG) & data_full_bit) == data_full_bit);
12723+
12724+ /* write to data buffer */
12725+ REG_WRITE(gen_data_reg, *data);
12726+
12727+ wc -= 4;
12728+ data++;
12729+ }
12730+
12731+ switch (wc)
12732+ {
12733+ case 1:
12734+ REG_WRITE8(gen_data_reg, *((u8 *)data));
12735+ break;
12736+ case 2:
12737+ REG_WRITE16(gen_data_reg, *((u16 *)data));
12738+ break;
12739+ case 3:
12740+ REG_WRITE16(gen_data_reg, *((u16 *)data));
12741+ data = (u32*)((u8*) data + 2);
12742+ REG_WRITE8(gen_data_reg, *((u8 *)data));
12743+ break;
12744+ }
12745+
12746+ /* Check if MIPI IP generic control fifo is not full */
12747+ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit) == control_full_bit);
12748+ /* write to control buffer */
12749+ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6));
12750+}
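/*
 * A sketch of how the control word written above maps onto a DSI long
 * packet header, assuming the bit layout implied by the shifts in
 * mrst_gen_long_write (not spelled out elsewhere in this patch):
 *	bits 5:0   data type (0x29, generic long write)
 *	bits 7:6   virtual channel
 *	bits 23:8  word count (payload length in bytes)
 * For example, a 5-byte payload on virtual channel 0 gives
 *	0x29 | (5 << 8) | (0 << 6) = 0x0529.
 */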
12751+
12752+/* ************************************************************************* *\
12753+FUNCTION: mrst_init_HIMAX_MIPI_bridge
12754+
12755+DESCRIPTION: Initializes the Himax MIPI bridge, programming hard-coded input and output timings for an 800x480 test pattern.
12756+
12757+\* ************************************************************************* */
12758+static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev)
12759+{
12760+ u32 gen_data[2];
12761+ u16 wc = 0;
12762+ u8 vc = 0;
12763+ u32 gen_data_intel = 0x200105;
12764+
12765+#if PRINT_JLIU7
12766+ DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n");
12767+#endif /* PRINT_JLIU7 */
12768+
12769+ /* exit sleep mode */
12770+ wc = 0x5;
12771+ gen_data[0] = gen_data_intel | (0x11 << 24);
12772+ gen_data[1] = 0;
12773+ mrst_gen_long_write(dev, gen_data, wc, vc);
12774+
12775+ /* set_pixel_format */
12776+ gen_data[0] = gen_data_intel | (0x3A << 24);
12777+ gen_data[1] = 0x77;
12778+ mrst_gen_long_write(dev, gen_data, wc, vc);
12779+
12780+ /* Set resolution for (800X480) */
12781+ wc = 0x8;
12782+ gen_data[0] = gen_data_intel | (0x2A << 24);
12783+ gen_data[1] = 0x1F030000;
12784+ mrst_gen_long_write(dev, gen_data, wc, vc);
12785+ gen_data[0] = gen_data_intel | (0x2B << 24);
12786+ gen_data[1] = 0xDF010000;
12787+ mrst_gen_long_write(dev, gen_data, wc, vc);
12788+
12789+ /* System control */
12790+ wc = 0x6;
12791+ gen_data[0] = gen_data_intel | (0xEE << 24);
12792+ gen_data[1] = 0x10FA;
12793+ mrst_gen_long_write(dev, gen_data, wc, vc);
12794+
12795+ /* INPUT TIMING FOR TEST PATTERN(800X480) */
12796+ /* H-size */
12797+ gen_data[1] = 0x2000;
12798+ mrst_gen_long_write(dev, gen_data, wc, vc);
12799+ gen_data[1] = 0x0301;
12800+ mrst_gen_long_write(dev, gen_data, wc, vc);
12801+
12802+ /* V-size */
12803+ gen_data[1] = 0xE002;
12804+ mrst_gen_long_write(dev, gen_data, wc, vc);
12805+ gen_data[1] = 0x0103;
12806+ mrst_gen_long_write(dev, gen_data, wc, vc);
12807+
12808+ /* H-total */
12809+ gen_data[1] = 0x2004;
12810+ mrst_gen_long_write(dev, gen_data, wc, vc);
12811+ gen_data[1] = 0x0405;
12812+ mrst_gen_long_write(dev, gen_data, wc, vc);
12813+
12814+ /* V-total */
12815+ gen_data[1] = 0x0d06;
12816+ mrst_gen_long_write(dev, gen_data, wc, vc);
12817+ gen_data[1] = 0x0207;
12818+ mrst_gen_long_write(dev, gen_data, wc, vc);
12819+
12820+ /* H-blank */
12821+ gen_data[1] = 0x0308;
12822+ mrst_gen_long_write(dev, gen_data, wc, vc);
12823+ gen_data[1] = 0x0009;
12824+ mrst_gen_long_write(dev, gen_data, wc, vc);
12825+
12826+ /* V-blank */
12827+ gen_data[1] = 0x030A;
12828+ mrst_gen_long_write(dev, gen_data, wc, vc);
12829+ gen_data[1] = 0x000B;
12830+ mrst_gen_long_write(dev, gen_data, wc, vc);
12831+
12832+ /* H-start */
12833+ gen_data[1] = 0xD80C;
12834+ mrst_gen_long_write(dev, gen_data, wc, vc);
12835+ gen_data[1] = 0x000D;
12836+ mrst_gen_long_write(dev, gen_data, wc, vc);
12837+
12838+ /* V-start */
12839+ gen_data[1] = 0x230E;
12840+ mrst_gen_long_write(dev, gen_data, wc, vc);
12841+ gen_data[1] = 0x000F;
12842+ mrst_gen_long_write(dev, gen_data, wc, vc);
12843+
12844+ /* RGB domain */
12845+ gen_data[1] = 0x0027;
12846+ mrst_gen_long_write(dev, gen_data, wc, vc);
12847+
12848+ /* INP_FORM Setting */
12849+ /* set_1 */
12850+ gen_data[1] = 0x1C10;
12851+ mrst_gen_long_write(dev, gen_data, wc, vc);
12852+
12853+ /* set_2 */
12854+ gen_data[1] = 0x0711;
12855+ mrst_gen_long_write(dev, gen_data, wc, vc);
12856+
12857+ /* set_3 */
12858+ gen_data[1] = 0x0012;
12859+ mrst_gen_long_write(dev, gen_data, wc, vc);
12860+
12861+ /* set_4 */
12862+ gen_data[1] = 0x0013;
12863+ mrst_gen_long_write(dev, gen_data, wc, vc);
12864+
12865+ /* set_5 */
12866+ gen_data[1] = 0x2314;
12867+ mrst_gen_long_write(dev, gen_data, wc, vc);
12868+
12869+ /* set_6 */
12870+ gen_data[1] = 0x0015;
12871+ mrst_gen_long_write(dev, gen_data, wc, vc);
12872+
12873+ /* set_7 */
12874+ gen_data[1] = 0x2316;
12875+ mrst_gen_long_write(dev, gen_data, wc, vc);
12876+
12877+ /* set_8 */
12878+ gen_data[1] = 0x0017;
12879+ mrst_gen_long_write(dev, gen_data, wc, vc);
12880+
12881+ /* set_1 */
12882+ gen_data[1] = 0x0330;
12883+ mrst_gen_long_write(dev, gen_data, wc, vc);
12884+
12885+ /* FRC Setting */
12886+ /* FRC_set_2 */
12887+ gen_data[1] = 0x237A;
12888+ mrst_gen_long_write(dev, gen_data, wc, vc);
12889+
12890+ /* FRC_set_3 */
12891+ gen_data[1] = 0x4C7B;
12892+ mrst_gen_long_write(dev, gen_data, wc, vc);
12893+
12894+ /* FRC_set_4 */
12895+ gen_data[1] = 0x037C;
12896+ mrst_gen_long_write(dev, gen_data, wc, vc);
12897+
12898+ /* FRC_set_5 */
12899+ gen_data[1] = 0x3482;
12900+ mrst_gen_long_write(dev, gen_data, wc, vc);
12901+
12902+ /* FRC_set_7 */
12903+ gen_data[1] = 0x1785;
12904+ mrst_gen_long_write(dev, gen_data, wc, vc);
12905+
12906+#if 0
12907+ /* FRC_set_8 */
12908+ gen_data[1] = 0xD08F;
12909+ mrst_gen_long_write(dev, gen_data, wc, vc);
12910+#endif
12911+
12912+ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */
12913+ /* out_htotal */
12914+ gen_data[1] = 0x2090;
12915+ mrst_gen_long_write(dev, gen_data, wc, vc);
12916+ gen_data[1] = 0x0491;
12917+ mrst_gen_long_write(dev, gen_data, wc, vc);
12918+
12919+ /* out_hsync */
12920+ gen_data[1] = 0x0392;
12921+ mrst_gen_long_write(dev, gen_data, wc, vc);
12922+ gen_data[1] = 0x0093;
12923+ mrst_gen_long_write(dev, gen_data, wc, vc);
12924+
12925+ /* out_hstart */
12926+ gen_data[1] = 0xD894;
12927+ mrst_gen_long_write(dev, gen_data, wc, vc);
12928+ gen_data[1] = 0x0095;
12929+ mrst_gen_long_write(dev, gen_data, wc, vc);
12930+
12931+ /* out_hsize */
12932+ gen_data[1] = 0x2096;
12933+ mrst_gen_long_write(dev, gen_data, wc, vc);
12934+ gen_data[1] = 0x0397;
12935+ mrst_gen_long_write(dev, gen_data, wc, vc);
12936+
12937+ /* out_vtotal */
12938+ gen_data[1] = 0x0D98;
12939+ mrst_gen_long_write(dev, gen_data, wc, vc);
12940+ gen_data[1] = 0x0299;
12941+ mrst_gen_long_write(dev, gen_data, wc, vc);
12942+
12943+ /* out_vsync */
12944+ gen_data[1] = 0x039A;
12945+ mrst_gen_long_write(dev, gen_data, wc, vc);
12946+ gen_data[1] = 0x009B;
12947+ mrst_gen_long_write(dev, gen_data, wc, vc);
12948+
12949+ /* out_vstart */
12950+ gen_data[1] = 0x239C;
12951+ mrst_gen_long_write(dev, gen_data, wc, vc);
12952+ gen_data[1] = 0x009D;
12953+ mrst_gen_long_write(dev, gen_data, wc, vc);
12954+
12955+ /* out_vsize */
12956+ gen_data[1] = 0xE09E;
12957+ mrst_gen_long_write(dev, gen_data, wc, vc);
12958+ gen_data[1] = 0x019F;
12959+ mrst_gen_long_write(dev, gen_data, wc, vc);
12960+
12961+ /* FRC_set_6 */
12962+ gen_data[1] = 0x9084;
12963+ mrst_gen_long_write(dev, gen_data, wc, vc);
12964+
12965+ /* Other setting */
12966+ gen_data[1] = 0x0526;
12967+ mrst_gen_long_write(dev, gen_data, wc, vc);
12968+
12969+ /* RGB domain */
12970+ gen_data[1] = 0x1177;
12971+ mrst_gen_long_write(dev, gen_data, wc, vc);
12972+
12973+ /* rgbw */
12974+ /* set_1 */
12975+ gen_data[1] = 0xD28F;
12976+ mrst_gen_long_write(dev, gen_data, wc, vc);
12977+
12978+ /* set_2 */
12979+ gen_data[1] = 0x02D0;
12980+ mrst_gen_long_write(dev, gen_data, wc, vc);
12981+
12982+ /* set_3 */
12983+ gen_data[1] = 0x08D1;
12984+ mrst_gen_long_write(dev, gen_data, wc, vc);
12985+
12986+ /* set_4 */
12987+ gen_data[1] = 0x05D2;
12988+ mrst_gen_long_write(dev, gen_data, wc, vc);
12989+
12990+ /* set_5 */
12991+ gen_data[1] = 0x24D4;
12992+ mrst_gen_long_write(dev, gen_data, wc, vc);
12993+
12994+ /* set_6 */
12995+ gen_data[1] = 0x00D5;
12996+ mrst_gen_long_write(dev, gen_data, wc, vc);
12997+ gen_data[1] = 0x02D7;
12998+ mrst_gen_long_write(dev, gen_data, wc, vc);
12999+ gen_data[1] = 0x00D8;
13000+ mrst_gen_long_write(dev, gen_data, wc, vc);
13001+
13002+ gen_data[1] = 0x48F3;
13003+ mrst_gen_long_write(dev, gen_data, wc, vc);
13004+ gen_data[1] = 0xD4F2;
13005+ mrst_gen_long_write(dev, gen_data, wc, vc);
13006+ gen_data[1] = 0x3D8E;
13007+ mrst_gen_long_write(dev, gen_data, wc, vc);
13008+ gen_data[1] = 0x60FD;
13009+ mrst_gen_long_write(dev, gen_data, wc, vc);
13010+ gen_data[1] = 0x00B5;
13011+ mrst_gen_long_write(dev, gen_data, wc, vc);
13012+ gen_data[1] = 0x48F4;
13013+ mrst_gen_long_write(dev, gen_data, wc, vc);
13014+
13015+ /* inside pattern */
13016+ gen_data[1] = 0x0060;
13017+ mrst_gen_long_write(dev, gen_data, wc, vc);
13018+}
13019+
13020+/* ************************************************************************* *\
13021+FUNCTION: mrst_init_NSC_MIPI_bridge
13022+
13023+DESCRIPTION: Initializes the NSC MIPI bridge via low-power generic control writes.
13024+
13025+\* ************************************************************************* */
13026+static void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
13027+{
13028+
13029+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13030+#if PRINT_JLIU7
13031+ DRM_INFO("JLIU7 enter mrst_init_NSC_MIPI_bridge.\n");
13032+#endif /* PRINT_JLIU7 */
13033+ /* Program MIPI IP to 50MHz DSI, Non-Burst mode with sync event,
13034+ 1 or 2 Data Lanes */
13035+
13036+ udelay(DELAY_TIME1);
13037+ /* enable RGB24*/
13038+ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
13039+
13040+ udelay(DELAY_TIME1);
13041+ /* enable all error reporting*/
13042+ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
13043+ udelay(DELAY_TIME1);
13044+ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
13045+
13046+ udelay(DELAY_TIME1);
13047+ /* enable 2 data lane; video shaping & error reporting */
13048+ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
13049+
13050+ udelay(DELAY_TIME1);
13051+ /* HS timeout */
13052+ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
13053+
13054+ udelay(DELAY_TIME1);
13055+ /* settle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
13056+ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
13057+
13058+ /* enable all virtual channels */
13059+ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
13060+
13061+ /* set output strength to low-drive */
13062+ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
13063+
13064+ if (dev_priv->sku_83)
13065+ {
13066+ /* set escape clock to divide by 8 */
13067+ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
13068+ }
13069+ else if(dev_priv->sku_100L)
13070+ {
13071+ /* set escape clock to divide by 16 */
13072+ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
13073+ }
13074+ else if(dev_priv->sku_100)
13075+ {
13076+ /* set escape clock to divide by 32 */
13077+ REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);
13078+
13079+ /* settle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
13080+ REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);
13081+ }
13082+
13083+ /* CFG_VALID=1; RGB_CLK_EN=1. */
13084+ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
13085+
13086+}
13087+
13088+static void mrst_dsi_mode_set(struct drm_encoder *encoder,
13089+ struct drm_display_mode *mode,
13090+ struct drm_display_mode *adjusted_mode)
13091+{
13092+ struct drm_device *dev = encoder->dev;
13093+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13094+ u32 pfit_control;
13095+ u32 dsiFuncPrgValue = 0;
13096+ u32 SupportedFormat = 0;
13097+ u32 channelNumber = 0;
13098+ u32 DBI_dataWidth = 0;
13099+ u32 resolution = 0;
13100+ u32 mipiport = 0;
13101+
13102+#if PRINT_JLIU7
13103+ DRM_INFO("JLIU7 enter mrst_dsi_mode_set \n");
13104+#endif /* PRINT_JLIU7 */
13105+
13106+ switch (dev_priv->bpp)
13107+ {
13108+ case 16:
13109+ SupportedFormat = RGB_565_FMT;
13110+ break;
13111+ case 18:
13112+ SupportedFormat = RGB_666_FMT;
13113+ break;
13114+ case 24:
13115+ SupportedFormat = RGB_888_FMT;
13116+ break;
13117+ default:
13118+ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
13119+ break;
13120+ }
13121+
13122+ resolution = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS);
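	/*
	 * Worked example (assuming RES_V_POS is 16, which this sketch does
	 * not take from the patch): for the 800x480 panel used here,
	 * resolution = 800 | (480 << 16) = 0x01E00320.
	 */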
13123+
13124+ if (dev_priv->dpi)
13125+ {
13126+ /*
13127+ * Enable automatic panel scaling so that non-native modes fill the
13128+ * screen. Should be enabled before the pipe is enabled, according to
13129+ * register description and PRM.
13130+ */
13131+ /*FIXME JLIU7, enable Auto-scale only */
13136+#if 0 /*JLIU7_PO */
13137+ if (mode->hdisplay != adjusted_mode->hdisplay ||
13138+ mode->vdisplay != adjusted_mode->vdisplay)
13139+ {
13140+ pfit_control = PFIT_ENABLE;
13141+ }
13142+ else
13143+#endif /*JLIU7_PO */
13144+ {
13145+ pfit_control = 0;
13146+ }
13147+ REG_WRITE(PFIT_CONTROL, pfit_control);
13148+
13149+ /* Enable MIPI Port */
13150+ mipiport = MIPI_PORT_EN;
13151+ REG_WRITE(MIPI, mipiport);
13152+
13153+ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */
13154+ REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
13155+
13156+ /* Enable all the error interrupts */
13157+ REG_WRITE(INTR_EN_REG, 0xffffffff);
13158+ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000F);
13159+ REG_WRITE(DEVICE_RESET_REG, 0x000000ff); /* old value = 0x00000015 may depend on the DSI RX device */
13160+ REG_WRITE(INIT_COUNT_REG, 0x00000fff); /* Minimum value = 0x000007d0 */
13161+
13162+ SupportedFormat <<= FMT_DPI_POS;
13163+ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
13164+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
13165+
13166+ REG_WRITE(DPI_RESOLUTION_REG, resolution);
13167+ REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);
13168+
13169+ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth);
13170+ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch);
13171+ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch);
13172+
13173+#if 1 /*JLIU7_PO hard coded for NSC PO */
13174+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, 0x1e);
13175+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, 0x18);
13176+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, 0x8);
13177+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, 0x4b0);
13178+#else /*JLIU7_PO hard coded for NSC PO */
13179+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, GetHSA_Count(dev_priv));
13180+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, GetHBP_Count(dev_priv));
13181+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, GetHFP_Count(dev_priv));
13182+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, GetHAdr_Count(dev_priv));
13183+#endif /*JLIU7_PO hard coded for NSC PO */
13184+ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
13185+ }
13186+ else
13187+ {
13188+ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or VIRTUAL_CHANNEL_NUMBER_0*/
13189+ channelNumber = VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS;
13190+ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS;
13191+ dsiFuncPrgValue = dev_priv->laneCount | channelNumber | DBI_dataWidth;
13192+ /* JLIU7 FIXME */
13193+ SupportedFormat <<= FMT_DBI_POS;
13194+ dsiFuncPrgValue |= SupportedFormat;
13195+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
13196+
13197+ REG_WRITE(DPI_RESOLUTION_REG, 0x00000000);
13198+ REG_WRITE(DBI_RESOLUTION_REG, resolution);
13199+ }
13200+
13201+#if 1 /*JLIU7_PO hard code for NSC PO */
13202+ REG_WRITE(HS_TX_TIMEOUT_REG, 0xffff);
13203+ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
13204+
13205+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
13206+#else /*JLIU7_PO hard code for NSC PO */
13207+ REG_WRITE(HS_TX_TIMEOUT_REG, GetHS_TX_timeoutCount(dev_priv));
13208+ REG_WRITE(LP_RX_TIMEOUT_REG, GetLP_RX_timeoutCount(dev_priv));
13209+
13210+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, GetHighLowSwitchCount(dev_priv));
13211+#endif /*JLIU7_PO hard code for NSC PO */
13212+
13214+ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
13215+
13216+ /* FIXME JLIU7 for NSC PO */
13217+ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
13218+
13219+ REG_WRITE(DEVICE_READY_REG, 0x00000001);
13220+ REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */
13221+
13222+ dev_priv->dsi_device_ready = true;
13223+
13224+#if 0 /*JLIU7_PO */
13225+ mrst_init_HIMAX_MIPI_bridge(dev);
13226+#endif /*JLIU7_PO */
13227+ mrst_init_NSC_MIPI_bridge(dev);
13228+
13229+ if (dev_priv->sku_100L)
13230+ /* Set DSI link to 100MHz; 2:1 clock ratio */
13231+ REG_WRITE(MIPI_CONTROL_REG, 0x00000009);
13232+
13233+ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
13234+ REG_READ(PIPEACONF);
13235+
13236+ /* Wait for 20ms for the pipe enable to take effect. */
13237+ mdelay(20);
13238+
13239+ /* JLIU7_PO hard code for NSC PO Program the display FIFO watermarks */
13240+ REG_WRITE(DSPARB, 0x00001d9c);
13241+ REG_WRITE(DSPFW1, 0xfc0f0f18);
13242+ REG_WRITE(DSPFW5, 0x04140404);
13243+ REG_WRITE(DSPFW6, 0x000001f0);
13244+
13245+ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
13246+
13247+ /* Wait for 20ms for the plane enable to take effect. */
13248+ mdelay(20);
13249+}
13250+
13251+/**
13252+ * Detect the MIPI connection.
13253+ *
13254+ * This always returns CONNECTOR_STATUS_CONNECTED.
13255+ * This connector should only have
13256+ * been set up if the MIPI was actually connected anyway.
13257+ */
13258+static enum drm_connector_status mrst_dsi_detect(struct drm_connector
13259+ *connector)
13260+{
13261+#if PRINT_JLIU7
13262+ DRM_INFO("JLIU7 enter mrst_dsi_detect \n");
13263+#endif /* PRINT_JLIU7 */
13264+
13265+ return connector_status_connected;
13266+}
13267+
13268+/**
13269+ * Return the list of MIPI DDB modes if available.
13270+ */
13271+static int mrst_dsi_get_modes(struct drm_connector *connector)
13272+{
13273+ struct drm_device *dev = connector->dev;
13274+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
13275+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
13276+
13277+/* FIXME get the MIPI DDB modes */
13278+
13279+ /* Didn't get a DDB, so
13280+ * Set wide sync ranges so we get all modes
13281+ * handed to valid_mode for checking
13282+ */
13283+ connector->display_info.min_vfreq = 0;
13284+ connector->display_info.max_vfreq = 200;
13285+ connector->display_info.min_hfreq = 0;
13286+ connector->display_info.max_hfreq = 200;
13287+
13288+ if (mode_dev->panel_fixed_mode != NULL) {
13289+ struct drm_display_mode *mode =
13290+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
13291+ drm_mode_probed_add(connector, mode);
13292+ return 1;
13293+ }
13294+
13295+ return 0;
13296+}
13297+
13298+static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
13299+ .dpms = mrst_dsi_dpms,
13300+ .mode_fixup = psb_intel_lvds_mode_fixup,
13301+ .prepare = mrst_dsi_prepare,
13302+ .mode_set = mrst_dsi_mode_set,
13303+ .commit = mrst_dsi_commit,
13304+};
13305+
13306+static const struct drm_connector_helper_funcs
13307+ mrst_dsi_connector_helper_funcs = {
13308+ .get_modes = mrst_dsi_get_modes,
13309+ .mode_valid = psb_intel_lvds_mode_valid,
13310+ .best_encoder = psb_intel_best_encoder,
13311+};
13312+
13313+static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
13314+ .save = mrst_dsi_save,
13315+ .restore = mrst_dsi_restore,
13316+ .detect = mrst_dsi_detect,
13317+ .fill_modes = drm_helper_probe_single_connector_modes,
13318+ .destroy = psb_intel_lvds_destroy,
13319+};
13320+
13321+/** Returns the panel fixed mode from configuration. */
13322+/** FIXME JLIU7 need to revisit it. */
13323+struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
13324+{
13325+ struct drm_display_mode *mode;
13326+
13327+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
13328+ if (!mode)
13329+ return NULL;
13330+
13331+#if 1 /*FIXME jliu7 remove it later */
13332+ /* copy from SV - hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
13333+ mode->hdisplay = 800;
13334+ mode->vdisplay = 480;
13335+ mode->hsync_start = 808;
13336+ mode->hsync_end = 848;
13337+ mode->htotal = 880;
13338+ mode->vsync_start = 482;
13339+ mode->vsync_end = 483;
13340+ mode->vtotal = 486;
13341+ mode->clock = 33264;
13342+#endif /*FIXME jliu7 remove it later */
13343+
13344+#if 0 /*FIXME jliu7 remove it later */
13345+ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
13346+ mode->hdisplay = 800;
13347+ mode->vdisplay = 480;
13348+ mode->hsync_start = 836;
13349+ mode->hsync_end = 846;
13350+ mode->htotal = 1056;
13351+ mode->vsync_start = 489;
13352+ mode->vsync_end = 491;
13353+ mode->vtotal = 525;
13354+ mode->clock = 33264;
13355+#endif /*FIXME jliu7 remove it later */
13356+
13357+#if 0 /*FIXME jliu7 remove it later */
13358+ /* hard coded fixed mode for LVDS 800x480 */
13359+ mode->hdisplay = 800;
13360+ mode->vdisplay = 480;
13361+ mode->hsync_start = 801;
13362+ mode->hsync_end = 802;
13363+ mode->htotal = 1024;
13364+ mode->vsync_start = 481;
13365+ mode->vsync_end = 482;
13366+ mode->vtotal = 525;
13367+ mode->clock = 30994;
13368+#endif /*FIXME jliu7 remove it later */
13369+
13370+#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
13371+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
13372+ mode->hdisplay = 1024;
13373+ mode->vdisplay = 600;
13374+ mode->hsync_start = 1072;
13375+ mode->hsync_end = 1104;
13376+ mode->htotal = 1184;
13377+ mode->vsync_start = 603;
13378+ mode->vsync_end = 604;
13379+ mode->vtotal = 608;
13380+ mode->clock = 53990;
13381+#endif /*FIXME jliu7 remove it later */
13382+
13383+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
13384+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
13385+ mode->hdisplay = 1024;
13386+ mode->vdisplay = 600;
13387+ mode->hsync_start = 1104;
13388+ mode->hsync_end = 1136;
13389+ mode->htotal = 1184;
13390+ mode->vsync_start = 603;
13391+ mode->vsync_end = 604;
13392+ mode->vtotal = 608;
13393+ mode->clock = 53990;
13394+#endif /*FIXME jliu7 remove it later */
13395+
13396+#if 0 /*FIXME jliu7 remove it later */
13397+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
13398+ mode->hdisplay = 1024;
13399+ mode->vdisplay = 600;
13400+ mode->hsync_start = 1124;
13401+ mode->hsync_end = 1204;
13402+ mode->htotal = 1312;
13403+ mode->vsync_start = 607;
13404+ mode->vsync_end = 610;
13405+ mode->vtotal = 621;
13406+ mode->clock = 48885;
13407+#endif /*FIXME jliu7 remove it later */
13408+
13409+#if 0 /*FIXME jliu7 remove it later */
13410+ /* hard coded fixed mode for LVDS 1024x768 */
13411+ mode->hdisplay = 1024;
13412+ mode->vdisplay = 768;
13413+ mode->hsync_start = 1048;
13414+ mode->hsync_end = 1184;
13415+ mode->htotal = 1344;
13416+ mode->vsync_start = 771;
13417+ mode->vsync_end = 777;
13418+ mode->vtotal = 806;
13419+ mode->clock = 65000;
13420+#endif /*FIXME jliu7 remove it later */
13421+
13422+#if 0 /*FIXME jliu7 remove it later */
13423+ /* hard coded fixed mode for LVDS 1366x768 */
13424+ mode->hdisplay = 1366;
13425+ mode->vdisplay = 768;
13426+ mode->hsync_start = 1430;
13427+ mode->hsync_end = 1558;
13428+ mode->htotal = 1664;
13429+ mode->vsync_start = 769;
13430+ mode->vsync_end = 770;
13431+ mode->vtotal = 776;
13432+ mode->clock = 77500;
13433+#endif /*FIXME jliu7 remove it later */
13434+
13435+ drm_mode_set_name(mode);
13436+ drm_mode_set_crtcinfo(mode, 0);
13437+
13438+ return mode;
13439+}
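/*
 * A quick arithmetic check on the hard-coded timings above:
 * refresh rate = clock * 1000 / (htotal * vtotal), so the enabled PO mode
 * gives 33264000 / (880 * 486) ~= 78 Hz, while the disabled TPO variant
 * gives 33264000 / (1056 * 525) ~= 60 Hz.
 */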
13440+
13441+/* ************************************************************************* *\
13442+FUNCTION: mrstDSI_clockInit
13443+
13444+DESCRIPTION: Derives the refresh rate and DDR clock from the panel timings and selects the closest supported MIPI 2x clock divider.
13445+
13446+\* ************************************************************************* */
13447+static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
13448+static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
13449+static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
13450+#define MIPI_2XCLK_COUNT 0x04
13451+
13452+static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv)
13453+{
13454+ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
13455+ u32 i = 0;
13456+ u32 *p_mipi_2xclk = NULL;
13457+
13458+ (void)GetHS_TX_timeoutCount;
13459+ (void)GetLP_RX_timeoutCount;
13460+ (void)GetHSA_Count;
13461+ (void)GetHBP_Count;
13462+ (void)GetHFP_Count;
13463+ (void)GetHAdr_Count;
13464+ (void)GetHighLowSwitchCount;
13465+ (void)mrst_init_HIMAX_MIPI_bridge;
13466+
13467+#if 0 /* JLIU7_PO old values */
13468+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
13469+ dev_priv->pixelClock = 33264; /*KHz*/
13470+ dev_priv->HsyncWidth = 10;
13471+ dev_priv->HbackPorch = 210;
13472+ dev_priv->HfrontPorch = 36;
13473+ dev_priv->HactiveArea = 800;
13474+ dev_priv->VsyncWidth = 2;
13475+ dev_priv->VbackPorch = 34;
13476+ dev_priv->VfrontPorch = 9;
13477+ dev_priv->VactiveArea = 480;
13478+ dev_priv->bpp = 24;
13479+
13480+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
13481+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
13482+ dev_priv->dbi_HsyncWidth = 10;
13483+ dev_priv->dbi_HbackPorch = 210;
13484+ dev_priv->dbi_HfrontPorch = 36;
13485+ dev_priv->dbi_HactiveArea = 800;
13486+ dev_priv->dbi_VsyncWidth = 2;
13487+ dev_priv->dbi_VbackPorch = 34;
13488+ dev_priv->dbi_VfrontPorch = 9;
13489+ dev_priv->dbi_VactiveArea = 480;
13490+ dev_priv->dbi_bpp = 24;
13491+#else /* JLIU7_PO old values */
13492+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
13493+ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */
13494+ dev_priv->pixelClock = 33264; /*KHz*/
13495+ dev_priv->HsyncWidth = 10;
13496+ dev_priv->HbackPorch = 8;
13497+ dev_priv->HfrontPorch = 3;
13498+ dev_priv->HactiveArea = 800;
13499+ dev_priv->VsyncWidth = 2;
13500+ dev_priv->VbackPorch = 3;
13501+ dev_priv->VfrontPorch = 2;
13502+ dev_priv->VactiveArea = 480;
13503+ dev_priv->bpp = 24;
13504+
13505+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
13506+ dev_priv->dbi_pixelClock = 33264; /*KHz*/
13507+ dev_priv->dbi_HsyncWidth = 10;
13508+ dev_priv->dbi_HbackPorch = 8;
13509+ dev_priv->dbi_HfrontPorch = 3;
13510+ dev_priv->dbi_HactiveArea = 800;
13511+ dev_priv->dbi_VsyncWidth = 2;
13512+ dev_priv->dbi_VbackPorch = 3;
13513+ dev_priv->dbi_VfrontPorch = 2;
13514+ dev_priv->dbi_VactiveArea = 480;
13515+ dev_priv->dbi_bpp = 24;
13516+#endif /* JLIU7_PO old values */
13517+
13518+ Htotal = dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch + dev_priv->HactiveArea;
13519+ Vtotal = dev_priv->VsyncWidth + dev_priv->VbackPorch + dev_priv->VfrontPorch + dev_priv->VactiveArea;
13520+
13521+ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
13522+
13523+ dev_priv->RRate = RRate;
13524+
13525+ /* DDR clock frequency = (pixel clock frequency * bits per pixel) / (2 * lane count) */
13526+ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) / dev_priv->laneCount; /* KHz */
13527+ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
13528+
13529+ DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
13530+
13531+ if (dev_priv->sku_100)
13532+ {
13533+ p_mipi_2xclk = sku_100_mipi_2xclk;
13534+ }
13535+ else if (dev_priv->sku_100L)
13536+ {
13537+ p_mipi_2xclk = sku_100L_mipi_2xclk;
13538+ }
13539+ else
13540+ {
13541+ p_mipi_2xclk = sku_83_mipi_2xclk;
13542+ }
13543+
13544+ for (i = 0; i < MIPI_2XCLK_COUNT; i++)
13545+ {
13546+ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
13547+ break;
13548+ }
13549+
13550+ if (i == MIPI_2XCLK_COUNT)
13551+ {
13552+ DRM_DEBUG("mrstDSI_clockInit the DDR clock is too big, DDR_Clock_Calculated is = %d\n", dev_priv->DDR_Clock_Calculated);
13553+ return false;
13554+ }
13555+
13556+ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
13557+ dev_priv->ClockBits = i;
13558+
13559+#if 0 /*JLIU7_PO */
13560+#if 0 /* FIXME remove it after power on*/
13561+ mipiControlReg = REG_READ(MIPI_CONTROL_REG) & (~MIPI_2X_CLOCK_BITS);
13562+ mipiControlReg |= i;
13563+ REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
13564+#else /* FIXME remove it after power on*/
13565+ mipiControlReg |= i;
13566+ REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
13567+#endif /* FIXME remove it after power on*/
13568+#endif /*JLIU7_PO */
13569+
13570+#if 1 /* FIXME remove it after power on*/
13571+ DRM_DEBUG("mrstDSI_clockInit, mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated is = %d\n", i, dev_priv->DDR_Clock_Calculated);
13572+#endif /* FIXME remove it after power on*/
13573+
13574+ return true;
13575+}
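/*
 * Worked example (using the hard-coded panel values above):
 * pixelClock = 33264 KHz, bpp = 24, laneCount = 2 gives
 *	mipi_2xclk = 33264 * 24 / 2 = 399168 KHz
 *	DDR_Clock_Calculated = 199584 KHz
 * On a sku_83 part, the first table entry above 2 * 199584 = 399168 is
 * 444444, so ClockBits ends up 2 and DDR_Clock = 222222 KHz.
 */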
13576+
13577+/**
13578+ * mrst_dsi_init - setup MIPI connectors on this device
13579+ * @dev: drm device
13580+ *
13581+ * Create the connector, try to figure out what
13582+ * modes we can display on the MIPI panel (if present).
13583+ */
13584+void mrst_dsi_init(struct drm_device *dev,
13585+ struct psb_intel_mode_device *mode_dev)
13586+{
13587+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13588+ struct psb_intel_output *psb_intel_output;
13589+ struct drm_connector *connector;
13590+ struct drm_encoder *encoder;
13591+
13592+#if PRINT_JLIU7
13593+ DRM_INFO("JLIU7 enter mrst_dsi_init \n");
13594+#endif /* PRINT_JLIU7 */
13595+
13596+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
13597+ if (!psb_intel_output)
13598+ return;
13599+
13600+ psb_intel_output->mode_dev = mode_dev;
13601+ connector = &psb_intel_output->base;
13602+ encoder = &psb_intel_output->enc;
13603+ drm_connector_init(dev, &psb_intel_output->base,
13604+ &mrst_dsi_connector_funcs,
13605+ DRM_MODE_CONNECTOR_MIPI);
13606+
13607+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
13608+ DRM_MODE_ENCODER_MIPI);
13609+
13610+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
13611+ &psb_intel_output->enc);
13612+ psb_intel_output->type = INTEL_OUTPUT_MIPI;
13613+
13614+ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
13615+ drm_connector_helper_add(connector,
13616+ &mrst_dsi_connector_helper_funcs);
13617+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
13618+ connector->interlace_allowed = false;
13619+ connector->doublescan_allowed = false;
13620+
13621+ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
13622+ blc_pol = BLC_POLARITY_INVERSE;
13623+ blc_freq = 0xc8;
13624+
13625+ /*
13626+ * MIPI discovery:
13627+ * 1) check for DDB data
13628+ * 2) check for VBT data
13629+ * 3) make sure lid is open
13630+ * if closed, act like it's not there for now
13631+ */
13632+
13633+ /* FIXME jliu7 we only support DPI */
13634+ dev_priv->dpi = true;
13635+
13636+ /* FIXME hard coded 4 lanes for Himax HX8858-A, 2 lanes for NSC LM2550 */
13637+ dev_priv->laneCount = 2;
13638+
13639+ /* FIXME hard coded for NSC PO. */
13640+ /* We only support BURST_MODE */
13641+ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS; /* BURST_MODE */
13642+ /* FIXME change it to true if GET_DDB works */
13643+ dev_priv->config_phase = false;
13644+
13645+ if (!mrstDSI_clockInit(dev_priv))
13646+ {
13647+ DRM_DEBUG("Can't iniitialize MRST DSI clock.\n");
13648+#if 0 /* FIXME JLIU7 */
13649+ goto failed_find;
13650+#endif /* FIXME JLIU7 */
13651+ }
13652+
13653+ /*
13654+ * If we didn't get DDB data, try getting panel timing
13655+ * from configuration data
13656+ */
13657+ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
13658+
13659+ if (mode_dev->panel_fixed_mode) {
13660+ mode_dev->panel_fixed_mode->type |=
13661+ DRM_MODE_TYPE_PREFERRED;
13662+ goto out; /* FIXME: check for quirks */
13663+ }
13664+
13665+ /* If we still don't have a mode after all that, give up. */
13666+ if (!mode_dev->panel_fixed_mode) {
13667+ DRM_DEBUG
13668+ ("Found no modes on the lvds, ignoring the LVDS\n");
13669+ goto failed_find;
13670+ }
13671+
13672+out:
13673+ drm_sysfs_connector_add(connector);
13674+ return;
13675+
13676+failed_find:
13677+ DRM_DEBUG("No MIIP modes found, disabling.\n");
13678+ drm_encoder_cleanup(encoder);
13679+ drm_connector_cleanup(connector);
13680+ kfree(connector);
13681+}
13682diff -uNr a/drivers/gpu/drm/psb/psb_intel_i2c.c b/drivers/gpu/drm/psb/psb_intel_i2c.c
13683--- a/drivers/gpu/drm/psb/psb_intel_i2c.c 1969-12-31 16:00:00.000000000 -0800
13684+++ b/drivers/gpu/drm/psb/psb_intel_i2c.c 2009-04-07 13:28:38.000000000 -0700
13685@@ -0,0 +1,179 @@
13686+/*
13687+ * Copyright © 2006-2007 Intel Corporation
13688+ *
13689+ * Permission is hereby granted, free of charge, to any person obtaining a
13690+ * copy of this software and associated documentation files (the "Software"),
13691+ * to deal in the Software without restriction, including without limitation
13692+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13693+ * and/or sell copies of the Software, and to permit persons to whom the
13694+ * Software is furnished to do so, subject to the following conditions:
13695+ *
13696+ * The above copyright notice and this permission notice (including the next
13697+ * paragraph) shall be included in all copies or substantial portions of the
13698+ * Software.
13699+ *
13700+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13701+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13702+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13703+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13704+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
13705+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
13706+ * DEALINGS IN THE SOFTWARE.
13707+ *
13708+ * Authors:
13709+ * Eric Anholt <eric@anholt.net>
13710+ */
13711+/*
13712+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
13713+ * Jesse Barnes <jesse.barnes@intel.com>
13714+ */
13715+
13716+#include <linux/i2c.h>
13717+#include <linux/i2c-id.h>
13718+#include <linux/i2c-algo-bit.h>
13719+
13720+/*
13721+ * Intel GPIO access functions
13722+ */
13723+
13724+#define I2C_RISEFALL_TIME 20
13725+
13726+static int get_clock(void *data)
13727+{
13728+ struct psb_intel_i2c_chan *chan = data;
13729+ struct drm_device *dev = chan->drm_dev;
13730+ u32 val;
13731+
13732+ val = REG_READ(chan->reg);
13733+ return (val & GPIO_CLOCK_VAL_IN) != 0;
13734+}
13735+
13736+static int get_data(void *data)
13737+{
13738+ struct psb_intel_i2c_chan *chan = data;
13739+ struct drm_device *dev = chan->drm_dev;
13740+ u32 val;
13741+
13742+ val = REG_READ(chan->reg);
13743+ return (val & GPIO_DATA_VAL_IN) != 0;
13744+}
13745+
13746+static void set_clock(void *data, int state_high)
13747+{
13748+ struct psb_intel_i2c_chan *chan = data;
13749+ struct drm_device *dev = chan->drm_dev;
13750+ u32 reserved = 0, clock_bits;
13751+
13752+ /* On most chips, these bits must be preserved in software. */
13753+ if (!IS_I830(dev) && !IS_845G(dev))
13754+ reserved =
13755+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
13756+ GPIO_CLOCK_PULLUP_DISABLE);
13757+
13758+ if (state_high)
13759+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
13760+ else
13761+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
13762+ GPIO_CLOCK_VAL_MASK;
13763+ REG_WRITE(chan->reg, reserved | clock_bits);
13764+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
13765+}
13766+
13767+static void set_data(void *data, int state_high)
13768+{
13769+ struct psb_intel_i2c_chan *chan = data;
13770+ struct drm_device *dev = chan->drm_dev;
13771+ u32 reserved = 0, data_bits;
13772+
13773+ /* On most chips, these bits must be preserved in software. */
13774+ if (!IS_I830(dev) && !IS_845G(dev))
13775+ reserved =
13776+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
13777+ GPIO_CLOCK_PULLUP_DISABLE);
13778+
13779+ if (state_high)
13780+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
13781+ else
13782+ data_bits =
13783+ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
13784+ GPIO_DATA_VAL_MASK;
13785+
13786+ REG_WRITE(chan->reg, reserved | data_bits);
13787+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
13788+}
13789+
13790+/**
13791+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
13792+ * @dev: DRM device
13793+ * @output: driver specific output device
13794+ * @reg: GPIO reg to use
13795+ * @name: name for this bus
13796+ *
13797+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
13798+ * in output probing and control (e.g. DDC or SDVO control functions).
13799+ *
13800+ * Possible values for @reg include:
13801+ * %GPIOA
13802+ * %GPIOB
13803+ * %GPIOC
13804+ * %GPIOD
13805+ * %GPIOE
13806+ * %GPIOF
13807+ * %GPIOG
13808+ * %GPIOH
13809+ * see PRM for details on how these different busses are used.
13810+ */
13811+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
13812+ const u32 reg, const char *name)
13813+{
13814+ struct psb_intel_i2c_chan *chan;
13815+
13816+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
13817+ if (!chan)
13818+ goto out_free;
13819+
13820+ chan->drm_dev = dev;
13821+ chan->reg = reg;
13822+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
13823+ chan->adapter.owner = THIS_MODULE;
13824+ chan->adapter.algo_data = &chan->algo;
13825+ chan->adapter.dev.parent = &dev->pdev->dev;
13826+ chan->algo.setsda = set_data;
13827+ chan->algo.setscl = set_clock;
13828+ chan->algo.getsda = get_data;
13829+ chan->algo.getscl = get_clock;
13830+ chan->algo.udelay = 20;
13831+ chan->algo.timeout = usecs_to_jiffies(2200);
13832+ chan->algo.data = chan;
13833+
13834+ i2c_set_adapdata(&chan->adapter, chan);
13835+
13836+ if (i2c_bit_add_bus(&chan->adapter))
13837+ goto out_free;
13838+
13839+ /* JJJ: raise SCL and SDA? */
13840+ set_data(chan, 1);
13841+ set_clock(chan, 1);
13842+ udelay(20);
13843+
13844+ return chan;
13845+
13846+out_free:
13847+ kfree(chan);
13848+ return NULL;
13849+}
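/*
 * A usage sketch (hypothetical call site, not taken from this patch):
 *
 *	chan = psb_intel_i2c_create(dev, GPIOA, "LVDSDDC_A");
 *	if (!chan)
 *		return;
 *	...probe DDC/EDID over &chan->adapter...
 *	psb_intel_i2c_destroy(chan);
 */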
13850+
13851+/**
13852+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
13853+ * @output: channel to free
13854+ *
13855+ * Unregister the adapter from the i2c layer, then free the structure.
13856+ */
13857+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
13858+{
13859+ if (!chan)
13860+ return;
13861+
13862+ i2c_del_adapter(&chan->adapter);
13863+ kfree(chan);
13864+}
13865diff -uNr a/drivers/gpu/drm/psb/psb_intel_lvds.c b/drivers/gpu/drm/psb/psb_intel_lvds.c
13866--- a/drivers/gpu/drm/psb/psb_intel_lvds.c 1969-12-31 16:00:00.000000000 -0800
13867+++ b/drivers/gpu/drm/psb/psb_intel_lvds.c 2009-04-07 13:28:38.000000000 -0700
13868@@ -0,0 +1,1015 @@
13869+/*
13870+ * Copyright © 2006-2007 Intel Corporation
13871+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
13872+ *
13873+ * Permission is hereby granted, free of charge, to any person obtaining a
13874+ * copy of this software and associated documentation files (the "Software"),
13875+ * to deal in the Software without restriction, including without limitation
13876+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13877+ * and/or sell copies of the Software, and to permit persons to whom the
13878+ * Software is furnished to do so, subject to the following conditions:
13879+ *
13880+ * The above copyright notice and this permission notice (including the next
13881+ * paragraph) shall be included in all copies or substantial portions of the
13882+ * Software.
13883+ *
13884+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13885+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13886+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13887+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13888+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
13889+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
13890+ * DEALINGS IN THE SOFTWARE.
13891+ *
13892+ * Authors:
13893+ * Eric Anholt <eric@anholt.net>
13894+ * Dave Airlie <airlied@linux.ie>
13895+ * Jesse Barnes <jesse.barnes@intel.com>
13896+ */
13897+
13898+#include <linux/i2c.h>
13899+#include <drm/drm_crtc.h>
13900+#include <drm/drm_edid.h>
13901+/* MRST defines start */
13902+uint8_t blc_type;
13903+uint8_t blc_pol;
13904+uint8_t blc_freq;
13905+uint8_t blc_minbrightness;
13906+uint8_t blc_i2caddr;
13907+uint8_t blc_brightnesscmd;
13908+int lvds_backlight; /* restore backlight to this value */
13909+
13910+u32 CoreClock;
13911+u32 PWMControlRegFreq;
13912+/* MRST defines end */
13913+
13914+/**
13915+ * Sets the backlight level.
13916+ *
13917+ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
13918+ */
13919+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
13920+{
13921+ u32 blc_pwm_ctl;
13922+
13923+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
13924+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
13925+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
13926+}
13927+
13928+/**
13929+ * Returns the maximum level of the backlight duty cycle field.
13930+ */
13931+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
13932+{
13933+ return ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
13934+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
13935+}
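/*
 * A sketch of how the two helpers above combine: the duty-cycle field is
 * programmed relative to the modulation-frequency field, so (for example)
 * half brightness would be
 *
 *	psb_intel_lvds_set_backlight(dev,
 *			psb_intel_lvds_get_max_backlight(dev) / 2);
 */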
13936+
13937+/**
13938+ * Sets the power state for the panel.
13939+ */
13940+static void psb_intel_lvds_set_power(struct drm_device *dev,
13941+ struct psb_intel_output *output, bool on)
13942+{
13943+ u32 pp_status;
13944+
13945+ if (on) {
13946+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
13947+ POWER_TARGET_ON);
13948+ do {
13949+ pp_status = REG_READ(PP_STATUS);
13950+ } while ((pp_status & PP_ON) == 0);
13951+
13952+ psb_intel_lvds_set_backlight(dev,
13953+ output->
13954+ mode_dev->backlight_duty_cycle);
13955+ } else {
13956+ psb_intel_lvds_set_backlight(dev, 0);
13957+
13958+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
13959+ ~POWER_TARGET_ON);
13960+ do {
13961+ pp_status = REG_READ(PP_STATUS);
13962+ } while (pp_status & PP_ON);
13963+ }
13964+}
13965+
13966+static void psb_intel_lvds_dpms(struct drm_encoder *encoder, int mode)
13967+{
13968+ struct drm_device *dev = encoder->dev;
13969+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
13970+
13971+ if (mode == DRM_MODE_DPMS_ON)
13972+ psb_intel_lvds_set_power(dev, output, true);
13973+ else
13974+ psb_intel_lvds_set_power(dev, output, false);
13975+
13976+ /* XXX: We never power down the LVDS pairs. */
13977+}
13978+
13979+static void psb_intel_lvds_save(struct drm_connector *connector)
13980+{
13981+#if 0 /* JB: Disable for drop */
13982+ struct drm_device *dev = connector->dev;
13983+
13984+ dev_priv->savePP_ON = REG_READ(PP_ON_DELAYS);
13985+ dev_priv->savePP_OFF = REG_READ(PP_OFF_DELAYS);
13986+ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
13987+ dev_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);
13988+ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
13989+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
13990+ BACKLIGHT_DUTY_CYCLE_MASK);
13991+
13992+ /*
13993+ * If the light is off at server startup, just make it full brightness
13994+ */
13995+ if (dev_priv->backlight_duty_cycle == 0)
13996+ dev_priv->backlight_duty_cycle =
13997+ psb_intel_lvds_get_max_backlight(dev);
13998+#endif
13999+}
14000+
14001+static void psb_intel_lvds_restore(struct drm_connector *connector)
14002+{
14003+#if 0 /* JB: Disable for drop */
14004+ struct drm_device *dev = connector->dev;
14005+
14006+ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
14007+ REG_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
14008+ REG_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
14009+ REG_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
14010+ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
14011+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
14012+ psb_intel_lvds_set_power(dev, true);
14013+ else
14014+ psb_intel_lvds_set_power(dev, false);
14015+#endif
14016+}
14017+
14018+static int psb_intel_lvds_mode_valid(struct drm_connector *connector,
14019+ struct drm_display_mode *mode)
14020+{
14021+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
14022+ struct drm_display_mode *fixed_mode =
14023+ psb_intel_output->mode_dev->panel_fixed_mode;
14024+
14025+#if PRINT_JLIU7
14026+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n");
14027+#endif /* PRINT_JLIU7 */
14028+
14029+ if (fixed_mode) {
14030+ if (mode->hdisplay > fixed_mode->hdisplay)
14031+ return MODE_PANEL;
14032+ if (mode->vdisplay > fixed_mode->vdisplay)
14033+ return MODE_PANEL;
14034+ }
14035+ return MODE_OK;
14036+}
14037+
14038+static bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
14039+ struct drm_display_mode *mode,
14040+ struct drm_display_mode *adjusted_mode)
14041+{
14042+ struct psb_intel_mode_device *mode_dev =
14043+ enc_to_psb_intel_output(encoder)->mode_dev;
14044+ struct drm_device *dev = encoder->dev;
14045+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
14046+ struct drm_encoder *tmp_encoder;
14047+
14048+#if PRINT_JLIU7
14049+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n");
14050+#endif /* PRINT_JLIU7 */
14051+
14052+ /* Should never happen!! */
14053+ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
14054+ printk(KERN_ERR
14055+ "Can't support LVDS/MIPI on pipe B on MRST\n");
14056+ return false;
14057+ } else if (!IS_MRST(dev) && !IS_I965G(dev)
14058+ && psb_intel_crtc->pipe == 0) {
14059+ printk(KERN_ERR "Can't support LVDS on pipe A\n");
14060+ return false;
14061+ }
14062+ /* Should never happen!! */
14063+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
14064+ head) {
14065+ if (tmp_encoder != encoder
14066+ && tmp_encoder->crtc == encoder->crtc) {
14067+ printk(KERN_ERR "Can't enable LVDS and another "
14068+ "encoder on the same pipe\n");
14069+ return false;
14070+ }
14071+ }
14072+
14073+ /*
14074+ * If we have timings from the BIOS for the panel, put them into
14075+ * the adjusted mode. The CRTC will be set up for this mode,
14076+ * with the panel scaling set up to source from the H/VDisplay
14077+ * of the original mode.
14078+ */
14079+ if (mode_dev->panel_fixed_mode != NULL) {
14080+ adjusted_mode->hdisplay =
14081+ mode_dev->panel_fixed_mode->hdisplay;
14082+ adjusted_mode->hsync_start =
14083+ mode_dev->panel_fixed_mode->hsync_start;
14084+ adjusted_mode->hsync_end =
14085+ mode_dev->panel_fixed_mode->hsync_end;
14086+ adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal;
14087+ adjusted_mode->vdisplay =
14088+ mode_dev->panel_fixed_mode->vdisplay;
14089+ adjusted_mode->vsync_start =
14090+ mode_dev->panel_fixed_mode->vsync_start;
14091+ adjusted_mode->vsync_end =
14092+ mode_dev->panel_fixed_mode->vsync_end;
14093+ adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal;
14094+ adjusted_mode->clock = mode_dev->panel_fixed_mode->clock;
14095+ drm_mode_set_crtcinfo(adjusted_mode,
14096+ CRTC_INTERLACE_HALVE_V);
14097+ }
14098+
14099+ /*
14100+ * XXX: It would be nice to support lower refresh rates on the
14101+ * panels to reduce power consumption, and perhaps match the
14102+ * user's requested refresh rate.
14103+ */
14104+
14105+ return true;
14106+}
14107+
14108+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
14109+{
14110+ struct drm_device *dev = encoder->dev;
14111+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
14112+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
14113+
14114+#if PRINT_JLIU7
14115+ DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n");
14116+#endif /* PRINT_JLIU7 */
14117+
14118+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
14119+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
14120+ BACKLIGHT_DUTY_CYCLE_MASK);
14121+
14122+ psb_intel_lvds_set_power(dev, output, false);
14123+}
14124+
14125+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
14126+{
14127+ struct drm_device *dev = encoder->dev;
14128+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
14129+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
14130+
14131+#if PRINT_JLIU7
14132+ DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n");
14133+#endif /* PRINT_JLIU7 */
14134+
14135+ if (mode_dev->backlight_duty_cycle == 0)
14136+ mode_dev->backlight_duty_cycle =
14137+ psb_intel_lvds_get_max_backlight(dev);
14138+
14139+ psb_intel_lvds_set_power(dev, output, true);
14140+}
14141+
14142+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
14143+ struct drm_display_mode *mode,
14144+ struct drm_display_mode *adjusted_mode)
14145+{
14146+ struct psb_intel_mode_device *mode_dev =
14147+ enc_to_psb_intel_output(encoder)->mode_dev;
14148+ struct drm_device *dev = encoder->dev;
14149+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
14150+ u32 pfit_control;
14151+
14152+ /*
14153+ * The LVDS pin pair will already have been turned on in the
14154+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
14155+ * settings.
14156+ */
14157+
14158+ /*
14159+ * Enable automatic panel scaling so that non-native modes fill the
14160+ * screen. Should be enabled before the pipe is enabled, according to
14161+ * register description and PRM.
14162+ */
14163+ if (mode->hdisplay != adjusted_mode->hdisplay ||
14164+ mode->vdisplay != adjusted_mode->vdisplay)
14165+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
14166+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
14167+ HORIZ_INTERP_BILINEAR);
14168+ else
14169+ pfit_control = 0;
14170+
14171+ if (!IS_I965G(dev)) {
14172+ if (mode_dev->panel_wants_dither)
14173+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
14174+ } else
14175+ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
14176+
14177+ REG_WRITE(PFIT_CONTROL, pfit_control);
14178+}
14179+
14180+/**
14181+ * Detect the LVDS connection.
14182+ *
14183+ * This always returns connector_status_connected, since the
14184+ * connector should only have been set up if the LVDS was
14185+ * actually connected anyway.
14186+ */
14187+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
14188+ *connector)
14189+{
14190+ return connector_status_connected;
14191+}
14192+
14193+/**
14194+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
14195+ */
14196+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
14197+{
14198+ struct drm_device *dev = connector->dev;
14199+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
14200+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
14201+ int ret = 0;
14202+
14203+ if (!IS_MRST(dev))
14204+ ret = psb_intel_ddc_get_modes(psb_intel_output);
14205+
14206+ if (ret)
14207+ return ret;
14208+
14209+ /* Didn't get an EDID, so
14210+ * set wide sync ranges so we get all modes
14211+ * handed to valid_mode for checking
14212+ */
14213+ connector->display_info.min_vfreq = 0;
14214+ connector->display_info.max_vfreq = 200;
14215+ connector->display_info.min_hfreq = 0;
14216+ connector->display_info.max_hfreq = 200;
14217+
14218+ if (mode_dev->panel_fixed_mode != NULL) {
14219+ struct drm_display_mode *mode =
14220+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
14221+ drm_mode_probed_add(connector, mode);
14222+ return 1;
14223+ }
14224+
14225+ return 0;
14226+}
14227+
14228+/**
14229+ * psb_intel_lvds_destroy - unregister and free LVDS structures
14230+ * @connector: connector to free
14231+ *
14232+ * Unregister the DDC bus for this connector then free the driver private
14233+ * structure.
14234+ */
14235+static void psb_intel_lvds_destroy(struct drm_connector *connector)
14236+{
14237+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
14238+
14239+ if (psb_intel_output->ddc_bus)
14240+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
14241+ drm_sysfs_connector_remove(connector);
14242+ drm_connector_cleanup(connector);
14243+ kfree(connector);
14244+}
14245+
14246+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
14247+ .dpms = psb_intel_lvds_dpms,
14248+ .mode_fixup = psb_intel_lvds_mode_fixup,
14249+ .prepare = psb_intel_lvds_prepare,
14250+ .mode_set = psb_intel_lvds_mode_set,
14251+ .commit = psb_intel_lvds_commit,
14252+};
14253+
14254+static const struct drm_connector_helper_funcs
14255+ psb_intel_lvds_connector_helper_funcs = {
14256+ .get_modes = psb_intel_lvds_get_modes,
14257+ .mode_valid = psb_intel_lvds_mode_valid,
14258+ .best_encoder = psb_intel_best_encoder,
14259+};
14260+
14261+static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
14262+ .save = psb_intel_lvds_save,
14263+ .restore = psb_intel_lvds_restore,
14264+ .detect = psb_intel_lvds_detect,
14265+ .fill_modes = drm_helper_probe_single_connector_modes,
14266+ .destroy = psb_intel_lvds_destroy,
14267+};
14268+
14269+
14270+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
14271+{
14272+ drm_encoder_cleanup(encoder);
14273+}
14274+
14275+static const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
14276+ .destroy = psb_intel_lvds_enc_destroy,
14277+};
14278+
14279+
14280+
14281+/**
14282+ * psb_intel_lvds_init - setup LVDS connectors on this device
14283+ * @dev: drm device
14284+ *
14285+ * Create the connector, register the LVDS DDC bus, and try to figure out what
14286+ * modes we can display on the LVDS panel (if present).
14287+ */
14288+void psb_intel_lvds_init(struct drm_device *dev,
14289+ struct psb_intel_mode_device *mode_dev)
14290+{
14291+ struct psb_intel_output *psb_intel_output;
14292+ struct drm_connector *connector;
14293+ struct drm_encoder *encoder;
14294+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
14295+ struct drm_crtc *crtc;
14296+ u32 lvds;
14297+ int pipe;
14298+
14299+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
14300+ if (!psb_intel_output)
14301+ return;
14302+
14303+ psb_intel_output->mode_dev = mode_dev;
14304+ connector = &psb_intel_output->base;
14305+ encoder = &psb_intel_output->enc;
14306+ drm_connector_init(dev, &psb_intel_output->base,
14307+ &psb_intel_lvds_connector_funcs,
14308+ DRM_MODE_CONNECTOR_LVDS);
14309+
14310+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
14311+ DRM_MODE_ENCODER_LVDS);
14312+
14313+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
14314+ &psb_intel_output->enc);
14315+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
14316+
14317+ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
14318+ drm_connector_helper_add(connector,
14319+ &psb_intel_lvds_connector_helper_funcs);
14320+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
14321+ connector->interlace_allowed = false;
14322+ connector->doublescan_allowed = false;
14323+
14324+
14325+ /*
14326+ * LVDS discovery:
14327+ * 1) check for EDID on DDC
14328+ * 2) check for VBT data
14329+ * 3) check to see if LVDS is already on
14330+ * if none of the above, no panel
14331+ * 4) make sure lid is open
14332+ * if closed, act like it's not there for now
14333+ */
14334+
14335+ /* Set up the DDC bus. */
14336+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
14337+ if (!psb_intel_output->ddc_bus) {
14338+ dev_printk(KERN_ERR, &dev->pdev->dev,
14339+ "DDC bus registration " "failed.\n");
14340+ goto failed_ddc;
14341+ }
14342+
14343+ /*
14344+ * Attempt to get the fixed panel mode from DDC. Assume that the
14345+ * preferred mode is the right one.
14346+ */
14347+ psb_intel_ddc_get_modes(psb_intel_output);
14348+ list_for_each_entry(scan, &connector->probed_modes, head) {
14349+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
14350+ mode_dev->panel_fixed_mode =
14351+ drm_mode_duplicate(dev, scan);
14352+ goto out; /* FIXME: check for quirks */
14353+ }
14354+ }
14355+
14356+ /* Failed to get EDID, what about VBT? */
14357+ if (mode_dev->vbt_mode)
14358+ mode_dev->panel_fixed_mode =
14359+ drm_mode_duplicate(dev, mode_dev->vbt_mode);
14360+
14361+ /*
14362+ * If we didn't get EDID, try checking if the panel is already turned
14363+ * on. If so, assume that whatever is currently programmed is the
14364+ * correct mode.
14365+ */
14366+ lvds = REG_READ(LVDS);
14367+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
14368+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
14369+
14370+ if (crtc && (lvds & LVDS_PORT_EN)) {
14371+ mode_dev->panel_fixed_mode =
14372+ psb_intel_crtc_mode_get(dev, crtc);
14373+ if (mode_dev->panel_fixed_mode) {
14374+ mode_dev->panel_fixed_mode->type |=
14375+ DRM_MODE_TYPE_PREFERRED;
14376+ goto out; /* FIXME: check for quirks */
14377+ }
14378+ }
14379+
14380+ /* If we still don't have a mode after all that, give up. */
14381+ if (!mode_dev->panel_fixed_mode) {
14382+ DRM_DEBUG
14383+ ("Found no modes on the lvds, ignoring the LVDS\n");
14384+ goto failed_find;
14385+ }
14386+
14387+ /* FIXME: detect aopen & mac mini type stuff automatically? */
14388+ /*
14389+ * Blacklist machines with BIOSes that list an LVDS panel without
14390+ * actually having one.
14391+ */
14392+ if (IS_I945GM(dev)) {
14393+ /* aopen mini pc */
14394+ if (dev->pdev->subsystem_vendor == 0xa0a0) {
14395+ DRM_DEBUG
14396+ ("Suspected AOpen Mini PC, ignoring the LVDS\n");
14397+ goto failed_find;
14398+ }
14399+
14400+ if ((dev->pdev->subsystem_vendor == 0x8086) &&
14401+ (dev->pdev->subsystem_device == 0x7270)) {
14402+ /* It's a Mac Mini or MacBook Pro. */
14403+
14404+ if (mode_dev->panel_fixed_mode != NULL &&
14405+ mode_dev->panel_fixed_mode->hdisplay == 800 &&
14406+ mode_dev->panel_fixed_mode->vdisplay == 600) {
14407+ DRM_DEBUG
14408+ ("Suspected Mac Mini, ignoring the LVDS\n");
14409+ goto failed_find;
14410+ }
14411+ }
14412+ }
14413+
14414+out:
14415+ drm_sysfs_connector_add(connector);
14416+
14417+#if PRINT_JLIU7
14418+ DRM_INFO("PRINT_JLIU7 hdisplay = %d\n",
14419+ mode_dev->panel_fixed_mode->hdisplay);
14420+ DRM_INFO("PRINT_JLIU7 vdisplay = %d\n",
14421+ mode_dev->panel_fixed_mode->vdisplay);
14422+ DRM_INFO("PRINT_JLIU7 hsync_start = %d\n",
14423+ mode_dev->panel_fixed_mode->hsync_start);
14424+ DRM_INFO("PRINT_JLIU7 hsync_end = %d\n",
14425+ mode_dev->panel_fixed_mode->hsync_end);
14426+ DRM_INFO("PRINT_JLIU7 htotal = %d\n",
14427+ mode_dev->panel_fixed_mode->htotal);
14428+ DRM_INFO("PRINT_JLIU7 vsync_start = %d\n",
14429+ mode_dev->panel_fixed_mode->vsync_start);
14430+ DRM_INFO("PRINT_JLIU7 vsync_end = %d\n",
14431+ mode_dev->panel_fixed_mode->vsync_end);
14432+ DRM_INFO("PRINT_JLIU7 vtotal = %d\n",
14433+ mode_dev->panel_fixed_mode->vtotal);
14434+ DRM_INFO("PRINT_JLIU7 clock = %d\n",
14435+ mode_dev->panel_fixed_mode->clock);
14436+#endif /* PRINT_JLIU7 */
14437+ return;
14438+
14439+failed_find:
14440+ if (psb_intel_output->ddc_bus)
14441+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
14442+failed_ddc:
14443+ drm_encoder_cleanup(encoder);
14444+ drm_connector_cleanup(connector);
14445+ kfree(connector);
14446+}
14447+
14448+/* MRST platform start */
14449+
14450+/*
14451+ * FIXME need to move to register define head file
14452+ */
14453+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
14454+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
14455+
14456+/* The max/min PWM frequency in BPCR[31:17]. */
14457+/* The smallest number that can fit in the 15-bit field of the
14458+ * register is 1 (not 0); it is then shifted left by one bit
14459+ * to get the actual 16-bit value that the 15 bits
14460+ * correspond to. */
14461+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
14462+
14463+#define BRIGHTNESS_MAX_LEVEL 100
14464+#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
14465+#define BLC_PWM_FREQ_CALC_CONSTANT 32
14466+#define MHz 1000000
14467+#define BLC_POLARITY_NORMAL 0
14468+#define BLC_POLARITY_INVERSE 1
14469+
14470+/**
14471+ * Calculate PWM control register value.
14472+ */
14473+static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
14474+{
14475+ unsigned long value = 0;
14476+ if (blc_freq == 0) {
14477+ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
14478+ * Frequency Requested is 0.\n"); */
14479+ return false;
14480+ }
14481+
14482+ value = (CoreClock * MHz);
14483+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
14484+ value = (value * BLC_PWM_PRECISION_FACTOR);
14485+ value = (value / blc_freq);
14486+ value = (value / BLC_PWM_PRECISION_FACTOR);
14487+
14488+ if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) {
14489+ return false;
14490+ } else {
14491+ PWMControlRegFreq = (u32) value;
14492+ return true;
14493+ }
14494+}
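For reference, a minimal standalone sketch of the arithmetic above (not part of the patch): the register value is the core clock divided by the fixed calculation constant and by the requested PWM frequency; the x10 precision factor is folded away here since it only affects rounding. The helper name and the core_clock_mhz/pwm_freq_hz parameters are stand-ins for the driver's CoreClock and blc_freq module variables.

static unsigned long pwm_ctrl_reg_freq(unsigned long core_clock_mhz,
					unsigned long pwm_freq_hz)
{
	unsigned long value;

	if (pwm_freq_hz == 0)
		return 0;

	/* e.g. a 200 MHz core clock at 1 kHz PWM:
	 * 200000000 / 32 / 1000 = 6250, which fits in 16 bits */
	value = core_clock_mhz * MHz;
	value /= BLC_PWM_FREQ_CALC_CONSTANT;
	value /= pwm_freq_hz;

	return value <= MRST_BLC_MAX_PWM_REG_FREQ ? value : 0;
}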
14495+
14496+/**
14497+ * Returns the maximum level of the backlight duty cycle field.
14498+ */
14499+static u32 mrst_lvds_get_PWM_ctrl_freq(struct drm_device *dev)
14500+{
14501+ u32 max_pwm_blc = 0;
14502+
14503+#if PRINT_JLIU7
14504+ DRM_INFO("JLIU7 enter mrst_lvds_get_PWM_ctrl_freq \n");
14505+#endif /* PRINT_JLIU7 */
14506+
14507+/*FIXME JLIU7 get the PWM frequency from configuration */
14508+
14509+ max_pwm_blc =
14510+ (REG_READ(BLC_PWM_CTL) & MRST_BACKLIGHT_MODULATION_FREQ_MASK)
14511+ >> MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
14512+
14513+
14514+ if (!max_pwm_blc) {
14515+ if (mrstLVDSCalculatePWMCtrlRegFreq(dev))
14516+ max_pwm_blc = PWMControlRegFreq;
14517+ }
14518+
14519+ return max_pwm_blc;
14520+}
14521+
14522+/**
14523+ * Sets the backlight level.
14524+ *
14525+ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
14526+ */
14527+static void mrst_lvds_set_backlight(struct drm_device *dev, int level)
14528+{
14529+ u32 blc_pwm_ctl;
14530+ u32 max_pwm_blc;
14531+#if PRINT_JLIU7
14532+ DRM_INFO("JLIU7 enter mrst_lvds_set_backlight \n");
14533+#endif /* PRINT_JLIU7 */
14534+
14535+#if 1 /* FIXME JLIU7 */
14536+ return;
14537+#endif /* FIXME JLIU7 */
14538+
14539+ /* Prevent the LVDS from going totally black */
14540+ if (level < 20)
14541+ level = 20;
14542+
14543+ max_pwm_blc = mrst_lvds_get_PWM_ctrl_freq(dev);
14544+
14545+ if (max_pwm_blc == 0)
14546+ return;
14547+
14548+ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
14549+
14550+ if (blc_pol == BLC_POLARITY_INVERSE)
14551+ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
14552+
14553+ REG_WRITE(BLC_PWM_CTL,
14554+ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
14555+ blc_pwm_ctl);
14556+}
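To make the scaling explicit, here is a sketch (not part of the patch) of how a 0-100 brightness level maps onto the BLC_PWM_CTL duty-cycle field; the inverse parameter stands in for the driver's blc_pol variable.

static u32 backlight_duty_field(u32 level, u32 max_pwm_blc, bool inverse)
{
	/* e.g. level = 50 with max_pwm_blc = 6250 gives duty = 3125 */
	u32 duty = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;

	if (inverse)	/* BLC_POLARITY_INVERSE panels count down */
		duty = max_pwm_blc - duty;

	return duty;
}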
14557+
14558+/**
14559+ * Sets the power state for the panel.
14560+ */
14561+static void mrst_lvds_set_power(struct drm_device *dev,
14562+ struct psb_intel_output *output, bool on)
14563+{
14564+ u32 pp_status;
14565+
14566+#if PRINT_JLIU7
14567+ DRM_INFO("JLIU7 enter mrst_lvds_set_power \n");
14568+#endif /* PRINT_JLIU7 */
14569+
14570+ if (on) {
14571+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
14572+ POWER_TARGET_ON);
14573+ do {
14574+ pp_status = REG_READ(PP_STATUS);
14575+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
14576+
14577+ mrst_lvds_set_backlight(dev, lvds_backlight);
14578+ } else {
14579+ mrst_lvds_set_backlight(dev, 0);
14580+
14581+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
14582+ ~POWER_TARGET_ON);
14583+ do {
14584+ pp_status = REG_READ(PP_STATUS);
14585+ } while (pp_status & PP_ON);
14586+ }
14587+}
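The power-up loop above spins while the panel power sequencer reports "ready but not yet on". A sketch (not part of the patch) spelling out that condition with a hypothetical helper:

static bool panel_power_pending(u32 pp_status)
{
	/* true while PP_READY is set but PP_ON is not, i.e. the
	 * sequencer's dependencies are up but the panel has not
	 * finished turning on yet */
	return (pp_status & (PP_ON | PP_READY)) == PP_READY;
}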
14588+
14589+static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
14590+{
14591+ struct drm_device *dev = encoder->dev;
14592+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
14593+
14594+#if PRINT_JLIU7
14595+ DRM_INFO("JLIU7 enter mrst_lvds_dpms \n");
14596+#endif /* PRINT_JLIU7 */
14597+
14598+ if (mode == DRM_MODE_DPMS_ON)
14599+ mrst_lvds_set_power(dev, output, true);
14600+ else
14601+ mrst_lvds_set_power(dev, output, false);
14602+
14603+ /* XXX: We never power down the LVDS pairs. */
14604+}
14605+
14606+static void mrst_lvds_mode_set(struct drm_encoder *encoder,
14607+ struct drm_display_mode *mode,
14608+ struct drm_display_mode *adjusted_mode)
14609+{
14610+ struct psb_intel_mode_device *mode_dev =
14611+ enc_to_psb_intel_output(encoder)->mode_dev;
14612+ struct drm_device *dev = encoder->dev;
14613+ u32 pfit_control;
14614+ u32 lvds_port;
14615+
14616+#if PRINT_JLIU7
14617+ DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n");
14618+#endif /* PRINT_JLIU7 */
14619+
14620+ /*
14621+ * The LVDS pin pair will already have been turned on in the
14622+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
14623+ * settings.
14624+ */
14625+ /*FIXME JLIU7 Get panel power delay parameters from config data */
14626+ REG_WRITE(0x61208, 0x25807d0);
14627+ REG_WRITE(0x6120c, 0x1f407d0);
14628+ REG_WRITE(0x61210, 0x270f04);
14629+
14630+ lvds_port = (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN;
14631+
14632+ if (mode_dev->panel_wants_dither)
14633+ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
14634+
14635+ REG_WRITE(LVDS, lvds_port);
14636+
14637+ /*
14638+ * Enable automatic panel scaling so that non-native modes fill the
14639+ * screen. Should be enabled before the pipe is enabled, according to
14640+ * register description and PRM.
14641+ */
14642+ if (mode->hdisplay != adjusted_mode->hdisplay ||
14643+ mode->vdisplay != adjusted_mode->vdisplay)
14644+ pfit_control = PFIT_ENABLE;
14645+ else
14646+ pfit_control = 0;
14647+
14648+ REG_WRITE(PFIT_CONTROL, pfit_control);
14649+}
14650+
14651+
14652+static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
14653+ .dpms = mrst_lvds_dpms,
14654+ .mode_fixup = psb_intel_lvds_mode_fixup,
14655+ .prepare = psb_intel_lvds_prepare,
14656+ .mode_set = mrst_lvds_mode_set,
14657+ .commit = psb_intel_lvds_commit,
14658+};
14659+
14660+/** Returns the panel fixed mode from configuration. */
14661+/** FIXME JLIU7 need to revisit it. */
14662+struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device
14663+ *dev)
14664+{
14665+ struct drm_display_mode *mode;
14666+
14667+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
14668+ if (!mode)
14669+ return NULL;
14670+
14671+#if 0 /*FIXME jliu7 remove it later */
14672+ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
14673+ mode->hdisplay = 800;
14674+ mode->vdisplay = 480;
14675+ mode->hsync_start = 836;
14676+ mode->hsync_end = 846;
14677+ mode->htotal = 1056;
14678+ mode->vsync_start = 489;
14679+ mode->vsync_end = 491;
14680+ mode->vtotal = 525;
14681+ mode->clock = 33264;
14682+#endif /*FIXME jliu7 remove it later */
14683+
14684+#if 0 /*FIXME jliu7 remove it later */
14685+ /* hard coded fixed mode for LVDS 800x480 */
14686+ mode->hdisplay = 800;
14687+ mode->vdisplay = 480;
14688+ mode->hsync_start = 801;
14689+ mode->hsync_end = 802;
14690+ mode->htotal = 1024;
14691+ mode->vsync_start = 481;
14692+ mode->vsync_end = 482;
14693+ mode->vtotal = 525;
14694+ mode->clock = 30994;
14695+#endif /*FIXME jliu7 remove it later */
14696+
14697+#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
14698+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
14699+ mode->hdisplay = 1024;
14700+ mode->vdisplay = 600;
14701+ mode->hsync_start = 1072;
14702+ mode->hsync_end = 1104;
14703+ mode->htotal = 1184;
14704+ mode->vsync_start = 603;
14705+ mode->vsync_end = 604;
14706+ mode->vtotal = 608;
14707+ mode->clock = 53990;
14708+#endif /*FIXME jliu7 remove it later */
14709+
14710+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
14711+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
14712+ mode->hdisplay = 1024;
14713+ mode->vdisplay = 600;
14714+ mode->hsync_start = 1104;
14715+ mode->hsync_end = 1136;
14716+ mode->htotal = 1184;
14717+ mode->vsync_start = 603;
14718+ mode->vsync_end = 604;
14719+ mode->vtotal = 608;
14720+ mode->clock = 53990;
14721+#endif /*FIXME jliu7 remove it later */
14722+
14723+#if 0 /*FIXME jliu7 remove it later */
14724+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
14725+ mode->hdisplay = 1024;
14726+ mode->vdisplay = 600;
14727+ mode->hsync_start = 1124;
14728+ mode->hsync_end = 1204;
14729+ mode->htotal = 1312;
14730+ mode->vsync_start = 607;
14731+ mode->vsync_end = 610;
14732+ mode->vtotal = 621;
14733+ mode->clock = 48885;
14734+#endif /*FIXME jliu7 remove it later */
14735+
14736+#if 0 /*FIXME jliu7 remove it later */
14737+ /* hard coded fixed mode for LVDS 1024x768 */
14738+ mode->hdisplay = 1024;
14739+ mode->vdisplay = 768;
14740+ mode->hsync_start = 1048;
14741+ mode->hsync_end = 1184;
14742+ mode->htotal = 1344;
14743+ mode->vsync_start = 771;
14744+ mode->vsync_end = 777;
14745+ mode->vtotal = 806;
14746+ mode->clock = 65000;
14747+#endif /*FIXME jliu7 remove it later */
14748+
14749+#if 0 /*FIXME jliu7 remove it later */
14750+ /* hard coded fixed mode for LVDS 1366x768 */
14751+ mode->hdisplay = 1366;
14752+ mode->vdisplay = 768;
14753+ mode->hsync_start = 1430;
14754+ mode->hsync_end = 1558;
14755+ mode->htotal = 1664;
14756+ mode->vsync_start = 769;
14757+ mode->vsync_end = 770;
14758+ mode->vtotal = 776;
14759+ mode->clock = 77500;
14760+#endif /*FIXME jliu7 remove it later */
14761+
14762+ drm_mode_set_name(mode);
14763+ drm_mode_set_crtcinfo(mode, 0);
14764+
14765+ return mode;
14766+}
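As a quick sanity check on the "@75" label of the Samsung timings selected above (a sketch, not part of the patch): vertical refresh is the pixel clock divided by the total raster size, and 53990 kHz / (1184 * 608) is roughly 75 Hz.

static int mode_vrefresh_hz(const struct drm_display_mode *mode)
{
	/* mode->clock is in kHz: 53990000 / 719872 ~= 75 */
	return mode->clock * 1000 / (mode->htotal * mode->vtotal);
}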
14767+
14768+/**
14769+ * mrst_lvds_init - setup LVDS connectors on this device
14770+ * @dev: drm device
14771+ *
14772+ * Create the connector, register the LVDS DDC bus, and try to figure out what
14773+ * modes we can display on the LVDS panel (if present).
14774+ */
14775+void mrst_lvds_init(struct drm_device *dev,
14776+ struct psb_intel_mode_device *mode_dev)
14777+{
14778+ struct psb_intel_output *psb_intel_output;
14779+ struct drm_connector *connector;
14780+ struct drm_encoder *encoder;
14781+#if MRST_I2C
14782+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
14783+#endif
14784+#if PRINT_JLIU7
14785+ DRM_INFO("JLIU7 enter mrst_lvds_init \n");
14786+#endif /* PRINT_JLIU7 */
14787+
14788+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
14789+ if (!psb_intel_output)
14790+ return;
14791+
14792+ psb_intel_output->mode_dev = mode_dev;
14793+ connector = &psb_intel_output->base;
14794+ encoder = &psb_intel_output->enc;
14795+ drm_connector_init(dev, &psb_intel_output->base,
14796+ &psb_intel_lvds_connector_funcs,
14797+ DRM_MODE_CONNECTOR_LVDS);
14798+
14799+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
14800+ DRM_MODE_ENCODER_LVDS);
14801+
14802+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
14803+ &psb_intel_output->enc);
14804+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
14805+
14806+ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
14807+ drm_connector_helper_add(connector,
14808+ &psb_intel_lvds_connector_helper_funcs);
14809+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
14810+ connector->interlace_allowed = false;
14811+ connector->doublescan_allowed = false;
14812+
14813+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
14814+
14815+ /*
14816+ * LVDS discovery:
14817+ * 1) check for EDID on DDC
14818+ * 2) check for VBT data
14819+ * 3) check to see if LVDS is already on
14820+ * if none of the above, no panel
14821+ * 4) make sure lid is open
14822+ * if closed, act like it's not there for now
14823+ */
14824+
14825+#if MRST_I2C
14826+ /* Set up the DDC bus. */
14827+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
14828+ if (!psb_intel_output->ddc_bus) {
14829+ dev_printk(KERN_ERR, &dev->pdev->dev,
14830+ "DDC bus registration " "failed.\n");
14831+ goto failed_ddc;
14832+ }
14833+
14834+ /*
14835+ * Attempt to get the fixed panel mode from DDC. Assume that the
14836+ * preferred mode is the right one.
14837+ */
14838+ psb_intel_ddc_get_modes(psb_intel_output);
14839+ list_for_each_entry(scan, &connector->probed_modes, head) {
14840+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
14841+ mode_dev->panel_fixed_mode =
14842+ drm_mode_duplicate(dev, scan);
14843+ goto out; /* FIXME: check for quirks */
14844+ }
14845+ }
14846+#endif /* MRST_I2C */
14847+
14848+ /*
14849+ * If we didn't get EDID, try getting panel timing
14850+ * from configuration data
14851+ */
14852+ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
14853+
14854+ if (mode_dev->panel_fixed_mode) {
14855+ mode_dev->panel_fixed_mode->type |=
14856+ DRM_MODE_TYPE_PREFERRED;
14857+ goto out; /* FIXME: check for quirks */
14858+ }
14859+
14860+ /* If we still don't have a mode after all that, give up. */
14861+ if (!mode_dev->panel_fixed_mode) {
14862+ DRM_DEBUG
14863+ ("Found no modes on the lvds, ignoring the LVDS\n");
14864+ goto failed_find;
14865+ }
14866+
14867+out:
14868+ drm_sysfs_connector_add(connector);
14869+ return;
14870+
14871+failed_find:
14872+ DRM_DEBUG("No LVDS modes found, disabling.\n");
14873+ if (psb_intel_output->ddc_bus)
14874+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
14875+#if MRST_I2C
14876+failed_ddc:
14877+#endif
14878+ drm_encoder_cleanup(encoder);
14879+ drm_connector_cleanup(connector);
14880+ kfree(connector);
14881+}
14882+
14883+/* MRST platform end */
14884diff -uNr a/drivers/gpu/drm/psb/psb_intel_modes.c b/drivers/gpu/drm/psb/psb_intel_modes.c
14885--- a/drivers/gpu/drm/psb/psb_intel_modes.c 1969-12-31 16:00:00.000000000 -0800
14886+++ b/drivers/gpu/drm/psb/psb_intel_modes.c 2009-04-07 13:28:38.000000000 -0700
14887@@ -0,0 +1,64 @@
14888+/*
14889+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
14890+ * Copyright (c) 2007 Intel Corporation
14891+ * Jesse Barnes <jesse.barnes@intel.com>
14892+ */
14893+
14894+#include <linux/i2c.h>
14895+#include <linux/fb.h>
14896+#include <drm/drmP.h>
14897+#include "psb_intel_drv.h"
14898+
14899+/**
14900+ * psb_intel_ddc_probe - check whether a monitor answers on the DDC bus
14901+ * (attempts a one-byte read at the EDID address 0x50)
14902+ */
14903+bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
14904+{
14905+ u8 out_buf[] = { 0x0, 0x0 };
14906+ u8 buf[2];
14907+ int ret;
14908+ struct i2c_msg msgs[] = {
14909+ {
14910+ .addr = 0x50,
14911+ .flags = 0,
14912+ .len = 1,
14913+ .buf = out_buf,
14914+ },
14915+ {
14916+ .addr = 0x50,
14917+ .flags = I2C_M_RD,
14918+ .len = 1,
14919+ .buf = buf,
14920+ }
14921+ };
14922+
14923+ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
14924+ if (ret == 2)
14925+ return true;
14926+
14927+ return false;
14928+}
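The probe above writes a zero offset and reads one byte back from the EDID address 0x50, succeeding only when i2c_transfer() completes both messages. A typical caller might gate the full EDID fetch on it, e.g. (a sketch, not in this patch; psb_intel_output is assumed already initialised):

	if (psb_intel_ddc_probe(psb_intel_output))
		ret = psb_intel_ddc_get_modes(psb_intel_output);
	else
		ret = 0;	/* nothing on the bus, skip the fetch */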
14929+
14930+/**
14931+ * psb_intel_ddc_get_modes - get modelist from monitor
14932+ * @connector: DRM connector device to use
14933+ *
14934+ * Fetch the EDID information from @connector using the DDC bus.
14935+ */
14936+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
14937+{
14938+ struct edid *edid;
14939+ int ret = 0;
14940+
14941+ edid =
14942+ drm_get_edid(&psb_intel_output->base,
14943+ &psb_intel_output->ddc_bus->adapter);
14944+ if (edid) {
14945+ drm_mode_connector_update_edid_property(&psb_intel_output->
14946+ base, edid);
14947+ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
14948+ kfree(edid);
14949+ }
14950+ return ret;
14951+}
14952diff -uNr a/drivers/gpu/drm/psb/psb_intel_reg.h b/drivers/gpu/drm/psb/psb_intel_reg.h
14953--- a/drivers/gpu/drm/psb/psb_intel_reg.h 1969-12-31 16:00:00.000000000 -0800
14954+++ b/drivers/gpu/drm/psb/psb_intel_reg.h 2009-04-07 13:28:38.000000000 -0700
14955@@ -0,0 +1,972 @@
14956+#define BLC_PWM_CTL 0x61254
14957+#define BLC_PWM_CTL2 0x61250
14958+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
14959+/**
14960+ * This is the most significant 15 bits of the number of backlight cycles in a
14961+ * complete cycle of the modulated backlight control.
14962+ *
14963+ * The actual value is this field multiplied by two.
14964+ */
14965+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
14966+#define BLM_LEGACY_MODE (1 << 16)
14967+/**
14968+ * This is the number of cycles out of the backlight modulation cycle for which
14969+ * the backlight is on.
14970+ *
14971+ * This field must be no greater than the number of cycles in the complete
14972+ * backlight modulation cycle.
14973+ */
14974+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
14975+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
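Putting the two fields together (a sketch, not part of the patch): per the comments above, the total number of backlight cycles in a modulation period is the 15-bit field at bit 17 multiplied by two, and the duty-cycle field must not exceed that total.

static u32 blc_total_cycles(u32 blc_pwm_ctl)
{
	u32 field = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK)
			>> BACKLIGHT_MODULATION_FREQ_SHIFT;

	return field * 2;	/* the register stores the value halved */
}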
14976+
14977+#define I915_GCFGC 0xf0
14978+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
14979+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
14980+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
14981+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
14982+
14983+#define I855_HPLLCC 0xc0
14984+#define I855_CLOCK_CONTROL_MASK (3 << 0)
14985+#define I855_CLOCK_133_200 (0 << 0)
14986+#define I855_CLOCK_100_200 (1 << 0)
14987+#define I855_CLOCK_100_133 (2 << 0)
14988+#define I855_CLOCK_166_250 (3 << 0)
14989+
14990+/* I830 CRTC registers */
14991+#define HTOTAL_A 0x60000
14992+#define HBLANK_A 0x60004
14993+#define HSYNC_A 0x60008
14994+#define VTOTAL_A 0x6000c
14995+#define VBLANK_A 0x60010
14996+#define VSYNC_A 0x60014
14997+#define PIPEASRC 0x6001c
14998+#define BCLRPAT_A 0x60020
14999+#define VSYNCSHIFT_A 0x60028
15000+
15001+#define HTOTAL_B 0x61000
15002+#define HBLANK_B 0x61004
15003+#define HSYNC_B 0x61008
15004+#define VTOTAL_B 0x6100c
15005+#define VBLANK_B 0x61010
15006+#define VSYNC_B 0x61014
15007+#define PIPEBSRC 0x6101c
15008+#define BCLRPAT_B 0x61020
15009+#define VSYNCSHIFT_B 0x61028
15010+
15011+#define PP_STATUS 0x61200
15012+# define PP_ON (1 << 31)
15013+/**
15014+ * Indicates that all dependencies of the panel are on:
15015+ *
15016+ * - PLL enabled
15017+ * - pipe enabled
15018+ * - LVDS/DVOB/DVOC on
15019+ */
15020+# define PP_READY (1 << 30)
15021+# define PP_SEQUENCE_NONE (0 << 28)
15022+# define PP_SEQUENCE_ON (1 << 28)
15023+# define PP_SEQUENCE_OFF (2 << 28)
15024+# define PP_SEQUENCE_MASK 0x30000000
15025+#define PP_CONTROL 0x61204
15026+# define POWER_TARGET_ON (1 << 0)
15027+
15028+#define LVDSPP_ON 0x61208
15029+#define LVDSPP_OFF 0x6120c
15030+#define PP_CYCLE 0x61210
15031+
15032+#define PFIT_CONTROL 0x61230
15033+# define PFIT_ENABLE (1 << 31)
15034+# define PFIT_PIPE_MASK (3 << 29)
15035+# define PFIT_PIPE_SHIFT 29
15036+# define VERT_INTERP_DISABLE (0 << 10)
15037+# define VERT_INTERP_BILINEAR (1 << 10)
15038+# define VERT_INTERP_MASK (3 << 10)
15039+# define VERT_AUTO_SCALE (1 << 9)
15040+# define HORIZ_INTERP_DISABLE (0 << 6)
15041+# define HORIZ_INTERP_BILINEAR (1 << 6)
15042+# define HORIZ_INTERP_MASK (3 << 6)
15043+# define HORIZ_AUTO_SCALE (1 << 5)
15044+# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
15045+
15046+#define PFIT_PGM_RATIOS 0x61234
15047+# define PFIT_VERT_SCALE_MASK 0xfff00000
15048+# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
15049+
15050+#define PFIT_AUTO_RATIOS 0x61238
15051+
15052+
15053+#define DPLL_A 0x06014
15054+#define DPLL_B 0x06018
15055+# define DPLL_VCO_ENABLE (1 << 31)
15056+# define DPLL_DVO_HIGH_SPEED (1 << 30)
15057+# define DPLL_SYNCLOCK_ENABLE (1 << 29)
15058+# define DPLL_VGA_MODE_DIS (1 << 28)
15059+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
15060+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
15061+# define DPLL_MODE_MASK (3 << 26)
15062+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
15063+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
15064+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
15065+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
15066+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
15067+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
15068+/**
15069+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
15070+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
15071+ */
15072+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
15073+/**
15074+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
15075+ * this field (only one bit may be set).
15076+ */
15077+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
15078+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
15079+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
15080+ * in DVO non-gang */
15081+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
15082+# define PLL_REF_INPUT_DREFCLK (0 << 13)
15083+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
15084+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
15085+ * TVCLKIN */
15086+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
15087+# define PLL_REF_INPUT_MASK (3 << 13)
15088+# define PLL_LOAD_PULSE_PHASE_SHIFT 9
15089+/*
15090+ * Parallel to Serial Load Pulse phase selection.
15091+ * Selects the phase for the 10X DPLL clock for the PCIe
15092+ * digital display port. The range is 4 to 13; 10 or more
15093+ * is just a flip delay. The default is 6
15094+ */
15095+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
15096+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
15097+
15098+/**
15099+ * SDVO multiplier for 945G/GM. Not used on 965.
15100+ *
15101+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
15102+ */
15103+# define SDVO_MULTIPLIER_MASK 0x000000ff
15104+# define SDVO_MULTIPLIER_SHIFT_HIRES 4
15105+# define SDVO_MULTIPLIER_SHIFT_VGA 0
15106+
15107+/** @defgroup DPLL_MD
15108+ * @{
15109+ */
15110+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
15111+#define DPLL_A_MD 0x0601c
15112+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
15113+#define DPLL_B_MD 0x06020
15114+/**
15115+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
15116+ *
15117+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
15118+ */
15119+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
15120+# define DPLL_MD_UDI_DIVIDER_SHIFT 24
15121+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
15122+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
15123+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
15124+/**
15125+ * SDVO/UDI pixel multiplier.
15126+ *
15127+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
15128+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
15129+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
15130+ * dummy bytes in the datastream at an increased clock rate, with both sides of
15131+ * the link knowing how many of the bytes are fill.
15132+ *
15133+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
15134+ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
15135+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
15136+ * through an SDVO command.
15137+ *
15138+ * This register field has values of multiplication factor minus 1, with
15139+ * a maximum multiplier of 5 for SDVO.
15140+ */
15141+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
15142+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
15143+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
15144+ * This is best set to the default value (3) or the CRT won't work. No,
15145+ * I don't entirely understand what this does...
15146+ */
15147+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
15148+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
15149+/** @} */
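Following the DPLL_MD_UDI_MULTIPLIER comment above, a hypothetical sketch (not part of the patch) of how a driver could pick the smallest multiplier that keeps the bus clock, which runs at 10x the DPLL clock, above 1 GHz:

static int sdvo_pixel_multiply(int dotclock_khz)
{
	int mult;

	for (mult = 1; mult <= 5; mult++) {
		/* bus rate in kHz is dotclock * mult * 10 */
		if (dotclock_khz * mult * 10 >= 1000000)
			return mult;	/* e.g. 65000 kHz -> 2x, 1.3 GHz */
	}

	return 5;	/* 5x is the maximum multiplier for SDVO */
}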
15150+
15151+#define DPLL_TEST 0x606c
15152+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
15153+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
15154+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
15155+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
15156+# define DPLLB_TEST_N_BYPASS (1 << 19)
15157+# define DPLLB_TEST_M_BYPASS (1 << 18)
15158+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
15159+# define DPLLA_TEST_N_BYPASS (1 << 3)
15160+# define DPLLA_TEST_M_BYPASS (1 << 2)
15161+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
15162+
15163+#define ADPA 0x61100
15164+#define ADPA_DAC_ENABLE (1<<31)
15165+#define ADPA_DAC_DISABLE 0
15166+#define ADPA_PIPE_SELECT_MASK (1<<30)
15167+#define ADPA_PIPE_A_SELECT 0
15168+#define ADPA_PIPE_B_SELECT (1<<30)
15169+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
15170+#define ADPA_SETS_HVPOLARITY 0
15171+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
15172+#define ADPA_VSYNC_CNTL_ENABLE 0
15173+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
15174+#define ADPA_HSYNC_CNTL_ENABLE 0
15175+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
15176+#define ADPA_VSYNC_ACTIVE_LOW 0
15177+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
15178+#define ADPA_HSYNC_ACTIVE_LOW 0
15179+
15180+#define FPA0 0x06040
15181+#define FPA1 0x06044
15182+#define FPB0 0x06048
15183+#define FPB1 0x0604c
15184+# define FP_N_DIV_MASK 0x003f0000
15185+# define FP_N_DIV_SHIFT 16
15186+# define FP_M1_DIV_MASK 0x00003f00
15187+# define FP_M1_DIV_SHIFT 8
15188+# define FP_M2_DIV_MASK 0x0000003f
15189+# define FP_M2_DIV_SHIFT 0
15190+
15191+
15192+#define PORT_HOTPLUG_EN 0x61110
15193+# define SDVOB_HOTPLUG_INT_EN (1 << 26)
15194+# define SDVOC_HOTPLUG_INT_EN (1 << 25)
15195+# define TV_HOTPLUG_INT_EN (1 << 18)
15196+# define CRT_HOTPLUG_INT_EN (1 << 9)
15197+# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
15198+
15199+#define PORT_HOTPLUG_STAT 0x61114
15200+# define CRT_HOTPLUG_INT_STATUS (1 << 11)
15201+# define TV_HOTPLUG_INT_STATUS (1 << 10)
15202+# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
15203+# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
15204+# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
15205+# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
15206+# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
15207+# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
15208+
15209+#define SDVOB 0x61140
15210+#define SDVOC 0x61160
15211+#define SDVO_ENABLE (1 << 31)
15212+#define SDVO_PIPE_B_SELECT (1 << 30)
15213+#define SDVO_STALL_SELECT (1 << 29)
15214+#define SDVO_INTERRUPT_ENABLE (1 << 26)
15215+/**
15216+ * 915G/GM SDVO pixel multiplier.
15217+ *
15218+ * Programmed value is multiplier - 1, up to 5x.
15219+ *
15220+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
15221+ */
15222+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
15223+#define SDVO_PORT_MULTIPLY_SHIFT 23
15224+#define SDVO_PHASE_SELECT_MASK (15 << 19)
15225+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
15226+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
15227+#define SDVOC_GANG_MODE (1 << 16)
15228+#define SDVO_BORDER_ENABLE (1 << 7)
15229+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
15230+#define SDVO_DETECTED (1 << 2)
15231+/* Bits to be preserved when writing */
15232+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
15233+#define SDVOC_PRESERVE_MASK (1 << 17)
15234+
15235+/** @defgroup LVDS
15236+ * @{
15237+ */
15238+/**
15239+ * This register controls the LVDS output enable, pipe selection, and data
15240+ * format selection.
15241+ *
15242+ * All of the clock/data pairs are force powered down by power sequencing.
15243+ */
15244+#define LVDS 0x61180
15245+/**
15246+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
15247+ * the DPLL semantics change when the LVDS is assigned to that pipe.
15248+ */
15249+# define LVDS_PORT_EN (1 << 31)
15250+/** Selects pipe B for LVDS data. Must be set on pre-965. */
15251+# define LVDS_PIPEB_SELECT (1 << 30)
15252+
15253+/**
15254+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
15255+ * pixel.
15256+ */
15257+# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
15258+# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
15259+# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
15260+/**
15261+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
15262+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
15263+ * on.
15264+ */
15265+# define LVDS_A3_POWER_MASK (3 << 6)
15266+# define LVDS_A3_POWER_DOWN (0 << 6)
15267+# define LVDS_A3_POWER_UP (3 << 6)
15268+/**
15269+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
15270+ * is set.
15271+ */
15272+# define LVDS_CLKB_POWER_MASK (3 << 4)
15273+# define LVDS_CLKB_POWER_DOWN (0 << 4)
15274+# define LVDS_CLKB_POWER_UP (3 << 4)
15275+
15276+/**
15277+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
15278+ * setting for whether we are in dual-channel mode. The B3 pair will
15279+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
15280+ */
15281+# define LVDS_B0B3_POWER_MASK (3 << 2)
15282+# define LVDS_B0B3_POWER_DOWN (0 << 2)
15283+# define LVDS_B0B3_POWER_UP (3 << 2)
15284+
15285+#define PIPEACONF 0x70008
15286+#define PIPEACONF_ENABLE (1<<31)
15287+#define PIPEACONF_DISABLE 0
15288+#define PIPEACONF_DOUBLE_WIDE (1<<30)
15289+#define I965_PIPECONF_ACTIVE (1<<30)
15290+#define PIPEACONF_SINGLE_WIDE 0
15291+#define PIPEACONF_PIPE_UNLOCKED 0
15292+#define PIPEACONF_PIPE_LOCKED (1<<25)
15293+#define PIPEACONF_PALETTE 0
15294+#define PIPEACONF_GAMMA (1<<24)
15295+#define PIPECONF_FORCE_BORDER (1<<25)
15296+#define PIPECONF_PROGRESSIVE (0 << 21)
15297+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
15298+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
15299+
15300+#define PIPEBCONF 0x71008
15301+#define PIPEBCONF_ENABLE (1<<31)
15302+#define PIPEBCONF_DISABLE 0
15303+#define PIPEBCONF_DOUBLE_WIDE (1<<30)
15304+#define PIPEBCONF_DISABLE 0
15305+#define PIPEBCONF_GAMMA (1<<24)
15306+#define PIPEBCONF_PALETTE 0
15307+
15308+#define PIPEBGCMAXRED 0x71010
15309+#define PIPEBGCMAXGREEN 0x71014
15310+#define PIPEBGCMAXBLUE 0x71018
15311+#define PIPEBSTAT 0x71024
15312+#define PIPEBFRAMEHIGH 0x71040
15313+#define PIPEBFRAMEPIXEL 0x71044
15314+
15315+#define DSPARB 0x70030
15316+#define DSPFW1 0x70034
15317+#define DSPFW2 0x70038
15318+#define DSPFW3 0x7003c
15319+#define DSPFW4 0x70050
15320+#define DSPFW5 0x70054
15321+#define DSPFW6 0x70058
15322+
15323+#define DSPACNTR 0x70180
15324+#define DSPBCNTR 0x71180
15325+#define DISPLAY_PLANE_ENABLE (1<<31)
15326+#define DISPLAY_PLANE_DISABLE 0
15327+#define DISPPLANE_GAMMA_ENABLE (1<<30)
15328+#define DISPPLANE_GAMMA_DISABLE 0
15329+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
15330+#define DISPPLANE_8BPP (0x2<<26)
15331+#define DISPPLANE_15_16BPP (0x4<<26)
15332+#define DISPPLANE_16BPP (0x5<<26)
15333+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
15334+#define DISPPLANE_32BPP (0x7<<26)
15335+#define DISPPLANE_STEREO_ENABLE (1<<25)
15336+#define DISPPLANE_STEREO_DISABLE 0
15337+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
15338+#define DISPPLANE_SEL_PIPE_A 0
15339+#define DISPPLANE_SEL_PIPE_B (1<<24)
15340+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
15341+#define DISPPLANE_SRC_KEY_DISABLE 0
15342+#define DISPPLANE_LINE_DOUBLE (1<<20)
15343+#define DISPPLANE_NO_LINE_DOUBLE 0
15344+#define DISPPLANE_STEREO_POLARITY_FIRST 0
15345+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
15346+/* plane B only */
15347+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
15348+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
15349+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
15350+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
15351+
15352+#define DSPABASE 0x70184
15353+#define DSPASTRIDE 0x70188
15354+
15355+#define DSPBBASE 0x71184
15356+#define DSPBADDR DSPBBASE
15357+#define DSPBSTRIDE 0x71188
15358+
15359+#define DSPAKEYVAL 0x70194
15360+#define DSPAKEYMASK 0x70198
15361+
15362+#define DSPAPOS 0x7018C /* reserved */
15363+#define DSPASIZE 0x70190
15364+#define DSPBPOS 0x7118C
15365+#define DSPBSIZE 0x71190
15366+
15367+#define DSPASURF 0x7019C
15368+#define DSPATILEOFF 0x701A4
15369+
15370+#define DSPBSURF 0x7119C
15371+#define DSPBTILEOFF 0x711A4
15372+
15373+#define VGACNTRL 0x71400
15374+# define VGA_DISP_DISABLE (1 << 31)
15375+# define VGA_2X_MODE (1 << 30)
15376+# define VGA_PIPE_B_SELECT (1 << 29)
15377+
15378+/*
15379+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
15380+ * of video memory available to the BIOS in SWF1.
15381+ */
15382+
15383+#define SWF0 0x71410
15384+#define SWF1 0x71414
15385+#define SWF2 0x71418
15386+#define SWF3 0x7141c
15387+#define SWF4 0x71420
15388+#define SWF5 0x71424
15389+#define SWF6 0x71428
15390+
15391+/*
15392+ * 855 scratch registers.
15393+ */
15394+#define SWF00 0x70410
15395+#define SWF01 0x70414
15396+#define SWF02 0x70418
15397+#define SWF03 0x7041c
15398+#define SWF04 0x70420
15399+#define SWF05 0x70424
15400+#define SWF06 0x70428
15401+
15402+#define SWF10 SWF0
15403+#define SWF11 SWF1
15404+#define SWF12 SWF2
15405+#define SWF13 SWF3
15406+#define SWF14 SWF4
15407+#define SWF15 SWF5
15408+#define SWF16 SWF6
15409+
15410+#define SWF30 0x72414
15411+#define SWF31 0x72418
15412+#define SWF32 0x7241c
15413+
15414+
15415+/*
15416+ * Palette registers
15417+ */
15418+#define PALETTE_A 0x0a000
15419+#define PALETTE_B 0x0a800
15420+
15421+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
15422+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
15423+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
15424+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
15425+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
15426+
15427+
15428+/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
15429+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
15430+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
15431+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
15432+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
15433+
15434+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
15435+ (dev)->pci_device == 0x2982 || \
15436+ (dev)->pci_device == 0x2992 || \
15437+ (dev)->pci_device == 0x29A2 || \
15438+ (dev)->pci_device == 0x2A02 || \
15439+ (dev)->pci_device == 0x2A12)
15440+
15441+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
15442+
15443+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
15444+ (dev)->pci_device == 0x29B2 || \
15445+ (dev)->pci_device == 0x29D2)
15446+
15447+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
15448+ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
15449+ IS_MRST(dev))
15450+
15451+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
15452+ IS_I945GM(dev) || IS_I965GM(dev) || \
15453+ IS_POULSBO(dev) || IS_MRST(dev))
15454+
15455+/* Cursor A & B regs */
15456+#define CURACNTR 0x70080
15457+#define CURSOR_MODE_DISABLE 0x00
15458+#define CURSOR_MODE_64_32B_AX 0x07
15459+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
15460+#define MCURSOR_GAMMA_ENABLE (1 << 26)
15461+#define CURABASE 0x70084
15462+#define CURAPOS 0x70088
15463+#define CURSOR_POS_MASK 0x007FF
15464+#define CURSOR_POS_SIGN 0x8000
15465+#define CURSOR_X_SHIFT 0
15466+#define CURSOR_Y_SHIFT 16
15467+#define CURBCNTR 0x700c0
15468+#define CURBBASE 0x700c4
15469+#define CURBPOS 0x700c8
15470+
15471+/*
15472+ * MOORESTOWN delta registers
15473+ */
15474+#define MRST_DPLL_A 0x0f014
15475+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
15476+#define MRST_FPA0 0x0f040
15477+#define MRST_FPA1 0x0f044
15478+
15479+/* #define LVDS 0x61180 */
15480+# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
15481+# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
15482+# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
15483+
15484+#define MIPI 0x61190
15485+# define MIPI_PORT_EN (1 << 31)
15486+
15487+/* #define PP_CONTROL 0x61204 */
15488+# define POWER_DOWN_ON_RESET (1 << 1)
15489+
15490+/* #define PFIT_CONTROL 0x61230 */
15491+# define PFIT_PIPE_SELECT (3 << 29)
15492+# define PFIT_PIPE_SELECT_SHIFT (29)
15493+
15494+/* #define BLC_PWM_CTL 0x61254 */
15495+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
15496+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
15497+
15498+/* #define PIPEACONF 0x70008 */
15499+#define PIPEACONF_PIPE_STATE (1<<30)
15500+/* #define DSPACNTR 0x70180 */
15501+#if 0 /*FIXME JLIU7 need to define the following */
15502+1000 = 32-bit RGBX (10:10:10:2) pixel format. Ignore alpha.
15503+1010 = BGRX (10:10:10:2) pixel format. Ignore alpha.
15504+1100 = 64-bit RGBX (16:16:16:16) 16-bit floating point pixel format.
15505+       Ignore alpha.
15506+1110 = 32-bit RGBX (8:8:8:8) pixel format. Ignore alpha.
15507+
15508+#endif /*FIXME JLIU7 need to define the following */
15509+
15510+#define MRST_DSPABASE 0x7019c
15511+
15512+/*
15513+ * MOORESTOWN reserved registers
15514+ */
15515+#if 0
15516+#define DSPAPOS 0x7018C /* reserved */
15517+#define DSPASIZE 0x70190
15518+#endif
15519+/*
15520+ * Moorestown registers.
15521+ */
15522+/*===========================================================================
15523+; General Constants
15524+;--------------------------------------------------------------------------*/
15525+#define BIT0 0x00000001
15526+#define BIT1 0x00000002
15527+#define BIT2 0x00000004
15528+#define BIT3 0x00000008
15529+#define BIT4 0x00000010
15530+#define BIT5 0x00000020
15531+#define BIT6 0x00000040
15532+#define BIT7 0x00000080
15533+#define BIT8 0x00000100
15534+#define BIT9 0x00000200
15535+#define BIT10 0x00000400
15536+#define BIT11 0x00000800
15537+#define BIT12 0x00001000
15538+#define BIT13 0x00002000
15539+#define BIT14 0x00004000
15540+#define BIT15 0x00008000
15541+#define BIT16 0x00010000
15542+#define BIT17 0x00020000
15543+#define BIT18 0x00040000
15544+#define BIT19 0x00080000
15545+#define BIT20 0x00100000
15546+#define BIT21 0x00200000
15547+#define BIT22 0x00400000
15548+#define BIT23 0x00800000
15549+#define BIT24 0x01000000
15550+#define BIT25 0x02000000
15551+#define BIT26 0x04000000
15552+#define BIT27 0x08000000
15553+#define BIT28 0x10000000
15554+#define BIT29 0x20000000
15555+#define BIT30 0x40000000
15556+#define BIT31 0x80000000
15557+/*===========================================================================
15558+; MIPI IP registers
15559+;--------------------------------------------------------------------------*/
15560+#define DEVICE_READY_REG 0xb000
15561+#define INTR_STAT_REG 0xb004
15562+#define RX_SOT_ERROR BIT0
15563+#define RX_SOT_SYNC_ERROR BIT1
15564+#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
15565+#define RX_LP_TX_SYNC_ERROR BIT4
15566+#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
15567+#define RX_FALSE_CONTROL_ERROR BIT6
15568+#define RX_ECC_SINGLE_BIT_ERROR BIT7
15569+#define RX_ECC_MULTI_BIT_ERROR BIT8
15570+#define RX_CHECKSUM_ERROR BIT9
15571+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
15572+#define RX_DSI_VC_ID_INVALID BIT11
15573+#define TX_FALSE_CONTROL_ERROR BIT12
15574+#define TX_ECC_SINGLE_BIT_ERROR BIT13
15575+#define TX_ECC_MULTI_BIT_ERROR BIT14
15576+#define TX_CHECKSUM_ERROR BIT15
15577+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
15578+#define TX_DSI_VC_ID_INVALID BIT17
15579+#define HIGH_CONTENTION BIT18
15580+#define LOW_CONTENTION BIT19
15581+#define DPI_FIFO_UNDER_RUN BIT20
15582+#define HS_TX_TIMEOUT BIT21
15583+#define LP_RX_TIMEOUT BIT22
15584+#define TURN_AROUND_ACK_TIMEOUT BIT23
15585+#define ACK_WITH_NO_ERROR BIT24
15586+#define INTR_EN_REG 0xb008
15587+#define DSI_FUNC_PRG_REG 0xb00c
15588+#define DPI_CHANNEL_NUMBER_POS 0x03
15589+#define DBI_CHANNEL_NUMBER_POS 0x05
15590+#define FMT_DPI_POS 0x07
15591+#define FMT_DBI_POS 0x0A
15592+#define DBI_DATA_WIDTH_POS 0x0D
15593+#define HS_TX_TIMEOUT_REG 0xb010
15594+#define LP_RX_TIMEOUT_REG 0xb014
15595+#define TURN_AROUND_TIMEOUT_REG 0xb018
15596+#define DEVICE_RESET_REG 0xb01C
15597+#define DPI_RESOLUTION_REG 0xb020
15598+#define RES_V_POS 0x10
15599+#define DBI_RESOLUTION_REG 0xb024
15600+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
15601+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
15602+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
15603+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
15604+#define VERT_SYNC_PAD_COUNT_REG 0xb038
15605+#define VERT_BACK_PORCH_COUNT_REG 0xb03c
15606+#define VERT_FRONT_PORCH_COUNT_REG 0xb040
15607+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
15608+#define DPI_CONTROL_REG 0xb048
15609+#define DPI_SHUT_DOWN BIT0
15610+#define DPI_TURN_ON BIT1
15611+#define DPI_COLOR_MODE_ON BIT2
15612+#define DPI_COLOR_MODE_OFF BIT3
15613+#define DPI_BACK_LIGHT_ON BIT4
15614+#define DPI_BACK_LIGHT_OFF BIT5
15615+#define DPI_LP BIT6
15616+#define DPI_DATA_REG 0xb04c
15617+#define DPI_BACK_LIGHT_ON_DATA 0x07
15618+#define DPI_BACK_LIGHT_OFF_DATA 0x17
15619+#define INIT_COUNT_REG 0xb050
15620+#define MAX_RET_PAK_REG 0xb054
15621+#define VIDEO_FMT_REG 0xb058
15622+#define EOT_DISABLE_REG 0xb05c
15623+#define LP_BYTECLK_REG 0xb060
15624+#define LP_GEN_DATA_REG 0xb064
15625+#define HS_GEN_DATA_REG 0xb068
15626+#define LP_GEN_CTRL_REG 0xb06C
15627+#define HS_GEN_CTRL_REG 0xb070
15628+#define GEN_FIFO_STAT_REG 0xb074
15629+#define HS_DATA_FIFO_FULL BIT0
15630+#define HS_DATA_FIFO_HALF_EMPTY BIT1
15631+#define HS_DATA_FIFO_EMPTY BIT2
15632+#define LP_DATA_FIFO_FULL BIT8
15633+#define LP_DATA_FIFO_HALF_EMPTY BIT9
15634+#define LP_DATA_FIFO_EMPTY BIT10
15635+#define HS_CTRL_FIFO_FULL BIT16
15636+#define HS_CTRL_FIFO_HALF_EMPTY BIT17
15637+#define HS_CTRL_FIFO_EMPTY BIT18
15638+#define LP_CTRL_FIFO_FULL BIT24
15639+#define LP_CTRL_FIFO_HALF_EMPTY BIT25
15640+#define LP_CTRL_FIFO_EMPTY BIT26
15641+/*===========================================================================
15642+; MIPI Adapter registers
15643+;--------------------------------------------------------------------------*/
15644+#define MIPI_CONTROL_REG 0xb104
15645+#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
15646+#define MIPI_DATA_ADDRESS_REG 0xb108
15647+#define MIPI_DATA_LENGTH_REG 0xb10C
15648+#define MIPI_COMMAND_ADDRESS_REG 0xb110
15649+#define MIPI_COMMAND_LENGTH_REG 0xb114
15650+#define MIPI_READ_DATA_RETURN_REG0 0xb118
15651+#define MIPI_READ_DATA_RETURN_REG1 0xb11C
15652+#define MIPI_READ_DATA_RETURN_REG2 0xb120
15653+#define MIPI_READ_DATA_RETURN_REG3 0xb124
15654+#define MIPI_READ_DATA_RETURN_REG4 0xb128
15655+#define MIPI_READ_DATA_RETURN_REG5 0xb12C
15656+#define MIPI_READ_DATA_RETURN_REG6 0xb130
15657+#define MIPI_READ_DATA_RETURN_REG7 0xb134
15658+#define MIPI_READ_DATA_VALID_REG 0xb138
15659+/* DBI COMMANDS */
15660+#define soft_reset 0x01
15661+/* ************************************************************************* *\
15662+The display module performs a software reset.
15663+Registers are written with their SW Reset default values.
15664+\* ************************************************************************* */
15665+#define get_power_mode 0x0a
15666+/* ************************************************************************* *\
15667+The display module returns the current power mode
15668+\* ************************************************************************* */
15669+#define get_address_mode 0x0b
15670+/* ************************************************************************* *\
15671+The display module returns the current status.
15672+\* ************************************************************************* */
15673+#define get_pixel_format 0x0c
15674+/* ************************************************************************* *\
15675+This command gets the pixel format for the RGB image data
15676+used by the interface.
15677+\* ************************************************************************* */
15678+#define get_display_mode 0x0d
15679+/* ************************************************************************* *\
15680+The display module returns the Display Image Mode status.
15681+\* ************************************************************************* */
15682+#define get_signal_mode 0x0e
15683+/* ************************************************************************* *\
15684+The display module returns the Display Signal Mode.
15685+\* ************************************************************************* */
15686+#define get_diagnostic_result 0x0f
15687+/* ************************************************************************* *\
15688+The display module returns the self-diagnostic results following
15689+a Sleep Out command.
15690+\* ************************************************************************* */
15691+#define enter_sleep_mode 0x10
15692+/* ************************************************************************* *\
15693+This command causes the display module to enter the Sleep mode.
15694+In this mode, all unnecessary blocks inside the display module are disabled
15695+except interface communication. This is the lowest power mode
15696+the display module supports.
15697+\* ************************************************************************* */
15698+#define exit_sleep_mode 0x11
15699+/* ************************************************************************* *\
15700+This command causes the display module to exit Sleep mode.
15701+All blocks inside the display module are enabled.
15702+\* ************************************************************************* */
15703+#define enter_partial_mode 0x12
15704+/* ************************************************************************* *\
15705+This command causes the display module to enter the Partial Display Mode.
15706+The Partial Display Mode window is described by the set_partial_area command.
15707+\* ************************************************************************* */
15708+#define enter_normal_mode 0x13
15709+/* ************************************************************************* *\
15710+This command causes the display module to enter the Normal mode.
15711+Normal Mode is defined as Partial Display mode and Scroll mode both off.
15712+\* ************************************************************************* */
15713+#define exit_invert_mode 0x20
15714+/* ************************************************************************* *\
15715+This command causes the display module to stop inverting the image data on
15716+the display device. The frame memory contents remain unchanged.
15717+No status bits are changed.
15718+\* ************************************************************************* */
15719+#define enter_invert_mode 0x21
15720+/* ************************************************************************* *\
15721+This command causes the display module to invert the image data only on
15722+the display device. The frame memory contents remain unchanged.
15723+No status bits are changed.
15724+\* ************************************************************************* */
15725+#define set_gamma_curve 0x26
15726+/* ************************************************************************* *\
15727+This command selects the desired gamma curve for the display device.
15728+Four fixed gamma curves are defined in the DCS specification.
15729+\* ************************************************************************* */
15730+#define set_display_off 0x28
15731+/* ************************************************************************* *\
15732+This command causes the display module to stop displaying the image data
15733+on the display device. The frame memory contents remain unchanged.
15734+No status bits are changed.
15735+\* ************************************************************************* */
15736+#define set_display_on 0x29
15737+/* ************************************************************************* *\
15738+This command causes the display module to start displaying the image data
15739+on the display device. The frame memory contents remain unchanged.
15740+No status bits are changed.
15741+\* ************************************************************************* */
15742+#define set_column_address 0x2a
15743+/* ************************************************************************* *\
15744+This command defines the column extent of the frame memory accessed by the
15745+host processor with the read_memory_continue and write_memory_continue commands.
15746+No status bits are changed.
15747+\* ************************************************************************* */
15748+#define set_page_address 0x2b
15749+/* ************************************************************************* *\
15750+This command defines the page extent of the frame memory accessed by the host
15751+processor with the write_memory_continue and read_memory_continue command.
15752+No status bits are changed.
15753+\* ************************************************************************* */
15754+#define write_mem_start 0x2c
15755+/* ************************************************************************* *\
15756+This command transfers image data from the host processor to the display
15757+module's frame memory, starting at the pixel location specified by
15758+preceding set_column_address and set_page_address commands.
15759+\* ************************************************************************* */
15760+#define set_partial_area 0x30
15761+/* ************************************************************************* *\
15762+This command defines the Partial Display mode's display area.
15763+There are two parameters associated with
15764+this command, the first defines the Start Row (SR) and the second the End Row
15765+(ER). SR and ER refer to the Frame Memory Line Pointer.
15766+\* ************************************************************************* */
15767+#define set_scroll_area 0x33
15768+/* ************************************************************************* *\
15769+This command defines the display module's Vertical Scrolling Area.
15770+\* ************************************************************************* */
15771+#define set_tear_off 0x34
15772+/* ************************************************************************* *\
15773+This command turns off the display module's Tearing Effect output signal on
15774+the TE signal line.
15775+\* ************************************************************************* */
15776+#define set_tear_on 0x35
15777+/* ************************************************************************* *\
15778+This command turns on the display module's Tearing Effect output signal
15779+on the TE signal line.
15780+\* ************************************************************************* */
15781+#define set_address_mode 0x36
15782+/* ************************************************************************* *\
15783+This command sets the data order for transfers from the host processor to
15784+the display module's frame memory, bits B[7:5] and B3, and from the display
15785+module's frame memory to the display device, bits B[2:0] and B4.
15786+\* ************************************************************************* */
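+/* Editorial sketch (not part of the original driver): illustrative masks
+ * for the two halves of the address-mode byte described above. The macro
+ * names are hypothetical.
+ */
+#define ADDR_MODE_HOST_TO_MEM_BITS	0xe8	/* B[7:5] | B3 */
+#define ADDR_MODE_MEM_TO_DISPLAY_BITS	0x17	/* B[2:0] | B4 */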
15787+#define set_scroll_start 0x37
15788+/* ************************************************************************* *\
15789+This command sets the start of the vertical scrolling area in the frame memory.
15790+The vertical scrolling area is fully defined when this command is used with
15791+the set_scroll_area command. The set_scroll_start command has one parameter,
15792+the Vertical Scroll Pointer. The VSP defines the line in the frame memory
15793+that is written to the display device as the first line of the vertical
15794+scroll area.
15795+\* ************************************************************************* */
15796+#define exit_idle_mode 0x38
15797+/* ************************************************************************* *\
15798+This command causes the display module to exit Idle mode.
15799+\* ************************************************************************* */
15800+#define enter_idle_mode 0x39
15801+/* ************************************************************************* *\
15802+This command causes the display module to enter Idle Mode.
15803+In Idle Mode, color expression is reduced. Colors are shown on the display
15804+device using the MSB of each of the R, G and B color components in the frame
15805+memory.
15806+\* ************************************************************************* */
15807+#define set_pixel_format 0x3a
15808+/* ************************************************************************* *\
15809+This command sets the pixel format for the RGB image data used by the interface.
15810+Bits D[6:4] DPI Pixel Format Definition
15811+Bits D[2:0] DBI Pixel Format Definition
15812+Bits D7 and D3 are not used.
15813+\* ************************************************************************* */
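+/* Editorial sketch: packing the single set_pixel_format parameter from the
+ * DPI and DBI fields described above. Bits D[6:4] carry the DPI format,
+ * D[2:0] the DBI format; D7 and D3 stay clear. The helper name is
+ * hypothetical.
+ */
+static inline u8 dcs_pixel_format_param(u8 dpi_fmt, u8 dbi_fmt)
+{
+	return ((dpi_fmt & 0x7) << 4) | (dbi_fmt & 0x7);
+}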
15814+#define write_mem_cont 0x3c
15815+/* ************************************************************************* *\
15816+This command transfers image data from the host processor to the display
15817+module's frame memory continuing from the pixel location following the
15818+previous write_memory_continue or write_memory_start command.
15819+\* ************************************************************************* */
15820+#define set_tear_scanline 0x44
15821+/* ************************************************************************* *\
15822+This command turns on the display module's Tearing Effect output signal on the
15823+TE signal line when the display module reaches line N.
15824+\* ************************************************************************* */
15825+#define get_scanline 0x45
15826+/* ************************************************************************* *\
15827+The display module returns the current scanline, N, used to update the
15828+display device. The total number of scanlines on a display device is
15829+defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
15830+the first line of V Sync and is denoted as Line 0.
15831+When in Sleep Mode, the value returned by get_scanline is undefined.
15832+\* ************************************************************************* */
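+/* Worked example (illustrative figures): a 600-line panel with VSYNC = 4,
+ * VBP = 8 and VFP = 8 has 4 + 8 + 600 + 8 = 620 total scanlines, with
+ * line 0 at the start of VSYNC.
+ */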
15833+/* DCS Interface Pixel Formats */
15834+#define DCS_PIXEL_FORMAT_3BPP 0x1
15835+#define DCS_PIXEL_FORMAT_8BPP 0x2
15836+#define DCS_PIXEL_FORMAT_12BPP 0x3
15837+#define DCS_PIXEL_FORMAT_16BPP 0x5
15838+#define DCS_PIXEL_FORMAT_18BPP 0x6
15839+#define DCS_PIXEL_FORMAT_24BPP 0x7
15840+/* ONE PARAMETER READ DATA */
15841+#define addr_mode_data 0xfc
15842+#define diag_res_data 0x00
15843+#define disp_mode_data 0x23
15844+#define pxl_fmt_data 0x77
15845+#define pwr_mode_data 0x74
15846+#define sig_mode_data 0x00
15847+/* TWO PARAMETERS READ DATA */
15848+#define scanline_data1 0xff
15849+#define scanline_data2 0xff
15850+/* DPI PIXEL FORMATS */
15851+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
15852+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
15853+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
15854+ * 666 FORMAT
15855+ */
15856+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
15857+#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
15858+ * with Sync Pulse
15859+ */
15860+#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
15861+ * with Sync events
15862+ */
15863+#define BURST_MODE 0x03 /* Burst Mode */
15864+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
15865+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
15866+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
15867+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
15868+#define DBI_NOT_SUPPORTED 0x00 /* command mode
15869+ * is not supported
15870+ */
15871+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
15872+#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
15873+#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
15874+#define DBI_COMMAND_BUFFER_SIZE 0x120 /* Allocate at least
15875+ * 0x100 Byte with 32
15876+ * byte alignment
15877+ */
15878+#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
15879+ * 0x100 Byte with 32
15880+ * byte alignment
15881+ */
15882+#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
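+/* Editorial sketch: rounding an address down to the 32-byte boundary the
+ * DBI buffers above require, using ALIGNMENT_32BYTE_MASK (BIT0..BIT4 are
+ * assumed to come from the platform headers). The macro name is
+ * hypothetical.
+ */
+#define DBI_ALIGN_32(addr)	((addr) & ALIGNMENT_32BYTE_MASK)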
15883+#define SKU_83 0x01
15884+#define SKU_100 0x02
15885+#define SKU_100L 0x04
15886+#define SKU_BYPASS 0x08
15887+#if 0
15888+/* ************************************************************************* *\
15889+DSI command data structure
15890+\* ************************************************************************* */
15891+union DSI_LONG_PACKET_HEADER {
15892+ u32 DSI_longPacketHeader;
15893+ struct {
15894+ u8 dataID;
15895+ u16 wordCount;
15896+ u8 ECC;
15897+ };
15898+#if 0 /*FIXME JLIU7 */
15899+ struct {
15900+ u8 DT:6;
15901+ u8 VC:2;
15902+ };
15903+#endif /*FIXME JLIU7 */
15904+};
15905+
15906+union MIPI_ADPT_CMD_LNG_REG {
15907+	u32 commandLengthReg;
15908+ struct {
15909+ u8 command0;
15910+ u8 command1;
15911+ u8 command2;
15912+ u8 command3;
15913+ };
15914+};
15915+
15916+struct SET_COLUMN_ADDRESS_DATA {
15917+ u8 command;
15918+ u16 SC; /* Start Column */
15919+ u16 EC; /* End Column */
15920+};
15921+
15922+struct SET_PAGE_ADDRESS_DATA {
15923+ u8 command;
15924+ u16 SP; /* Start Page */
15925+ u16 EP; /* End Page */
15926+};
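+/* Editorial usage sketch for the compiled-out packet above: programming a
+ * column window [x0, x1]. Wire byte order is panel-specific and not shown;
+ * the helper name is hypothetical.
+ */
+static inline void set_column_window(struct SET_COLUMN_ADDRESS_DATA *p,
+				     u16 x0, u16 x1)
+{
+	p->command = set_column_address;	/* DCS opcode 0x2a */
+	p->SC = x0;				/* Start Column */
+	p->EC = x1;				/* End Column */
+}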
15927+#endif
15928diff -uNr a/drivers/gpu/drm/psb/psb_intel_sdvo.c b/drivers/gpu/drm/psb/psb_intel_sdvo.c
15929--- a/drivers/gpu/drm/psb/psb_intel_sdvo.c 1969-12-31 16:00:00.000000000 -0800
15930+++ b/drivers/gpu/drm/psb/psb_intel_sdvo.c 2009-04-07 13:28:38.000000000 -0700
15931@@ -0,0 +1,1232 @@
15932+/*
15933+ * Copyright © 2006-2007 Intel Corporation
15934+ *
15935+ * Permission is hereby granted, free of charge, to any person obtaining a
15936+ * copy of this software and associated documentation files (the "Software"),
15937+ * to deal in the Software without restriction, including without limitation
15938+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15939+ * and/or sell copies of the Software, and to permit persons to whom the
15940+ * Software is furnished to do so, subject to the following conditions:
15941+ *
15942+ * The above copyright notice and this permission notice (including the next
15943+ * paragraph) shall be included in all copies or substantial portions of the
15944+ * Software.
15945+ *
15946+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15947+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15948+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15949+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
15950+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
15951+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
15952+ * DEALINGS IN THE SOFTWARE.
15953+ *
15954+ * Authors:
15955+ * Eric Anholt <eric@anholt.net>
15956+ */
15957+/*
15958+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
15959+ * Jesse Barnes <jesse.barnes@intel.com>
15960+ */
15961+
15962+#include <linux/i2c.h>
15963+#include <linux/delay.h>
15964+#include <drm/drm_crtc.h>
15965+#include "psb_intel_sdvo_regs.h"
15966+
15967+struct psb_intel_sdvo_priv {
15968+ struct psb_intel_i2c_chan *i2c_bus;
15969+ int slaveaddr;
15970+ int output_device;
15971+
15972+ u16 active_outputs;
15973+
15974+ struct psb_intel_sdvo_caps caps;
15975+ int pixel_clock_min, pixel_clock_max;
15976+
15977+ int save_sdvo_mult;
15978+ u16 save_active_outputs;
15979+ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
15980+ struct psb_intel_sdvo_dtd save_output_dtd[16];
15981+ u32 save_SDVOX;
15982+};
15983+
15984+/**
15985+ * Writes the SDVOB or SDVOC with the given value, but always writes both
15986+ * SDVOB and SDVOC to work around apparent hardware issues (according to
15987+ * comments in the BIOS).
15988+ */
15989+void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output, u32 val)
15990+{
15991+ struct drm_device *dev = psb_intel_output->base.dev;
15992+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
15993+ u32 bval = val, cval = val;
15994+ int i;
15995+
15996+ if (sdvo_priv->output_device == SDVOB)
15997+ cval = REG_READ(SDVOC);
15998+ else
15999+ bval = REG_READ(SDVOB);
16000+ /*
16001+ * Write the registers twice for luck. Sometimes,
16002+ * writing them only once doesn't appear to 'stick'.
16003+ * The BIOS does this too. Yay, magic
16004+ */
16005+ for (i = 0; i < 2; i++) {
16006+ REG_WRITE(SDVOB, bval);
16007+ REG_READ(SDVOB);
16008+ REG_WRITE(SDVOC, cval);
16009+ REG_READ(SDVOC);
16010+ }
16011+}
16012+
16013+static bool psb_intel_sdvo_read_byte(struct psb_intel_output *psb_intel_output,
16014+ u8 addr, u8 *ch)
16015+{
16016+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16017+ u8 out_buf[2];
16018+ u8 buf[2];
16019+ int ret;
16020+
16021+ struct i2c_msg msgs[] = {
16022+ {
16023+ .addr = sdvo_priv->i2c_bus->slave_addr,
16024+ .flags = 0,
16025+ .len = 1,
16026+ .buf = out_buf,
16027+ },
16028+ {
16029+ .addr = sdvo_priv->i2c_bus->slave_addr,
16030+ .flags = I2C_M_RD,
16031+ .len = 1,
16032+ .buf = buf,
16033+ }
16034+ };
16035+
16036+ out_buf[0] = addr;
16037+ out_buf[1] = 0;
16038+
16039+ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
16040+ if (ret == 2) {
16041+ /* DRM_DEBUG("got back from addr %02X = %02x\n",
16042+ * out_buf[0], buf[0]);
16043+ */
16044+ *ch = buf[0];
16045+ return true;
16046+ }
16047+
16048+ DRM_DEBUG("i2c transfer returned %d\n", ret);
16049+ return false;
16050+}
16051+
16052+static bool psb_intel_sdvo_write_byte(struct psb_intel_output *psb_intel_output,
16053+ int addr, u8 ch)
16054+{
16055+ u8 out_buf[2];
16056+ struct i2c_msg msgs[] = {
16057+ {
16058+ .addr = psb_intel_output->i2c_bus->slave_addr,
16059+ .flags = 0,
16060+ .len = 2,
16061+ .buf = out_buf,
16062+ }
16063+ };
16064+
16065+ out_buf[0] = addr;
16066+ out_buf[1] = ch;
16067+
16068+ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
16069+ return true;
16070+ return false;
16071+}
16072+
16073+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
16074+/** Mapping of command numbers to names, for debug output */
16075+static const struct _sdvo_cmd_name {
16076+ u8 cmd;
16077+ char *name;
16078+} sdvo_cmd_names[] = {
16079+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
16080+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
16081+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
16082+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
16083+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
16084+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
16085+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
16086+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
16087+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
16088+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
16089+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
16090+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
16091+ SDVO_CMD_NAME_ENTRY
16092+ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
16093+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
16094+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
16095+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
16096+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
16097+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
16098+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
16100+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
16101+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
16102+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
16103+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
16104+ SDVO_CMD_NAME_ENTRY
16105+ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
16106+ SDVO_CMD_NAME_ENTRY
16107+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
16108+ SDVO_CMD_NAME_ENTRY
16109+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
16110+ SDVO_CMD_NAME_ENTRY
16111+ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
16112+ SDVO_CMD_NAME_ENTRY
16113+ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
16114+ SDVO_CMD_NAME_ENTRY
16115+ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
16116+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
16117+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
16118+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
16119+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
16120+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
16121+ SDVO_CMD_NAME_ENTRY
16122+ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
16123+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
16124+
16125+#define SDVO_NAME(dev_priv) \
16126+ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
16127+#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
16128+
16129+static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd,
16130+ void *args, int args_len)
16131+{
16132+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16133+ int i;
16134+
16135+ if (1) {
16136+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
16137+ for (i = 0; i < args_len; i++)
16138+ printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
16139+ for (; i < 8; i++)
16140+ printk(" ");
16141+		for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
16142+			if (cmd == sdvo_cmd_names[i].cmd) {
16143+				printk("(%s)", sdvo_cmd_names[i].name);
16144+				break;
16145+			}
16146+		}
16147+		if (i == ARRAY_SIZE(sdvo_cmd_names))
16148+			printk("(%02X)", cmd);
16153+ printk("\n");
16154+ }
16155+
16156+ for (i = 0; i < args_len; i++) {
16157+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i,
16158+ ((u8 *) args)[i]);
16159+ }
16160+
16161+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
16162+}
16163+
16164+static const char *cmd_status_names[] = {
16165+ "Power on",
16166+ "Success",
16167+ "Not supported",
16168+ "Invalid arg",
16169+ "Pending",
16170+ "Target not specified",
16171+ "Scaling not supported"
16172+};
16173+
16174+static u8 psb_intel_sdvo_read_response(struct psb_intel_output *psb_intel_output,
16175+ void *response, int response_len)
16176+{
16177+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16178+ int i;
16179+ u8 status;
16180+ u8 retry = 50;
16181+
16182+ while (retry--) {
16183+ /* Read the command response */
16184+ for (i = 0; i < response_len; i++) {
16185+ psb_intel_sdvo_read_byte(psb_intel_output,
16186+ SDVO_I2C_RETURN_0 + i,
16187+ &((u8 *) response)[i]);
16188+ }
16189+
16190+ /* read the return status */
16191+ psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS,
16192+ &status);
16193+
16194+ if (1) {
16195+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
16196+ for (i = 0; i < response_len; i++)
16197+ printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
16198+ for (; i < 8; i++)
16199+ printk(" ");
16200+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
16201+ printk(KERN_INFO"(%s)",
16202+ cmd_status_names[status]);
16203+ else
16204+ printk(KERN_INFO"(??? %d)", status);
16205+ printk("\n");
16206+ }
16207+
16208+ if (status != SDVO_CMD_STATUS_PENDING)
16209+ return status;
16210+
16211+ mdelay(50);
16212+ }
16213+
16214+ return status;
16215+}
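+/* Editorial note: the typical round trip used throughout this file is a
+ * write_cmd followed by a read_response poll, e.g.:
+ *
+ *	psb_intel_sdvo_write_cmd(output, SDVO_CMD_GET_CLOCK_RATE_MULT,
+ *				 NULL, 0);
+ *	status = psb_intel_sdvo_read_response(output, &response, 1);
+ */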
16216+
16217+int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
16218+{
16219+ if (mode->clock >= 100000)
16220+ return 1;
16221+ else if (mode->clock >= 50000)
16222+ return 2;
16223+ else
16224+ return 4;
16225+}
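+/* Example: a 40 MHz dot clock is below both thresholds, so the multiplier
+ * is 4 and mode_fixup below scales the adjusted clock to 160 MHz, keeping
+ * the SDVO link clock in its supported range.
+ */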
16226+
16227+/**
16228+ * Don't check status code from this as it switches the bus back to the
16229+ * SDVO chips which defeats the purpose of doing a bus switch in the first
16230+ * place.
16231+ */
16232+void psb_intel_sdvo_set_control_bus_switch(struct psb_intel_output *psb_intel_output,
16233+ u8 target)
16234+{
16235+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
16236+ &target, 1);
16237+}
16238+
16239+static bool psb_intel_sdvo_set_target_input(struct psb_intel_output *psb_intel_output,
16240+ bool target_0, bool target_1)
16241+{
16242+ struct psb_intel_sdvo_set_target_input_args targets = { 0 };
16243+ u8 status;
16244+
16245+ if (target_0 && target_1)
16246+		return false;	/* both inputs cannot be targeted at once */
16247+
16248+ if (target_1)
16249+ targets.target_1 = 1;
16250+
16251+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
16252+ &targets, sizeof(targets));
16253+
16254+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16255+
16256+ return status == SDVO_CMD_STATUS_SUCCESS;
16257+}
16258+
16259+/**
16260+ * Return whether each input is trained.
16261+ *
16262+ * This function is making an assumption about the layout of the response,
16263+ * which should be checked against the docs.
16264+ */
16265+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
16266+ *psb_intel_output, bool *input_1,
16267+ bool *input_2)
16268+{
16269+ struct psb_intel_sdvo_get_trained_inputs_response response;
16270+ u8 status;
16271+
16272+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
16273+ NULL, 0);
16274+ status =
16275+ psb_intel_sdvo_read_response(psb_intel_output, &response,
16276+ sizeof(response));
16277+ if (status != SDVO_CMD_STATUS_SUCCESS)
16278+ return false;
16279+
16280+ *input_1 = response.input0_trained;
16281+ *input_2 = response.input1_trained;
16282+ return true;
16283+}
16284+
16285+static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
16286+ *psb_intel_output, u16 *outputs)
16287+{
16288+ u8 status;
16289+
16290+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
16291+ NULL, 0);
16292+ status =
16293+ psb_intel_sdvo_read_response(psb_intel_output, outputs,
16294+ sizeof(*outputs));
16295+
16296+ return status == SDVO_CMD_STATUS_SUCCESS;
16297+}
16298+
16299+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
16300+ *psb_intel_output, u16 outputs)
16301+{
16302+ u8 status;
16303+
16304+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
16305+ &outputs, sizeof(outputs));
16306+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16307+ return status == SDVO_CMD_STATUS_SUCCESS;
16308+}
16309+
16310+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
16311+ *psb_intel_output, int mode)
16312+{
16313+ u8 status, state = SDVO_ENCODER_STATE_ON;
16314+
16315+ switch (mode) {
16316+ case DRM_MODE_DPMS_ON:
16317+ state = SDVO_ENCODER_STATE_ON;
16318+ break;
16319+ case DRM_MODE_DPMS_STANDBY:
16320+ state = SDVO_ENCODER_STATE_STANDBY;
16321+ break;
16322+ case DRM_MODE_DPMS_SUSPEND:
16323+ state = SDVO_ENCODER_STATE_SUSPEND;
16324+ break;
16325+ case DRM_MODE_DPMS_OFF:
16326+ state = SDVO_ENCODER_STATE_OFF;
16327+ break;
16328+ }
16329+
16330+ psb_intel_sdvo_write_cmd(psb_intel_output,
16331+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
16332+ sizeof(state));
16333+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16334+
16335+ return status == SDVO_CMD_STATUS_SUCCESS;
16336+}
16337+
16338+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
16339+ *psb_intel_output,
16340+ int *clock_min,
16341+ int *clock_max)
16342+{
16343+ struct psb_intel_sdvo_pixel_clock_range clocks;
16344+ u8 status;
16345+
16346+ psb_intel_sdvo_write_cmd(psb_intel_output,
16347+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
16348+ 0);
16349+
16350+ status =
16351+ psb_intel_sdvo_read_response(psb_intel_output, &clocks,
16352+ sizeof(clocks));
16353+
16354+ if (status != SDVO_CMD_STATUS_SUCCESS)
16355+ return false;
16356+
16357+ /* Convert the values from units of 10 kHz to kHz. */
16358+ *clock_min = clocks.min * 10;
16359+ *clock_max = clocks.max * 10;
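+	/* Example: a device reporting min = 2500 supports 25,000 kHz (25 MHz). */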
16360+
16361+ return true;
16362+}
16363+
16364+static bool psb_intel_sdvo_set_target_output(struct psb_intel_output *psb_intel_output,
16365+ u16 outputs)
16366+{
16367+ u8 status;
16368+
16369+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
16370+ &outputs, sizeof(outputs));
16371+
16372+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16373+ return status == SDVO_CMD_STATUS_SUCCESS;
16374+}
16375+
16376+static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
16377+ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
16378+{
16379+ u8 status;
16380+
16381+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
16382+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
16383+ sizeof(dtd->part1));
16384+ if (status != SDVO_CMD_STATUS_SUCCESS)
16385+ return false;
16386+
16387+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
16388+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
16389+ sizeof(dtd->part2));
16390+ if (status != SDVO_CMD_STATUS_SUCCESS)
16391+ return false;
16392+
16393+ return true;
16394+}
16395+
16396+static bool psb_intel_sdvo_get_input_timing(struct psb_intel_output *psb_intel_output,
16397+ struct psb_intel_sdvo_dtd *dtd)
16398+{
16399+ return psb_intel_sdvo_get_timing(psb_intel_output,
16400+ SDVO_CMD_GET_INPUT_TIMINGS_PART1,
16401+ dtd);
16402+}
16403+
16404+static bool psb_intel_sdvo_get_output_timing(struct psb_intel_output *psb_intel_output,
16405+ struct psb_intel_sdvo_dtd *dtd)
16406+{
16407+ return psb_intel_sdvo_get_timing(psb_intel_output,
16408+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1,
16409+ dtd);
16410+}
16411+
16412+static bool psb_intel_sdvo_set_timing(struct psb_intel_output *psb_intel_output,
16413+ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
16414+{
16415+ u8 status;
16416+
16417+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
16418+ sizeof(dtd->part1));
16419+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16420+ if (status != SDVO_CMD_STATUS_SUCCESS)
16421+ return false;
16422+
16423+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
16424+ sizeof(dtd->part2));
16425+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16426+ if (status != SDVO_CMD_STATUS_SUCCESS)
16427+ return false;
16428+
16429+ return true;
16430+}
16431+
16432+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_output *psb_intel_output,
16433+ struct psb_intel_sdvo_dtd *dtd)
16434+{
16435+ return psb_intel_sdvo_set_timing(psb_intel_output,
16436+ SDVO_CMD_SET_INPUT_TIMINGS_PART1,
16437+ dtd);
16438+}
16439+
16440+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_output *psb_intel_output,
16441+ struct psb_intel_sdvo_dtd *dtd)
16442+{
16443+ return psb_intel_sdvo_set_timing(psb_intel_output,
16444+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
16445+ dtd);
16446+}
16447+
16448+#if 0
16449+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output
16450+ *psb_intel_output,
16451+ struct psb_intel_sdvo_dtd
16452+ *dtd)
16453+{
16454+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16455+ u8 status;
16456+
16457+ psb_intel_sdvo_write_cmd(psb_intel_output,
16458+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
16459+ NULL, 0);
16460+
16461+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
16462+ sizeof(dtd->part1));
16463+ if (status != SDVO_CMD_STATUS_SUCCESS)
16464+ return false;
16465+
16466+ psb_intel_sdvo_write_cmd(psb_intel_output,
16467+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
16468+ NULL, 0);
16469+ status =
16470+ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
16471+ sizeof(dtd->part2));
16472+ if (status != SDVO_CMD_STATUS_SUCCESS)
16473+ return false;
16474+
16475+ return true;
16476+}
16477+#endif
16478+
16479+static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
16480+ *psb_intel_output)
16481+{
16482+ u8 response, status;
16483+
16484+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT,
16485+ NULL, 0);
16486+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
16487+
16488+ if (status != SDVO_CMD_STATUS_SUCCESS) {
16489+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
16490+ return SDVO_CLOCK_RATE_MULT_1X;
16491+ } else {
16492+ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
16493+ }
16494+
16495+ return response;
16496+}
16497+
16498+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
16499+ *psb_intel_output, u8 val)
16500+{
16501+ u8 status;
16502+
16503+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT,
16504+ &val, 1);
16505+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
16506+ if (status != SDVO_CMD_STATUS_SUCCESS)
16507+ return false;
16508+
16509+ return true;
16510+}
16511+
16512+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
16513+ struct drm_display_mode *mode,
16514+ struct drm_display_mode *adjusted_mode)
16515+{
16516+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
16517+ * device will be told of the multiplier during mode_set.
16518+ */
16519+ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
16520+ return true;
16521+}
16522+
16523+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
16524+ struct drm_display_mode *mode,
16525+ struct drm_display_mode *adjusted_mode)
16526+{
16527+ struct drm_device *dev = encoder->dev;
16528+ struct drm_crtc *crtc = encoder->crtc;
16529+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
16530+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
16531+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16532+ u16 width, height;
16533+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
16534+ u16 h_sync_offset, v_sync_offset;
16535+ u32 sdvox;
16536+ struct psb_intel_sdvo_dtd output_dtd;
16537+ int sdvo_pixel_multiply;
16538+
16539+ if (!mode)
16540+ return;
16541+
16542+ width = mode->crtc_hdisplay;
16543+ height = mode->crtc_vdisplay;
16544+
16545+ /* do some mode translations */
16546+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
16547+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
16548+
16549+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
16550+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
16551+
16552+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
16553+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
16554+
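+	/* Pack the mode into the SDVO DTD layout (see psb_intel_sdvo_regs.h):
+	 * part1 holds the low 8 bits of each timing field, with h_high/v_high
+	 * carrying the upper 4 bits of the active and blanking values; part2
+	 * splits the sync offsets and widths the same way.
+	 */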
16555+ output_dtd.part1.clock = mode->clock / 10;
16556+ output_dtd.part1.h_active = width & 0xff;
16557+ output_dtd.part1.h_blank = h_blank_len & 0xff;
16558+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
16559+ ((h_blank_len >> 8) & 0xf);
16560+ output_dtd.part1.v_active = height & 0xff;
16561+ output_dtd.part1.v_blank = v_blank_len & 0xff;
16562+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
16563+ ((v_blank_len >> 8) & 0xf);
16564+
16565+ output_dtd.part2.h_sync_off = h_sync_offset;
16566+ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
16567+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
16568+ (v_sync_len & 0xf);
16569+ output_dtd.part2.sync_off_width_high =
16570+ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
16571+ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
16572+
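+	/* Editorial note: 0x18 marks a digital separate-sync DTD (mirroring
+	 * the EDID DTD flag byte); bits 1 and 2 then encode positive
+	 * hsync/vsync polarity respectively.
+	 */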
16573+ output_dtd.part2.dtd_flags = 0x18;
16574+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
16575+ output_dtd.part2.dtd_flags |= 0x2;
16576+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
16577+ output_dtd.part2.dtd_flags |= 0x4;
16578+
16579+ output_dtd.part2.sdvo_flags = 0;
16580+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
16581+ output_dtd.part2.reserved = 0;
16582+
16583+ /* Set the output timing to the screen */
16584+ psb_intel_sdvo_set_target_output(psb_intel_output,
16585+ sdvo_priv->active_outputs);
16586+ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
16587+
16588+ /* Set the input timing to the screen. Assume always input 0. */
16589+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
16590+
16591+	/* We would like to use psb_intel_sdvo_create_preferred_input_timing() to
16592+ * provide the device with a timing it can support, if it supports that
16593+ * feature. However, presumably we would need to adjust the CRTC to
16594+ * output the preferred timing, and we don't support that currently.
16595+ */
16596+#if 0
16597+ success =
16598+ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output, clock,
16599+ width, height);
16600+ if (success) {
16601+ struct psb_intel_sdvo_dtd *input_dtd;
16602+
16603+ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
16604+ &input_dtd);
16605+ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
16606+ }
16607+#else
16608+ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
16609+#endif
16610+
16611+ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
16612+ case 1:
16613+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
16614+ SDVO_CLOCK_RATE_MULT_1X);
16615+ break;
16616+ case 2:
16617+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
16618+ SDVO_CLOCK_RATE_MULT_2X);
16619+ break;
16620+ case 4:
16621+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
16622+ SDVO_CLOCK_RATE_MULT_4X);
16623+ break;
16624+ }
16625+
16626+ /* Set the SDVO control regs. */
16627+ if (0 /*IS_I965GM(dev) */) {
16628+ sdvox = SDVO_BORDER_ENABLE;
16629+ } else {
16630+ sdvox = REG_READ(sdvo_priv->output_device);
16631+ switch (sdvo_priv->output_device) {
16632+ case SDVOB:
16633+ sdvox &= SDVOB_PRESERVE_MASK;
16634+ break;
16635+ case SDVOC:
16636+ sdvox &= SDVOC_PRESERVE_MASK;
16637+ break;
16638+ }
16639+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
16640+ }
16641+ if (psb_intel_crtc->pipe == 1)
16642+ sdvox |= SDVO_PIPE_B_SELECT;
16643+
16644+ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
16645+ if (IS_I965G(dev)) {
16646+ /* done in crtc_mode_set as the dpll_md reg must be written
16647+ * early */
16648+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
16649+ /* done in crtc_mode_set as it lives inside the
16650+ * dpll register */
16651+ } else {
16652+ sdvox |=
16653+ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
16654+ }
16655+
16656+ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
16657+}
16658+
16659+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
16660+{
16661+ struct drm_device *dev = encoder->dev;
16662+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
16663+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16664+ u32 temp;
16665+
16666+ if (mode != DRM_MODE_DPMS_ON) {
16667+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
16668+ if (0)
16669+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
16670+ mode);
16671+
16672+ if (mode == DRM_MODE_DPMS_OFF) {
16673+ temp = REG_READ(sdvo_priv->output_device);
16674+ if ((temp & SDVO_ENABLE) != 0) {
16675+ psb_intel_sdvo_write_sdvox(psb_intel_output,
16676+ temp &
16677+ ~SDVO_ENABLE);
16678+ }
16679+ }
16680+ } else {
16681+ bool input1, input2;
16682+ int i;
16683+ u8 status;
16684+
16685+ temp = REG_READ(sdvo_priv->output_device);
16686+ if ((temp & SDVO_ENABLE) == 0)
16687+ psb_intel_sdvo_write_sdvox(psb_intel_output,
16688+ temp | SDVO_ENABLE);
16689+ for (i = 0; i < 2; i++)
16690+ psb_intel_wait_for_vblank(dev);
16691+
16692+ status =
16693+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
16694+ &input2);
16695+
16696+
16697+ /* Warn if the device reported failure to sync.
16698+ * A lot of SDVO devices fail to notify of sync, but it's
16699+	 * a given that if the status is a success, we succeeded.
16700+ */
16701+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
16702+ DRM_DEBUG
16703+ ("First %s output reported failure to sync\n",
16704+ SDVO_NAME(sdvo_priv));
16705+ }
16706+
16707+ if (0)
16708+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
16709+ mode);
16710+ psb_intel_sdvo_set_active_outputs(psb_intel_output,
16711+ sdvo_priv->active_outputs);
16712+ }
16713+ return;
16714+}
16715+
16716+static void psb_intel_sdvo_save(struct drm_connector *connector)
16717+{
16718+ struct drm_device *dev = connector->dev;
16719+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16720+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16721+ int o;
16722+
16723+ sdvo_priv->save_sdvo_mult =
16724+ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
16725+ psb_intel_sdvo_get_active_outputs(psb_intel_output,
16726+ &sdvo_priv->save_active_outputs);
16727+
16728+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
16729+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
16730+ psb_intel_sdvo_get_input_timing(psb_intel_output,
16731+ &sdvo_priv->save_input_dtd_1);
16732+ }
16733+
16734+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
16735+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
16736+ psb_intel_sdvo_get_input_timing(psb_intel_output,
16737+ &sdvo_priv->save_input_dtd_2);
16738+ }
16739+
16740+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
16741+ u16 this_output = (1 << o);
16742+ if (sdvo_priv->caps.output_flags & this_output) {
16743+ psb_intel_sdvo_set_target_output(psb_intel_output,
16744+ this_output);
16745+ psb_intel_sdvo_get_output_timing(psb_intel_output,
16746+ &sdvo_priv->
16747+ save_output_dtd[o]);
16748+ }
16749+ }
16750+
16751+ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
16752+}
16753+
16754+static void psb_intel_sdvo_restore(struct drm_connector *connector)
16755+{
16756+ struct drm_device *dev = connector->dev;
16757+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16758+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16759+ int o;
16760+ int i;
16761+ bool input1, input2;
16762+ u8 status;
16763+
16764+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
16765+
16766+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
16767+ u16 this_output = (1 << o);
16768+ if (sdvo_priv->caps.output_flags & this_output) {
16769+ psb_intel_sdvo_set_target_output(psb_intel_output,
16770+ this_output);
16771+ psb_intel_sdvo_set_output_timing(psb_intel_output,
16772+ &sdvo_priv->
16773+ save_output_dtd[o]);
16774+ }
16775+ }
16776+
16777+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
16778+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
16779+ psb_intel_sdvo_set_input_timing(psb_intel_output,
16780+ &sdvo_priv->save_input_dtd_1);
16781+ }
16782+
16783+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
16784+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
16785+ psb_intel_sdvo_set_input_timing(psb_intel_output,
16786+ &sdvo_priv->save_input_dtd_2);
16787+ }
16788+
16789+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
16790+ sdvo_priv->save_sdvo_mult);
16791+
16792+ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
16793+
16794+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
16795+ for (i = 0; i < 2; i++)
16796+ psb_intel_wait_for_vblank(dev);
16797+ status =
16798+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
16799+ &input2);
16800+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
16801+ DRM_DEBUG
16802+ ("First %s output reported failure to sync\n",
16803+ SDVO_NAME(sdvo_priv));
16804+ }
16805+
16806+ psb_intel_sdvo_set_active_outputs(psb_intel_output,
16807+ sdvo_priv->save_active_outputs);
16808+}
16809+
16810+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
16811+ struct drm_display_mode *mode)
16812+{
16813+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16814+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
16815+
16816+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
16817+ return MODE_NO_DBLESCAN;
16818+
16819+ if (sdvo_priv->pixel_clock_min > mode->clock)
16820+ return MODE_CLOCK_LOW;
16821+
16822+ if (sdvo_priv->pixel_clock_max < mode->clock)
16823+ return MODE_CLOCK_HIGH;
16824+
16825+ return MODE_OK;
16826+}
16827+
16828+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_output *psb_intel_output,
16829+ struct psb_intel_sdvo_caps *caps)
16830+{
16831+ u8 status;
16832+
16833+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL,
16834+ 0);
16835+ status =
16836+ psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps));
16837+ if (status != SDVO_CMD_STATUS_SUCCESS)
16838+ return false;
16839+
16840+ return true;
16841+}
16842+
16843+struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
16844+{
16845+ struct drm_connector *connector = NULL;
16846+ struct psb_intel_output *iout = NULL;
16847+ struct psb_intel_sdvo_priv *sdvo;
16848+
16849+ /* find the sdvo connector */
16850+ list_for_each_entry(connector, &dev->mode_config.connector_list,
16851+ head) {
16852+ iout = to_psb_intel_output(connector);
16853+
16854+ if (iout->type != INTEL_OUTPUT_SDVO)
16855+ continue;
16856+
16857+ sdvo = iout->dev_priv;
16858+
16859+ if (sdvo->output_device == SDVOB && sdvoB)
16860+ return connector;
16861+
16862+ if (sdvo->output_device == SDVOC && !sdvoB)
16863+ return connector;
16864+
16865+ }
16866+
16867+ return NULL;
16868+}
16869+
16870+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
16871+{
16872+ u8 response[2];
16873+ u8 status;
16874+ struct psb_intel_output *psb_intel_output;
16875+ DRM_DEBUG("\n");
16876+
16877+ if (!connector)
16878+ return 0;
16879+
16880+ psb_intel_output = to_psb_intel_output(connector);
16881+
16882+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
16883+ NULL, 0);
16884+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16885+
16886+ if (response[0] != 0)
16887+ return 1;
16888+
16889+ return 0;
16890+}
16891+
16892+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
16893+{
16894+ u8 response[2];
16895+ u8 status;
16896+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16897+
16898+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
16899+ NULL, 0);
16900+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16901+
16902+ if (on) {
16903+ psb_intel_sdvo_write_cmd(psb_intel_output,
16904+ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
16905+ 0);
16906+ status =
16907+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16908+
16909+ psb_intel_sdvo_write_cmd(psb_intel_output,
16910+ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
16911+ &response, 2);
16912+ } else {
16913+ response[0] = 0;
16914+ response[1] = 0;
16915+ psb_intel_sdvo_write_cmd(psb_intel_output,
16916+ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
16917+ &response, 2);
16918+ }
16919+
16920+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
16921+ NULL, 0);
16922+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16923+}
16924+
16925+static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
16926+ *connector)
16927+{
16928+ u8 response[2];
16929+ u8 status;
16930+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16931+
16932+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS,
16933+ NULL, 0);
16934+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
16935+
16936+ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
16937+ if ((response[0] != 0) || (response[1] != 0))
16938+ return connector_status_connected;
16939+ else
16940+ return connector_status_disconnected;
16941+}
16942+
16943+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
16944+{
16945+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16946+
16947+ /* set the bus switch and get the modes */
16948+ psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
16949+ SDVO_CONTROL_BUS_DDC2);
16950+ psb_intel_ddc_get_modes(psb_intel_output);
16951+
16952+ if (list_empty(&connector->probed_modes))
16953+ return 0;
16954+ return 1;
16955+#if 0
16956+ /* Mac mini hack. On this device, I get DDC through the analog, which
16957+ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
16958+ * but it does load-detect as connected. So, just steal the DDC bits
16959+ * from analog when we fail at finding it the right way.
16960+ */
16961+ /* TODO */
16962+	return 0;
16965+#endif
16966+}
16967+
16968+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
16969+{
16970+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
16971+
16972+ if (psb_intel_output->i2c_bus)
16973+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
16974+ drm_sysfs_connector_remove(connector);
16975+ drm_connector_cleanup(connector);
16976+ kfree(psb_intel_output);
16977+}
16978+
16979+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
16980+ .dpms = psb_intel_sdvo_dpms,
16981+ .mode_fixup = psb_intel_sdvo_mode_fixup,
16982+ .prepare = psb_intel_encoder_prepare,
16983+ .mode_set = psb_intel_sdvo_mode_set,
16984+ .commit = psb_intel_encoder_commit,
16985+};
16986+
16987+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
16988+ .save = psb_intel_sdvo_save,
16989+ .restore = psb_intel_sdvo_restore,
16990+ .detect = psb_intel_sdvo_detect,
16991+ .fill_modes = drm_helper_probe_single_connector_modes,
16992+ .destroy = psb_intel_sdvo_destroy,
16993+};
16994+
16995+static const struct drm_connector_helper_funcs
16996+ psb_intel_sdvo_connector_helper_funcs = {
16997+ .get_modes = psb_intel_sdvo_get_modes,
16998+ .mode_valid = psb_intel_sdvo_mode_valid,
16999+ .best_encoder = psb_intel_best_encoder,
17000+};
17001+
17002+void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
17003+{
17004+ drm_encoder_cleanup(encoder);
17005+}
17006+
17007+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
17008+ .destroy = psb_intel_sdvo_enc_destroy,
17009+};
17010+
17011+
17012+void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
17013+{
17014+ struct drm_connector *connector;
17015+ struct psb_intel_output *psb_intel_output;
17016+ struct psb_intel_sdvo_priv *sdvo_priv;
17017+ struct psb_intel_i2c_chan *i2cbus = NULL;
17018+ int connector_type;
17019+ u8 ch[0x40];
17020+ int i;
17021+ int encoder_type, output_id;
17022+
17023+ psb_intel_output =
17024+	    kzalloc(sizeof(struct psb_intel_output) +
17025+		    sizeof(struct psb_intel_sdvo_priv), GFP_KERNEL);
17026+ if (!psb_intel_output)
17027+ return;
17028+
17029+ connector = &psb_intel_output->base;
17030+
17031+ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
17032+ DRM_MODE_CONNECTOR_Unknown);
17033+ drm_connector_helper_add(connector,
17034+ &psb_intel_sdvo_connector_helper_funcs);
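+	/* dev_priv lives in the same allocation, directly after the
+	 * psb_intel_output header (hence the combined size above).
+	 */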
17035+ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
17036+ psb_intel_output->type = INTEL_OUTPUT_SDVO;
17037+
17038+ connector->interlace_allowed = 0;
17039+ connector->doublescan_allowed = 0;
17040+
17041+ /* setup the DDC bus. */
17042+ if (output_device == SDVOB)
17043+ i2cbus =
17044+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
17045+ else
17046+ i2cbus =
17047+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
17048+
17049+ if (!i2cbus)
17050+ goto err_connector;
17051+
17052+ sdvo_priv->i2c_bus = i2cbus;
17053+
17054+ if (output_device == SDVOB) {
17055+ output_id = 1;
17056+ sdvo_priv->i2c_bus->slave_addr = 0x38;
17057+ } else {
17058+ output_id = 2;
17059+ sdvo_priv->i2c_bus->slave_addr = 0x39;
17060+ }
17061+
17062+ sdvo_priv->output_device = output_device;
17063+ psb_intel_output->i2c_bus = i2cbus;
17064+ psb_intel_output->dev_priv = sdvo_priv;
17065+
17066+
17067+ /* Read the regs to test if we can talk to the device */
17068+ for (i = 0; i < 0x40; i++) {
17069+ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
17070+ DRM_DEBUG("No SDVO device found on SDVO%c\n",
17071+ output_device == SDVOB ? 'B' : 'C');
17072+ goto err_i2c;
17073+ }
17074+ }
17075+
17076+ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
17077+
17078+ memset(&sdvo_priv->active_outputs, 0,
17079+ sizeof(sdvo_priv->active_outputs));
17080+
17081+ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
17082+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
17083+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
17084+ connector->display_info.subpixel_order =
17085+ SubPixelHorizontalRGB;
17086+ encoder_type = DRM_MODE_ENCODER_DAC;
17087+ connector_type = DRM_MODE_CONNECTOR_VGA;
17088+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
17089+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
17090+ connector->display_info.subpixel_order =
17091+ SubPixelHorizontalRGB;
17092+ encoder_type = DRM_MODE_ENCODER_DAC;
17093+ connector_type = DRM_MODE_CONNECTOR_VGA;
17094+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
17095+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
17096+ connector->display_info.subpixel_order =
17097+ SubPixelHorizontalRGB;
17098+ encoder_type = DRM_MODE_ENCODER_TMDS;
17099+ connector_type = DRM_MODE_CONNECTOR_DVID;
17100+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
17101+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
17102+ connector->display_info.subpixel_order =
17103+ SubPixelHorizontalRGB;
17104+ encoder_type = DRM_MODE_ENCODER_TMDS;
17105+ connector_type = DRM_MODE_CONNECTOR_DVID;
17106+ } else {
17107+ unsigned char bytes[2];
17108+
17109+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
17110+ DRM_DEBUG
17111+ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
17112+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
17113+ goto err_i2c;
17114+ }
17115+
17116+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
17117+ encoder_type);
17118+ drm_encoder_helper_add(&psb_intel_output->enc,
17119+ &psb_intel_sdvo_helper_funcs);
17120+ connector->connector_type = connector_type;
17121+
17122+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
17123+ &psb_intel_output->enc);
17124+ drm_sysfs_connector_add(connector);
17125+
17126+ /* Set the input timing to the screen. Assume always input 0. */
17127+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
17128+
17129+ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
17130+ &sdvo_priv->pixel_clock_min,
17131+ &sdvo_priv->
17132+ pixel_clock_max);
17133+
17134+
17135+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
17136+ "clock range %dMHz - %dMHz, "
17137+ "input 1: %c, input 2: %c, "
17138+ "output 1: %c, output 2: %c\n",
17139+ SDVO_NAME(sdvo_priv),
17140+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
17141+ sdvo_priv->caps.device_rev_id,
17142+ sdvo_priv->pixel_clock_min / 1000,
17143+ sdvo_priv->pixel_clock_max / 1000,
17144+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
17145+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
17146+ /* check currently supported outputs */
17147+ sdvo_priv->caps.output_flags &
17148+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
17149+ sdvo_priv->caps.output_flags &
17150+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
17151+
17152+ psb_intel_output->ddc_bus = i2cbus;
17153+
17154+ return;
17155+
17156+err_i2c:
17157+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
17158+err_connector:
17159+ drm_connector_cleanup(connector);
17160+ kfree(psb_intel_output);
17161+
17162+ return;
17163+}
17164diff -uNr a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
17165--- a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h 1969-12-31 16:00:00.000000000 -0800
17166+++ b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h 2009-04-07 13:28:38.000000000 -0700
17167@@ -0,0 +1,328 @@
17168+/*
17169+ * Copyright (c) 2008, Intel Corporation
17170+ *
17171+ * Permission is hereby granted, free of charge, to any person obtaining a
17172+ * copy of this software and associated documentation files (the "Software"),
17173+ * to deal in the Software without restriction, including without limitation
17174+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17175+ * and/or sell copies of the Software, and to permit persons to whom the
17176+ * Software is furnished to do so, subject to the following conditions:
17177+ *
17178+ * The above copyright notice and this permission notice (including the next
17179+ * paragraph) shall be included in all copies or substantial portions of the
17180+ * Software.
17181+ *
17182+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17183+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17184+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17185+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17186+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17187+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
17188+ * DEALINGS IN THE SOFTWARE.
17189+ *
17190+ * Authors:
17191+ * Eric Anholt <eric@anholt.net>
17192+ */
17193+
17194+/**
17195+ * @file SDVO command definitions and structures.
17196+ */
17197+
17198+#define SDVO_OUTPUT_FIRST (0)
17199+#define SDVO_OUTPUT_TMDS0 (1 << 0)
17200+#define SDVO_OUTPUT_RGB0 (1 << 1)
17201+#define SDVO_OUTPUT_CVBS0 (1 << 2)
17202+#define SDVO_OUTPUT_SVID0 (1 << 3)
17203+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
17204+#define SDVO_OUTPUT_SCART0 (1 << 5)
17205+#define SDVO_OUTPUT_LVDS0 (1 << 6)
17206+#define SDVO_OUTPUT_TMDS1 (1 << 8)
17207+#define SDVO_OUTPUT_RGB1 (1 << 9)
17208+#define SDVO_OUTPUT_CVBS1 (1 << 10)
17209+#define SDVO_OUTPUT_SVID1 (1 << 11)
17210+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
17211+#define SDVO_OUTPUT_SCART1 (1 << 13)
17212+#define SDVO_OUTPUT_LVDS1 (1 << 14)
17213+#define SDVO_OUTPUT_LAST (14)
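+/* Editorial example: the two bytes returned by
+ * SDVO_CMD_GET_ATTACHED_DISPLAYS form a u16 of these flags, so
+ * (flags & SDVO_OUTPUT_TMDS0) tests for an attached TMDS/DVI display.
+ */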
17214+
17215+struct psb_intel_sdvo_caps {
17216+ u8 vendor_id;
17217+ u8 device_id;
17218+ u8 device_rev_id;
17219+ u8 sdvo_version_major;
17220+ u8 sdvo_version_minor;
17221+ unsigned int sdvo_inputs_mask:2;
17222+ unsigned int smooth_scaling:1;
17223+ unsigned int sharp_scaling:1;
17224+ unsigned int up_scaling:1;
17225+ unsigned int down_scaling:1;
17226+ unsigned int stall_support:1;
17227+ unsigned int pad:1;
17228+ u16 output_flags;
17229+} __attribute__ ((packed));
17230+
17231+/** This matches the EDID DTD structure, more or less */
17232+struct psb_intel_sdvo_dtd {
17233+ struct {
17234+ u16 clock; /**< pixel clock, in 10kHz units */
17235+ u8 h_active; /**< lower 8 bits (pixels) */
17236+ u8 h_blank; /**< lower 8 bits (pixels) */
17237+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
17238+ u8 v_active; /**< lower 8 bits (lines) */
17239+ u8 v_blank; /**< lower 8 bits (lines) */
17240+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
17241+ } part1;
17242+
17243+ struct {
17244+ u8 h_sync_off;
17245+ /**< lower 8 bits, from hblank start */
17246+ u8 h_sync_width;/**< lower 8 bits (pixels) */
17247+ /** lower 4 bits each vsync offset, vsync width */
17248+ u8 v_sync_off_width;
17249+ /**
17250+ * 2 high bits of hsync offset, 2 high bits of hsync width,
17251+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
17252+ */
17253+ u8 sync_off_width_high;
17254+ u8 dtd_flags;
17255+ u8 sdvo_flags;
17256+		/** bits 6-7 of vsync offset, stored at bits 6-7 */
17257+ u8 v_sync_off_high;
17258+ u8 reserved;
17259+ } part2;
17260+} __attribute__ ((packed));
17261+
17262+struct psb_intel_sdvo_pixel_clock_range {
17263+ u16 min; /**< pixel clock, in 10kHz units */
17264+ u16 max; /**< pixel clock, in 10kHz units */
17265+} __attribute__ ((packed));
17266+
17267+struct psb_intel_sdvo_preferred_input_timing_args {
17268+ u16 clock;
17269+ u16 width;
17270+ u16 height;
17271+} __attribute__ ((packed));
17272+
17273+/* I2C registers for SDVO */
17274+#define SDVO_I2C_ARG_0 0x07
17275+#define SDVO_I2C_ARG_1 0x06
17276+#define SDVO_I2C_ARG_2 0x05
17277+#define SDVO_I2C_ARG_3 0x04
17278+#define SDVO_I2C_ARG_4 0x03
17279+#define SDVO_I2C_ARG_5 0x02
17280+#define SDVO_I2C_ARG_6 0x01
17281+#define SDVO_I2C_ARG_7 0x00
17282+#define SDVO_I2C_OPCODE 0x08
17283+#define SDVO_I2C_CMD_STATUS 0x09
17284+#define SDVO_I2C_RETURN_0 0x0a
17285+#define SDVO_I2C_RETURN_1 0x0b
17286+#define SDVO_I2C_RETURN_2 0x0c
17287+#define SDVO_I2C_RETURN_3 0x0d
17288+#define SDVO_I2C_RETURN_4 0x0e
17289+#define SDVO_I2C_RETURN_5 0x0f
17290+#define SDVO_I2C_RETURN_6 0x10
17291+#define SDVO_I2C_RETURN_7 0x11
17292+#define SDVO_I2C_VENDOR_BEGIN 0x20
17293+
17294+/* Status results */
17295+#define SDVO_CMD_STATUS_POWER_ON 0x0
17296+#define SDVO_CMD_STATUS_SUCCESS 0x1
17297+#define SDVO_CMD_STATUS_NOTSUPP 0x2
17298+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
17299+#define SDVO_CMD_STATUS_PENDING 0x4
17300+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
17301+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
17302+
17303+/* SDVO commands, argument/result registers */
17304+
17305+#define SDVO_CMD_RESET 0x01
17306+
17307+/** Returns a struct psb_intel_sdvo_caps */
17308+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
17309+
17310+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
17311+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
17312+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
17313+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
17314+
17315+/**
17316+ * Reports which inputs are trained (managed to sync).
17317+ *
17318+ * Devices must have trained within 2 vsyncs of a mode change.
17319+ */
17320+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
17321+struct psb_intel_sdvo_get_trained_inputs_response {
17322+ unsigned int input0_trained:1;
17323+ unsigned int input1_trained:1;
17324+ unsigned int pad:6;
17325+} __attribute__ ((packed));
17326+
17327+/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
17328+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
17329+
17330+/**
17331+ * Sets the current set of active outputs.
17332+ *
17333+ * Takes a struct psb_intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
17334+ * on multi-output devices.
17335+ */
17336+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
17337+
17338+/**
17339+ * Returns the current mapping of SDVO inputs to outputs on the device.
17340+ *
17341+ * Returns two struct psb_intel_sdvo_output_flags structures.
17342+ */
17343+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
17344+
17345+/**
17346+ * Sets the current mapping of SDVO inputs to outputs on the device.
17347+ *
17348+ * Takes two struct psb_intel_sdvo_output_flags structures.
17349+ */
17350+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
17351+
17352+/**
17353+ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
17354+ */
17355+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
17356+
17357+/**
17358+ * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
17359+ */
17360+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
17361+
17362+/**
17363+ * Takes a struct psb_intel_sdvo_output_flags.
17364+ */
17365+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
17366+
17367+/**
17368+ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
17369+ * interrupts enabled.
17370+ */
17371+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
17372+
17373+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
17374+struct psb_intel_sdvo_get_interrupt_event_source_response {
17375+ u16 interrupt_status;
17376+ unsigned int ambient_light_interrupt:1;
17377+ unsigned int pad:7;
17378+} __attribute__ ((packed));
17379+
17380+/**
17381+ * Selects which input is affected by future input commands.
17382+ *
17383+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
17384+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
17385+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
17386+ */
17387+#define SDVO_CMD_SET_TARGET_INPUT 0x10
17388+struct psb_intel_sdvo_set_target_input_args {
17389+ unsigned int target_1:1;
17390+ unsigned int pad:7;
17391+} __attribute__ ((packed));
17392+
17393+/**
17394+ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
17395+ * future output commands.
17396+ *
17397+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
17398+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
17399+ */
17400+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
17401+
17402+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
17403+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
17404+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
17405+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
17406+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
17407+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
17408+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
17409+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
17410+/* Part 1 */
17411+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
17412+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
17413+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
17414+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
17415+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
17416+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
17417+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
17418+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
17419+/* Part 2 */
17420+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
17421+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
17422+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
17423+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
17424+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
17425+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
17426+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
17427+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
17428+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
17429+# define SDVO_DTD_SDVO_FLAGS SDVO_I2C_ARG_5
17430+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
17431+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
17432+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
17433+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
17434+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
17435+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
17436+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
17437+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
17438+
17439+/**
17440+ * Generates a DTD based on the given width, height, and flags.
17441+ *
17442+ * This will be supported by any device supporting scaling or interlaced
17443+ * modes.
17444+ */
17445+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
17446+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
17447+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
17448+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
17449+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
17450+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
17451+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
17452+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
17453+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
17454+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
17455+
17456+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
17457+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
17458+
17459+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
17460+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
17461+/** Returns a struct psb_intel_sdvo_pixel_clock_range */
17462+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
17463+
17464+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
17465+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
17466+
17467+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
17468+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
17469+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
17470+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
17471+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
17472+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
17473+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
17474+
17475+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
17476+
17477+#define SDVO_CMD_GET_TV_FORMAT 0x28
17478+
17479+#define SDVO_CMD_SET_TV_FORMAT 0x29
17480+
17481+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
17482+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
17483+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
17484+# define SDVO_ENCODER_STATE_ON (1 << 0)
17485+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
17486+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
17487+# define SDVO_ENCODER_STATE_OFF (1 << 3)
17488+
17489+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
17490+
17491+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
17492+# define SDVO_CONTROL_BUS_PROM 0x0
17493+# define SDVO_CONTROL_BUS_DDC1 0x1
17494+# define SDVO_CONTROL_BUS_DDC2 0x2
17495+# define SDVO_CONTROL_BUS_DDC3 0x3
17496diff -uNr a/drivers/gpu/drm/psb/psb_irq.c b/drivers/gpu/drm/psb/psb_irq.c
17497--- a/drivers/gpu/drm/psb/psb_irq.c 1969-12-31 16:00:00.000000000 -0800
17498+++ b/drivers/gpu/drm/psb/psb_irq.c 2009-04-07 13:28:38.000000000 -0700
17499@@ -0,0 +1,420 @@
17500+/**************************************************************************
17501+ * Copyright (c) 2007, Intel Corporation.
17502+ * All Rights Reserved.
17503+ *
17504+ * This program is free software; you can redistribute it and/or modify it
17505+ * under the terms and conditions of the GNU General Public License,
17506+ * version 2, as published by the Free Software Foundation.
17507+ *
17508+ * This program is distributed in the hope it will be useful, but WITHOUT
17509+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17510+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17511+ * more details.
17512+ *
17513+ * You should have received a copy of the GNU General Public License along with
17514+ * this program; if not, write to the Free Software Foundation, Inc.,
17515+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17516+ *
17517+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
17518+ * develop this driver.
17519+ *
17520+ **************************************************************************/
17521+/*
17522+ */
17523+
17524+#include <drm/drmP.h>
17525+#include "psb_drv.h"
17526+#include "psb_reg.h"
17527+#include "psb_msvdx.h"
17528+#include "lnc_topaz.h"
17529+
17530+/*
17531+ * Video display controller interrupt.
17532+ */
17533+
17534+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
17535+{
17536+ struct drm_psb_private *dev_priv =
17537+ (struct drm_psb_private *) dev->dev_private;
17538+ int wake = 0;
17539+
17540+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
17541+#ifdef PSB_FIXME
17542+ atomic_inc(&dev->vbl_received);
17543+#endif
17544+ wake = 1;
17545+ PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
17546+ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
17547+ }
17548+
17549+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
17550+#ifdef PSB_FIXME
17551+ atomic_inc(&dev->vbl_received2);
17552+#endif
17553+ wake = 1;
17554+ PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
17555+ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
17556+ }
17557+
17558+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
17559+ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
17560+ DRM_READMEMORYBARRIER();
17561+
17562+#ifdef PSB_FIXME
17563+ if (wake) {
17564+ DRM_WAKEUP(&dev->vbl_queue);
17565+ drm_vbl_send_signals(dev);
17566+ }
17567+#endif
17568+}
17569+
17570+/*
17571+ * SGX interrupt source 1.
17572+ */
17573+
17574+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
17575+ uint32_t sgx_stat2)
17576+{
17577+ struct drm_psb_private *dev_priv =
17578+ (struct drm_psb_private *) dev->dev_private;
17579+
17580+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
17581+ DRM_WAKEUP(&dev_priv->event_2d_queue);
17582+ psb_fence_handler(dev, PSB_ENGINE_2D);
17583+ }
17584+
17585+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
17586+ psb_print_pagefault(dev_priv);
17587+
17588+ psb_scheduler_handler(dev_priv, sgx_stat);
17589+}
17590+
17591+/*
17592+ * MSVDX interrupt.
17593+ */
17594+static void psb_msvdx_interrupt(struct drm_device *dev,
17595+ uint32_t msvdx_stat)
17596+{
17597+ struct drm_psb_private *dev_priv =
17598+ (struct drm_psb_private *) dev->dev_private;
17599+
17600+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
17601+ /* Ideally we should never get to this */
17602+ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x fence2_irq_on=%d\n",
17603+ msvdx_stat, dev_priv->fence2_irq_on);
17604+
17605+ /* Pause MMU */
17606+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
17607+ MSVDX_MMU_CONTROL0);
17608+ DRM_WRITEMEMORYBARRIER();
17609+
17610+ /* Clear this interrupt bit only */
17611+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
17612+ MSVDX_INTERRUPT_CLEAR);
17613+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
17614+ DRM_READMEMORYBARRIER();
17615+
17616+ dev_priv->msvdx_needs_reset = 1;
17617+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
17618+ PSB_DEBUG_IRQ
17619+ ("MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d(MTX)\n",
17620+ msvdx_stat, dev_priv->fence2_irq_on);
17621+
17622+ /* Clear all interrupt bits */
17623+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
17624+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
17625+ DRM_READMEMORYBARRIER();
17626+
17627+ psb_msvdx_mtx_interrupt(dev);
17628+ }
17629+}
17630+
17631+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
17632+{
17633+ struct drm_device *dev = (struct drm_device *) arg;
17634+ struct drm_psb_private *dev_priv =
17635+ (struct drm_psb_private *) dev->dev_private;
17636+ uint32_t vdc_stat, msvdx_int = 0, topaz_int = 0;
17637+ uint32_t sgx_stat = 0;
17638+ uint32_t sgx_stat2 = 0;
17639+ uint32_t sgx_int = 0;
17640+ int handled = 0;
17641+
17642+ spin_lock(&dev_priv->irqmask_lock);
17643+
17644+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
17645+
17646+ if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
17647+ PSB_DEBUG_IRQ("Got SGX interrupt\n");
17648+ sgx_int = 1;
17649+ }
17650+ if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
17651+ PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
17652+ msvdx_int = 1;
17653+ }
17654+
17655+ if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
17656+ PSB_DEBUG_IRQ("Got TOPAZ interrupt\n");
17657+ topaz_int = 1;
17658+ }
17659+ if (sgx_int && (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)) {
17660+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
17661+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
17662+
17663+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
17664+ sgx_stat &= dev_priv->sgx_irq_mask;
17665+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
17666+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
17667+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
17668+ } else if (unlikely(PSB_D_PM & drm_psb_debug)) {
17669+ if (sgx_int)
17670+ PSB_DEBUG_PM("sgx int in down mode\n");
17671+ }
17672+ vdc_stat &= dev_priv->vdc_irq_mask;
17673+ spin_unlock(&dev_priv->irqmask_lock);
17674+
17675+ if (msvdx_int) {
17676+ uint32_t msvdx_stat = 0;
17677+
17678+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
17679+ psb_msvdx_interrupt(dev, msvdx_stat);
17680+ handled = 1;
17681+ }
17682+
17683+ if (IS_MRST(dev) && topaz_int) {
17684+ uint32_t topaz_stat = 0;
17685+
17686+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &topaz_stat);
17687+ lnc_topaz_interrupt(dev, topaz_stat);
17688+ handled = 1;
17689+ }
17690+
17691+ if (vdc_stat) {
17692+ /* MSVDX IRQ status is part of vdc_irq_mask */
17693+ psb_vdc_interrupt(dev, vdc_stat);
17694+ handled = 1;
17695+ }
17696+
17697+ if (sgx_stat || sgx_stat2) {
17698+
17699+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
17700+ handled = 1;
17701+ }
17702+
17703+ if (!handled)
17704+ return IRQ_NONE;
17705+
17706+
17707+ return IRQ_HANDLED;
17708+}
17709+
17710+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
17711+{
17712+ unsigned long mtx_int = 0;
17713+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
17714+
17715+ /* Clear MTX interrupt */
17716+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
17717+ 1);
17718+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
17719+}
17720+
17721+void psb_irq_preinstall(struct drm_device *dev)
17722+{
17723+ struct drm_psb_private *dev_priv =
17724+ (struct drm_psb_private *) dev->dev_private;
17725+ unsigned long mtx_int = 0;
17726+ unsigned long irqflags;
17727+ PSB_DEBUG_PM("psb_irq_preinstall\n");
17728+
17729+ down_read(&dev_priv->sgx_sem);
17730+ psb_check_power_state(dev, PSB_DEVICE_SGX);
17731+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17732+
17733+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
17734+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
17735+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
17736+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
17737+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
17738+
17739+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
17740+ _PSB_CE_DPM_3D_MEM_FREE |
17741+ _PSB_CE_TA_FINISHED |
17742+ _PSB_CE_DPM_REACHED_MEM_THRESH |
17743+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
17744+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
17745+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
17746+
17747+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
17748+
17749+ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG;
17750+
17751+ if (!drm_psb_disable_vsync)
17752+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
17753+ _PSB_VSYNC_PIPEB_FLAG;
17754+
17755+ /* Clear MTX interrupt */
17756+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
17757+ CR_MTX_IRQ, 1);
17758+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
17759+
17760+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17761+ up_read(&dev_priv->sgx_sem);
17762+}
17763+
17764+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
17765+{
17766+ /* Enable MTX interrupt to host */
17767+ unsigned long enables = 0;
17768+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
17769+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
17770+ 1);
17771+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
17772+}
17773+
17774+int psb_irq_postinstall(struct drm_device *dev)
17775+{
17776+ struct drm_psb_private *dev_priv =
17777+ (struct drm_psb_private *) dev->dev_private;
17778+ unsigned long irqflags;
17779+ unsigned long enables = 0;
17780+
17781+ PSB_DEBUG_PM("psb_irq_postinstall\n");
17782+ down_read(&dev_priv->sgx_sem);
17783+ psb_check_power_state(dev, PSB_DEVICE_SGX);
17784+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17785+
17786+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
17787+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
17788+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
17789+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
17790+
17791+ /* MSVDX IRQ setup: enable MTX interrupt to host */
17792+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
17793+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
17794+ CR_MTX_IRQ, 1);
17795+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
17796+
17797+ dev_priv->irq_enabled = 1;
17798+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17799+ up_read(&dev_priv->sgx_sem);
17800+ return 0;
17801+}
17802+
17803+void psb_irq_uninstall(struct drm_device *dev)
17804+{
17805+ struct drm_psb_private *dev_priv =
17806+ (struct drm_psb_private *) dev->dev_private;
17807+ unsigned long irqflags;
17808+ PSB_DEBUG_PM("psb_irq_uninstall\n");
17809+ down_read(&dev_priv->sgx_sem);
17810+ psb_check_power_state(dev, PSB_DEVICE_SGX);
17811+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17812+
17813+ dev_priv->sgx_irq_mask = 0x00000000;
17814+ dev_priv->sgx2_irq_mask = 0x00000000;
17815+ dev_priv->vdc_irq_mask = 0x00000000;
17816+
17817+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
17818+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
17819+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
17820+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
17821+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
17822+ wmb();
17823+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
17824+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS),
17825+ PSB_CR_EVENT_HOST_CLEAR);
17826+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2),
17827+ PSB_CR_EVENT_HOST_CLEAR2);
17828+
17829+ /* MSVDX IRQ Setup */
17830+ /* Clear interrupt enabled flag */
17831+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
17832+
17833+ if (IS_MRST(dev))
17834+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
17835+
17836+ dev_priv->irq_enabled = 0;
17837+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17838+ up_read(&dev_priv->sgx_sem);
17839+}
17840+
17841+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
17842+{
17843+ unsigned long irqflags;
17844+ uint32_t old_mask;
17845+ uint32_t cleared_mask;
17846+
17847+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17848+ --dev_priv->irqen_count_2d;
17849+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
17850+
17851+ old_mask = dev_priv->sgx_irq_mask;
17852+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
17853+ PSB_WSGX32(dev_priv->sgx_irq_mask,
17854+ PSB_CR_EVENT_HOST_ENABLE);
17855+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
17856+
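+ /* Ack the events that just went from enabled to masked so a
+ * stale 2D-complete status does not linger in the register. */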
17857+ cleared_mask =
17858+ (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
17859+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
17860+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
17861+ }
17862+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17863+}
17864+
17865+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
17866+{
17867+ unsigned long irqflags;
17868+
17869+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17870+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
17871+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
17872+ PSB_WSGX32(dev_priv->sgx_irq_mask,
17873+ PSB_CR_EVENT_HOST_ENABLE);
17874+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
17875+ }
17876+ ++dev_priv->irqen_count_2d;
17877+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17878+}
17879+
17880+#ifdef PSB_FIXME
17881+static int psb_vblank_do_wait(struct drm_device *dev,
17882+ unsigned int *sequence, atomic_t *counter)
17883+{
17884+ unsigned int cur_vblank;
17885+ int ret = 0;
17886+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
17887+ (((cur_vblank = atomic_read(counter))
17888+ - *sequence) <= (1 << 23)));
17889+ *sequence = cur_vblank;
17890+
17891+ return ret;
17892+}
17893+#endif
17894+
17895+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
17896+{
17897+ unsigned long irqflags;
17898+
17899+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17900+ if (dev_priv->irq_enabled) {
17901+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
17902+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
17903+ (void) PSB_RVDC32(PSB_INT_ENABLE_R);
17904+ }
17905+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17906+}
17907+
17908+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
17909+{
17910+ unsigned long irqflags;
17911+
17912+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
17913+ if (dev_priv->irq_enabled) {
17914+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
17915+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
17916+ (void) PSB_RVDC32(PSB_INT_ENABLE_R);
17917+ }
17918+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
17919+}
17920diff -uNr a/drivers/gpu/drm/psb/psb_mmu.c b/drivers/gpu/drm/psb/psb_mmu.c
17921--- a/drivers/gpu/drm/psb/psb_mmu.c 1969-12-31 16:00:00.000000000 -0800
17922+++ b/drivers/gpu/drm/psb/psb_mmu.c 2009-04-07 13:28:38.000000000 -0700
17923@@ -0,0 +1,1069 @@
17924+/**************************************************************************
17925+ * Copyright (c) 2007, Intel Corporation.
17926+ * All Rights Reserved.
17927+ *
17928+ * This program is free software; you can redistribute it and/or modify it
17929+ * under the terms and conditions of the GNU General Public License,
17930+ * version 2, as published by the Free Software Foundation.
17931+ *
17932+ * This program is distributed in the hope it will be useful, but WITHOUT
17933+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17934+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17935+ * more details.
17936+ *
17937+ * You should have received a copy of the GNU General Public License along with
17938+ * this program; if not, write to the Free Software Foundation, Inc.,
17939+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17940+ *
17941+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
17942+ * develop this driver.
17943+ *
17944+ **************************************************************************/
17945+#include <drm/drmP.h>
17946+#include "psb_drv.h"
17947+#include "psb_reg.h"
17948+
17949+/*
17950+ * Code for the SGX MMU:
17951+ */
17952+
17953+/*
17954+ * clflush on one processor only:
17955+ * clflush should apparently flush the cache line on all processors in an
17956+ * SMP system.
17957+ */
17958+
17959+/*
17960+ * kmap atomic:
17961+ * The usage of the slots must be completely encapsulated within a spinlock, and
17962+ * no other functions that may be using the locks for other purposes may be
17963+ * called from within the locked region.
17964+ * Since the slots are per processor, this will guarantee that we are the only
17965+ * user.
17966+ */
17967+
17968+/*
17969+ * TODO: Inserting ptes from an interrupt handler:
17970+ * This may be desirable for some SGX functionality where the GPU can fault in
17971+ * needed pages. For that, we need to make an atomic insert_pages function that
17972+ * may fail.
17973+ * If it fails, the caller needs to insert the page using a workqueue function,
17974+ * but on average it should be fast.
17975+ */
17976+
17977+struct psb_mmu_driver {
17978+ /* protects driver- and pd structures. Always take in read mode
17979+ * before taking the page table spinlock.
17980+ */
17981+ struct rw_semaphore sem;
17982+
17983+ /* protects page tables, directory tables,
17984+ * and pt structures.
17985+ */
17986+ spinlock_t lock;
17987+
17988+ atomic_t needs_tlbflush;
17989+
17990+ uint8_t __iomem *register_map;
17991+ struct psb_mmu_pd *default_pd;
17992+ uint32_t bif_ctrl;
17993+ int has_clflush;
17994+ int clflush_add;
17995+ unsigned long clflush_mask;
17996+
17997+ struct drm_psb_private *dev_priv;
17998+};
17999+
18000+struct psb_mmu_pd;
18001+
18002+struct psb_mmu_pt {
18003+ struct psb_mmu_pd *pd;
18004+ uint32_t index;
18005+ uint32_t count;
18006+ struct page *p;
18007+ uint32_t *v;
18008+};
18009+
18010+struct psb_mmu_pd {
18011+ struct psb_mmu_driver *driver;
18012+ int hw_context;
18013+ struct psb_mmu_pt **tables;
18014+ struct page *p;
18015+ struct page *dummy_pt;
18016+ struct page *dummy_page;
18017+ uint32_t pd_mask;
18018+ uint32_t invalid_pde;
18019+ uint32_t invalid_pte;
18020+};
18021+
18022+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
18023+
18024+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
18025+{
18026+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
18027+}
18028+
18029+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
18030+{
18031+ return offset >> PSB_PDE_SHIFT;
18032+}
18033+
18034+#if defined(CONFIG_X86)
18035+static inline void psb_clflush(void *addr)
18036+{
18037+ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
18038+}
18039+
18040+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
18041+ void *addr)
18042+{
18043+ if (!driver->has_clflush)
18044+ return;
18045+
18046+ mb();
18047+ psb_clflush(addr);
18048+ mb();
18049+}
18050+#else
18051+
18052+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
18053+ void *addr)
18054+{
18055+}
18056+
18057+#endif
18058+
18059+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
18060+ uint32_t val, uint32_t offset)
18061+{
18062+ iowrite32(val, d->register_map + offset);
18063+}
18064+
18065+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
18066+ uint32_t offset)
18067+{
18068+ return ioread32(d->register_map + offset);
18069+}
18070+
18071+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
18072+ int force)
18073+{
18074+ if (atomic_read(&driver->needs_tlbflush) || force) {
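+ /* Pulse _PSB_CB_CTRL_INVALDC to invalidate the directory
+ * cache; the read-back below posts the writes. */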
18075+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
18076+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
18077+ PSB_CR_BIF_CTRL);
18078+ wmb();
18079+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
18080+ PSB_CR_BIF_CTRL);
18081+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
18082+ if (driver->dev_priv) {
18083+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
18084+ if (IS_MRST(driver->dev_priv->dev))
18085+ topaz_mmu_flushcache(driver->dev_priv);
18086+ }
18087+ }
18088+ atomic_set(&driver->needs_tlbflush, 0);
18089+}
18090+
18091+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
18092+{
18093+ down_write(&driver->sem);
18094+ psb_mmu_flush_pd_locked(driver, force);
18095+ up_write(&driver->sem);
18096+}
18097+
18098+void psb_mmu_flush(struct psb_mmu_driver *driver)
18099+{
18100+ uint32_t val;
18101+
18102+ down_write(&driver->sem);
18103+ if (driver->dev_priv->graphics_state == PSB_PWR_STATE_D0i0) {
18104+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
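+ /* A pending directory update needs a full invalidate
+ * (INVALDC); otherwise a plain TLB flush (FLUSH) suffices. */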
18105+ if (atomic_read(&driver->needs_tlbflush))
18106+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
18107+ PSB_CR_BIF_CTRL);
18108+ else
18109+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
18110+ PSB_CR_BIF_CTRL);
18111+ wmb();
18112+ psb_iowrite32(driver,
18113+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
18114+ PSB_CR_BIF_CTRL);
18115+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
18116+ atomic_set(&driver->needs_tlbflush, 0);
18117+ } else {
18118+ PSB_DEBUG_PM("mmu flush when down\n");
18119+ }
18120+
18121+ if (driver->dev_priv) {
18122+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
18123+ if (IS_MRST(driver->dev_priv->dev))
18124+ topaz_mmu_flushcache(driver->dev_priv);
18125+ }
18126+
18127+ up_write(&driver->sem);
18128+}
18129+
18130+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
18131+{
18132+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
18133+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
18134+
18135+ ttm_tt_cache_flush(&pd->p, 1);
18136+ down_write(&pd->driver->sem);
18137+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT),
18138+ offset);
18139+ wmb();
18140+ psb_mmu_flush_pd_locked(pd->driver, 1);
18141+ pd->hw_context = hw_context;
18142+ up_write(&pd->driver->sem);
18143+
18144+}
18145+
18146+static inline unsigned long psb_pd_addr_end(unsigned long addr,
18147+ unsigned long end)
18148+{
18149+
18150+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
18151+ return (addr < end) ? addr : end;
18152+}
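+/*
+ * Example (assuming PSB_PDE_MASK spans 4 MiB): psb_pd_addr_end(0x00500000,
+ * 0x01000000) rounds up to the next PDE boundary, 0x00800000, while
+ * psb_pd_addr_end(0x00500000, 0x00600000) is clamped to end, 0x00600000.
+ */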
18153+
18154+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
18155+{
18156+ uint32_t mask = PSB_PTE_VALID;
18157+
18158+ if (type & PSB_MMU_CACHED_MEMORY)
18159+ mask |= PSB_PTE_CACHED;
18160+ if (type & PSB_MMU_RO_MEMORY)
18161+ mask |= PSB_PTE_RO;
18162+ if (type & PSB_MMU_WO_MEMORY)
18163+ mask |= PSB_PTE_WO;
18164+
18165+ return (pfn << PAGE_SHIFT) | mask;
18166+}
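+/*
+ * Usage sketch: psb_mmu_mask_pte(pfn, PSB_MMU_CACHED_MEMORY |
+ * PSB_MMU_RO_MEMORY) yields (pfn << PAGE_SHIFT) | PSB_PTE_VALID |
+ * PSB_PTE_CACHED | PSB_PTE_RO, i.e. a valid, cached, read-only PTE.
+ */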
18167+
18168+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
18169+ int trap_pagefaults, int invalid_type)
18170+{
18171+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
18172+ uint32_t *v;
18173+ int i;
18174+
18175+ if (!pd)
18176+ return NULL;
18177+
18178+ pd->p = alloc_page(GFP_DMA32);
18179+ if (!pd->p)
18180+ goto out_err1;
18181+ pd->dummy_pt = alloc_page(GFP_DMA32);
18182+ if (!pd->dummy_pt)
18183+ goto out_err2;
18184+ pd->dummy_page = alloc_page(GFP_DMA32);
18185+ if (!pd->dummy_page)
18186+ goto out_err3;
18187+
18188+ if (!trap_pagefaults) {
18189+ pd->invalid_pde =
18190+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
18191+ invalid_type);
18192+ pd->invalid_pte =
18193+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
18194+ invalid_type);
18195+ } else {
18196+ pd->invalid_pde = 0;
18197+ pd->invalid_pte = 0;
18198+ }
18199+
18200+ v = kmap(pd->dummy_pt);
18201+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
18202+ v[i] = pd->invalid_pte;
18203+
18204+ kunmap(pd->dummy_pt);
18205+
18206+ v = kmap(pd->p);
18207+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
18208+ v[i] = pd->invalid_pde;
18209+
18210+ kunmap(pd->p);
18211+
18212+ clear_page(kmap(pd->dummy_page));
18213+ kunmap(pd->dummy_page);
18214+
18215+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
18216+ if (!pd->tables)
18217+ goto out_err4;
18218+
18219+ pd->hw_context = -1;
18220+ pd->pd_mask = PSB_PTE_VALID;
18221+ pd->driver = driver;
18222+
18223+ return pd;
18224+
18225+out_err4:
18226+ __free_page(pd->dummy_page);
18227+out_err3:
18228+ __free_page(pd->dummy_pt);
18229+out_err2:
18230+ __free_page(pd->p);
18231+out_err1:
18232+ kfree(pd);
18233+ return NULL;
18234+}
18235+
18236+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
18237+{
18238+ __free_page(pt->p);
18239+ kfree(pt);
18240+}
18241+
18242+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
18243+{
18244+ struct psb_mmu_driver *driver = pd->driver;
18245+ struct psb_mmu_pt *pt;
18246+ int i;
18247+
18248+ down_write(&driver->sem);
18249+ if (pd->hw_context != -1) {
18250+ psb_iowrite32(driver, 0,
18251+ PSB_CR_BIF_DIR_LIST_BASE0 +
18252+ pd->hw_context * 4);
18253+ psb_mmu_flush_pd_locked(driver, 1);
18254+ }
18255+
18256+ /* Should take the spinlock here, but we don't need to do that
18257+ since we have the semaphore in write mode. */
18258+
18259+ for (i = 0; i < 1024; ++i) {
18260+ pt = pd->tables[i];
18261+ if (pt)
18262+ psb_mmu_free_pt(pt);
18263+ }
18264+
18265+ vfree(pd->tables);
18266+ __free_page(pd->dummy_page);
18267+ __free_page(pd->dummy_pt);
18268+ __free_page(pd->p);
18269+ kfree(pd);
18270+ up_write(&driver->sem);
18271+}
18272+
18273+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
18274+{
18275+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
18276+ void *v;
18277+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
18278+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
18279+ spinlock_t *lock = &pd->driver->lock;
18280+ uint8_t *clf;
18281+ uint32_t *ptes;
18282+ int i;
18283+
18284+ if (!pt)
18285+ return NULL;
18286+
18287+ pt->p = alloc_page(GFP_DMA32);
18288+ if (!pt->p) {
18289+ kfree(pt);
18290+ return NULL;
18291+ }
18292+
18293+ spin_lock(lock);
18294+
18295+ v = kmap_atomic(pt->p, KM_USER0);
18296+ clf = (uint8_t *) v;
18297+ ptes = (uint32_t *) v;
18298+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
18299+ *ptes++ = pd->invalid_pte;
18300+
18301+
18302+#if defined(CONFIG_X86)
18303+ if (pd->driver->has_clflush && pd->hw_context != -1) {
18304+ mb();
18305+ for (i = 0; i < clflush_count; ++i) {
18306+ psb_clflush(clf);
18307+ clf += clflush_add;
18308+ }
18309+ mb();
18310+ }
18311+#endif
18312+ kunmap_atomic(v, KM_USER0);
18313+ spin_unlock(lock);
18314+
18315+ pt->count = 0;
18316+ pt->pd = pd;
18317+ pt->index = 0;
18318+
18319+ return pt;
18320+}
18321+
18322+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
18323+ unsigned long addr)
18324+{
18325+ uint32_t index = psb_mmu_pd_index(addr);
18326+ struct psb_mmu_pt *pt;
18327+ uint32_t *v;
18328+ spinlock_t *lock = &pd->driver->lock;
18329+
18330+ spin_lock(lock);
18331+ pt = pd->tables[index];
18332+ while (!pt) {
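+ /*
+ * No page table for this directory index yet: drop the
+ * spinlock, allocate one, then re-check under the lock in
+ * case another thread installed a table here meanwhile.
+ */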
18333+ spin_unlock(lock);
18334+ pt = psb_mmu_alloc_pt(pd);
18335+ if (!pt)
18336+ return NULL;
18337+ spin_lock(lock);
18338+
18339+ if (pd->tables[index]) {
18340+ spin_unlock(lock);
18341+ psb_mmu_free_pt(pt);
18342+ spin_lock(lock);
18343+ pt = pd->tables[index];
18344+ continue;
18345+ }
18346+
18347+ v = kmap_atomic(pd->p, KM_USER0);
18348+ pd->tables[index] = pt;
18349+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
18350+ pt->index = index;
18351+ kunmap_atomic((void *) v, KM_USER0);
18352+
18353+ if (pd->hw_context != -1) {
18354+ psb_mmu_clflush(pd->driver, (void *) &v[index]);
18355+ atomic_set(&pd->driver->needs_tlbflush, 1);
18356+ }
18357+ }
18358+ pt->v = kmap_atomic(pt->p, KM_USER0);
18359+ return pt;
18360+}
18361+
18362+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
18363+ unsigned long addr)
18364+{
18365+ uint32_t index = psb_mmu_pd_index(addr);
18366+ struct psb_mmu_pt *pt;
18367+ spinlock_t *lock = &pd->driver->lock;
18368+
18369+ spin_lock(lock);
18370+ pt = pd->tables[index];
18371+ if (!pt) {
18372+ spin_unlock(lock);
18373+ return NULL;
18374+ }
18375+ pt->v = kmap_atomic(pt->p, KM_USER0);
18376+ return pt;
18377+}
18378+
18379+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
18380+{
18381+ struct psb_mmu_pd *pd = pt->pd;
18382+ uint32_t *v;
18383+
18384+ kunmap_atomic(pt->v, KM_USER0);
18385+ if (pt->count == 0) {
18386+ v = kmap_atomic(pd->p, KM_USER0);
18387+ v[pt->index] = pd->invalid_pde;
18388+ pd->tables[pt->index] = NULL;
18389+
18390+ if (pd->hw_context != -1) {
18391+ psb_mmu_clflush(pd->driver,
18392+ (void *) &v[pt->index]);
18393+ atomic_set(&pd->driver->needs_tlbflush, 1);
18394+ }
18395+ kunmap_atomic(v, KM_USER0); /* v maps pd->p; pt->v was unmapped above */
18396+ spin_unlock(&pd->driver->lock);
18397+ psb_mmu_free_pt(pt);
18398+ return;
18399+ }
18400+ spin_unlock(&pd->driver->lock);
18401+}
18402+
18403+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
18404+ unsigned long addr, uint32_t pte)
18405+{
18406+ pt->v[psb_mmu_pt_index(addr)] = pte;
18407+}
18408+
18409+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
18410+ unsigned long addr)
18411+{
18412+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
18413+}
18414+
18415+#if 0
18416+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
18417+ uint32_t mmu_offset)
18418+{
18419+ uint32_t *v;
18420+ uint32_t pfn;
18421+
18422+ v = kmap_atomic(pd->p, KM_USER0);
18423+ if (!v) {
18424+ printk(KERN_INFO "Could not kmap pde page.\n");
18425+ return 0;
18426+ }
18427+ pfn = v[psb_mmu_pd_index(mmu_offset)];
18428+ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */
18429+ kunmap_atomic(v, KM_USER0);
18430+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
18431+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
18432+ mmu_offset, pfn);
18433+ }
18434+ v = ioremap(pfn & 0xFFFFF000, 4096);
18435+ if (!v) {
18436+ printk(KERN_INFO "Could not kmap pte page.\n");
18437+ return 0;
18438+ }
18439+ pfn = v[psb_mmu_pt_index(mmu_offset)];
18440+ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
18441+ iounmap(v);
18442+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
18443+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
18444+ mmu_offset, pfn);
18445+ }
18446+ return pfn >> PAGE_SHIFT;
18447+}
18448+
18449+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
18450+ uint32_t mmu_offset,
18451+ uint32_t gtt_pages)
18452+{
18453+ uint32_t start;
18454+ uint32_t next;
18455+
18456+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
18457+ mmu_offset, gtt_pages);
18458+ down_read(&pd->driver->sem);
18459+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
18460+ mmu_offset += PAGE_SIZE;
18461+ gtt_pages -= 1;
18462+ while (gtt_pages--) {
18463+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
18464+ if (next != start + 1) {
18465+ printk(KERN_INFO
18466+ "Ptes out of order: 0x%08x, 0x%08x.\n",
18467+ start, next);
18468+ }
18469+ start = next;
18470+ mmu_offset += PAGE_SIZE;
18471+ }
18472+ up_read(&pd->driver->sem);
18473+}
18474+
18475+#endif
18476+
18477+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
18478+ uint32_t mmu_offset, uint32_t gtt_start,
18479+ uint32_t gtt_pages)
18480+{
18481+ uint32_t *v;
18482+ uint32_t start = psb_mmu_pd_index(mmu_offset);
18483+ struct psb_mmu_driver *driver = pd->driver;
18484+ int num_pages = gtt_pages;
18485+
18486+ down_read(&driver->sem);
18487+ spin_lock(&driver->lock);
18488+
18489+ v = kmap_atomic(pd->p, KM_USER0);
18490+ v += start;
18491+
18492+ while (gtt_pages--) {
18493+ *v++ = gtt_start | pd->pd_mask;
18494+ gtt_start += PAGE_SIZE;
18495+ }
18496+
18497+ ttm_tt_cache_flush(&pd->p, num_pages);
18498+ kunmap_atomic(v, KM_USER0);
18499+ spin_unlock(&driver->lock);
18500+
18501+ if (pd->hw_context != -1)
18502+ atomic_set(&pd->driver->needs_tlbflush, 1);
18503+
18504+ up_read(&pd->driver->sem);
18505+ psb_mmu_flush_pd(pd->driver, 0);
18506+}
18507+
18508+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
18509+{
18510+ struct psb_mmu_pd *pd;
18511+
18512+ down_read(&driver->sem);
18513+ pd = driver->default_pd;
18514+ up_read(&driver->sem);
18515+
18516+ return pd;
18517+}
18518+
18519+/* Returns the physical address of the PD shared by sgx/msvdx */
18520+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
18521+{
18522+ struct psb_mmu_pd *pd;
18523+
18524+ pd = psb_mmu_get_default_pd(driver);
18525+ return page_to_pfn(pd->p) << PAGE_SHIFT;
18526+}
18527+
18528+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
18529+{
18530+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
18531+ psb_mmu_free_pagedir(driver->default_pd);
18532+ kfree(driver);
18533+}
18534+
18535+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
18536+ int trap_pagefaults,
18537+ int invalid_type,
18538+ struct drm_psb_private *dev_priv)
18539+{
18540+ struct psb_mmu_driver *driver;
18541+
18542+ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
18543+
18544+ if (!driver)
18545+ return NULL;
18546+ driver->dev_priv = dev_priv;
18547+
18548+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
18549+ invalid_type);
18550+ if (!driver->default_pd)
18551+ goto out_err1;
18552+
18553+ spin_lock_init(&driver->lock);
18554+ init_rwsem(&driver->sem);
18555+ down_write(&driver->sem);
18556+ driver->register_map = registers;
18557+ atomic_set(&driver->needs_tlbflush, 1);
18558+
18559+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
18560+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
18561+ PSB_CR_BIF_CTRL);
18562+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
18563+ PSB_CR_BIF_CTRL);
18564+
18565+ driver->has_clflush = 0;
18566+
18567+#if defined(CONFIG_X86)
18568+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
18569+ uint32_t tfms, misc, cap0, cap4, clflush_size;
18570+
18571+ /*
18572+ * clflush size is determined at kernel setup for x86_64
18573+ * but not for i386. We have to do it here.
18574+ */
18575+
18576+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
18577+ clflush_size = ((misc >> 8) & 0xff) * 8;
18578+ driver->has_clflush = 1;
18579+ driver->clflush_add =
18580+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
18581+ driver->clflush_mask = driver->clflush_add - 1;
18582+ driver->clflush_mask = ~driver->clflush_mask;
18583+ }
18584+#endif
18585+
18586+ up_write(&driver->sem);
18587+ return driver;
18588+
18589+out_err1:
18590+ kfree(driver);
18591+ return NULL;
18592+}
18593+
18594+#if defined(CONFIG_X86)
18595+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
18596+ unsigned long address, uint32_t num_pages,
18597+ uint32_t desired_tile_stride,
18598+ uint32_t hw_tile_stride)
18599+{
18600+ struct psb_mmu_pt *pt;
18601+ uint32_t rows = 1;
18602+ uint32_t i;
18603+ unsigned long addr;
18604+ unsigned long end;
18605+ unsigned long next;
18606+ unsigned long add;
18607+ unsigned long row_add;
18608+ unsigned long clflush_add = pd->driver->clflush_add;
18609+ unsigned long clflush_mask = pd->driver->clflush_mask;
18610+
18611+ if (!pd->driver->has_clflush) {
18612+ ttm_tt_cache_flush(&pd->p, num_pages);
18613+ return;
18614+ }
18615+
18616+ if (hw_tile_stride)
18617+ rows = num_pages / desired_tile_stride;
18618+ else
18619+ desired_tile_stride = num_pages;
18620+
18621+ add = desired_tile_stride << PAGE_SHIFT;
18622+ row_add = hw_tile_stride << PAGE_SHIFT;
18623+ mb();
18624+ for (i = 0; i < rows; ++i) {
18625+
18626+ addr = address;
18627+ end = addr + add;
18628+
18629+ do {
18630+ next = psb_pd_addr_end(addr, end);
18631+ pt = psb_mmu_pt_map_lock(pd, addr);
18632+ if (!pt)
18633+ continue;
18634+ do {
18635+ psb_clflush(&pt->v
18636+ [psb_mmu_pt_index(addr)]);
18637+ } while (addr +=
18638+ clflush_add,
18639+ (addr & clflush_mask) < next);
18640+
18641+ psb_mmu_pt_unmap_unlock(pt);
18642+ } while (addr = next, next != end);
18643+ address += row_add;
18644+ }
18645+ mb();
18646+}
18647+#else
18648+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
18649+ unsigned long address, uint32_t num_pages,
18650+ uint32_t desired_tile_stride,
18651+ uint32_t hw_tile_stride)
18652+{
18653+ ttm_tt_cache_flush(&pd->p, num_pages);
18654+}
18655+#endif
18656+
18657+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
18658+ unsigned long address, uint32_t num_pages)
18659+{
18660+ struct psb_mmu_pt *pt;
18661+ unsigned long addr;
18662+ unsigned long end;
18663+ unsigned long next;
18664+ unsigned long f_address = address;
18665+
18666+ down_read(&pd->driver->sem);
18667+
18668+ addr = address;
18669+ end = addr + (num_pages << PAGE_SHIFT);
18670+
18671+ do {
18672+ next = psb_pd_addr_end(addr, end);
18673+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
18674+ if (!pt)
18675+ goto out;
18676+ do {
18677+ psb_mmu_invalidate_pte(pt, addr);
18678+ --pt->count;
18679+ } while (addr += PAGE_SIZE, addr < next);
18680+ psb_mmu_pt_unmap_unlock(pt);
18681+
18682+ } while (addr = next, next != end);
18683+
18684+out:
18685+ if (pd->hw_context != -1)
18686+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
18687+
18688+ up_read(&pd->driver->sem);
18689+
18690+ if (pd->hw_context != -1)
18691+ psb_mmu_flush(pd->driver);
18692+
18693+ return;
18694+}
18695+
18696+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
18697+ uint32_t num_pages, uint32_t desired_tile_stride,
18698+ uint32_t hw_tile_stride)
18699+{
18700+ struct psb_mmu_pt *pt;
18701+ uint32_t rows = 1;
18702+ uint32_t i;
18703+ unsigned long addr;
18704+ unsigned long end;
18705+ unsigned long next;
18706+ unsigned long add;
18707+ unsigned long row_add;
18708+ unsigned long f_address = address;
18709+
18710+ if (hw_tile_stride)
18711+ rows = num_pages / desired_tile_stride;
18712+ else
18713+ desired_tile_stride = num_pages;
18714+
18715+ add = desired_tile_stride << PAGE_SHIFT;
18716+ row_add = hw_tile_stride << PAGE_SHIFT;
18717+
18718+ down_read(&pd->driver->sem);
18719+
18720+ /* Make sure we only need to flush this processor's cache */
18721+
18722+ for (i = 0; i < rows; ++i) {
18723+
18724+ addr = address;
18725+ end = addr + add;
18726+
18727+ do {
18728+ next = psb_pd_addr_end(addr, end);
18729+ pt = psb_mmu_pt_map_lock(pd, addr);
18730+ if (!pt)
18731+ continue;
18732+ do {
18733+ psb_mmu_invalidate_pte(pt, addr);
18734+ --pt->count;
18735+
18736+ } while (addr += PAGE_SIZE, addr < next);
18737+ psb_mmu_pt_unmap_unlock(pt);
18738+
18739+ } while (addr = next, next != end);
18740+ address += row_add;
18741+ }
18742+ if (pd->hw_context != -1)
18743+ psb_mmu_flush_ptes(pd, f_address, num_pages,
18744+ desired_tile_stride, hw_tile_stride);
18745+
18746+ up_read(&pd->driver->sem);
18747+
18748+ if (pd->hw_context != -1)
18749+ psb_mmu_flush(pd->driver);
18750+}
18751+
18752+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
18753+ unsigned long address, uint32_t num_pages,
18754+ int type)
18755+{
18756+ struct psb_mmu_pt *pt;
18757+ uint32_t pte;
18758+ unsigned long addr;
18759+ unsigned long end;
18760+ unsigned long next;
18761+ unsigned long f_address = address;
18762+ int ret = 0;
18763+
18764+ down_read(&pd->driver->sem);
18765+
18766+ addr = address;
18767+ end = addr + (num_pages << PAGE_SHIFT);
18768+
18769+ do {
18770+ next = psb_pd_addr_end(addr, end);
18771+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
18772+ if (!pt) {
18773+ ret = -ENOMEM;
18774+ goto out;
18775+ }
18776+ do {
18777+ pte = psb_mmu_mask_pte(start_pfn++, type);
18778+ psb_mmu_set_pte(pt, addr, pte);
18779+ pt->count++;
18780+ } while (addr += PAGE_SIZE, addr < next);
18781+ psb_mmu_pt_unmap_unlock(pt);
18782+
18783+ } while (addr = next, next != end);
18784+
18785+out:
18786+ if (pd->hw_context != -1)
18787+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
18788+
18789+ up_read(&pd->driver->sem);
18790+
18791+ if (pd->hw_context != -1)
18792+ psb_mmu_flush(pd->driver);
18793+
18794+ return ret;
18795+}
18796+
18797+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
18798+ unsigned long address, uint32_t num_pages,
18799+ uint32_t desired_tile_stride,
18800+ uint32_t hw_tile_stride, int type)
18801+{
18802+ struct psb_mmu_pt *pt;
18803+ uint32_t rows = 1;
18804+ uint32_t i;
18805+ uint32_t pte;
18806+ unsigned long addr;
18807+ unsigned long end;
18808+ unsigned long next;
18809+ unsigned long add;
18810+ unsigned long row_add;
18811+ unsigned long f_address = address;
18812+ int ret = 0;
18813+
18814+ if (hw_tile_stride) {
18815+ if (num_pages % desired_tile_stride != 0)
18816+ return -EINVAL;
18817+ rows = num_pages / desired_tile_stride;
18818+ } else {
18819+ desired_tile_stride = num_pages;
18820+ }
18821+
18822+ add = desired_tile_stride << PAGE_SHIFT;
18823+ row_add = hw_tile_stride << PAGE_SHIFT;
18824+
18825+ down_read(&pd->driver->sem);
18826+
18827+ for (i = 0; i < rows; ++i) {
18828+
18829+ addr = address;
18830+ end = addr + add;
18831+
18832+ do {
18833+ next = psb_pd_addr_end(addr, end);
18834+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
18835+ if (!pt) {
18836+ ret = -ENOMEM;
18837+ goto out;
18838+ }
18839+ do {
18840+ pte =
18841+ psb_mmu_mask_pte(page_to_pfn(*pages++),
18842+ type);
18843+ psb_mmu_set_pte(pt, addr, pte);
18844+ pt->count++;
18845+ } while (addr += PAGE_SIZE, addr < next);
18846+ psb_mmu_pt_unmap_unlock(pt);
18847+
18848+ } while (addr = next, next != end);
18849+
18850+ address += row_add;
18851+ }
18852+out:
18853+ if (pd->hw_context != -1)
18854+ psb_mmu_flush_ptes(pd, f_address, num_pages,
18855+ desired_tile_stride, hw_tile_stride);
18856+
18857+ up_read(&pd->driver->sem);
18858+
18859+ if (pd->hw_context != -1)
18860+ psb_mmu_flush(pd->driver);
18861+
18862+ return ret;
18863+}
18864+
18865+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
18866+{
18867+ mask &= _PSB_MMU_ER_MASK;
18868+ psb_iowrite32(driver,
18869+ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
18870+ PSB_CR_BIF_CTRL);
18871+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
18872+}
18873+
18874+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
18875+ uint32_t mask)
18876+{
18877+ mask &= _PSB_MMU_ER_MASK;
18878+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
18879+ PSB_CR_BIF_CTRL);
18880+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
18881+}
18882+
18883+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
18884+ unsigned long *pfn)
18885+{
18886+ int ret;
18887+ struct psb_mmu_pt *pt;
18888+ uint32_t tmp;
18889+ spinlock_t *lock = &pd->driver->lock;
18890+
18891+ down_read(&pd->driver->sem);
18892+ pt = psb_mmu_pt_map_lock(pd, virtual);
18893+ if (!pt) {
18894+ uint32_t *v;
18895+
18896+ spin_lock(lock);
18897+ v = kmap_atomic(pd->p, KM_USER0);
18898+ tmp = v[psb_mmu_pd_index(virtual)];
18899+ kunmap_atomic(v, KM_USER0);
18900+ spin_unlock(lock);
18901+
18902+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
18903+ !(pd->invalid_pte & PSB_PTE_VALID)) {
18904+ ret = -EINVAL;
18905+ goto out;
18906+ }
18907+ ret = 0;
18908+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
18909+ goto out;
18910+ }
18911+ tmp = pt->v[psb_mmu_pt_index(virtual)];
18912+ if (!(tmp & PSB_PTE_VALID)) {
18913+ ret = -EINVAL;
18914+ } else {
18915+ ret = 0;
18916+ *pfn = tmp >> PAGE_SHIFT;
18917+ }
18918+ psb_mmu_pt_unmap_unlock(pt);
18919+out:
18920+ up_read(&pd->driver->sem);
18921+ return ret;
18922+}
18923+
18924+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
18925+{
18926+ struct page *p;
18927+ unsigned long pfn;
18928+ int ret = 0;
18929+ struct psb_mmu_pd *pd;
18930+ uint32_t *v;
18931+ uint32_t *vmmu;
18932+
18933+ pd = driver->default_pd;
18934+ if (!pd) {
18935+ printk(KERN_WARNING "Could not get default pd\n");
18936+ return;
18937+ }
18938+ p = alloc_page(GFP_DMA32);
18939+
18940+ if (!p) {
18941+ printk(KERN_WARNING "Failed allocating page\n");
18942+ return;
18943+ }
18944+
18945+ v = kmap(p);
18946+ memset(v, 0x67, PAGE_SIZE);
18947+
18948+ pfn = (offset >> PAGE_SHIFT);
18949+
18950+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
18951+ if (ret) {
18952+ printk(KERN_WARNING "Failed inserting mmu page\n");
18953+ goto out_err1;
18954+ }
18955+
18956+ /* Ioremap the page through the GART aperture */
18957+
18958+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
18959+ if (!vmmu) {
18960+ printk(KERN_WARNING "Failed ioremapping page\n");
18961+ goto out_err2;
18962+ }
18963+
18964+ /* Read from the page with mmu disabled. */
18965+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
18966+
18967+ /* Enable the mmu for host accesses and read again. */
18968+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
18969+
18970+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
18971+ ioread32(vmmu));
18972+ *v = 0x15243705;
18973+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
18974+ ioread32(vmmu));
18975+ iowrite32(0x16243355, vmmu);
18976+ (void) ioread32(vmmu);
18977+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
18978+
18979+ printk(KERN_INFO "Int stat is 0x%08x\n",
18980+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
18981+ printk(KERN_INFO "Fault is 0x%08x\n",
18982+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
18983+
18984+ /* Disable MMU for host accesses and clear page fault register */
18985+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
18986+ iounmap(vmmu);
18987+out_err2:
18988+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
18989+out_err1:
18990+ kunmap(p);
18991+ __free_page(p);
18992+}
18993diff -uNr a/drivers/gpu/drm/psb/psb_msvdx.c b/drivers/gpu/drm/psb/psb_msvdx.c
18994--- a/drivers/gpu/drm/psb/psb_msvdx.c 1969-12-31 16:00:00.000000000 -0800
18995+++ b/drivers/gpu/drm/psb/psb_msvdx.c 2009-04-07 13:28:38.000000000 -0700
18996@@ -0,0 +1,681 @@
18997+/**
18998+ * @file psb_msvdx.c
18999+ * MSVDX I/O operations and IRQ handling
19000+ *
19001+ */
19002+
19003+/**************************************************************************
19004+ *
19005+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
19006+ * Copyright (c) Imagination Technologies Limited, UK
19007+ * All Rights Reserved.
19008+ *
19009+ * Permission is hereby granted, free of charge, to any person obtaining a
19010+ * copy of this software and associated documentation files (the
19011+ * "Software"), to deal in the Software without restriction, including
19012+ * without limitation the rights to use, copy, modify, merge, publish,
19013+ * distribute, sub license, and/or sell copies of the Software, and to
19014+ * permit persons to whom the Software is furnished to do so, subject to
19015+ * the following conditions:
19016+ *
19017+ * The above copyright notice and this permission notice (including the
19018+ * next paragraph) shall be included in all copies or substantial portions
19019+ * of the Software.
19020+ *
19021+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19022+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19023+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19024+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19025+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19026+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19027+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
19028+ *
19029+ **************************************************************************/
19030+
19031+#include <drm/drmP.h>
19032+#include <drm/drm_os_linux.h>
19033+#include "psb_drv.h"
19034+#include "psb_drm.h"
19035+#include "psb_msvdx.h"
19036+
19037+#include <linux/io.h>
19038+#include <linux/delay.h>
19039+
19040+#ifndef list_first_entry
19041+#define list_first_entry(ptr, type, member) \
19042+ list_entry((ptr)->next, type, member)
19043+#endif
19044+
19045+
19046+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
19047+ unsigned long cmd_size);
19048+
19049+int psb_msvdx_dequeue_send(struct drm_device *dev)
19050+{
19051+ struct drm_psb_private *dev_priv = dev->dev_private;
19052+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
19053+ int ret = 0;
19054+
19055+ if (list_empty(&dev_priv->msvdx_queue)) {
19056+ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
19057+ dev_priv->msvdx_busy = 0;
19058+ return -EINVAL;
19059+ }
19060+ msvdx_cmd = list_first_entry(&dev_priv->msvdx_queue,
19061+ struct psb_msvdx_cmd_queue, head);
19062+ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
19063+ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
19064+ if (ret) {
19065+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
19066+ ret = -EINVAL;
19067+ }
19068+ list_del(&msvdx_cmd->head);
19069+ kfree(msvdx_cmd->cmd);
19070+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
19071+
19072+ return ret;
19073+}
19074+
19075+int psb_msvdx_map_command(struct drm_device *dev,
19076+ struct ttm_buffer_object *cmd_buffer,
19077+ unsigned long cmd_offset, unsigned long cmd_size,
19078+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
19079+{
19080+ struct drm_psb_private *dev_priv = dev->dev_private;
19081+ int ret = 0;
19082+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
19083+ unsigned long cmd_size_remaining;
19084+ struct ttm_bo_kmap_obj cmd_kmap;
19085+ void *cmd, *tmp, *cmd_start;
19086+ bool is_iomem;
19087+
19088+ /* command buffers may not exceed page boundary */
19089+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
19090+ return -EINVAL;
19091+
19092+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
19093+ if (ret) {
19094+ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
19095+ return ret;
19096+ }
19097+
19098+ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
19099+ + cmd_page_offset;
19100+ cmd = cmd_start;
19101+ cmd_size_remaining = cmd_size;
19102+
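+	/*
+	 * Walk the packed messages in the command buffer: each message
+	 * carries a generic header whose first byte is the message size
+	 * and whose second byte is the message id (the FWRK_GENMSG_*
+	 * fields in psb_msvdx.h).
+	 */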
19103+ while (cmd_size_remaining > 0) {
19104+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
19105+ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
19106+		uint32_t mmu_ptd = 0, mmu_invalidate = 0;
19107+
19108+ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
19109+ " cur_cmd_id = %02x fence = %08x\n",
19110+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
19111+ if ((cur_cmd_size % sizeof(uint32_t))
19112+ || (cur_cmd_size > cmd_size_remaining)) {
19113+ ret = -EINVAL;
19114+ DRM_ERROR("MSVDX: ret:%d\n", ret);
19115+ goto out;
19116+ }
19117+
19118+ switch (cur_cmd_id) {
19119+ case VA_MSGID_RENDER:
19120+ /* Fence ID */
19121+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
19122+ sequence);
19123+ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
19124+			mmu_invalidate =
19125+				atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc, 1, 0);
19126+			if (mmu_invalidate == 1) {
19127+ mmu_ptd |= 1;
19128+ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
19129+ }
19130+
19131+ /* PTD */
19132+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
19133+ break;
19134+
19135+ default:
19136+ /* Msg not supported */
19137+ ret = -EINVAL;
19138+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
19139+ goto out;
19140+ }
19141+
19142+ cmd += cur_cmd_size;
19143+ cmd_size_remaining -= cur_cmd_size;
19144+ }
19145+
19146+ if (copy_cmd) {
19147+ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
19148+
19149+ tmp = drm_calloc(1, cmd_size, DRM_MEM_DRIVER);
19150+ if (tmp == NULL) {
19151+ ret = -ENOMEM;
19152+			DRM_ERROR("MSVDX: failed to calloc, ret=%d\n", ret);
19153+ goto out;
19154+ }
19155+ memcpy(tmp, cmd_start, cmd_size);
19156+ *msvdx_cmd = tmp;
19157+ } else {
19158+ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
19159+ ret = psb_msvdx_send(dev, cmd_start, cmd_size);
19160+ if (ret) {
19161+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
19162+ ret = -EINVAL;
19163+ }
19164+ }
19165+
19166+out:
19167+ ttm_bo_kunmap(&cmd_kmap);
19168+
19169+ return ret;
19170+}
19171+
19172+int psb_submit_video_cmdbuf(struct drm_device *dev,
19173+ struct ttm_buffer_object *cmd_buffer,
19174+ unsigned long cmd_offset, unsigned long cmd_size,
19175+ struct ttm_fence_object *fence)
19176+{
19177+ struct drm_psb_private *dev_priv = dev->dev_private;
19178+ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
19179+ unsigned long irq_flags;
19180+ int ret = 0;
19181+
19182+ mutex_lock(&dev_priv->msvdx_mutex);
19183+
19184+ psb_schedule_watchdog(dev_priv);
19185+
19186+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
19187+ if (dev_priv->msvdx_needs_reset) {
19188+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19189+ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
19190+ if (psb_msvdx_reset(dev_priv)) {
19191+ mutex_unlock(&dev_priv->msvdx_mutex);
19192+ ret = -EBUSY;
19193+ DRM_ERROR("MSVDX: Reset failed\n");
19194+ return ret;
19195+ }
19196+ dev_priv->msvdx_needs_reset = 0;
19197+ dev_priv->msvdx_busy = 0;
19198+
19199+ psb_msvdx_init(dev);
19200+ psb_msvdx_irq_preinstall(dev_priv);
19201+ psb_msvdx_irq_postinstall(dev_priv);
19202+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
19203+ }
19204+
19205+ if (!dev_priv->msvdx_fw_loaded) {
19206+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19207+		PSB_DEBUG_GENERAL("MSVDX: loading /lib/firmware/msvdx_fw.bin"
19208+				  " via udevd\n");
19209+
19210+ ret = psb_setup_fw(dev);
19211+ if (ret) {
19212+ mutex_unlock(&dev_priv->msvdx_mutex);
19213+
19214+			DRM_ERROR("MSVDX: is /lib/firmware/msvdx_fw.bin present,"
19215+				  " and is udevd configured correctly?\n");
19216+
19217+ /* FIXME: find a proper return value */
19218+ return -EFAULT;
19219+ }
19220+ dev_priv->msvdx_fw_loaded = 1;
19221+
19222+ psb_msvdx_irq_preinstall(dev_priv);
19223+ psb_msvdx_irq_postinstall(dev_priv);
19224+ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
19225+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
19226+ }
19227+
19228+
19229+ if (!dev_priv->msvdx_busy) {
19230+ dev_priv->msvdx_busy = 1;
19231+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19232+ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
19233+ sequence);
19234+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
19235+ cmd_size, NULL, sequence, 0);
19236+ if (ret) {
19237+ mutex_unlock(&dev_priv->msvdx_mutex);
19238+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
19239+ return ret;
19240+ }
19241+ } else {
19242+ struct psb_msvdx_cmd_queue *msvdx_cmd;
19243+ void *cmd = NULL;
19244+
19245+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19246+ /* queue the command to be sent when the h/w is ready */
19247+ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
19248+ sequence);
19249+ msvdx_cmd = drm_calloc(1, sizeof(struct psb_msvdx_cmd_queue),
19250+ DRM_MEM_DRIVER);
19251+ if (msvdx_cmd == NULL) {
19252+ mutex_unlock(&dev_priv->msvdx_mutex);
19253+ DRM_ERROR("MSVDXQUE: Out of memory...\n");
19254+ return -ENOMEM;
19255+ }
19256+
19257+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
19258+ cmd_size, &cmd, sequence, 1);
19259+ if (ret) {
19260+ mutex_unlock(&dev_priv->msvdx_mutex);
19261+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
19262+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
19263+ DRM_MEM_DRIVER);
19264+ return ret;
19265+ }
19266+ msvdx_cmd->cmd = cmd;
19267+ msvdx_cmd->cmd_size = cmd_size;
19268+ msvdx_cmd->sequence = sequence;
19269+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
19270+ list_add_tail(&msvdx_cmd->head, &dev_priv->msvdx_queue);
19271+ if (!dev_priv->msvdx_busy) {
19272+ dev_priv->msvdx_busy = 1;
19273+ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
19274+ psb_msvdx_dequeue_send(dev);
19275+ }
19276+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
19277+ }
19278+ mutex_unlock(&dev_priv->msvdx_mutex);
19279+ return ret;
19280+}
19281+
19282+int psb_msvdx_send(struct drm_device *dev, void *cmd, unsigned long cmd_size)
19283+{
19284+ int ret = 0;
19285+ struct drm_psb_private *dev_priv = dev->dev_private;
19286+
19287+ while (cmd_size > 0) {
19288+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
19289+ if (cur_cmd_size > cmd_size) {
19290+ ret = -EINVAL;
19291+ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
19292+ cmd_size, (unsigned long)cur_cmd_size);
19293+ goto out;
19294+ }
19295+ /* Send the message to h/w */
19296+ ret = psb_mtx_send(dev_priv, cmd);
19297+ if (ret) {
19298+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
19299+ goto out;
19300+ }
19301+ cmd += cur_cmd_size;
19302+ cmd_size -= cur_cmd_size;
19303+ }
19304+
19305+out:
19306+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
19307+ return ret;
19308+}
19309+
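+/*
+ * Write one message into the host-to-MTX circular buffer held in VEC
+ * local RAM (NUM_WORDS_MTX_BUF 32-bit words, indexed by the read/write
+ * pointers in the comms area).  A message never wraps: if it would run
+ * past the end of the buffer, a padding message fills the remainder and
+ * the write restarts at index zero.
+ */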
19310+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
19311+{
19312+ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
19313+ const uint32_t *p_msg = (uint32_t *) msg;
19314+ uint32_t msg_num, words_free, ridx, widx;
19315+ int ret = 0;
19316+
19317+ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
19318+
19319+ /* we need clocks enabled before we touch VEC local ram */
19320+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19321+
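+	/* the message size is in bytes; round up to whole 32-bit words */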
19322+ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
19323+
19324+ if (msg_num > NUM_WORDS_MTX_BUF) {
19325+ ret = -EINVAL;
19326+		DRM_ERROR("MSVDX: message exceeds maximum size, ret:%d\n", ret);
19327+ goto out;
19328+ }
19329+
19330+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
19331+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
19332+
19333+ /* message would wrap, need to send a pad message */
19334+ if (widx + msg_num > NUM_WORDS_MTX_BUF) {
19335+ /* Shouldn't happen for a PAD message itself */
19336+ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
19337+ == FWRK_MSGID_PADDING);
19338+
19339+		/* If the read pointer is at zero we must wait for it to
19340+		 * change; otherwise the write pointer would equal the read
19341+		 * pointer, which should only happen when the buffer is empty.
19342+		 *
19343+		 * This can only happen if we try to overfill the queue, and
19344+		 * queue management should make sure that never happens in
19345+		 * the first place.
19346+		 */
19347+ BUG_ON(0 == ridx);
19348+ if (0 == ridx) {
19349+ ret = -EINVAL;
19350+ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
19351+ goto out;
19352+ }
19353+ /* Send a pad message */
19354+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
19355+ (NUM_WORDS_MTX_BUF - widx) << 2);
19356+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
19357+ FWRK_MSGID_PADDING);
19358+ psb_mtx_send(dev_priv, pad_msg);
19359+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
19360+ }
19361+
19362+ if (widx >= ridx)
19363+ words_free = NUM_WORDS_MTX_BUF - (widx - ridx);
19364+ else
19365+ words_free = ridx - widx;
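+	/* e.g. with a 100-word buffer: widx=90, ridx=10 leaves
+	 * 100 - (90 - 10) = 20 free words; widx=10, ridx=40 leaves 30 */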
19366+
19367+ BUG_ON(msg_num > words_free);
19368+ if (msg_num > words_free) {
19369+ ret = -EINVAL;
19370+ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
19371+ goto out;
19372+ }
19373+
19374+ while (msg_num > 0) {
19375+ PSB_WMSVDX32(*p_msg++, MSVDX_COMMS_TO_MTX_BUF + (widx << 2));
19376+ msg_num--;
19377+ widx++;
19378+ if (NUM_WORDS_MTX_BUF == widx)
19379+ widx = 0;
19380+ }
19381+ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
19382+
19383+	/* Make sure clocks are enabled before we kick */
19384+	PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19387+
19388+ /* signal an interrupt to let the mtx know there is a new message */
19389+ PSB_WMSVDX32(1, MSVDX_MTX_KICKI);
19390+
19391+out:
19392+ return ret;
19393+}
19394+
19395+/*
19396+ * MSVDX MTX interrupt
19397+ */
19398+void psb_msvdx_mtx_interrupt(struct drm_device *dev)
19399+{
19400+ struct drm_psb_private *dev_priv =
19401+ (struct drm_psb_private *)dev->dev_private;
19402+ static uint32_t buf[128]; /* message buffer */
19403+ uint32_t ridx, widx;
19404+ uint32_t num, ofs; /* message num and offset */
19405+
19406+ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
19407+
19408+ /* Are clocks enabled - If not enable before
19409+ * attempting to read from VLR
19410+ */
19411+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
19412+		PSB_DEBUG_GENERAL("MSVDX: Clocks disabled when interrupt set\n");
19413+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19414+ }
19415+
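+	/*
+	 * Drain the MTX-to-host circular buffer: read the header word,
+	 * derive the message length from it, then read the remaining
+	 * words, wrapping the read index at NUM_WORDS_HOST_BUF.
+	 */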
19416+loop: /* re-entered via goto while a reset is pending */
19417+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
19418+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
19419+
19420+ /* Get out of here if nothing */
19421+ if (ridx == widx)
19422+ goto done;
19423+
19424+ ofs = 0;
19425+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
19426+
19427+	/* round the message size up to whole 32-bit words */
19428+ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
19429+
19430+ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
19431+
19432+ if (++ridx >= NUM_WORDS_HOST_BUF)
19433+ ridx = 0;
19434+
19435+ for (ofs++; ofs < num; ofs++) {
19436+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
19437+
19438+ if (++ridx >= NUM_WORDS_HOST_BUF)
19439+ ridx = 0;
19440+ }
19441+
19442+ /* Update the Read index */
19443+ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
19444+
19445+ if (dev_priv->msvdx_needs_reset)
19446+ goto loop;
19447+
19448+ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
19449+ case VA_MSGID_CMD_HW_PANIC:
19450+ case VA_MSGID_CMD_FAILED: {
19451+ uint32_t fence = MEMIO_READ_FIELD(buf,
19452+ FW_VA_CMD_FAILED_FENCE_VALUE);
19453+ uint32_t fault = MEMIO_READ_FIELD(buf,
19454+ FW_VA_CMD_FAILED_IRQSTATUS);
19455+ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
19456+ uint32_t diff = 0;
19457+
19458+ if (msg_id == VA_MSGID_CMD_HW_PANIC)
19459+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
19460+ "Fault detected"
19461+ " - Fence: %08x, Status: %08x"
19462+ " - resetting and ignoring error\n",
19463+ fence, fault);
19464+ else
19465+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
19466+ "Fault detected"
19467+ " - Fence: %08x, Status: %08x"
19468+ " - resetting and ignoring error\n",
19469+ fence, fault);
19470+
19471+ dev_priv->msvdx_needs_reset = 1;
19472+
19473+ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
19474+ diff = dev_priv->msvdx_current_sequence
19475+ - dev_priv->sequence[PSB_ENGINE_VIDEO];
19476+
19477+ if (diff > 0x0FFFFFFF)
19478+ dev_priv->msvdx_current_sequence++;
19479+
19480+ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
19481+ "assuming %08x\n",
19482+ dev_priv->msvdx_current_sequence);
19483+ } else {
19484+ dev_priv->msvdx_current_sequence = fence;
19485+ }
19486+
19487+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
19488+ dev_priv->msvdx_current_sequence,
19489+ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
19490+
19491+ /* Flush the command queue */
19492+ psb_msvdx_flush_cmd_queue(dev);
19493+
19494+ goto done;
19495+ }
19496+ case VA_MSGID_CMD_COMPLETED: {
19497+ uint32_t fence = MEMIO_READ_FIELD(buf,
19498+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
19499+ uint32_t flags = MEMIO_READ_FIELD(buf,
19500+ FW_VA_CMD_COMPLETED_FLAGS);
19501+
19502+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
19503+ "FenceID: %08x, flags: 0x%x\n",
19504+ fence, flags);
19505+
19506+ dev_priv->msvdx_current_sequence = fence;
19507+
19508+ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
19509+
19510+ if (flags & FW_VA_RENDER_HOST_INT) {
19511+ /*Now send the next command from the msvdx cmd queue */
19512+ psb_msvdx_dequeue_send(dev);
19513+ goto done;
19514+ }
19515+
19516+ break;
19517+ }
19518+ case VA_MSGID_CMD_COMPLETED_BATCH: {
19519+ uint32_t fence = MEMIO_READ_FIELD(buf,
19520+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
19521+ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
19522+ FW_VA_CMD_COMPLETED_NO_TICKS);
19523+
19524+ /* we have the fence value in the message */
19525+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
19526+ " FenceID: %08x, TickCount: %08x\n",
19527+ fence, tickcnt);
19528+ dev_priv->msvdx_current_sequence = fence;
19529+
19530+ break;
19531+ }
19532+ case VA_MSGID_ACK:
19533+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
19534+ break;
19535+
19536+ case VA_MSGID_TEST1:
19537+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
19538+ break;
19539+
19540+ case VA_MSGID_TEST2:
19541+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
19542+ break;
19543+ /* Don't need to do anything with these messages */
19544+
19545+ case VA_MSGID_DEBLOCK_REQUIRED: {
19546+ uint32_t ctxid = MEMIO_READ_FIELD(buf,
19547+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
19548+
19549+		/* The BE will now be locked. */
19550+ /* Unblock rendec by reading the mtx2mtx end of slice */
19551+ (void) PSB_RMSVDX32(MSVDX_RENDEC_READ_DATA);
19552+
19553+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
19554+ " Context=%08x\n", ctxid);
19555+ goto done;
19556+ }
19557+ default:
19558+		DRM_ERROR("MSVDX: unknown message from MTX\n");
19559+ goto done;
19560+ }
19561+
19562+done:
19563+
19564+#if 1
19565+ if (!dev_priv->msvdx_busy) {
19566+ /* If the firmware says the hardware is idle
19567+ * and the CCB is empty then we can power down
19568+ */
19569+ uint32_t fs_status = PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS);
19570+ uint32_t ccb_roff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
19571+ uint32_t ccb_woff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
19572+
19573+ /* check that clocks are enabled before reading VLR */
19574+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
19575+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
19576+
19577+ if ((fs_status & MSVDX_FW_STATUS_HW_IDLE) &&
19578+ (ccb_roff == ccb_woff)) {
19579+ PSB_DEBUG_GENERAL("MSVDX: Setting clock to minimal\n");
19580+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
19581+ }
19582+ }
19583+#endif
19584+ DRM_MEMORYBARRIER(); /* TBD check this... */
19585+}
19586+
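+/*
+ * Watchdog helper: the sequence difference computed below exceeds
+ * 0x0FFFFFFF only when msvdx_current_sequence has fallen behind the last
+ * submitted sequence (modulo-2^32), i.e. the firmware still owes fence
+ * reports for outstanding commands.
+ */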
19587+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
19588+ int *msvdx_lockup, int *msvdx_idle)
19589+{
19590+ int tmp;
19591+ *msvdx_lockup = 0;
19592+ *msvdx_idle = 1;
19593+
19594+ if (!dev_priv->has_msvdx)
19595+ return;
19596+#if 0
19597+ PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
19598+ "last_sequence:%d and last_submitted_sequence :%d\n",
19599+ dev_priv->msvdx_current_sequence,
19600+ dev_priv->msvdx_last_sequence,
19601+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
19602+#endif
19603+
19604+ tmp = dev_priv->msvdx_current_sequence -
19605+ dev_priv->sequence[PSB_ENGINE_VIDEO];
19606+
19607+ if (tmp > 0x0FFFFFFF) {
19608+ if (dev_priv->msvdx_current_sequence ==
19609+ dev_priv->msvdx_last_sequence) {
19610+ DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
19611+ dev_priv->msvdx_current_sequence);
19612+ *msvdx_lockup = 1;
19613+ } else {
19614+ PSB_DEBUG_GENERAL("MSVDXTimer: "
19615+ "msvdx responded fine so far\n");
19616+ dev_priv->msvdx_last_sequence =
19617+ dev_priv->msvdx_current_sequence;
19618+ *msvdx_idle = 0;
19619+ }
19620+ }
19621+}
19622+
19623+/* power up msvdx, OSPM function */
19624+int psb_power_up_msvdx(struct drm_device *dev)
19625+{
19626+ struct drm_psb_private *dev_priv =
19627+ (struct drm_psb_private *)dev->dev_private;
19628+ int ret;
19629+
19630+ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) != PSB_PWR_STATE_D0i3)
19631+ return -EINVAL;
19632+
19633+ PSB_DEBUG_TMP("power up msvdx\n");
19634+ dump_stack();
19635+
19636+ psb_up_island_power(dev, PSB_VIDEO_DEC_ISLAND);
19637+
19638+ ret = psb_msvdx_init(dev);
19639+ if (ret) {
19640+		DRM_ERROR("MSVDX: failed to init while powering it up\n");
19641+ goto err;
19642+ }
19643+ PSB_WMSVDX32(dev_priv->msvdx_clk_state, MSVDX_MAN_CLK_ENABLE);
19644+
19645+ PSB_DEBUG_GENERAL("FIXME restore registers or init msvdx\n");
19646+
19647+ PSB_DEBUG_GENERAL("FIXME MSVDX MMU setting up\n");
19648+
19649+ dev_priv->msvdx_state = PSB_PWR_STATE_D0i0;
19650+ return 0;
19651+
19652+err:
19653+ return -1;
19654+}
19655+
19656+int psb_power_down_msvdx(struct drm_device *dev)
19657+{
19658+ struct drm_psb_private *dev_priv =
19659+ (struct drm_psb_private *)dev->dev_private;
19660+
19661+ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) != PSB_PWR_STATE_D0i0)
19662+ return -EINVAL;
19663+ if (dev_priv->msvdx_busy) {
19664+		PSB_DEBUG_GENERAL("FIXME: MSVDX is busy, should wait for it\n");
19665+ return -EBUSY;
19666+ }
19667+
19668+ dev_priv->msvdx_clk_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE);
19669+ PSB_DEBUG_GENERAL("FIXME: save MSVDX register\n");
19670+
19671+ PSB_DEBUG_GENERAL("FIXME: save MSVDX context\n");
19672+ psb_down_island_power(dev, PSB_VIDEO_DEC_ISLAND);
19673+
19674+ dev_priv->msvdx_state = PSB_PWR_STATE_D0i3;
19675+
19676+ return 0;
19677+}
19678diff -uNr a/drivers/gpu/drm/psb/psb_msvdx.h b/drivers/gpu/drm/psb/psb_msvdx.h
19679--- a/drivers/gpu/drm/psb/psb_msvdx.h 1969-12-31 16:00:00.000000000 -0800
19680+++ b/drivers/gpu/drm/psb/psb_msvdx.h 2009-04-07 13:28:38.000000000 -0700
19681@@ -0,0 +1,442 @@
19682+/**************************************************************************
19683+ *
19684+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
19685+ * Copyright (c) Imagination Technologies Limited, UK
19686+ * All Rights Reserved.
19687+ *
19688+ * Permission is hereby granted, free of charge, to any person obtaining a
19689+ * copy of this software and associated documentation files (the
19690+ * "Software"), to deal in the Software without restriction, including
19691+ * without limitation the rights to use, copy, modify, merge, publish,
19692+ * distribute, sub license, and/or sell copies of the Software, and to
19693+ * permit persons to whom the Software is furnished to do so, subject to
19694+ * the following conditions:
19695+ *
19696+ * The above copyright notice and this permission notice (including the
19697+ * next paragraph) shall be included in all copies or substantial portions
19698+ * of the Software.
19699+ *
19700+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19701+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19702+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19703+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19704+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19705+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19706+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
19707+ *
19708+ **************************************************************************/
19709+
19710+#ifndef _PSB_MSVDX_H_
19711+#define _PSB_MSVDX_H_
19712+
19713+#include "psb_drv.h"
19714+
19715+void psb_msvdx_mtx_interrupt(struct drm_device *dev);
19716+int psb_msvdx_init(struct drm_device *dev);
19717+int psb_msvdx_uninit(struct drm_device *dev);
19718+int psb_msvdx_reset(struct drm_psb_private *dev_priv);
19719+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
19720+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
19721+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv);
19722+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv);
19723+void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
19724+extern void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
19725+ int *msvdx_lockup, int *msvdx_idle);
19726+int psb_setup_fw(struct drm_device *dev);
19727+int psb_power_up_msvdx(struct drm_device *dev);
19728+int psb_power_down_msvdx(struct drm_device *dev);
19729+
19730+/* Non-Optimal Invalidation is not default */
19731+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
19732+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
19733+
19734+#define FW_VA_RENDER_HOST_INT 0x00004000
19735+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
19736+
19737+/* There is no work currently underway on the hardware */
19738+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
19739+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
19740+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
19741+ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \
19742+ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
19743+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
19744+
19745+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
19746+ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
19747+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
19748+
19749+#define POULSBO_D0 0x5
19750+#define POULSBO_D1 0x6
19751+#define PSB_REVID_OFFSET 0x8
19752+
19753+#define MTX_CODE_BASE (0x80900000)
19754+#define MTX_DATA_BASE (0x82880000)
19755+#define PC_START_ADDRESS (0x80900000)
19756+
19757+#define MTX_CORE_CODE_MEM (0x10)
19758+#define MTX_CORE_DATA_MEM (0x18)
19759+
19760+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
19761+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
19762+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \
19763+ (0x00010000)
19764+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \
19765+ (0x00100000)
19766+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \
19767+ (0x01000000)
19768+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \
19769+ (0x10000000)
19770+
19771+#define clk_enable_all \
19772+(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
19773+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
19774+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
19775+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
19776+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
19777+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
19778+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
19779+
19780+#define clk_enable_minimal \
19781+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
19782+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
19783+
19784+#define clk_enable_auto \
19785+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
19786+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
19787+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
19788+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
19789+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
19790+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
19791+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
19792+
19793+#define msvdx_sw_reset_all \
19794+(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
19795+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
19796+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
19797+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
19798+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
19799+
19800+#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \
19801+ (((R_SPECIFIER)<<4) | (U_SPECIFIER))
19802+#define MTX_PC MTX_INTERNAL_REG(0, 5)
19803+
19804+#define RENDEC_A_SIZE (1024 * 1024)
19805+#define RENDEC_B_SIZE (1024 * 1024)
19806+
19807+#define MEMIO_READ_FIELD(vpMem, field) \
19808+ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
19809+ & field##_MASK) >> field##_SHIFT))
19810+
19811+#define MEMIO_WRITE_FIELD(vpMem, field, value) \
19812+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
19813+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
19814+ & (field##_TYPE)~field##_MASK) | \
19815+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK);
19816+
19817+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \
19818+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
19819+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \
19820+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));
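+/*
+ * Example: MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) reads the uint8_t at
+ * byte offset 0x0000 of msg, masks it with 0xFF and shifts by 0, i.e. it
+ * returns the size byte of a generic firmware message header.
+ */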
19821+
19822+#define REGIO_READ_FIELD(reg_val, reg, field) \
19823+ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
19824+
19825+#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \
19826+ (reg_val) = \
19827+ ((reg_val) & ~(reg##_##field##_MASK)) | \
19828+ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
19829+
19830+#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \
19831+ (reg_val) = \
19832+ ((reg_val) | ((value) << (reg##_##field##_SHIFT)));
19833+
19834+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \
19835+ (0x00000001)
19836+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \
19837+ (0x00000002)
19838+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \
19839+ (0x00000004)
19840+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \
19841+ (0x00000008)
19842+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \
19843+ (0x00000010)
19844+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \
19845+ (0x00000020)
19846+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \
19847+ (0x00000040)
19848+
19862+/* MTX registers */
19863+#define MSVDX_MTX_ENABLE (0x0000)
19864+#define MSVDX_MTX_KICKI (0x0088)
19865+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
19866+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
19867+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
19868+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
19869+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
19870+#define MSVDX_MTX_SOFT_RESET (0x0200)
19871+
19872+/* MSVDX registers */
19873+#define MSVDX_CONTROL (0x0600)
19874+#define MSVDX_INTERRUPT_CLEAR (0x060C)
19875+#define MSVDX_INTERRUPT_STATUS (0x0608)
19876+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
19877+#define MSVDX_MMU_CONTROL0 (0x0680)
19878+#define MSVDX_MTX_RAM_BANK (0x06F0)
19879+#define MSVDX_MAN_CLK_ENABLE (0x0620)
19880+
19881+/* RENDEC registers */
19882+#define MSVDX_RENDEC_CONTROL0 (0x0868)
19883+#define MSVDX_RENDEC_CONTROL1 (0x086C)
19884+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
19885+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
19886+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
19887+#define MSVDX_RENDEC_READ_DATA (0x0898)
19888+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
19889+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
19890+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
19891+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
19892+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
19893+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
19894+
19895+/*
19896+ * This defines the MSVDX communication buffer
19897+ */
19898+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
19899+/*!< Host buffer size (in 32-bit words) */
19900+#define NUM_WORDS_HOST_BUF (100)
19901+/*!< MTX buffer size (in 32-bit words) */
19902+#define NUM_WORDS_MTX_BUF (100)
19903+
19907+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
19908+
19909+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
19910+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
19911+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
19912+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
19913+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
19914+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
19915+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
19916+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
19917+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
19918+#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18)
19919+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
19920+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
19921+#define MSVDX_COMMS_TO_MTX_BUF \
19922+ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
19923+
19924+#define MSVDX_COMMS_AREA_END \
19925+ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
19926+
19927+#if (MSVDX_COMMS_AREA_END != 0x03000)
19928+#error "MSVDX_COMMS_AREA_END must equal 0x03000"
19929+#endif
19930+
19931+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
19932+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
19933+
19934+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
19935+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
19936+
19937+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
19938+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
19939+
19940+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
19941+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
19942+
19943+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
19944+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
19945+
19946+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
19947+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
19948+
19949+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
19950+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
19951+
19952+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
19953+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
19954+
19958+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
19959+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
19960+
19961+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
19962+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
19963+
19964+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
19965+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
19966+
19967+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
19968+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
19969+
19970+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
19971+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
19972+
19973+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
19974+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
19975+
19976+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
19977+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
19978+
19979+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
19980+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
19981+
19982+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
19983+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
19984+
19985+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
19986+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
19987+
19988+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
19989+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
19990+
19991+/* Start of parser specific Host->MTX messages. */
19992+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80)
19993+
19994+/* Start of parser specific MTX->Host messages. */
19995+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0)
19996+
19997+#define FWRK_MSGID_PADDING (0)
19998+
19999+#define FWRK_GENMSG_SIZE_TYPE uint8_t
20000+#define FWRK_GENMSG_SIZE_MASK (0xFF)
20001+#define FWRK_GENMSG_SIZE_SHIFT (0)
20002+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
20003+#define FWRK_GENMSG_ID_TYPE uint8_t
20004+#define FWRK_GENMSG_ID_MASK (0xFF)
20005+#define FWRK_GENMSG_ID_SHIFT (0)
20006+#define FWRK_GENMSG_ID_OFFSET (0x0001)
20007+#define FWRK_PADMSG_SIZE (2)
20008+
20009+/* This type defines the framework specified message ids */
20010+enum {
20011+ /* ! Sent by the DXVA driver on the host to the mtx firmware.
20012+ */
20013+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
20014+ VA_MSGID_RENDER,
20015+ VA_MSGID_DEBLOCK,
20016+ VA_MSGID_BUBBLE,
20017+
20018+ /* Test Messages */
20019+ VA_MSGID_TEST1,
20020+ VA_MSGID_TEST2,
20021+
20022+ /*! Sent by the mtx firmware to itself.
20023+ */
20024+ VA_MSGID_RENDER_MC_INTERRUPT,
20025+
20026+ /*! Sent by the DXVA firmware on the MTX to the host.
20027+ */
20028+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
20029+ VA_MSGID_CMD_COMPLETED_BATCH,
20030+ VA_MSGID_DEBLOCK_REQUIRED,
20031+ VA_MSGID_TEST_RESPONCE,
20032+ VA_MSGID_ACK,
20033+
20034+ VA_MSGID_CMD_FAILED,
20035+ VA_MSGID_CMD_UNSUPPORTED,
20036+ VA_MSGID_CMD_HW_PANIC,
20037+};
20038+
20039+/* MSVDX Firmware interface */
20040+#define FW_VA_INIT_SIZE (8)
20041+#define FW_VA_DEBUG_TEST2_SIZE (4)
20042+
20043+/* FW_VA_DEBUG_TEST2 MSG_SIZE */
20044+#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t
20045+#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF)
20046+#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000)
20047+#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0)
20048+
20049+/* FW_VA_DEBUG_TEST2 ID */
20050+#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t
20051+#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF)
20052+#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001)
20053+#define FW_VA_DEBUG_TEST2_ID_SHIFT (0)
20054+
20055+/* FW_VA_CMD_FAILED FENCE_VALUE */
20056+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
20057+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
20058+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
20059+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
20060+
20061+/* FW_VA_CMD_FAILED IRQSTATUS */
20062+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
20063+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
20064+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
20065+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
20066+
20067+/* FW_VA_CMD_COMPLETED FENCE_VALUE */
20068+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
20069+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
20070+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
20071+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
20072+
20073+/* FW_VA_CMD_COMPLETED FLAGS */
20074+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
20075+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
20076+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
20077+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
20078+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
20079+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
20080+
20081+/* FW_VA_CMD_COMPLETED NO_TICKS */
20082+#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t
20083+#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF)
20084+#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002)
20085+#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0)
20086+
20087+/* FW_VA_DEBLOCK_REQUIRED CONTEXT */
20088+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
20089+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
20090+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
20091+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
20092+
20093+/* FW_VA_INIT GLOBAL_PTD */
20094+#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t
20095+#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF)
20096+#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004)
20097+#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0)
20098+
20099+/* FW_VA_RENDER FENCE_VALUE */
20100+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
20101+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
20102+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
20103+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
20104+
20105+/* FW_VA_RENDER MMUPTD */
20106+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
20107+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
20108+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
20109+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
20110+
20111+/* FW_VA_RENDER BUFFER_ADDRESS */
20112+#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t
20113+#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF)
20114+#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008)
20115+#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0)
20116+
20117+/* FW_VA_RENDER BUFFER_SIZE */
20118+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
20119+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
20120+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
20121+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
20122+
20123+#endif
20124diff -uNr a/drivers/gpu/drm/psb/psb_msvdxinit.c b/drivers/gpu/drm/psb/psb_msvdxinit.c
20125--- a/drivers/gpu/drm/psb/psb_msvdxinit.c 1969-12-31 16:00:00.000000000 -0800
20126+++ b/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-04-07 13:28:38.000000000 -0700
20127@@ -0,0 +1,668 @@
20128+/**
20129+ * file psb_msvdxinit.c
20130+ * MSVDX initialization and mtx-firmware upload
20131+ *
20132+ */
20133+
20134+/**************************************************************************
20135+ *
20136+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
20137+ * Copyright (c) Imagination Technologies Limited, UK
20138+ * All Rights Reserved.
20139+ *
20140+ * Permission is hereby granted, free of charge, to any person obtaining a
20141+ * copy of this software and associated documentation files (the
20142+ * "Software"), to deal in the Software without restriction, including
20143+ * without limitation the rights to use, copy, modify, merge, publish,
20144+ * distribute, sub license, and/or sell copies of the Software, and to
20145+ * permit persons to whom the Software is furnished to do so, subject to
20146+ * the following conditions:
20147+ *
20148+ * The above copyright notice and this permission notice (including the
20149+ * next paragraph) shall be included in all copies or substantial portions
20150+ * of the Software.
20151+ *
20152+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20153+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20154+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20155+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20156+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20157+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20158+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
20159+ *
20160+ **************************************************************************/
20161+
20162+#include <drm/drmP.h>
20163+#include <drm/drm.h>
20164+#include "psb_drv.h"
20165+#include "psb_msvdx.h"
20166+#include <linux/firmware.h>
20167+
20168+#define MSVDX_REG (dev_priv->msvdx_reg)
20169+uint8_t psb_rev_id;
20170+/*MSVDX FW header*/
20171+struct msvdx_fw {
20172+ uint32_t ver;
20173+ uint32_t text_size;
20174+ uint32_t data_size;
20175+ uint32_t data_location;
20176+};
20177+
20178+int psb_wait_for_register(struct drm_psb_private *dev_priv,
20179+ uint32_t offset, uint32_t value, uint32_t enable)
20180+{
20181+ uint32_t tmp;
20182+ uint32_t poll_cnt = 10000;
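+	/* up to 10,000 polls with a 1 ms delay: ~10 s worst-case timeout */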
20183+ while (poll_cnt) {
20184+ tmp = PSB_RMSVDX32(offset);
20185+ if (value == (tmp & enable)) /* All the bits are reset */
20186+ return 0; /* So exit */
20187+
20188+ /* Wait a bit */
20189+ DRM_UDELAY(1000);
20190+ poll_cnt--;
20191+ }
20192+ DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
20193+ " expecting %08x (mask %08x), got %08x\n",
20194+ offset, value, enable, tmp);
20195+
20196+ return 1;
20197+}
20198+
20199+int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
20200+{
20201+ int ret = 0;
20202+ uint32_t mtx_int = 0;
20203+
20204+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
20205+ 1);
20206+
20207+ ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
20208+ /* Required value */
20209+ mtx_int,
20210+ /* Enabled bits */
20211+ mtx_int);
20212+
20213+ if (ret) {
20214+ DRM_ERROR("MSVDX: Error Mtx did not return"
20215+ " int within a resonable time\n");
20216+ return ret;
20217+ }
20218+
20219+ PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
20220+
20221+ /* Got it so clear the bit */
20222+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
20223+
20224+ return ret;
20225+}
20226+
20227+void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
20228+ const uint32_t core_reg, const uint32_t val)
20229+{
20230+ uint32_t reg = 0;
20231+
20232+ /* Put data in MTX_RW_DATA */
20233+ PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
20234+
20235+ /* DREADY is set to 0 and request a write */
20236+ reg = core_reg;
20237+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
20238+ MTX_RNW, 0);
20239+ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
20240+ MTX_DREADY, 0);
20241+ PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
20242+
20243+ psb_wait_for_register(dev_priv,
20244+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
20245+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
20246+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
20247+}
20248+
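+/*
+ * Stream firmware words into banked MTX RAM: RAM_ACCESS_CONTROL is
+ * reprogrammed (with the MTX_MCMAI auto-increment bit set) whenever the
+ * address crosses into a new bank; consecutive writes to the
+ * RAM_ACCESS_DATA_TRANSFER register then fill successive words.
+ */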
20249+void psb_upload_fw(struct drm_psb_private *dev_priv,
20250+ const uint32_t data_mem, uint32_t ram_bank_size,
20251+ uint32_t address, const unsigned int words,
20252+ const uint32_t * const data)
20253+{
20254+ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
20255+ uint32_t access_ctrl;
20256+
20257+ /* Save the access control register... */
20258+ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
20259+
20260+	/* Wait for MCMSTAT to become idle (1) */
20261+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
20262+ 1, /* Required Value */
20263+ 0xffffffff /* Enables */);
20264+
20265+ for (loop = 0; loop < words; loop++) {
20266+ ram_id = data_mem + (address / ram_bank_size);
20267+ if (ram_id != cur_bank) {
20268+ addr = address >> 2;
20269+ ctrl = 0;
20270+ REGIO_WRITE_FIELD_LITE(ctrl,
20271+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20272+ MTX_MCMID, ram_id);
20273+ REGIO_WRITE_FIELD_LITE(ctrl,
20274+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20275+ MTX_MCM_ADDR, addr);
20276+ REGIO_WRITE_FIELD_LITE(ctrl,
20277+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20278+ MTX_MCMAI, 1);
20279+ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
20280+ cur_bank = ram_id;
20281+ }
20282+ address += 4;
20283+
20284+ PSB_WMSVDX32(data[loop],
20285+ MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
20286+
20287+		/* Wait for MCMSTAT to become idle (1) */
20288+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
20289+ 1, /* Required Value */
20290+ 0xffffffff /* Enables */);
20291+ }
20292+ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
20293+
20294+ /* Restore the access control register... */
20295+ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
20296+}
20297+
20298+static int psb_verify_fw(struct drm_psb_private *dev_priv,
20299+ const uint32_t ram_bank_size,
20300+ const uint32_t data_mem, uint32_t address,
20301+ const uint32_t words, const uint32_t * const data)
20302+{
20303+ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
20304+ uint32_t access_ctrl;
20305+ int ret = 0;
20306+
20307+ /* Save the access control register... */
20308+ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
20309+
20310+	/* Wait for MCMSTAT to become idle (1) */
20311+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
20312+ 1, /* Required Value */
20313+ 0xffffffff /* Enables */);
20314+
20315+ for (loop = 0; loop < words; loop++) {
20316+ uint32_t tmp;
20317+ ram_id = data_mem + (address / ram_bank_size);
20318+
20319+ if (ram_id != cur_bank) {
20320+ addr = address >> 2;
20321+ ctrl = 0;
20322+ REGIO_WRITE_FIELD_LITE(ctrl,
20323+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20324+ MTX_MCMID, ram_id);
20325+ REGIO_WRITE_FIELD_LITE(ctrl,
20326+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20327+ MTX_MCM_ADDR, addr);
20328+ REGIO_WRITE_FIELD_LITE(ctrl,
20329+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20330+ MTX_MCMAI, 1);
20331+ REGIO_WRITE_FIELD_LITE(ctrl,
20332+ MSVDX_MTX_RAM_ACCESS_CONTROL,
20333+ MTX_MCMR, 1);
20334+
20335+ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
20336+
20337+ cur_bank = ram_id;
20338+ }
20339+ address += 4;
20340+
20341+		/* Wait for MCMSTAT to become idle (1) */
20342+ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
20343+ 1, /* Required Value */
20344+ 0xffffffff /* Enables */);
20345+
20346+ tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
20347+ if (data[loop] != tmp) {
20348+			DRM_ERROR("psb: Firmware validation failed"
20349+ " at index=%08x\n", loop);
20350+ ret = 1;
20351+ break;
20352+ }
20353+ }
20354+
20355+ /* Restore the access control register... */
20356+ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
20357+
20358+ return ret;
20359+}
20360+
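+/*
+ * The msvdx_fw.bin blob is expected to be a struct msvdx_fw header
+ * followed by text_size words of code and data_size words of data;
+ * the size check below rejects anything else.
+ */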
20361+static uint32_t *msvdx_get_fw(struct drm_device *dev,
20362+ const struct firmware **raw, uint8_t *name)
20363+{
20364+ struct drm_psb_private *dev_priv = dev->dev_private;
20365+ int rc, fw_size;
20366+ int *ptr = NULL;
20367+
20368+ rc = request_firmware(raw, name, &dev->pdev->dev);
20369+ if (rc < 0) {
20370+ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
20371+ name, rc);
20372+ return NULL;
20373+ }
20374+
20375+ if ((*raw)->size < sizeof(struct msvdx_fw)) {
20376+		DRM_ERROR("MSVDX: %s is not the correct size (%zd)\n",
20377+			  name, (*raw)->size);
+		release_firmware(*raw);
20378+		return NULL;
20379+ }
20380+
20381+ ptr = (int *) ((*raw))->data;
20382+
20383+ if (!ptr) {
20384+ DRM_ERROR("MSVDX: Failed to load %s\n", name);
20385+ return NULL;
20386+ }
20387+
20388+ /* another sanity check... */
20389+ fw_size = sizeof(struct msvdx_fw) +
20390+ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
20391+ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
20392+ if ((*raw)->size != fw_size) {
20393+		DRM_ERROR("MSVDX: %s is not the correct size (%zd)\n",
20394+			  name, (*raw)->size);
+		release_firmware(*raw);
20395+		return NULL;
20396+ }
20397+ dev_priv->msvdx_fw = drm_calloc(1, fw_size, DRM_MEM_DRIVER);
20398+ if (dev_priv->msvdx_fw == NULL)
20399+ DRM_ERROR("MSVDX: allocate FW buffer failed\n");
20400+ else {
20401+ memcpy(dev_priv->msvdx_fw, ptr, fw_size);
20402+ dev_priv->msvdx_fw_size = fw_size;
20403+ }
20404+
20405+	PSB_DEBUG_GENERAL("MSVDX: releasing firmware resources\n");
20406+ release_firmware(*raw);
20407+
20408+ return dev_priv->msvdx_fw;
20409+}
20410+
20411+int psb_setup_fw(struct drm_device *dev)
20412+{
20413+ struct drm_psb_private *dev_priv = dev->dev_private;
20414+ int ret = 0;
20415+
20416+ uint32_t ram_bank_size;
20417+ struct msvdx_fw *fw;
20418+ uint32_t *fw_ptr = NULL;
20419+ uint32_t *text_ptr = NULL;
20420+ uint32_t *data_ptr = NULL;
20421+ const struct firmware *raw = NULL;
20422+ /* todo : Assert the clock is on - if not turn it on to upload code */
20423+
20424+ PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
20425+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
20426+
20427+ /* Reset MTX */
20428+ PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
20429+ MSVDX_MTX_SOFT_RESET);
20430+
20431+	/* Initialise the communication control area to 0 */
20432+ if (psb_rev_id >= POULSBO_D1) {
20433+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
20434+ " or later revision.\n");
20435+ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
20436+ MSVDX_COMMS_OFFSET_FLAGS);
20437+ } else {
20438+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
20439+ " or earlier revision.\n");
20440+ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
20441+ MSVDX_COMMS_OFFSET_FLAGS);
20442+ }
20443+
20444+ PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
20445+ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
20446+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
20447+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
20448+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
20449+ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
20450+ PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
20451+
20452+ /* read register bank size */
20453+ {
20454+ uint32_t bank_size, reg;
20455+ reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
20456+ bank_size =
20457+ REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
20458+ CR_MTX_RAM_BANK_SIZE);
20459+ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
20460+ }
20461+
20462+ PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
20463+ ram_bank_size);
20464+
20465+ /* if FW already loaded from storage */
20466+ if (dev_priv->msvdx_fw)
20467+ fw_ptr = dev_priv->msvdx_fw;
20468+ else
20469+ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
20470+
20471+ if (!fw_ptr) {
20472+ DRM_ERROR("psb: No valid msvdx_fw.bin firmware found.\n");
20473+ ret = 1;
20474+ goto out;
20475+ }
20476+
20477+ fw = (struct msvdx_fw *) fw_ptr;
20478+ if (fw->ver != 0x02) {
20479+ DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
20480+			  " got version=%02x, expected version=%02x\n",
20481+ fw->ver, 0x02);
20482+ ret = 1;
20483+ goto out;
20484+ }
20485+
20486+ text_ptr =
20487+ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
20488+ data_ptr = text_ptr + fw->text_size;
20489+
20490+ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
20491+ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
20492+ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
20493+ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
20494+ fw->data_location);
20495+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
20496+ *text_ptr);
20497+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
20498+ *data_ptr);
20499+
20500+ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
20501+ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
20502+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
20503+ text_ptr);
20504+ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
20505+ fw->data_location - MTX_DATA_BASE, fw->data_size,
20506+ data_ptr);
20507+
20508+#if 0
20509+ /* todo : Verify code upload possibly only in debug */
20510+ ret = psb_verify_fw(dev_priv, ram_bank_size,
20511+ MTX_CORE_CODE_MEM,
20512+ PC_START_ADDRESS - MTX_CODE_BASE,
20513+ fw->text_size, text_ptr);
20514+ if (ret) {
20515+ /* Firmware code upload failed */
20516+ ret = 1;
20517+ goto out;
20518+ }
20519+
20520+ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
20521+ fw->data_location - MTX_DATA_BASE,
20522+ fw->data_size, data_ptr);
20523+ if (ret) {
20524+ /* Firmware data upload failed */
20525+ ret = 1;
20526+ goto out;
20527+ }
20528+#else
20529+ (void)psb_verify_fw;
20530+#endif
20531+ /* -- Set starting PC address */
20532+ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
20533+
20534+ /* -- Turn on the thread */
20535+ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
20536+
20537+ /* Wait for the signature value to be written back */
20538+ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
20539+ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
20540+ 0xffffffff /* Enabled bits */);
20541+ if (ret) {
20542+ DRM_ERROR("MSVDX: firmware fails to initialize.\n");
20543+ goto out;
20544+ }
20545+
20546+ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
20547+ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
20548+ MSVDX_COMMS_AREA_ADDR);
20549+#if 0
20550+
20551+ /* Send test message */
20552+ {
20553+ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
20554+
20555+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
20556+ FW_VA_DEBUG_TEST2_SIZE);
20557+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
20558+ VA_MSGID_TEST2);
20559+
20560+ ret = psb_mtx_send(dev_priv, msg_buf);
20561+ if (ret) {
20562+ DRM_ERROR("psb: MSVDX sending fails.\n");
20563+ goto out;
20564+ }
20565+
20566+ /* Wait for Mtx to ack this message */
20567+ psb_poll_mtx_irq(dev_priv);
20568+
20569+ }
20570+#endif
20571+out:
20572+
20573+ return ret;
20574+}
20575+
20576+
20577+static void psb_free_ccb(struct ttm_buffer_object **ccb)
20578+{
20579+ ttm_bo_unref(ccb);
20580+ *ccb = NULL;
20581+}
20582+
20583+/**
20584+ * Reset chip and disable interrupts.
20585+ * Return 0 success, 1 failure
20586+ */
20587+int psb_msvdx_reset(struct drm_psb_private *dev_priv)
20588+{
20589+ int ret = 0;
20590+
20591+ /* Issue software reset */
20592+ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
20593+
20594+ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
20595+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
20596+
20597+ if (!ret) {
20598+ /* Clear interrupt enabled flag */
20599+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
20600+
20601+ /* Clear any pending interrupt flags */
20602+ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
20603+ }
20604+
20605+ /* mutex_destroy(&dev_priv->msvdx_mutex); */
20606+
20607+ return ret;
20608+}
20609+
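+/*
+ * Allocate a pinned, zeroed TTM buffer backing one of the two RENDEC
+ * circular buffers; the resulting device offset is what psb_msvdx_init()
+ * programs into MSVDX_RENDEC_BASE_ADDR0/1.
+ */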
20610+static int psb_allocate_ccb(struct drm_device *dev,
20611+ struct ttm_buffer_object **ccb,
20612+ uint32_t *base_addr, int size)
20613+{
20614+ struct drm_psb_private *dev_priv = psb_priv(dev);
20615+ struct ttm_bo_device *bdev = &dev_priv->bdev;
20616+ int ret;
20617+ struct ttm_bo_kmap_obj tmp_kmap;
20618+ bool is_iomem;
20619+
20620+ PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
20621+
20622+ ret = ttm_buffer_object_create(bdev, size,
20623+ ttm_bo_type_kernel,
20624+ DRM_PSB_FLAG_MEM_KERNEL |
20625+ TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
20626+ NULL, ccb);
20627+ if (ret) {
20628+ DRM_ERROR("MSVDX: failed to allocate CCB.\n");
20629+ *ccb = NULL;
20630+ return 1;
20631+ }
20632+
20633+ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
20634+ if (ret) {
20635+ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
20636+ ttm_bo_unref(ccb);
20637+ *ccb = NULL;
20638+ return 1;
20639+ }
20640+
20641+ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
20642+ size);
20643+ ttm_bo_kunmap(&tmp_kmap);
20644+
20645+ *base_addr = (*ccb)->offset;
20646+ return 0;
20647+}
20648+
20649+int psb_msvdx_init(struct drm_device *dev)
20650+{
20651+ struct drm_psb_private *dev_priv = dev->dev_private;
20652+ uint32_t cmd;
20653+ /* uint32_t clk_gate_ctrl = clk_enable_all; */
20654+ int ret;
20655+
20656+ if (!dev_priv->ccb0) { /* one for the first time */
20657+ /* Initialize MSVDX command queueing */
20658+ INIT_LIST_HEAD(&dev_priv->msvdx_queue);
20659+ mutex_init(&dev_priv->msvdx_mutex);
20660+ spin_lock_init(&dev_priv->msvdx_lock);
20661+ /* figure out the stepping */
20662+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
20663+ }
20664+
20665+ dev_priv->msvdx_busy = 0;
20666+
20667+ /* Enable Clocks */
20668+ PSB_DEBUG_GENERAL("Enabling clocks\n");
20669+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
20670+
20671+ /* Enable MMU by removing all bypass bits */
20672+ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0);
20673+
20674+ /* firmware loading is deferred until the first command buffer is received */
20675+
20676+ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC, allocating CCB 0/1\n");
20677+ /* Allocate device virtual memory as required by RENDEC. */
20678+ if (!dev_priv->ccb0) {
20679+ ret = psb_allocate_ccb(dev, &dev_priv->ccb0,
20680+ &dev_priv->base_addr0,
20681+ RENDEC_A_SIZE);
20682+ if (ret)
20683+ goto err_exit;
20684+ }
20685+
20686+ if (!dev_priv->ccb1) {
20687+ ret = psb_allocate_ccb(dev, &dev_priv->ccb1,
20688+ &dev_priv->base_addr1,
20689+ RENDEC_B_SIZE);
20690+ if (ret)
20691+ goto err_exit;
20692+ }
20693+
20694+
20695+ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
20696+ dev_priv->base_addr0, dev_priv->base_addr1);
20697+
20698+ PSB_WMSVDX32(dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
20699+ PSB_WMSVDX32(dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
20700+
20701+ cmd = 0;
20702+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
20703+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
20704+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
20705+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
20706+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE);
20707+
20708+ cmd = 0;
20709+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
20710+ RENDEC_DECODE_START_SIZE, 0);
20711+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
20712+ RENDEC_BURST_SIZE_W, 1);
20713+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
20714+ RENDEC_BURST_SIZE_R, 1);
20715+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
20716+ RENDEC_EXTERNAL_MEMORY, 1);
20717+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1);
20718+
20719+ cmd = 0x00101010;
20720+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0);
20721+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1);
20722+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2);
20723+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3);
20724+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4);
20725+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5);
20726+
20727+ cmd = 0;
20728+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
20729+ 1);
20730+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0);
20731+
20732+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
20733+ PSB_DEBUG_INIT("MSVDX: defer firmware loading until the"
20734+ " first user-space command arrives\n");
20735+
20736+ dev_priv->msvdx_fw_loaded = 0; /* firmware still needs to be loaded */
20737+
20738+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
20739+
20740+#if 0
20741+ ret = psb_setup_fw(dev);
20742+ if (ret)
20743+ goto err_exit;
20744+ /* Send Initialisation message to firmware */
20745+ if (0) {
20746+ uint32_t msg_init[FW_VA_INIT_SIZE >> 2];
20747+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE,
20748+ FW_VA_INIT_SIZE);
20749+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT);
20750+
20751+ /* Need to set this for all but A0 */
20752+ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD,
20753+ psb_get_default_pd_addr(dev_priv->mmu));
20754+
20755+ ret = psb_mtx_send(dev_priv, msg_init);
20756+ if (ret)
20757+ goto err_exit;
20758+
20759+ psb_poll_mtx_irq(dev_priv);
20760+ }
20761+#endif
20762+
20763+ return 0;
20764+
20765+err_exit:
20766+ DRM_ERROR("MSVDX: initialization failed\n");
20767+ if (dev_priv->ccb0)
20768+ psb_free_ccb(&dev_priv->ccb0);
20769+ if (dev_priv->ccb1)
20770+ psb_free_ccb(&dev_priv->ccb1);
20771+
20772+ return 1;
20773+}
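
psb_msvdx_init() programs RENDEC_BUFFER_SIZE0/1 in 4 KiB units (the divisions by 4096 above), so both CCB sizes must be page multiples. A small compile-time guard for that assumption — RENDEC_A_SIZE and RENDEC_B_SIZE come from driver headers that are not part of this hunk:

/* Sanity sketch: the RENDEC size fields only hold whole 4 KiB units. */
static inline void example_check_rendec_sizes(void)
{
        BUILD_BUG_ON(RENDEC_A_SIZE % 4096);
        BUILD_BUG_ON(RENDEC_B_SIZE % 4096);
}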
20774+
20775+int psb_msvdx_uninit(struct drm_device *dev)
20776+{
20777+ struct drm_psb_private *dev_priv = dev->dev_private;
20778+
20779+ /* Reset MSVDX chip */
20780+ psb_msvdx_reset(dev_priv);
20781+
20782+ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
20783+ PSB_DEBUG_INIT("MSVDX: set the MSVDX clock to 0\n");
20784+ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE);
20785+
20786+ if (dev_priv->ccb0)
20787+ psb_free_ccb(&dev_priv->ccb0);
20788+ if (dev_priv->ccb1)
20789+ psb_free_ccb(&dev_priv->ccb1);
20790+ if (dev_priv->msvdx_fw)
20791+ drm_free(dev_priv->msvdx_fw, dev_priv->msvdx_fw_size,
20792+ DRM_MEM_DRIVER);
20793+
20794+ return 0;
20795+}
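
Taken together, the two entry points above give the MSVDX block a simple lifecycle: init sets up clocks, the MMU and the RENDEC CCBs but defers firmware, and uninit resets the core, gates the clock and frees the CCBs. A hedged sketch of a caller — the example_* wrapper is hypothetical, not from the patch:

static int example_msvdx_lifecycle(struct drm_device *dev)
{
        int ret;

        ret = psb_msvdx_init(dev);      /* clocks, MMU, RENDEC CCB 0/1 */
        if (ret)
                return -EIO;

        /* ... decode traffic; firmware loads on the first user command ... */

        return psb_msvdx_uninit(dev);   /* reset core, gate clock, free CCBs */
}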
20796diff -uNr a/drivers/gpu/drm/psb/psb_reg.h b/drivers/gpu/drm/psb/psb_reg.h
20797--- a/drivers/gpu/drm/psb/psb_reg.h 1969-12-31 16:00:00.000000000 -0800
20798+++ b/drivers/gpu/drm/psb/psb_reg.h 2009-04-07 13:28:38.000000000 -0700
20799@@ -0,0 +1,569 @@
20800+/**************************************************************************
20801+ *
20802+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
20803+ * Copyright (c) 2007, Intel Corporation.
20804+ * All Rights Reserved.
20805+ *
20806+ * This program is free software; you can redistribute it and/or modify it
20807+ * under the terms and conditions of the GNU General Public License,
20808+ * version 2, as published by the Free Software Foundation.
20809+ *
20810+ * This program is distributed in the hope it will be useful, but WITHOUT
20811+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20812+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20813+ * more details.
20814+ *
20815+ * You should have received a copy of the GNU General Public License along with
20816+ * this program; if not, write to the Free Software Foundation, Inc.,
20817+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20818+ *
20819+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
20820+ * develop this driver.
20821+ *
20822+ **************************************************************************/
20825+#ifndef _PSB_REG_H_
20826+#define _PSB_REG_H_
20827+
20828+#define PSB_CR_CLKGATECTL 0x0000
20829+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
20830+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
20831+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
20832+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
20833+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
20834+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
20835+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
20836+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
20837+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
20838+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
20839+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
20840+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
20841+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
20842+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
20843+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
20844+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
20845+
20846+#define PSB_CR_CORE_ID 0x0010
20847+#define _PSB_CC_ID_ID_SHIFT (16)
20848+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
20849+#define _PSB_CC_ID_CONFIG_SHIFT (0)
20850+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
20851+
20852+#define PSB_CR_CORE_REVISION 0x0014
20853+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
20854+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
20855+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
20856+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
20857+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
20858+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
20859+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
20860+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
20861+
20862+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
20863+
20864+#define PSB_CR_SOFT_RESET 0x0080
20865+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
20866+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
20867+#define _PSB_CS_RESET_USE_RESET (1 << 4)
20868+#define _PSB_CS_RESET_TA_RESET (1 << 3)
20869+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
20870+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
20871+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
20872+
20873+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
20874+
20875+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
20876+
20877+#define PSB_CR_EVENT_STATUS2 0x0118
20878+
20879+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
20880+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
20881+
20882+#define PSB_CR_EVENT_STATUS 0x012C
20883+
20884+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
20885+
20886+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
20887+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
20888+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
20889+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
20890+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
20891+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
20892+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
20893+#define _PSB_CE_SW_EVENT (1 << 14)
20894+#define _PSB_CE_TA_FINISHED (1 << 13)
20895+#define _PSB_CE_TA_TERMINATE (1 << 12)
20896+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
20897+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
20898+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
20899+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
20900+
20901+
20902+#define PSB_USE_OFFSET_MASK 0x0007FFFF
20903+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
20904+#define PSB_CR_USE_CODE_BASE0 0x0A0C
20905+#define PSB_CR_USE_CODE_BASE1 0x0A10
20906+#define PSB_CR_USE_CODE_BASE2 0x0A14
20907+#define PSB_CR_USE_CODE_BASE3 0x0A18
20908+#define PSB_CR_USE_CODE_BASE4 0x0A1C
20909+#define PSB_CR_USE_CODE_BASE5 0x0A20
20910+#define PSB_CR_USE_CODE_BASE6 0x0A24
20911+#define PSB_CR_USE_CODE_BASE7 0x0A28
20912+#define PSB_CR_USE_CODE_BASE8 0x0A2C
20913+#define PSB_CR_USE_CODE_BASE9 0x0A30
20914+#define PSB_CR_USE_CODE_BASE10 0x0A34
20915+#define PSB_CR_USE_CODE_BASE11 0x0A38
20916+#define PSB_CR_USE_CODE_BASE12 0x0A3C
20917+#define PSB_CR_USE_CODE_BASE13 0x0A40
20918+#define PSB_CR_USE_CODE_BASE14 0x0A44
20919+#define PSB_CR_USE_CODE_BASE15 0x0A48
20920+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
20921+#define _PSB_CUC_BASE_DM_SHIFT (25)
20922+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
20923+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
20924+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
20925+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
20926+#define _PSB_CUC_DM_VERTEX (0)
20927+#define _PSB_CUC_DM_PIXEL (1)
20928+#define _PSB_CUC_DM_RESERVED (2)
20929+#define _PSB_CUC_DM_EDM (3)
20930+
20931+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
20932+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
20933+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
20934+
20935+#define PSB_CR_EVENT_KICKER 0x0AC4
20936+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
20937+
20938+#define PSB_CR_EVENT_KICK 0x0AC8
20939+#define _PSB_CE_KICK_NOW (1 << 0)
20940+
20941+
20942+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
20943+
20944+#define PSB_CR_BIF_CTRL 0x0C00
20945+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
20946+#define _PSB_CB_CTRL_INVALDC (1 << 3)
20947+#define _PSB_CB_CTRL_FLUSH (1 << 2)
20948+
20949+#define PSB_CR_BIF_INT_STAT 0x0C04
20950+
20951+#define PSB_CR_BIF_FAULT 0x0C08
20952+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
20953+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
20954+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
20955+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
20956+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
20957+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
20958+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
20959+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
20960+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
20961+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
20962+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
20963+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
20964+
20965+#define PSB_CR_BIF_BANK0 0x0C78
20966+
20967+#define PSB_CR_BIF_BANK1 0x0C7C
20968+
20969+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
20970+
20971+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
20972+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
20973+
20974+#define PSB_CR_2D_SOCIF 0x0E18
20975+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
20976+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
20977+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
20978+
20979+#define PSB_CR_2D_BLIT_STATUS 0x0E04
20980+#define _PSB_C2B_STATUS_BUSY (1 << 24)
20981+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
20982+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
20983+
20984+/*
20985+ * 2D defs.
20986+ */
20987+
20988+/*
20989+ * 2D Slave Port Data : Block Header's Object Type
20990+ */
20991+
20992+#define PSB_2D_CLIP_BH (0x00000000)
20993+#define PSB_2D_PAT_BH (0x10000000)
20994+#define PSB_2D_CTRL_BH (0x20000000)
20995+#define PSB_2D_SRC_OFF_BH (0x30000000)
20996+#define PSB_2D_MASK_OFF_BH (0x40000000)
20997+#define PSB_2D_RESERVED1_BH (0x50000000)
20998+#define PSB_2D_RESERVED2_BH (0x60000000)
20999+#define PSB_2D_FENCE_BH (0x70000000)
21000+#define PSB_2D_BLIT_BH (0x80000000)
21001+#define PSB_2D_SRC_SURF_BH (0x90000000)
21002+#define PSB_2D_DST_SURF_BH (0xA0000000)
21003+#define PSB_2D_PAT_SURF_BH (0xB0000000)
21004+#define PSB_2D_SRC_PAL_BH (0xC0000000)
21005+#define PSB_2D_PAT_PAL_BH (0xD0000000)
21006+#define PSB_2D_MASK_SURF_BH (0xE0000000)
21007+#define PSB_2D_FLUSH_BH (0xF0000000)
21008+
21009+/*
21010+ * Clip Definition block (PSB_2D_CLIP_BH)
21011+ */
21012+#define PSB_2D_CLIPCOUNT_MAX (1)
21013+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
21014+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
21015+#define PSB_2D_CLIPCOUNT_SHIFT (0)
21016+/* clip rectangle min & max */
21017+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
21018+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
21019+#define PSB_2D_CLIP_XMAX_SHIFT (12)
21020+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
21021+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
21022+#define PSB_2D_CLIP_XMIN_SHIFT (0)
21023+/* clip rectangle offset */
21024+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
21025+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
21026+#define PSB_2D_CLIP_YMAX_SHIFT (12)
21027+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
21028+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
21029+#define PSB_2D_CLIP_YMIN_SHIFT (0)
21030+
21031+/*
21032+ * Pattern Control (PSB_2D_PAT_BH)
21033+ */
21034+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
21035+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
21036+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
21037+#define PSB_2D_PAT_WIDTH_SHIFT (5)
21038+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
21039+#define PSB_2D_PAT_YSTART_SHIFT (10)
21040+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
21041+#define PSB_2D_PAT_XSTART_SHIFT (15)
21042+
21043+/*
21044+ * 2D Control block (PSB_2D_CTRL_BH)
21045+ */
21046+/* Present Flags */
21047+#define PSB_2D_SRCCK_CTRL (0x00000001)
21048+#define PSB_2D_DSTCK_CTRL (0x00000002)
21049+#define PSB_2D_ALPHA_CTRL (0x00000004)
21050+/* Colour Key Colour (SRC/DST)*/
21051+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
21052+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
21053+#define PSB_2D_CK_COL_SHIFT (0)
21054+/* Colour Key Mask (SRC/DST)*/
21055+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
21056+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
21057+#define PSB_2D_CK_MASK_SHIFT (0)
21058+/* Alpha Control (Alpha/RGB)*/
21059+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
21060+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
21061+#define PSB_2D_GBLALPHA_SHIFT (12)
21062+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
21063+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
21064+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
21065+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
21066+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
21067+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
21068+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
21069+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
21070+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
21071+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
21072+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
21073+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
21074+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
21075+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
21076+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
21077+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
21078+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
21079+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
21080+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
21081+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
21082+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
21083+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
21084+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
21085+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
21086+
21087+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
21088+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
21089+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
21090+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
21091+
21092+/*
21093+ *Source Offset (PSB_2D_SRC_OFF_BH)
21094+ */
21095+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
21096+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
21097+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
21098+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
21099+
21100+/*
21101+ * Mask Offset (PSB_2D_MASK_OFF_BH)
21102+ */
21103+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
21104+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
21105+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
21106+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
21107+
21108+/*
21109+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
21110+ */
21111+
21112+/*
21113+ *Blit Rectangle (PSB_2D_BLIT_BH)
21114+ */
21115+
21116+#define PSB_2D_ROT_MASK (3<<25)
21117+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
21118+#define PSB_2D_ROT_NONE (0<<25)
21119+#define PSB_2D_ROT_90DEGS (1<<25)
21120+#define PSB_2D_ROT_180DEGS (2<<25)
21121+#define PSB_2D_ROT_270DEGS (3<<25)
21122+
21123+#define PSB_2D_COPYORDER_MASK (3<<23)
21124+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
21125+#define PSB_2D_COPYORDER_TL2BR (0<<23)
21126+#define PSB_2D_COPYORDER_BR2TL (1<<23)
21127+#define PSB_2D_COPYORDER_TR2BL (2<<23)
21128+#define PSB_2D_COPYORDER_BL2TR (3<<23)
21129+
21130+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
21131+#define PSB_2D_DSTCK_DISABLE (0x00000000)
21132+#define PSB_2D_DSTCK_PASS (0x00200000)
21133+#define PSB_2D_DSTCK_REJECT (0x00400000)
21134+
21135+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
21136+#define PSB_2D_SRCCK_DISABLE (0x00000000)
21137+#define PSB_2D_SRCCK_PASS (0x00080000)
21138+#define PSB_2D_SRCCK_REJECT (0x00100000)
21139+
21140+#define PSB_2D_CLIP_ENABLE (0x00040000)
21141+
21142+#define PSB_2D_ALPHA_ENABLE (0x00020000)
21143+
21144+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
21145+#define PSB_2D_PAT_MASK (0x00010000)
21146+#define PSB_2D_USE_PAT (0x00010000)
21147+#define PSB_2D_USE_FILL (0x00000000)
21148+/*
21149+ * Tungsten Graphics note on rop codes: If rop A and rop B are
21150+ * identical, the mask surface will not be read and need not be
21151+ * set up.
21152+ */
21153+
21154+#define PSB_2D_ROP3B_MASK (0x0000FF00)
21155+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
21156+#define PSB_2D_ROP3B_SHIFT (8)
21157+/* rop code A */
21158+#define PSB_2D_ROP3A_MASK (0x000000FF)
21159+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
21160+#define PSB_2D_ROP3A_SHIFT (0)
21161+
21162+#define PSB_2D_ROP4_MASK (0x0000FFFF)
21163+/*
21164+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
21165+ * Fill Colour RGBA8888
21166+ */
21167+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
21168+#define PSB_2D_FILLCOLOUR_SHIFT (0)
21169+/*
21170+ * DWORD1: (Always Present)
21171+ * X Start (Dest)
21172+ * Y Start (Dest)
21173+ */
21174+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
21175+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
21176+#define PSB_2D_DST_XSTART_SHIFT (12)
21177+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
21178+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
21179+#define PSB_2D_DST_YSTART_SHIFT (0)
21180+/*
21181+ * DWORD2: (Always Present)
21182+ * X Size (Dest)
21183+ * Y Size (Dest)
21184+ */
21185+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
21186+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
21187+#define PSB_2D_DST_XSIZE_SHIFT (12)
21188+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
21189+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
21190+#define PSB_2D_DST_YSIZE_SHIFT (0)
21191+
21192+/*
21193+ * Source Surface (PSB_2D_SRC_SURF_BH)
21194+ */
21195+/*
21196+ * WORD 0
21197+ */
21198+
21199+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
21200+#define PSB_2D_SRC_1_PAL (0x00000000)
21201+#define PSB_2D_SRC_2_PAL (0x00008000)
21202+#define PSB_2D_SRC_4_PAL (0x00010000)
21203+#define PSB_2D_SRC_8_PAL (0x00018000)
21204+#define PSB_2D_SRC_8_ALPHA (0x00020000)
21205+#define PSB_2D_SRC_4_ALPHA (0x00028000)
21206+#define PSB_2D_SRC_332RGB (0x00030000)
21207+#define PSB_2D_SRC_4444ARGB (0x00038000)
21208+#define PSB_2D_SRC_555RGB (0x00040000)
21209+#define PSB_2D_SRC_1555ARGB (0x00048000)
21210+#define PSB_2D_SRC_565RGB (0x00050000)
21211+#define PSB_2D_SRC_0888ARGB (0x00058000)
21212+#define PSB_2D_SRC_8888ARGB (0x00060000)
21213+#define PSB_2D_SRC_8888UYVY (0x00068000)
21214+#define PSB_2D_SRC_RESERVED (0x00070000)
21215+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
21216+
21217+
21218+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
21219+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
21220+#define PSB_2D_SRC_STRIDE_SHIFT (0)
21221+/*
21222+ * WORD 1 - Base Address
21223+ */
21224+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
21225+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
21226+#define PSB_2D_SRC_ADDR_SHIFT (2)
21227+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
21228+
21229+/*
21230+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
21231+ */
21232+/*
21233+ * WORD 0
21234+ */
21235+
21236+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
21237+#define PSB_2D_PAT_1_PAL (0x00000000)
21238+#define PSB_2D_PAT_2_PAL (0x00008000)
21239+#define PSB_2D_PAT_4_PAL (0x00010000)
21240+#define PSB_2D_PAT_8_PAL (0x00018000)
21241+#define PSB_2D_PAT_8_ALPHA (0x00020000)
21242+#define PSB_2D_PAT_4_ALPHA (0x00028000)
21243+#define PSB_2D_PAT_332RGB (0x00030000)
21244+#define PSB_2D_PAT_4444ARGB (0x00038000)
21245+#define PSB_2D_PAT_555RGB (0x00040000)
21246+#define PSB_2D_PAT_1555ARGB (0x00048000)
21247+#define PSB_2D_PAT_565RGB (0x00050000)
21248+#define PSB_2D_PAT_0888ARGB (0x00058000)
21249+#define PSB_2D_PAT_8888ARGB (0x00060000)
21250+
21251+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
21252+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
21253+#define PSB_2D_PAT_STRIDE_SHIFT (0)
21254+/*
21255+ * WORD 1 - Base Address
21256+ */
21257+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
21258+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
21259+#define PSB_2D_PAT_ADDR_SHIFT (2)
21260+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
21261+
21262+/*
21263+ * Destination Surface (PSB_2D_DST_SURF_BH)
21264+ */
21265+/*
21266+ * WORD 0
21267+ */
21268+
21269+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
21270+#define PSB_2D_DST_332RGB (0x00030000)
21271+#define PSB_2D_DST_4444ARGB (0x00038000)
21272+#define PSB_2D_DST_555RGB (0x00040000)
21273+#define PSB_2D_DST_1555ARGB (0x00048000)
21274+#define PSB_2D_DST_565RGB (0x00050000)
21275+#define PSB_2D_DST_0888ARGB (0x00058000)
21276+#define PSB_2D_DST_8888ARGB (0x00060000)
21277+#define PSB_2D_DST_8888AYUV (0x00070000)
21278+
21279+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
21280+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
21281+#define PSB_2D_DST_STRIDE_SHIFT (0)
21282+/*
21283+ * WORD 1 - Base Address
21284+ */
21285+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
21286+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
21287+#define PSB_2D_DST_ADDR_SHIFT (2)
21288+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
21289+
21290+/*
21291+ * Mask Surface (PSB_2D_MASK_SURF_BH)
21292+ */
21293+/*
21294+ * WORD 0
21295+ */
21296+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
21297+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
21298+#define PSB_2D_MASK_STRIDE_SHIFT (0)
21299+/*
21300+ * WORD 1 - Base Address
21301+ */
21302+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
21303+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
21304+#define PSB_2D_MASK_ADDR_SHIFT (2)
21305+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
21306+
21307+/*
21308+ * Source Palette (PSB_2D_SRC_PAL_BH)
21309+ */
21310+
21311+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
21312+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
21313+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
21314+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
21315+
21316+/*
21317+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
21318+ */
21319+
21320+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
21321+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
21322+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
21323+#define PSB_2D_PATPAL_BYTEALIGN (1024)
21324+
21325+/*
21326+ * Rop3 Codes (2 LS bytes)
21327+ */
21328+
21329+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
21330+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
21331+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
21332+#define PSB_2D_ROP3_BLACKNESS (0x0000)
21333+#define PSB_2D_ROP3_SRC (0xCC)
21334+#define PSB_2D_ROP3_PAT (0xF0)
21335+#define PSB_2D_ROP3_DST (0xAA)
21336+
21337+
21338+/*
21339+ * Sizes.
21340+ */
21341+
21342+#define PSB_SCENE_HW_COOKIE_SIZE 16
21343+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
21344+
21345+/*
21346+ * Scene stuff.
21347+ */
21348+
21349+#define PSB_NUM_HW_SCENES 2
21350+
21351+/*
21352+ * Scheduler completion actions.
21353+ */
21354+
21355+#define PSB_RASTER_BLOCK 0
21356+#define PSB_RASTER 1
21357+#define PSB_RETURN 2
21358+#define PSB_TA 3
21359+
21360+
21361+/*Power management*/
21362+#define PSB_PUNIT_PORT 0x04
21363+#define PSB_PWRGT_CNT 0x60
21364+#define PSB_PWRGT_STS 0x61
21365+#define PSB_PWRGT_GFX_MASK 0x3
21366+#define PSB_PWRGT_VID_ENC_MASK 0x30
21367+#define PSB_PWRGT_VID_DEC_MASK 0xc
21368+#endif
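
Every field in this header follows the same MASK/SHIFT convention, so decoding a register is a mask-then-shift. A minimal sketch using the PSB_CR_CORE_REVISION fields defined above; PSB_RSGX32 is the SGX read accessor used elsewhere in this patch:

static void example_print_core_revision(struct drm_psb_private *dev_priv)
{
        uint32_t rev = PSB_RSGX32(PSB_CR_CORE_REVISION);

        DRM_INFO("SGX core revision %u.%u.%u (designer 0x%02x)\n",
                 (rev & _PSB_CC_REVISION_MAJOR_MASK) >>
                 _PSB_CC_REVISION_MAJOR_SHIFT,
                 (rev & _PSB_CC_REVISION_MINOR_MASK) >>
                 _PSB_CC_REVISION_MINOR_SHIFT,
                 (rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
                 _PSB_CC_REVISION_MAINTENANCE_SHIFT,
                 (rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
                 _PSB_CC_REVISION_DESIGNER_SHIFT);
}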
21369diff -uNr a/drivers/gpu/drm/psb/psb_reset.c b/drivers/gpu/drm/psb/psb_reset.c
21370--- a/drivers/gpu/drm/psb/psb_reset.c 1969-12-31 16:00:00.000000000 -0800
21371+++ b/drivers/gpu/drm/psb/psb_reset.c 2009-04-07 13:28:38.000000000 -0700
21372@@ -0,0 +1,423 @@
21373+/**************************************************************************
21374+ * Copyright (c) 2007, Intel Corporation.
21375+ * All Rights Reserved.
21376+ *
21377+ * This program is free software; you can redistribute it and/or modify it
21378+ * under the terms and conditions of the GNU General Public License,
21379+ * version 2, as published by the Free Software Foundation.
21380+ *
21381+ * This program is distributed in the hope it will be useful, but WITHOUT
21382+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21383+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21384+ * more details.
21385+ *
21386+ * You should have received a copy of the GNU General Public License along with
21387+ * this program; if not, write to the Free Software Foundation, Inc.,
21388+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21389+ *
21390+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
21391+ * develop this driver.
21392+ *
21393+ **************************************************************************/
21394+/*
21395+ * Authors:
21396+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
21397+ */
21398+
21399+#include <drm/drmP.h>
21400+#include "psb_drv.h"
21401+#include "psb_reg.h"
21402+#include "psb_scene.h"
21403+#include "psb_msvdx.h"
21404+#include "lnc_topaz.h"
21405+#include <linux/spinlock.h>
21406+#define PSB_2D_TIMEOUT_MSEC 100
21407+
21408+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
21409+{
21410+ uint32_t val;
21411+
21412+ val = _PSB_CS_RESET_BIF_RESET |
21413+ _PSB_CS_RESET_DPM_RESET |
21414+ _PSB_CS_RESET_TA_RESET |
21415+ _PSB_CS_RESET_USE_RESET |
21416+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
21417+
21418+ if (reset_2d)
21419+ val |= _PSB_CS_RESET_TWOD_RESET;
21420+
21421+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
21422+ (void) PSB_RSGX32(PSB_CR_SOFT_RESET);
21423+
21424+ msleep(1);
21425+
21426+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
21427+ wmb();
21428+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
21429+ PSB_CR_BIF_CTRL);
21430+ wmb();
21431+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
21432+
21433+ msleep(1);
21434+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
21435+ PSB_CR_BIF_CTRL);
21436+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
21437+}
21438+
21439+void psb_print_pagefault(struct drm_psb_private *dev_priv)
21440+{
21441+ uint32_t val;
21442+ uint32_t addr;
21443+
21444+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
21445+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
21446+
21447+ if (val) {
21448+ if (val & _PSB_CBI_STAT_PF_N_RW)
21449+ DRM_ERROR("Poulsbo MMU page fault:\n");
21450+ else
21451+ DRM_ERROR("Poulsbo MMU read / write "
21452+ "protection fault:\n");
21453+
21454+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
21455+ DRM_ERROR("\tCache requestor.\n");
21456+ if (val & _PSB_CBI_STAT_FAULT_TA)
21457+ DRM_ERROR("\tTA requestor.\n");
21458+ if (val & _PSB_CBI_STAT_FAULT_VDM)
21459+ DRM_ERROR("\tVDM requestor.\n");
21460+ if (val & _PSB_CBI_STAT_FAULT_2D)
21461+ DRM_ERROR("\t2D requestor.\n");
21462+ if (val & _PSB_CBI_STAT_FAULT_PBE)
21463+ DRM_ERROR("\tPBE requestor.\n");
21464+ if (val & _PSB_CBI_STAT_FAULT_TSP)
21465+ DRM_ERROR("\tTSP requestor.\n");
21466+ if (val & _PSB_CBI_STAT_FAULT_ISP)
21467+ DRM_ERROR("\tISP requestor.\n");
21468+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
21469+ DRM_ERROR("\tUSSEPDS requestor.\n");
21470+ if (val & _PSB_CBI_STAT_FAULT_HOST)
21471+ DRM_ERROR("\tHost requestor.\n");
21472+
21473+ DRM_ERROR("\tMMU failing address is 0x%08x.\n",
21474+ (unsigned) addr);
21475+ }
21476+}
21477+
21478+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
21479+{
21480+ struct timer_list *wt = &dev_priv->watchdog_timer;
21481+ unsigned long irq_flags;
21482+
21483+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21484+ if (dev_priv->timer_available && !timer_pending(wt)) {
21485+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
21486+ add_timer(wt);
21487+ }
21488+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21489+}
21490+
21491+#if 0
21492+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
21493+ unsigned int engine, int *lockup,
21494+ int *idle)
21495+{
21496+ uint32_t received_seq;
21497+
21498+ received_seq = dev_priv->comm[engine << 4];
21499+ spin_lock(&dev_priv->sequence_lock);
21500+ *idle = (received_seq == dev_priv->sequence[engine]);
21501+ spin_unlock(&dev_priv->sequence_lock);
21502+
21503+ if (*idle) {
21504+ dev_priv->idle[engine] = 1;
21505+ *lockup = 0;
21506+ return;
21507+ }
21508+
21509+ if (dev_priv->idle[engine]) {
21510+ dev_priv->idle[engine] = 0;
21511+ dev_priv->last_sequence[engine] = received_seq;
21512+ *lockup = 0;
21513+ return;
21514+ }
21515+
21516+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
21517+}
21518+
21519+#endif
21520+static void psb_watchdog_func(unsigned long data)
21521+{
21522+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
21523+ struct drm_device *dev = dev_priv->dev;
21524+ int lockup;
21525+ int msvdx_lockup;
21526+ int msvdx_idle;
21527+ int lockup_2d;
21528+#if 0
21529+ int topaz_lockup = 0;
21530+ int topaz_idle = 0;
21531+#endif
21532+ int idle_2d;
21533+ int idle;
21534+ unsigned long irq_flags;
21535+
21536+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
21537+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
21538+
21539+#if 0
21540+ if (IS_MRST(dev))
21541+ lnc_topaz_lockup(dev_priv, &topaz_lockup, &topaz_idle);
21542+#endif
21543+
21544+#if 0
21545+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
21546+#else
21547+ lockup_2d = false;
21548+ idle_2d = true;
21549+#endif
21550+ if (lockup || msvdx_lockup || lockup_2d) {
21551+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21552+ dev_priv->timer_available = 0;
21553+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
21554+ irq_flags);
21555+ if (lockup) {
21556+ psb_print_pagefault(dev_priv);
21557+ schedule_work(&dev_priv->watchdog_wq);
21558+ }
21559+ if (msvdx_lockup)
21560+ schedule_work(&dev_priv->msvdx_watchdog_wq);
21561+#if 0
21562+ if (IS_MRST(dev) && (topaz_lockup))
21563+ schedule_work(&dev_priv->topaz_watchdog_wq);
21564+#else
21565+ (void) dev;
21566+#endif
21567+ }
21568+ if (!idle || !msvdx_idle || !idle_2d /* || !topaz_idle */)
21569+ psb_schedule_watchdog(dev_priv);
21570+}
21571+
21572+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
21573+{
21574+ struct drm_psb_private *dev_priv = dev->dev_private;
21575+ struct psb_msvdx_cmd_queue *msvdx_cmd;
21576+ struct list_head *list, *next;
21577+ /* Flush the MSVDX command queue and signal all fences in it */
21578+ list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
21579+ msvdx_cmd =
21580+ list_entry(list, struct psb_msvdx_cmd_queue, head);
21581+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
21582+ msvdx_cmd->sequence);
21583+ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
21584+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
21585+ dev_priv->msvdx_current_sequence,
21586+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
21587+ list_del(list);
21588+ kfree(msvdx_cmd->cmd);
21589+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
21590+ DRM_MEM_DRIVER);
21591+ }
21592+}
21593+
21594+static void psb_msvdx_reset_wq(struct work_struct *work)
21595+{
21596+ struct drm_psb_private *dev_priv =
21597+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
21598+
21599+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
21600+ unsigned long irq_flags;
21601+
21602+ mutex_lock(&dev_priv->msvdx_mutex);
21603+ dev_priv->msvdx_needs_reset = 1;
21604+ dev_priv->msvdx_current_sequence++;
21605+ PSB_DEBUG_GENERAL
21606+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
21607+ dev_priv->msvdx_current_sequence);
21608+
21609+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
21610+ dev_priv->msvdx_current_sequence,
21611+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
21612+
21613+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21614+ dev_priv->timer_available = 1;
21615+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21616+
21617+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
21618+ psb_msvdx_flush_cmd_queue(scheduler->dev);
21619+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
21620+
21621+ psb_schedule_watchdog(dev_priv);
21622+ mutex_unlock(&dev_priv->msvdx_mutex);
21623+}
21624+
21625+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
21626+{
21627+ struct psb_xhw_buf buf;
21628+ uint32_t bif_ctrl;
21629+
21630+ INIT_LIST_HEAD(&buf.head);
21631+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
21632+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
21633+ PSB_WSGX32(bif_ctrl |
21634+ _PSB_CB_CTRL_CLEAR_FAULT |
21635+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
21636+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
21637+ msleep(1);
21638+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
21639+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
21640+ return psb_xhw_reset_dpm(dev_priv, &buf);
21641+}
21642+
21643+/*
21644+ * Block command submission and reset hardware and schedulers.
21645+ */
21646+
21647+static void psb_reset_wq(struct work_struct *work)
21648+{
21649+ struct drm_psb_private *dev_priv =
21650+ container_of(work, struct drm_psb_private, watchdog_wq);
21651+ int lockup_2d;
21652+ int idle_2d;
21653+ unsigned long irq_flags;
21654+ int ret;
21655+ int reset_count = 0;
21656+ struct psb_xhw_buf buf;
21657+ uint32_t xhw_lockup;
21658+
21659+ /*
21660+ * Block command submission.
21661+ */
21662+ PSB_DEBUG_PM("psb_reset_wq\n");
21663+
21664+ mutex_lock(&dev_priv->reset_mutex);
21665+
21666+ INIT_LIST_HEAD(&buf.head);
21667+ ret = psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup);
21668+ if (likely(ret == 0)) {
21669+ if (psb_extend_timeout(dev_priv, xhw_lockup) == 0) {
21670+ /*
21671+ * no lockup, just re-schedule
21672+ */
21673+ spin_lock_irqsave(&dev_priv->watchdog_lock,
21674+ irq_flags);
21675+ dev_priv->timer_available = 1;
21676+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
21677+ irq_flags);
21678+ psb_schedule_watchdog(dev_priv);
21679+ mutex_unlock(&dev_priv->reset_mutex);
21680+ return;
21681+ }
21682+ } else {
21683+ DRM_ERROR("Check lockup returned %d\n", ret);
21684+ }
21685+#if 0
21686+ msleep(PSB_2D_TIMEOUT_MSEC);
21687+
21688+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
21689+
21690+ if (lockup_2d) {
21691+ uint32_t seq_2d;
21692+ spin_lock(&dev_priv->sequence_lock);
21693+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
21694+ spin_unlock(&dev_priv->sequence_lock);
21695+ psb_fence_error(dev_priv->scheduler.dev,
21696+ PSB_ENGINE_2D,
21697+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
21698+ DRM_INFO("Resetting 2D engine.\n");
21699+ }
21700+
21701+ psb_reset(dev_priv, lockup_2d);
21702+#else
21703+ (void) lockup_2d;
21704+ (void) idle_2d;
21705+ psb_reset(dev_priv, 0);
21706+#endif
21707+ (void) psb_xhw_mmu_reset(dev_priv);
21708+ DRM_INFO("Resetting scheduler.\n");
21709+ psb_scheduler_pause(dev_priv);
21710+ psb_scheduler_reset(dev_priv, -EBUSY);
21711+ psb_scheduler_ta_mem_check(dev_priv);
21712+
21713+ while (dev_priv->ta_mem &&
21714+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
21715+ struct ttm_fence_object *fence;
21716+
21717+ /*
21718+ * TA memory is currently fenced so offsets
21719+ * are valid. Reload offsets into the dpm now.
21720+ */
21721+
21722+ struct psb_xhw_buf buf;
21723+ INIT_LIST_HEAD(&buf.head);
21724+
21725+ msleep(100);
21726+
21727+ fence = dev_priv->ta_mem->ta_memory->sync_obj;
21728+
21729+ DRM_INFO("Reloading TA memory at offset "
21730+ "0x%08lx to 0x%08lx seq %d\n",
21731+ dev_priv->ta_mem->ta_memory->offset,
21732+ dev_priv->ta_mem->ta_memory->offset +
21733+ (dev_priv->ta_mem->ta_memory->num_pages << PAGE_SHIFT),
21734+ fence->sequence);
21735+
21736+ fence = dev_priv->ta_mem->hw_data->sync_obj;
21737+
21738+ DRM_INFO("Reloading TA HW memory at offset "
21739+ "0x%08lx to 0x%08lx seq %u\n",
21740+ dev_priv->ta_mem->hw_data->offset,
21741+ dev_priv->ta_mem->hw_data->offset +
21742+ (dev_priv->ta_mem->hw_data->num_pages << PAGE_SHIFT),
21743+ fence->sequence);
21744+
21745+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
21746+ PSB_TA_MEM_FLAG_TA |
21747+ PSB_TA_MEM_FLAG_RASTER |
21748+ PSB_TA_MEM_FLAG_HOSTA |
21749+ PSB_TA_MEM_FLAG_HOSTD |
21750+ PSB_TA_MEM_FLAG_INIT,
21751+ dev_priv->ta_mem->ta_memory->
21752+ offset,
21753+ dev_priv->ta_mem->hw_data->
21754+ offset,
21755+ dev_priv->ta_mem->hw_cookie);
21756+ if (!ret)
21757+ break;
21758+
21759+ DRM_INFO("Reloading TA memory failed. Retrying.\n");
21760+ psb_reset(dev_priv, 0);
21761+ (void) psb_xhw_mmu_reset(dev_priv);
21762+ }
21763+
21764+ psb_scheduler_restart(dev_priv);
21765+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21766+ dev_priv->timer_available = 1;
21767+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21768+ mutex_unlock(&dev_priv->reset_mutex);
21769+}
21770+
21771+void psb_watchdog_init(struct drm_psb_private *dev_priv)
21772+{
21773+ struct timer_list *wt = &dev_priv->watchdog_timer;
21774+ unsigned long irq_flags;
21775+
21776+ spin_lock_init(&dev_priv->watchdog_lock);
21777+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21778+ init_timer(wt);
21779+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
21780+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
21781+ wt->data = (unsigned long) dev_priv;
21782+ wt->function = &psb_watchdog_func;
21783+ dev_priv->timer_available = 1;
21784+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21785+}
21786+
21787+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
21788+{
21789+ unsigned long irq_flags;
21790+
21791+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
21792+ dev_priv->timer_available = 0;
21793+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
21794+ (void) del_timer_sync(&dev_priv->watchdog_timer);
21795+}
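
The watchdog above splits work the usual way: the timer callback only samples progress and schedules workqueue items, while the heavy reset path runs in process context where it may sleep. Submission paths are expected to keep the timer alive; a hedged sketch of such a hook — the function itself is hypothetical, but msvdx_busy and psb_schedule_watchdog() are from this patch:

static void example_on_command_submitted(struct drm_psb_private *dev_priv)
{
        dev_priv->msvdx_busy = 1;        /* mark the engine non-idle */
        psb_schedule_watchdog(dev_priv); /* rearms unless timer_available == 0 */
}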
21796diff -uNr a/drivers/gpu/drm/psb/psb_scene.c b/drivers/gpu/drm/psb/psb_scene.c
21797--- a/drivers/gpu/drm/psb/psb_scene.c 1969-12-31 16:00:00.000000000 -0800
21798+++ b/drivers/gpu/drm/psb/psb_scene.c 2009-04-07 13:28:38.000000000 -0700
21799@@ -0,0 +1,523 @@
21800+/**************************************************************************
21801+ * Copyright (c) 2007, Intel Corporation.
21802+ * All Rights Reserved.
21803+ *
21804+ * This program is free software; you can redistribute it and/or modify it
21805+ * under the terms and conditions of the GNU General Public License,
21806+ * version 2, as published by the Free Software Foundation.
21807+ *
21808+ * This program is distributed in the hope it will be useful, but WITHOUT
21809+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21810+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21811+ * more details.
21812+ *
21813+ * You should have received a copy of the GNU General Public License along with
21814+ * this program; if not, write to the Free Software Foundation, Inc.,
21815+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21816+ *
21817+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
21818+ * develop this driver.
21819+ *
21820+ **************************************************************************/
21821+/*
21822+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
21823+ */
21824+
21825+#include <drm/drmP.h>
21826+#include "psb_drv.h"
21827+#include "psb_scene.h"
21828+
21829+void psb_clear_scene_atomic(struct psb_scene *scene)
21830+{
21831+ int i;
21832+ struct page *page;
21833+ void *v;
21834+
21835+ for (i = 0; i < scene->clear_num_pages; ++i) {
21836+ page = ttm_tt_get_page(scene->hw_data->ttm,
21837+ scene->clear_p_start + i);
21838+ if (in_irq())
21839+ v = kmap_atomic(page, KM_IRQ0);
21840+ else
21841+ v = kmap_atomic(page, KM_USER0);
21842+
21843+ memset(v, 0, PAGE_SIZE);
21844+
21845+ if (in_irq())
21846+ kunmap_atomic(v, KM_IRQ0);
21847+ else
21848+ kunmap_atomic(v, KM_USER0);
21849+ }
21850+}
21851+
21852+int psb_clear_scene(struct psb_scene *scene)
21853+{
21854+ struct ttm_bo_kmap_obj bmo;
21855+ bool is_iomem;
21856+ void *addr;
21857+
21858+ int ret = ttm_bo_kmap(scene->hw_data, scene->clear_p_start,
21859+ scene->clear_num_pages, &bmo);
21860+
21861+ PSB_DEBUG_RENDER("Scene clear.\n");
21862+ if (ret)
21863+ return ret;
21864+
21865+ addr = ttm_kmap_obj_virtual(&bmo, &is_iomem);
21866+ BUG_ON(is_iomem);
21867+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
21868+ ttm_bo_kunmap(&bmo);
21869+
21870+ return 0;
21871+}
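
Two clear paths are provided: psb_clear_scene_atomic() maps one page at a time with kmap_atomic() so it is usable from IRQ context, while psb_clear_scene() maps the whole range with ttm_bo_kmap() and may sleep. A hedged sketch of picking between them — the wrapper is hypothetical, not from the patch:

static int example_clear_scene_any_context(struct psb_scene *scene)
{
        if (in_irq()) {
                psb_clear_scene_atomic(scene);  /* per-page kmap_atomic */
                return 0;
        }
        return psb_clear_scene(scene);          /* may sleep in ttm_bo_kmap */
}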
21872+
21873+static void psb_destroy_scene(struct kref *kref)
21874+{
21875+ struct psb_scene *scene =
21876+ container_of(kref, struct psb_scene, kref);
21877+
21878+ PSB_DEBUG_RENDER("Scene destroy.\n");
21879+ psb_scheduler_remove_scene_refs(scene);
21880+ ttm_bo_unref(&scene->hw_data);
21881+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
21882+}
21883+
21884+void psb_scene_unref(struct psb_scene **p_scene)
21885+{
21886+ struct psb_scene *scene = *p_scene;
21887+
21888+ PSB_DEBUG_RENDER("Scene unref.\n");
21889+ *p_scene = NULL;
21890+ kref_put(&scene->kref, &psb_destroy_scene);
21891+}
21892+
21893+struct psb_scene *psb_scene_ref(struct psb_scene *src)
21894+{
21895+ PSB_DEBUG_RENDER("Scene ref.\n");
21896+ kref_get(&src->kref);
21897+ return src;
21898+}
21899+
21900+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
21901+ uint32_t w, uint32_t h)
21902+{
21903+ struct drm_psb_private *dev_priv =
21904+ (struct drm_psb_private *) dev->dev_private;
21905+ struct ttm_bo_device *bdev = &dev_priv->bdev;
21906+ int ret = -EINVAL;
21907+ struct psb_scene *scene;
21908+ uint32_t bo_size;
21909+ struct psb_xhw_buf buf;
21910+
21911+ PSB_DEBUG_RENDER("Alloc scene w %u h %u msaa %u\n", w & 0xffff, h,
21912+ w >> 16);
21913+
21914+ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
21915+
21916+ if (!scene) {
21917+ DRM_ERROR("Out of memory allocating scene object.\n");
21918+ return NULL;
21919+ }
21920+
21921+ scene->dev = dev;
21922+ scene->w = w;
21923+ scene->h = h;
21924+ scene->hw_scene = NULL;
21925+ kref_init(&scene->kref);
21926+
21927+ INIT_LIST_HEAD(&buf.head);
21928+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
21929+ scene->hw_cookie, &bo_size,
21930+ &scene->clear_p_start,
21931+ &scene->clear_num_pages);
21932+ if (ret)
21933+ goto out_err;
21934+
21935+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
21936+ DRM_PSB_FLAG_MEM_MMU |
21937+ TTM_PL_FLAG_CACHED,
21938+ 0, 0, 1, NULL, &scene->hw_data);
21939+ if (ret)
21940+ goto out_err;
21941+
21942+ return scene;
21943+out_err:
21944+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
21945+ return NULL;
21946+}
21947+
21948+int psb_validate_scene_pool(struct psb_context *context,
21949+ struct psb_scene_pool *pool,
21950+ uint32_t w,
21951+ uint32_t h,
21952+ int final_pass, struct psb_scene **scene_p)
21953+{
21954+ struct drm_device *dev = pool->dev;
21955+ struct drm_psb_private *dev_priv =
21956+ (struct drm_psb_private *) dev->dev_private;
21957+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
21958+ int ret;
21959+ unsigned long irq_flags;
21960+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
21961+ uint32_t bin_pt_offset;
21962+ uint32_t bin_param_offset;
21963+
21964+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n",
21965+ pool->cur_scene);
21966+
21967+ if (unlikely(!dev_priv->ta_mem)) {
21968+ dev_priv->ta_mem =
21969+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
21970+ if (!dev_priv->ta_mem)
21971+ return -ENOMEM;
21972+
21973+ bin_pt_offset = ~0;
21974+ bin_param_offset = ~0;
21975+ } else {
21976+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
21977+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
21978+ }
21979+
21980+ pool->w = w;
21981+ pool->h = h;
21982+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
21983+ spin_lock_irqsave(&scheduler->lock, irq_flags);
21984+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
21985+ spin_unlock_irqrestore(&scheduler->lock,
21986+ irq_flags);
21987+ DRM_ERROR("Trying to resize a dirty scene.\n");
21988+ return -EINVAL;
21989+ }
21990+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
21991+ psb_scene_unref(&pool->scenes[pool->cur_scene]);
21992+ scene = NULL;
21993+ }
21994+
21995+ if (!scene) {
21996+ pool->scenes[pool->cur_scene] = scene =
21997+ psb_alloc_scene(pool->dev, pool->w, pool->h);
21998+
21999+ if (!scene)
22000+ return -ENOMEM;
22001+
22002+ scene->flags = PSB_SCENE_FLAG_CLEARED;
22003+ }
22004+
22005+ ret = psb_validate_kernel_buffer(context, scene->hw_data,
22006+ PSB_ENGINE_TA,
22007+ PSB_BO_FLAG_SCENE |
22008+ PSB_GPU_ACCESS_READ |
22009+ PSB_GPU_ACCESS_WRITE, 0);
22010+ if (unlikely(ret != 0))
22011+ return ret;
22012+
22013+ /*
22014+ * FIXME: We need atomic bit manipulation here for the
22015+ * scheduler. For now use the spinlock.
22016+ */
22017+
22018+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22019+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
22020+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22021+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
22022+ mutex_lock(&scene->hw_data->mutex);
22023+
22024+ ret = ttm_bo_wait(scene->hw_data, 0, 1, 0);
22025+ mutex_unlock(&scene->hw_data->mutex);
22026+ if (ret)
22027+ return ret;
22028+
22029+ ret = psb_clear_scene(scene);
22030+
22031+ if (ret)
22032+ return ret;
22033+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22034+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
22035+ }
22036+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22037+
22038+ ret = psb_validate_kernel_buffer(context, dev_priv->ta_mem->hw_data,
22039+ PSB_ENGINE_TA,
22040+ PSB_BO_FLAG_SCENE |
22041+ PSB_GPU_ACCESS_READ |
22042+ PSB_GPU_ACCESS_WRITE, 0);
22043+ if (unlikely(ret != 0))
22044+ return ret;
22045+
22046+ ret =
22047+ psb_validate_kernel_buffer(context,
22048+ dev_priv->ta_mem->ta_memory,
22049+ PSB_ENGINE_TA,
22050+ PSB_BO_FLAG_SCENE |
22051+ PSB_GPU_ACCESS_READ |
22052+ PSB_GPU_ACCESS_WRITE, 0);
22053+
22054+ if (unlikely(ret != 0))
22055+ return ret;
22056+
22057+ if (unlikely(bin_param_offset !=
22058+ dev_priv->ta_mem->ta_memory->offset ||
22059+ bin_pt_offset !=
22060+ dev_priv->ta_mem->hw_data->offset ||
22061+ dev_priv->force_ta_mem_load)) {
22062+
22063+ struct psb_xhw_buf buf;
22064+
22065+ INIT_LIST_HEAD(&buf.head);
22066+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
22067+ PSB_TA_MEM_FLAG_TA |
22068+ PSB_TA_MEM_FLAG_RASTER |
22069+ PSB_TA_MEM_FLAG_HOSTA |
22070+ PSB_TA_MEM_FLAG_HOSTD |
22071+ PSB_TA_MEM_FLAG_INIT,
22072+ dev_priv->ta_mem->ta_memory->
22073+ offset,
22074+ dev_priv->ta_mem->hw_data->
22075+ offset,
22076+ dev_priv->ta_mem->hw_cookie);
22077+ if (ret)
22078+ return ret;
22079+
22080+ dev_priv->force_ta_mem_load = 0;
22081+ }
22082+
22083+ if (final_pass) {
22084+
22085+ /*
22086+ * Clear the scene on next use. Advance the scene counter.
22087+ */
22088+
22089+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22090+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
22091+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22092+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
22093+ }
22094+
22095+ *scene_p = psb_scene_ref(scene);
22096+ return 0;
22097+}
22098+
22099+static void psb_scene_pool_destroy(struct kref *kref)
22100+{
22101+ struct psb_scene_pool *pool =
22102+ container_of(kref, struct psb_scene_pool, kref);
22103+ int i;
22104+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
22105+
22106+ for (i = 0; i < pool->num_scenes; ++i) {
22107+ PSB_DEBUG_RENDER("scene %d is 0x%08lx\n", i,
22108+ (unsigned long) pool->scenes[i]);
22109+ if (pool->scenes[i])
22110+ psb_scene_unref(&pool->scenes[i]);
22111+ }
22112+
22113+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
22114+}
22115+
22116+void psb_scene_pool_unref(struct psb_scene_pool **p_pool)
22117+{
22118+ struct psb_scene_pool *pool = *p_pool;
22119+
22120+ PSB_DEBUG_RENDER("Scene pool unref\n");
22121+ *p_pool = NULL;
22122+ kref_put(&pool->kref, &psb_scene_pool_destroy);
22123+}
22124+
22125+struct psb_scene_pool *psb_scene_pool_ref(struct psb_scene_pool *src)
22126+{
22127+ kref_get(&src->kref);
22128+ return src;
22129+}
22130+
22131+/*
22132+ * Callback for base object manager.
22133+ */
22134+
22135+static void psb_scene_pool_release(struct ttm_base_object **p_base)
22136+{
22137+ struct ttm_base_object *base = *p_base;
22138+ struct psb_scene_pool *pool =
22139+ container_of(base, struct psb_scene_pool, base);
22140+ *p_base = NULL;
22141+
22142+ psb_scene_pool_unref(&pool);
22143+}
22144+
22145+struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file *file_priv,
22146+ uint32_t handle,
22147+ int check_owner)
22148+{
22149+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
22150+ struct ttm_base_object *base;
22151+ struct psb_scene_pool *pool;
22152+
22153+
22154+ base = ttm_base_object_lookup(tfile, handle);
22155+ if (!base || (base->object_type != PSB_USER_OBJECT_SCENE_POOL)) {
22156+ DRM_ERROR("Could not find scene pool object 0x%08x\n",
22157+ handle);
22158+ return NULL;
22159+ }
22160+
22161+ if (check_owner && tfile != base->tfile && !base->shareable) {
22162+ ttm_base_object_unref(&base);
22163+ return NULL;
22164+ }
22165+
22166+ pool = container_of(base, struct psb_scene_pool, base);
22167+ kref_get(&pool->kref);
22168+ ttm_base_object_unref(&base);
22169+ return pool;
22170+}
22171+
22172+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *file_priv,
22173+ int shareable,
22174+ uint32_t num_scenes,
22175+ uint32_t w, uint32_t h)
22176+{
22177+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
22178+ struct drm_device *dev = file_priv->minor->dev;
22179+ struct psb_scene_pool *pool;
22180+ int ret;
22181+
22182+ PSB_DEBUG_RENDER("Scene pool alloc\n");
22183+ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
22184+ if (!pool) {
22185+ DRM_ERROR("Out of memory allocating scene pool object.\n");
22186+ return NULL;
22187+ }
22188+ pool->w = w;
22189+ pool->h = h;
22190+ pool->dev = dev;
22191+ pool->num_scenes = num_scenes;
22192+ kref_init(&pool->kref);
22193+
22194+ /*
22195+ * The base object holds a reference.
22196+ */
22197+
22198+ kref_get(&pool->kref);
22199+ ret = ttm_base_object_init(tfile, &pool->base, shareable,
22200+ PSB_USER_OBJECT_SCENE_POOL,
22201+ &psb_scene_pool_release, NULL);
22202+ if (unlikely(ret != 0))
22203+ goto out_err;
22204+
22205+ return pool;
22206+out_err:
22207+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
22208+ return NULL;
22209+}
22210+
22211+/*
22212+ * Code to support multiple ta memory buffers.
22213+ */
22214+
22215+static void psb_ta_mem_destroy(struct kref *kref)
22216+{
22217+ struct psb_ta_mem *ta_mem =
22218+ container_of(kref, struct psb_ta_mem, kref);
22219+
22220+ ttm_bo_unref(&ta_mem->hw_data);
22221+ ttm_bo_unref(&ta_mem->ta_memory);
22222+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
22223+}
22224+
22225+void psb_ta_mem_unref(struct psb_ta_mem **p_ta_mem)
22226+{
22227+ struct psb_ta_mem *ta_mem = *p_ta_mem;
22228+ *p_ta_mem = NULL;
22229+ kref_put(&ta_mem->kref, psb_ta_mem_destroy);
22230+}
22231+
22232+struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src)
22233+{
22234+ kref_get(&src->kref);
22235+ return src;
22236+}
22237+
22238+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
22239+{
22240+ struct drm_psb_private *dev_priv =
22241+ (struct drm_psb_private *) dev->dev_private;
22242+ struct ttm_bo_device *bdev = &dev_priv->bdev;
22243+ int ret = -EINVAL;
22244+ struct psb_ta_mem *ta_mem;
22245+ uint32_t bo_size;
22246+ uint32_t ta_min_size;
22247+ struct psb_xhw_buf buf;
22248+
22249+ INIT_LIST_HEAD(&buf.head);
22250+
22251+ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
22252+
22253+ if (!ta_mem) {
22254+ DRM_ERROR("Out of memory allocating parameter memory.\n");
22255+ return NULL;
22256+ }
22257+
22258+ kref_init(&ta_mem->kref);
22259+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
22260+ ta_mem->hw_cookie,
22261+ &bo_size,
22262+ &ta_min_size);
22263+ if (ret == -ENOMEM) {
22264+ DRM_ERROR("Parameter memory size is too small.\n");
22265+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
22266+ (unsigned int) (pages * (PAGE_SIZE / 1024)));
22267+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
22268+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
22269+ (unsigned int)(ta_min_size / 1024));
22270+ DRM_INFO("\"ta_mem_size\" parameter!\n");
22271+ }
22272+ if (ret)
22273+ goto out_err0;
22274+
22275+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
22276+ DRM_PSB_FLAG_MEM_MMU,
22277+ 0, 0, 0, NULL,
22278+ &ta_mem->hw_data);
22279+ if (ret)
22280+ goto out_err0;
22281+
22282+ bo_size = pages * PAGE_SIZE;
22283+ ret =
22284+ ttm_buffer_object_create(bdev, bo_size,
22285+ ttm_bo_type_kernel,
22286+ DRM_PSB_FLAG_MEM_RASTGEOM,
22287+ 0,
22288+ 1024 * 1024 >> PAGE_SHIFT, 0,
22289+ NULL,
22290+ &ta_mem->ta_memory);
22291+ if (ret)
22292+ goto out_err1;
22293+
22294+ return ta_mem;
22295+out_err1:
22296+ ttm_bo_unref(&ta_mem->hw_data);
22297+out_err0:
22298+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
22299+ return NULL;
22300+}
22301+
22302+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
22303+ void *data, struct drm_file *file_priv)
22304+{
22305+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
22306+ struct drm_psb_scene *scene = (struct drm_psb_scene *) data;
22307+ int ret = 0;
22308+ struct drm_psb_private *dev_priv = psb_priv(dev);
22309+ if (!scene->handle_valid)
22310+ return 0;
22311+ down_read(&dev_priv->sgx_sem);
22312+ psb_check_power_state(dev, PSB_DEVICE_SGX);
22313+
22314+ ret =
22315+ ttm_ref_object_base_unref(tfile, scene->handle, TTM_REF_USAGE);
22316+ if (unlikely(ret != 0))
22317+ DRM_ERROR("Could not unreference a scene object.\n");
22318+ up_read(&dev_priv->sgx_sem);
22319+ if (drm_psb_ospm && IS_MRST(dev))
22320+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
22321+ return ret;
22322+}
22323diff -uNr a/drivers/gpu/drm/psb/psb_scene.h b/drivers/gpu/drm/psb/psb_scene.h
22324--- a/drivers/gpu/drm/psb/psb_scene.h 1969-12-31 16:00:00.000000000 -0800
22325+++ b/drivers/gpu/drm/psb/psb_scene.h 2009-04-07 13:28:38.000000000 -0700
22326@@ -0,0 +1,119 @@
22327+/**************************************************************************
22328+ * Copyright (c) 2007, Intel Corporation.
22329+ * All Rights Reserved.
22330+ *
22331+ * This program is free software; you can redistribute it and/or modify it
22332+ * under the terms and conditions of the GNU General Public License,
22333+ * version 2, as published by the Free Software Foundation.
22334+ *
22335+ * This program is distributed in the hope it will be useful, but WITHOUT
22336+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22337+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22338+ * more details.
22339+ *
22340+ * You should have received a copy of the GNU General Public License along with
22341+ * this program; if not, write to the Free Software Foundation, Inc.,
22342+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22343+ *
22344+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22345+ * develop this driver.
22346+ *
22347+ **************************************************************************/
22348+/*
22349+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
22350+ */
22351+
22352+#ifndef _PSB_SCENE_H_
22353+#define _PSB_SCENE_H_
22354+
22355+#include "ttm/ttm_object.h"
22356+
22357+#define PSB_USER_OBJECT_SCENE_POOL ttm_driver_type0
22358+#define PSB_USER_OBJECT_TA_MEM ttm_driver_type1
22359+#define PSB_MAX_NUM_SCENES 8
22360+
22361+struct psb_hw_scene;
22362+struct psb_hw_ta_mem;
22363+
22364+struct psb_scene_pool {
22365+ struct ttm_base_object base;
22366+ struct drm_device *dev;
22367+ struct kref kref;
22368+ uint32_t w;
22369+ uint32_t h;
22370+ uint32_t cur_scene;
22371+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
22372+ uint32_t num_scenes;
22373+};
22374+
22375+struct psb_scene {
22376+ struct drm_device *dev;
22377+ struct kref kref;
22378+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
22379+ uint32_t bo_size;
22380+ uint32_t w;
22381+ uint32_t h;
22382+ struct psb_ta_mem *ta_mem;
22383+ struct psb_hw_scene *hw_scene;
22384+ struct ttm_buffer_object *hw_data;
22385+ uint32_t flags;
22386+ uint32_t clear_p_start;
22387+ uint32_t clear_num_pages;
22388+};
22389+
22390+#if 0
22391+struct psb_scene_entry {
22392+ struct list_head head;
22393+ struct psb_scene *scene;
22394+};
22395+
22396+struct psb_user_scene {
22397+ struct ttm_base_object base;
22398+ struct drm_device *dev;
22399+};
22400+
22401+#endif
22402+
22403+struct psb_ta_mem {
22404+ struct ttm_base_object base;
22405+ struct drm_device *dev;
22406+ struct kref kref;
22407+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
22408+ uint32_t bo_size;
22409+ struct ttm_buffer_object *ta_memory;
22410+ struct ttm_buffer_object *hw_data;
22411+ int is_deallocating;
22412+ int deallocating_scheduled;
22413+};
22414+
22415+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
22416+ int shareable,
22417+ uint32_t num_scenes,
22418+ uint32_t w, uint32_t h);
22419+extern void psb_scene_pool_unref(struct psb_scene_pool **pool);
22420+extern struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file
22421+ *priv,
22422+ uint32_t handle,
22423+ int check_owner);
22424+extern int psb_validate_scene_pool(struct psb_context *context,
22425+ struct psb_scene_pool *pool,
22426+ uint32_t w,
22427+ uint32_t h, int final_pass,
22428+ struct psb_scene **scene_p);
22429+extern void psb_scene_unref(struct psb_scene **scene);
22430+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
22431+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
22432+ void *data,
22433+ struct drm_file *file_priv);
22434+
22435+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
22436+{
22437+ return pool->base.hash.key;
22438+}
22439+
22440+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
22441+ uint32_t pages);
22442+extern struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src);
22443+extern void psb_ta_mem_unref(struct psb_ta_mem **ta_mem);
22444+
22445+#endif
22446diff -uNr a/drivers/gpu/drm/psb/psb_schedule.c b/drivers/gpu/drm/psb/psb_schedule.c
22447--- a/drivers/gpu/drm/psb/psb_schedule.c 1969-12-31 16:00:00.000000000 -0800
22448+++ b/drivers/gpu/drm/psb/psb_schedule.c 2009-04-07 13:28:38.000000000 -0700
22449@@ -0,0 +1,1539 @@
22450+/**************************************************************************
22451+ * Copyright (c) 2007, Intel Corporation.
22452+ * All Rights Reserved.
22453+ *
22454+ * This program is free software; you can redistribute it and/or modify it
22455+ * under the terms and conditions of the GNU General Public License,
22456+ * version 2, as published by the Free Software Foundation.
22457+ *
22458+ * This program is distributed in the hope it will be useful, but WITHOUT
22459+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22460+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22461+ * more details.
22462+ *
22463+ * You should have received a copy of the GNU General Public License along with
22464+ * this program; if not, write to the Free Software Foundation, Inc.,
22465+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22466+ *
22467+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22468+ * develop this driver.
22469+ *
22470+ **************************************************************************/
22471+/*
22472+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
22473+ */
22474+
22475+#include <drm/drmP.h>
22476+#include "psb_drm.h"
22477+#include "psb_drv.h"
22478+#include "psb_reg.h"
22479+#include "psb_scene.h"
22480+#include "ttm/ttm_execbuf_util.h"
22481+
22482+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 30)
22483+#define PSB_ALLOWED_TA_RUNTIME (DRM_HZ * 30)
22484+#define PSB_RASTER_TIMEOUT (DRM_HZ / 10)
22485+#define PSB_TA_TIMEOUT (DRM_HZ / 10)
22486+
22487+#undef PSB_SOFTWARE_WORKAHEAD
22488+
22489+#ifdef PSB_STABLE_SETTING
22490+
22491+/*
22492+ * Software blocks completely while the engines are working so there can be no
22493+ * overlap.
22494+ */
22495+
22496+#define PSB_WAIT_FOR_RASTER_COMPLETION
22497+#define PSB_WAIT_FOR_TA_COMPLETION
22498+
22499+#elif defined(PSB_PARANOID_SETTING)
22500+/*
22501+ * Software blocks "almost" completely while the engines are working, so
22502+ * there can be almost no overlap.
22503+ */
22504+
22505+#define PSB_WAIT_FOR_RASTER_COMPLETION
22506+#define PSB_WAIT_FOR_TA_COMPLETION
22507+#define PSB_BE_PARANOID
22508+
22509+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
22510+/*
22511+ * Software leaps ahead while the rasterizer is running and prepares
22512+ * a new ta job that can be scheduled before the rasterizer has
22513+ * finished.
22514+ */
22515+
22516+#define PSB_WAIT_FOR_TA_COMPLETION
22517+
22518+#elif defined(PSB_SOFTWARE_WORKAHEAD)
22519+/*
22520+ * Don't sync, but allow software to work ahead and queue a number of jobs.
22521+ * But block overlapping in the scheduler.
22522+ */
22523+
22524+#define PSB_BLOCK_OVERLAP
22525+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
22526+
22527+#endif
22528+
22529+/*
22530+ * Avoid pixelbe pagefaults on C0.
22531+ */
22532+#if 0
22533+#define PSB_BLOCK_OVERLAP
22534+#endif
22535+
22536+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
22537+ struct psb_scheduler *scheduler,
22538+ uint32_t reply_flag);
22539+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
22540+ struct psb_scheduler *scheduler,
22541+ uint32_t reply_flag);
22542+
22543+#ifdef FIX_TG_16
22544+
22545+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
22546+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
22547+
22548+#endif
22549+
22550+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
22551+ int *lockup, int *idle)
22552+{
22553+ unsigned long irq_flags;
22554+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
22555+
22556+ *lockup = 0;
22557+ *idle = 1;
22558+
22559+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22560+
22561+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
22562+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
22563+ *lockup = 1;
22564+ }
22565+ if (!*lockup
22566+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
22567+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
22568+ *lockup = 1;
22569+ }
22570+ if (!*lockup)
22571+ *idle = scheduler->idle;
22572+
22573+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22574+}
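
The lockup test compares jiffies against the deadline each engine armed when its task was fired (ta_end_jiffies / raster_end_jiffies). time_after_eq() is the wraparound-safe form of ">=" for a free-running counter; a standalone sketch of the same trick (tick_after_eq() is an illustrative stand-in):

    #include <stdio.h>

    /* Wraparound-safe "a >= b" for a free-running tick counter:
     * the signed difference is what the kernel's time_after_eq()
     * macro boils down to. */
    static int tick_after_eq(unsigned long a, unsigned long b)
    {
        return (long) (a - b) >= 0;
    }

    int main(void)
    {
        unsigned long deadline = (unsigned long) -5;  /* near wrap */
        unsigned long now = 3;                        /* wrapped */

        /* A naive "now >= deadline" says no; the signed difference
         * correctly reports now is 8 ticks past the deadline. */
        printf("expired (naive):  %d\n", now >= deadline);
        printf("expired (signed): %d\n", tick_after_eq(now, deadline));
        return 0;
    }
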
22575+
22576+static inline void psb_set_idle(struct psb_scheduler *scheduler)
22577+{
22578+ scheduler->idle =
22579+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
22580+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
22581+ if (scheduler->idle)
22582+ wake_up(&scheduler->idle_queue);
22583+}
22584+
22585+/*
22586+ * Call with the scheduler spinlock held.
22587+ * Assigns a scene context to either the ta or the rasterizer,
22588+ * flushing out other scenes to memory if necessary.
22589+ */
22590+
22591+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
22592+ struct psb_scene *scene,
22593+ int engine, struct psb_task *task)
22594+{
22595+ uint32_t flags = 0;
22596+ struct psb_hw_scene *hw_scene;
22597+ struct drm_device *dev = scene->dev;
22598+ struct drm_psb_private *dev_priv =
22599+ (struct drm_psb_private *) dev->dev_private;
22600+
22601+ hw_scene = scene->hw_scene;
22602+ if (hw_scene && hw_scene->last_scene == scene) {
22603+
22604+ /*
22605+ * Reuse the last hw scene context and delete it from the
22606+ * free list.
22607+ */
22608+
22609+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
22610+ hw_scene->context_number);
22611+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
22612+
22613+ /*
22614+ * No hw context initialization to be done.
22615+ */
22616+
22617+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
22618+ }
22619+
22620+ list_del_init(&hw_scene->head);
22621+
22622+ } else {
22623+ struct list_head *list;
22624+ hw_scene = NULL;
22625+
22626+ /*
22627+ * Grab a new hw scene context.
22628+ */
22629+
22630+ list_for_each(list, &scheduler->hw_scenes) {
22631+ hw_scene =
22632+ list_entry(list, struct psb_hw_scene, head);
22633+ break;
22634+ }
22635+ BUG_ON(!hw_scene);
22636+ PSB_DEBUG_RENDER("New hw scene %d.\n",
22637+ hw_scene->context_number);
22638+
22639+ list_del_init(list);
22640+ }
22641+ scene->hw_scene = hw_scene;
22642+ hw_scene->last_scene = scene;
22643+
22644+ flags |= PSB_SCENE_FLAG_SETUP;
22645+
22646+ /*
22647+ * Switch context and setup the engine.
22648+ */
22649+
22650+ return psb_xhw_scene_bind_fire(dev_priv,
22651+ &task->buf,
22652+ task->flags,
22653+ hw_scene->context_number,
22654+ scene->hw_cookie,
22655+ task->oom_cmds,
22656+ task->oom_cmd_size,
22657+ scene->hw_data->offset,
22658+ engine, flags | scene->flags);
22659+}
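
psb_set_scene_fire() prefers the hardware scene context that last held this scene, in which case a dirty scene skips full re-initialization (PSB_SCENE_FLAG_SETUP_ONLY); otherwise any context is taken from the free list. A toy model of that reuse decision (illustrative types, no DRM):

    #include <stddef.h>
    #include <stdio.h>

    #define NUM_CTX 2

    struct scene;
    struct hw_ctx {
        int free;
        const struct scene *last_scene;
    };

    struct scene {
        struct hw_ctx *ctx;
    };

    /* Prefer the context that last held this scene; otherwise grab
     * any free one.  Returns NULL only if all contexts are busy. */
    static struct hw_ctx *pick_ctx(struct hw_ctx *pool, struct scene *s)
    {
        if (s->ctx && s->ctx->free && s->ctx->last_scene == s) {
            s->ctx->free = 0;           /* cheap reuse: state intact */
            return s->ctx;
        }
        for (size_t i = 0; i < NUM_CTX; ++i) {
            if (pool[i].free) {
                pool[i].free = 0;       /* fresh context: needs setup */
                pool[i].last_scene = s;
                s->ctx = &pool[i];
                return &pool[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct hw_ctx pool[NUM_CTX] = { { 1, NULL }, { 1, NULL } };
        struct scene s = { NULL };

        printf("first pick: ctx %td\n", pick_ctx(pool, &s) - pool);
        pool[0].free = 1;               /* scene leaves the hardware */
        printf("reuse pick: ctx %td\n", pick_ctx(pool, &s) - pool);
        return 0;
    }
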
22660+
22661+static inline void psb_report_fence(struct drm_psb_private *dev_priv,
22662+ struct psb_scheduler *scheduler,
22663+ uint32_t class,
22664+ uint32_t sequence,
22665+ uint32_t type, int call_handler)
22666+{
22667+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
22668+ struct ttm_fence_device *fdev = &dev_priv->fdev;
22669+ struct ttm_fence_class_manager *fc = &fdev->fence_class[PSB_ENGINE_TA];
22670+ unsigned long irq_flags;
22671+
22672+ /**
22673+ * Block racing poll_ta calls, that take the lock in write mode.
22674+ */
22675+
22676+ read_lock_irqsave(&fc->lock, irq_flags);
22677+ seq->sequence = sequence;
22678+ seq->reported = 0;
22679+ read_unlock_irqrestore(&fc->lock, irq_flags);
22680+
22681+ if (call_handler)
22682+ psb_fence_handler(scheduler->dev, class);
22683+}
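
Note the inverted lock roles in psb_report_fence(): it writes seq->sequence under fc->lock held for *read*, because each reporter touches only its own seq[type] slot and may run concurrently with other reporters, while the racing poll_ta path takes the lock for write so it observes all slots consistently. A user-space sketch of that inversion with a pthreads rwlock (structure illustrative):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct seq_slot {
        uint32_t sequence;
        int reported;
    };

    static pthread_rwlock_t fc_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct seq_slot seq[4];

    /* "Readers" here are really per-slot writers: any number may
     * publish concurrently because each touches only its own slot. */
    static void report(int type, uint32_t sequence)
    {
        pthread_rwlock_rdlock(&fc_lock);
        seq[type].sequence = sequence;
        seq[type].reported = 0;
        pthread_rwlock_unlock(&fc_lock);
    }

    /* The polling side takes the lock exclusively, so it sees every
     * slot in a consistent state -- the exclusion poll_ta wants. */
    static uint32_t poll_newest(void)
    {
        uint32_t newest = 0;

        pthread_rwlock_wrlock(&fc_lock);
        for (int i = 0; i < 4; ++i)
            if (seq[i].sequence > newest)
                newest = seq[i].sequence;
        pthread_rwlock_unlock(&fc_lock);
        return newest;
    }

    int main(void)
    {
        report(0, 41);
        report(1, 42);
        printf("newest published sequence: %u\n", poll_newest());
        return 0;
    }
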
22684+
22685+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
22686+ struct psb_scheduler *scheduler);
22687+
22688+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
22689+ struct psb_scheduler *scheduler)
22690+{
22691+ struct psb_task *task = NULL;
22692+ struct list_head *list, *next;
22693+ int pushed_raster_task = 0;
22694+
22695+ PSB_DEBUG_RENDER("schedule ta\n");
22696+
22697+ if (scheduler->idle_count != 0)
22698+ return;
22699+
22700+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
22701+ return;
22702+
22703+ if (scheduler->ta_state)
22704+ return;
22705+
22706+ /*
22707+ * Skip the ta stage for rasterization-only
22708+ * tasks. They arrive here to make sure we're rasterizing
22709+ * tasks in the correct order.
22710+ */
22711+
22712+ list_for_each_safe(list, next, &scheduler->ta_queue) {
22713+ task = list_entry(list, struct psb_task, head);
22714+ if (task->task_type != psb_raster_task)
22715+ break;
22716+
22717+ list_del_init(list);
22718+ list_add_tail(list, &scheduler->raster_queue);
22719+ psb_report_fence(dev_priv, scheduler, task->engine,
22720+ task->sequence,
22721+ _PSB_FENCE_TA_DONE_SHIFT, 1);
22722+ task = NULL;
22723+ pushed_raster_task = 1;
22724+ }
22725+
22726+ if (pushed_raster_task)
22727+ psb_schedule_raster(dev_priv, scheduler);
22728+
22729+ if (!task)
22730+ return;
22731+
22732+ /*
22733+ * Still waiting for a vistest?
22734+ */
22735+
22736+ if (scheduler->feedback_task == task)
22737+ return;
22738+
22739+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
22740+
22741+ /*
22742+ * Block ta from trying to use both hardware contexts
22743+ * without the rasterizer starting to render from one of them.
22744+ */
22745+
22746+ if (!list_empty(&scheduler->raster_queue))
22747+ return;
22748+
22749+#endif
22750+
22751+#ifdef PSB_BLOCK_OVERLAP
22752+ /*
22753+ * Make sure rasterizer isn't doing anything.
22754+ */
22755+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
22756+ return;
22757+#endif
22758+ if (list_empty(&scheduler->hw_scenes))
22759+ return;
22760+
22761+#ifdef FIX_TG_16
22762+ if (psb_check_2d_idle(dev_priv))
22763+ return;
22764+#endif
22765+
22766+ list_del_init(&task->head);
22767+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
22768+ scheduler->ta_state = 1;
22769+
22770+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
22771+ scheduler->idle = 0;
22772+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
22773+ scheduler->total_ta_jiffies = 0;
22774+
22775+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
22776+ 0x00000000 : PSB_RF_FIRE_TA;
22777+
22778+ (void) psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
22779+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA,
22780+ task);
22781+ psb_schedule_watchdog(dev_priv);
22782+}
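
The loop at the top of psb_schedule_ta() preserves submission order for rasterization-only work: leading raster tasks are moved from the TA queue to the raster queue (with their TA-done fence type reported immediately), and the scan stops at the first task that genuinely needs the TA. The same drain, modelled on plain arrays (illustrative, no list_head):

    #include <stdio.h>

    enum type { TA_TASK, RASTER_TASK };

    /* Move leading raster-only tasks from ta_q to raster_q so order
     * is preserved; stop at the first task that needs the TA. */
    static int drain_raster_only(enum type *ta_q, int *ta_n,
                                 enum type *raster_q, int *raster_n)
    {
        int moved = 0, i = 0;

        while (i < *ta_n && ta_q[i] == RASTER_TASK) {
            raster_q[(*raster_n)++] = ta_q[i++];
            moved++;
        }
        /* compact what is left of the TA queue */
        for (int j = i; j < *ta_n; ++j)
            ta_q[j - i] = ta_q[j];
        *ta_n -= i;
        return moved;
    }

    int main(void)
    {
        enum type ta_q[4] = { RASTER_TASK, RASTER_TASK,
                              TA_TASK, RASTER_TASK };
        enum type raster_q[4];
        int ta_n = 4, raster_n = 0;

        int moved = drain_raster_only(ta_q, &ta_n, raster_q, &raster_n);
        printf("moved %d, ta queue now %d deep\n", moved, ta_n);
        return 0;
    }
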
22783+
22784+static int psb_fire_raster(struct psb_scheduler *scheduler,
22785+ struct psb_task *task)
22786+{
22787+ struct drm_device *dev = scheduler->dev;
22788+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
22789+ dev->dev_private;
22790+
22791+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
22792+
22793+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
22794+}
22795+
22796+/*
22797+ * Take the first rasterization task from the hp raster queue or from the
22798+ * raster queue and fire the rasterizer.
22799+ */
22800+
22801+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
22802+ struct psb_scheduler *scheduler)
22803+{
22804+ struct psb_task *task;
22805+ struct list_head *list;
22806+
22807+ if (scheduler->idle_count != 0)
22808+ return;
22809+
22810+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
22811+ PSB_DEBUG_RENDER("Raster busy.\n");
22812+ return;
22813+ }
22814+#ifdef PSB_BLOCK_OVERLAP
22815+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
22816+ PSB_DEBUG_RENDER("TA busy.\n");
22817+ return;
22818+ }
22819+#endif
22820+
22821+ if (!list_empty(&scheduler->hp_raster_queue))
22822+ list = scheduler->hp_raster_queue.next;
22823+ else if (!list_empty(&scheduler->raster_queue))
22824+ list = scheduler->raster_queue.next;
22825+ else {
22826+ PSB_DEBUG_RENDER("Nothing in list\n");
22827+ return;
22828+ }
22829+
22830+ task = list_entry(list, struct psb_task, head);
22831+
22832+ /*
22833+ * Sometimes changing ZLS format requires an ISP reset.
22834+ * Doesn't seem to consume too much time.
22835+ */
22836+
22837+ if (task->scene)
22838+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
22839+
22840+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
22841+
22842+ list_del_init(list);
22843+ scheduler->idle = 0;
22844+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
22845+ scheduler->total_raster_jiffies = 0;
22846+
22847+ if (task->scene)
22848+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
22849+
22850+ (void) psb_reg_submit(dev_priv, task->raster_cmds,
22851+ task->raster_cmd_size);
22852+
22853+ if (task->scene) {
22854+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
22855+ 0x00000000 : PSB_RF_FIRE_RASTER;
22856+ psb_set_scene_fire(scheduler,
22857+ task->scene, PSB_SCENE_ENGINE_RASTER,
22858+ task);
22859+ } else {
22860+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
22861+ psb_fire_raster(scheduler, task);
22862+ }
22863+ psb_schedule_watchdog(dev_priv);
22864+}
22865+
22866+int psb_extend_timeout(struct drm_psb_private *dev_priv,
22867+ uint32_t xhw_lockup)
22868+{
22869+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
22870+ unsigned long irq_flags;
22871+ int ret = -EBUSY;
22872+
22873+ spin_lock_irqsave(&scheduler->lock, irq_flags);
22874+
22875+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
22876+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
22877+ if (xhw_lockup & PSB_LOCKUP_TA) {
22878+ goto out_unlock;
22879+ } else {
22880+ scheduler->total_ta_jiffies +=
22881+ jiffies - scheduler->ta_end_jiffies +
22882+ PSB_TA_TIMEOUT;
22883+ if (scheduler->total_ta_jiffies >
22884+ PSB_ALLOWED_TA_RUNTIME)
22885+ goto out_unlock;
22886+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
22887+ }
22888+ }
22889+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL &&
22890+ time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
22891+ if (xhw_lockup & PSB_LOCKUP_RASTER) {
22892+ goto out_unlock;
22893+ } else {
22894+ scheduler->total_raster_jiffies +=
22895+ jiffies - scheduler->raster_end_jiffies +
22896+ PSB_RASTER_TIMEOUT;
22897+ if (scheduler->total_raster_jiffies >
22898+ PSB_ALLOWED_RASTER_RUNTIME)
22899+ goto out_unlock;
22900+ scheduler->raster_end_jiffies =
22901+ jiffies + PSB_RASTER_TIMEOUT;
22902+ }
22903+ }
22904+
22905+ ret = 0;
22906+
22907+out_unlock:
22908+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
22909+ return ret;
22910+}
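
psb_extend_timeout() distinguishes "slow but alive" from "hung": every overrun is charged against a running total, and the deadline is re-armed only while that total stays within PSB_ALLOWED_*_RUNTIME (about 30 seconds of wall time at the values above). A toy version of the accounting with small constants (names illustrative):

    #include <stdio.h>

    #define TIMEOUT 10      /* stands in for PSB_TA_TIMEOUT */
    #define ALLOWED 100     /* stands in for PSB_ALLOWED_TA_RUNTIME */

    struct budget {
        unsigned long end;    /* current deadline */
        unsigned long total;  /* time consumed so far */
    };

    /* Returns 0 if the deadline was extended, -1 if the budget is
     * spent and the engine should be declared locked up. */
    static int extend(struct budget *b, unsigned long now)
    {
        if (now < b->end)
            return 0;                       /* not expired yet */
        b->total += now - b->end + TIMEOUT; /* charge full slice */
        if (b->total > ALLOWED)
            return -1;                      /* give up */
        b->end = now + TIMEOUT;             /* re-arm the watchdog */
        return 0;
    }

    int main(void)
    {
        struct budget b = { .end = 10, .total = 0 };
        unsigned long now = 10;
        int ret = 0;

        while (ret == 0) {
            ret = extend(&b, now);
            now += TIMEOUT;
        }
        printf("gave up at t=%lu after consuming %lu\n", now, b.total);
        return 0;
    }
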
22911+
22912+/*
22913+ * TA done handler.
22914+ */
22915+
22916+static void psb_ta_done(struct drm_psb_private *dev_priv,
22917+ struct psb_scheduler *scheduler)
22918+{
22919+ struct psb_task *task =
22920+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
22921+ struct psb_scene *scene = task->scene;
22922+
22923+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
22924+
22925+ switch (task->ta_complete_action) {
22926+ case PSB_RASTER_BLOCK:
22927+ scheduler->ta_state = 1;
22928+ scene->flags |=
22929+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
22930+ list_add_tail(&task->head, &scheduler->raster_queue);
22931+ break;
22932+ case PSB_RASTER:
22933+ scene->flags |=
22934+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
22935+ list_add_tail(&task->head, &scheduler->raster_queue);
22936+ break;
22937+ case PSB_RETURN:
22938+ scheduler->ta_state = 0;
22939+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
22940+ list_add_tail(&scene->hw_scene->head,
22941+ &scheduler->hw_scenes);
22942+
22943+ break;
22944+ }
22945+
22946+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
22947+
22948+#ifdef FIX_TG_16
22949+ psb_2d_atomic_unlock(dev_priv);
22950+#endif
22951+
22952+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
22953+ psb_report_fence(dev_priv, scheduler, task->engine,
22954+ task->sequence,
22955+ _PSB_FENCE_TA_DONE_SHIFT, 1);
22956+
22957+ psb_schedule_raster(dev_priv, scheduler);
22958+ psb_schedule_ta(dev_priv, scheduler);
22959+ psb_set_idle(scheduler);
22960+
22961+ if (task->ta_complete_action != PSB_RETURN)
22962+ return;
22963+
22964+ list_add_tail(&task->head, &scheduler->task_done_queue);
22965+ schedule_delayed_work(&scheduler->wq, 1);
22966+}
22967+
22968+/*
22969+ * Rasterizer done handler.
22970+ */
22971+
22972+static void psb_raster_done(struct drm_psb_private *dev_priv,
22973+ struct psb_scheduler *scheduler)
22974+{
22975+ struct psb_task *task =
22976+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
22977+ struct psb_scene *scene = task->scene;
22978+ uint32_t complete_action = task->raster_complete_action;
22979+
22980+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
22981+
22982+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
22983+
22984+ if (complete_action != PSB_RASTER)
22985+ psb_schedule_raster(dev_priv, scheduler);
22986+
22987+ if (scene) {
22988+ if (task->feedback.page) {
22989+ if (unlikely(scheduler->feedback_task)) {
22990+ /*
22991+ * This should never happen, since the previous
22992+ * feedback query will return before the next
22993+ * raster task is fired.
22994+ */
22995+ DRM_ERROR("Feedback task busy.\n");
22996+ }
22997+ scheduler->feedback_task = task;
22998+ psb_xhw_vistest(dev_priv, &task->buf);
22999+ }
23000+ switch (complete_action) {
23001+ case PSB_RETURN:
23002+ scene->flags &=
23003+ ~(PSB_SCENE_FLAG_DIRTY |
23004+ PSB_SCENE_FLAG_COMPLETE);
23005+ list_add_tail(&scene->hw_scene->head,
23006+ &scheduler->hw_scenes);
23007+ psb_report_fence(dev_priv, scheduler, task->engine,
23008+ task->sequence,
23009+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
23010+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
23011+ scheduler->ta_state = 0;
23012+
23013+ break;
23014+ case PSB_RASTER:
23015+ list_add(&task->head, &scheduler->raster_queue);
23016+ task->raster_complete_action = PSB_RETURN;
23017+ psb_schedule_raster(dev_priv, scheduler);
23018+ break;
23019+ case PSB_TA:
23020+ list_add(&task->head, &scheduler->ta_queue);
23021+ scheduler->ta_state = 0;
23022+ task->raster_complete_action = PSB_RETURN;
23023+ task->ta_complete_action = PSB_RASTER;
23024+ break;
23025+
23026+ }
23027+ }
23028+ psb_schedule_ta(dev_priv, scheduler);
23029+ psb_set_idle(scheduler);
23030+
23031+ if (complete_action == PSB_RETURN) {
23032+ if (task->scene == NULL) {
23033+ psb_report_fence(dev_priv, scheduler, task->engine,
23034+ task->sequence,
23035+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
23036+ }
23037+ if (!task->feedback.page) {
23038+ list_add_tail(&task->head,
23039+ &scheduler->task_done_queue);
23040+ schedule_delayed_work(&scheduler->wq, 1);
23041+ }
23042+ }
23043+}
23044+
23045+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
23046+{
23047+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23048+ unsigned long irq_flags;
23049+
23050+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23051+ scheduler->idle_count++;
23052+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23053+}
23054+
23055+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
23056+{
23057+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23058+ unsigned long irq_flags;
23059+
23060+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23061+ if (--scheduler->idle_count == 0) {
23062+ psb_schedule_ta(dev_priv, scheduler);
23063+ psb_schedule_raster(dev_priv, scheduler);
23064+ }
23065+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23066+}
23067+
23068+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
23069+{
23070+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23071+ unsigned long irq_flags;
23072+ int ret;
23073+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23074+ ret = scheduler->idle_count != 0 && scheduler->idle;
23075+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23076+ return ret;
23077+}
23078+
23079+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
23080+{
23081+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23082+ unsigned long irq_flags;
23083+ int ret;
23084+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23085+ ret = (scheduler->idle &&
23086+ list_empty(&scheduler->raster_queue) &&
23087+ list_empty(&scheduler->ta_queue) &&
23088+ list_empty(&scheduler->hp_raster_queue));
23089+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23090+ return ret;
23091+}
23092+
23093+static void psb_ta_oom(struct drm_psb_private *dev_priv,
23094+ struct psb_scheduler *scheduler)
23095+{
23096+
23097+ struct psb_task *task =
23098+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
23099+ if (!task)
23100+ return;
23101+
23102+ if (task->aborting)
23103+ return;
23104+ task->aborting = 1;
23105+
23106+ DRM_INFO("Info: TA out of parameter memory.\n");
23107+
23108+ (void) psb_xhw_ta_oom(dev_priv, &task->buf,
23109+ task->scene->hw_cookie);
23110+}
23111+
23112+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
23113+ struct psb_scheduler *scheduler)
23114+{
23115+
23116+ struct psb_task *task =
23117+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
23118+ uint32_t flags;
23119+ if (!task)
23120+ return;
23121+
23122+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
23123+ task->scene->hw_cookie,
23124+ &task->ta_complete_action,
23125+ &task->raster_complete_action, &flags);
23126+ task->flags |= flags;
23127+ task->aborting = 0;
23128+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
23129+}
23130+
23131+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
23132+ struct psb_scheduler *scheduler)
23133+{
23134+ DRM_ERROR("TA hw scene freed.\n");
23135+}
23136+
23137+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
23138+ struct psb_scheduler *scheduler)
23139+{
23140+ struct psb_task *task = scheduler->feedback_task;
23141+ uint8_t *feedback_map;
23142+ uint32_t add;
23143+ uint32_t cur;
23144+ struct drm_psb_vistest *vistest;
23145+ int i;
23146+
23147+ scheduler->feedback_task = NULL;
23148+ if (!task) {
23149+ DRM_ERROR("No Poulsbo feedback task.\n");
23150+ return;
23151+ }
23152+ if (!task->feedback.page) {
23153+ DRM_ERROR("No Poulsbo feedback page.\n");
23154+ goto out;
23155+ }
23156+
23157+ if (in_irq())
23158+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
23159+ else
23160+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
23161+
23162+ /*
23163+ * Loop over all requested vistest components here.
23164+ * Only one (vistest) currently.
23165+ */
23166+
23167+ vistest = (struct drm_psb_vistest *)
23168+ (feedback_map + task->feedback.offset);
23169+
23170+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
23171+ add = task->buf.arg.arg.feedback[i];
23172+ cur = vistest->vt[i];
23173+
23174+ /*
23175+ * Vistest saturates.
23176+ */
23177+
23178+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
23179+ }
23180+ if (in_irq())
23181+ kunmap_atomic(feedback_map, KM_IRQ0);
23182+ else
23183+ kunmap_atomic(feedback_map, KM_USER0);
23184+out:
23185+ psb_report_fence(dev_priv, scheduler, task->engine, task->sequence,
23186+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
23187+
23188+ if (list_empty(&task->head)) {
23189+ list_add_tail(&task->head, &scheduler->task_done_queue);
23190+ schedule_delayed_work(&scheduler->wq, 1);
23191+ } else
23192+ psb_schedule_ta(dev_priv, scheduler);
23193+}
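
The accumulation above saturates instead of wrapping: for unsigned arithmetic, cur + add < cur holds exactly when the addition overflowed, and the counter then pins at all-ones (~0). The idiom in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Saturating unsigned add, as used for the vistest counters:
     * on overflow the sum wraps below either operand, so pin at max. */
    static uint32_t sat_add_u32(uint32_t cur, uint32_t add)
    {
        uint32_t sum = cur + add;

        return (sum < cur) ? UINT32_MAX : sum;
    }

    int main(void)
    {
        printf("%u\n", sat_add_u32(10, 20));            /* 30 */
        printf("%u\n", sat_add_u32(0xfffffff0u, 0x20)); /* saturates */
        return 0;
    }
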
23194+
23195+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
23196+ struct psb_scheduler *scheduler)
23197+{
23198+ struct psb_task *task =
23199+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
23200+
23201+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
23202+
23203+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
23204+}
23205+
23206+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
23207+ struct psb_scheduler *scheduler)
23208+{
23209+ struct psb_task *task =
23210+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
23211+ uint32_t reply_flags;
23212+
23213+ if (!task) {
23214+ DRM_ERROR("Null task.\n");
23215+ return;
23216+ }
23217+
23218+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
23219+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
23220+
23221+ reply_flags = PSB_RF_FIRE_RASTER;
23222+ if (task->raster_complete_action == PSB_RASTER)
23223+ reply_flags |= PSB_RF_DEALLOC;
23224+
23225+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
23226+}
23227+
23228+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
23229+ struct psb_scheduler *scheduler)
23230+{
23231+ uint32_t type;
23232+ int ret;
23233+ unsigned long irq_flags;
23234+
23235+ /*
23236+ * Xhw cannot write directly to the comm page, so we do it
23237+ * here. Firmware would have written it directly.
23238+ */
23239+
23240+ ret = psb_xhw_handler(dev_priv);
23241+ if (unlikely(ret))
23242+ return ret;
23243+
23244+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
23245+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
23246+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
23247+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
23248+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
23249+ DRM_ERROR("Lost Poulsbo hardware event.\n");
23250+ }
23251+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
23252+
23253+ if (type == 0)
23254+ return 0;
23255+
23256+ switch (type) {
23257+ case PSB_UIRQ_VISTEST:
23258+ psb_vistest_reply(dev_priv, scheduler);
23259+ break;
23260+ case PSB_UIRQ_OOM_REPLY:
23261+ psb_ta_oom_reply(dev_priv, scheduler);
23262+ break;
23263+ case PSB_UIRQ_FIRE_TA_REPLY:
23264+ psb_ta_fire_reply(dev_priv, scheduler);
23265+ break;
23266+ case PSB_UIRQ_FIRE_RASTER_REPLY:
23267+ psb_raster_fire_reply(dev_priv, scheduler);
23268+ break;
23269+ default:
23270+ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
23271+ }
23272+ return 0;
23273+}
23274+
23275+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
23276+{
23277+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23278+ unsigned long irq_flags;
23279+ int ret;
23280+
23281+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23282+ ret = psb_user_interrupt(dev_priv, scheduler);
23283+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23284+ return ret;
23285+}
23286+
23287+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
23288+ struct psb_scheduler *scheduler,
23289+ uint32_t reply_flag)
23290+{
23291+ struct psb_task *task =
23292+ scheduler->current_task[PSB_SCENE_ENGINE_TA];
23293+ uint32_t flags;
23294+ uint32_t mask;
23295+
23296+ task->reply_flags |= reply_flag;
23297+ flags = task->reply_flags;
23298+ mask = PSB_RF_FIRE_TA;
23299+
23300+ if (!(flags & mask))
23301+ return;
23302+
23303+ mask = PSB_RF_TA_DONE;
23304+ if ((flags & mask) == mask) {
23305+ task->reply_flags &= ~mask;
23306+ psb_ta_done(dev_priv, scheduler);
23307+ }
23308+
23309+ mask = PSB_RF_OOM;
23310+ if ((flags & mask) == mask) {
23311+ task->reply_flags &= ~mask;
23312+ psb_ta_oom(dev_priv, scheduler);
23313+ }
23314+
23315+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
23316+ if ((flags & mask) == mask) {
23317+ task->reply_flags &= ~mask;
23318+ psb_ta_done(dev_priv, scheduler);
23319+ }
23320+}
23321+
23322+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
23323+ struct psb_scheduler *scheduler,
23324+ uint32_t reply_flag)
23325+{
23326+ struct psb_task *task =
23327+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
23328+ uint32_t flags;
23329+ uint32_t mask;
23330+
23331+ task->reply_flags |= reply_flag;
23332+ flags = task->reply_flags;
23333+ mask = PSB_RF_FIRE_RASTER;
23334+
23335+ if (!(flags & mask))
23336+ return;
23337+
23338+ /*
23339+ * For rasterizer-only tasks, don't report fence done here,
23340+ * as this is time consuming and the rasterizer wants a new
23341+ * task immediately. For other tasks, the hardware is probably
23342+ * still busy deallocating TA memory, so we can report
23343+ * fence done in parallel.
23344+ */
23345+
23346+ if (task->raster_complete_action == PSB_RETURN &&
23347+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
23348+ psb_report_fence(dev_priv, scheduler, task->engine,
23349+ task->sequence,
23350+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
23351+ }
23352+
23353+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
23354+ if ((flags & mask) == mask) {
23355+ task->reply_flags &= ~mask;
23356+ psb_raster_done(dev_priv, scheduler);
23357+ }
23358+}
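
Both dispatch functions are small event-coalescing state machines: each interrupt or xhw reply only ORs one bit into task->reply_flags, and an action fires once its complete bit set has been observed, after which those bits are consumed. A standalone model (bit names hypothetical, one action):

    #include <stdio.h>

    #define RF_FIRE    (1u << 0)  /* command stream accepted */
    #define RF_DONE    (1u << 1)  /* engine finished */
    #define RF_DEALLOC (1u << 2)  /* memory released */

    static unsigned int reply_flags;

    /* OR in one event; run the completion action only when the whole
     * required set has been observed, then consume those bits. */
    static void dispatch(unsigned int event)
    {
        const unsigned int need = RF_DONE | RF_DEALLOC;

        reply_flags |= event;
        if (!(reply_flags & RF_FIRE))
            return;                 /* not even started yet */
        if ((reply_flags & need) == need) {
            reply_flags &= ~need;   /* consume */
            printf("task complete\n");
        }
    }

    int main(void)
    {
        dispatch(RF_FIRE);     /* nothing yet */
        dispatch(RF_DEALLOC);  /* still waiting for DONE */
        dispatch(RF_DONE);     /* fires now */
        return 0;
    }
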
23359+
23360+void psb_scheduler_handler(struct drm_psb_private *dev_priv,
23361+ uint32_t status)
23362+{
23363+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23364+
23365+ spin_lock(&scheduler->lock);
23366+
23367+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
23368+ psb_dispatch_raster(dev_priv, scheduler,
23369+ PSB_RF_RASTER_DONE);
23370+ }
23371+ if (status & _PSB_CE_DPM_3D_MEM_FREE)
23372+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
23373+
23374+ if (status & _PSB_CE_TA_FINISHED)
23375+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
23376+
23377+ if (status & _PSB_CE_TA_TERMINATE)
23378+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
23379+
23380+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
23381+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
23382+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
23383+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
23384+ }
23385+ if (status & _PSB_CE_DPM_TA_MEM_FREE)
23386+ psb_ta_hw_scene_freed(dev_priv, scheduler);
23387+
23388+ if (status & _PSB_CE_SW_EVENT)
23389+ psb_user_interrupt(dev_priv, scheduler);
23390+
23391+ spin_unlock(&scheduler->lock);
23392+}
23393+
23394+static void psb_free_task_wq(struct work_struct *work)
23395+{
23396+ struct psb_scheduler *scheduler =
23397+ container_of(work, struct psb_scheduler, wq.work);
23398+
23399+ struct list_head *list, *next;
23400+ unsigned long irq_flags;
23401+ struct psb_task *task;
23402+
23403+ if (!mutex_trylock(&scheduler->task_wq_mutex))
23404+ return;
23405+
23406+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23407+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
23408+ task = list_entry(list, struct psb_task, head);
23409+ list_del_init(list);
23410+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23411+
23412+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
23413+ "Feedback bo 0x%08lx, done %d\n",
23414+ task->sequence,
23415+ (unsigned long) task->scene,
23416+ (unsigned long) task->feedback.bo,
23417+ atomic_read(&task->buf.done));
23418+
23419+ if (task->scene) {
23420+ PSB_DEBUG_RENDER("Unref scene %d\n",
23421+ task->sequence);
23422+ psb_scene_unref(&task->scene);
23423+ if (task->feedback.bo) {
23424+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
23425+ task->sequence);
23426+ ttm_bo_unref(&task->feedback.bo);
23427+ }
23428+ }
23429+
23430+ if (atomic_read(&task->buf.done)) {
23431+ PSB_DEBUG_RENDER("Deleting task %d\n",
23432+ task->sequence);
23433+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
23434+ task = NULL;
23435+ }
23436+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23437+ if (task != NULL)
23438+ list_add(list, &scheduler->task_done_queue);
23439+ }
23440+ if (!list_empty(&scheduler->task_done_queue)) {
23441+ PSB_DEBUG_RENDER("Rescheduling wq\n");
23442+ schedule_delayed_work(&scheduler->wq, 1);
23443+ }
23444+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23445+
23446+ if (list_empty(&scheduler->task_done_queue) &&
23447+ drm_psb_ospm && IS_MRST(scheduler->dev)) {
23448+ psb_try_power_down_sgx(scheduler->dev);
23449+ }
23450+ mutex_unlock(&scheduler->task_wq_mutex);
23451+}
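
The free-task worker shows the usual drain pattern for doing heavy work from under a spinlock: detach an entry while locked, drop the lock around the unref/free (which may sleep), then re-take it; mutex_trylock() at the top keeps two workers from draining the queue concurrently. A simplified user-space version of the lock juggling (a pthread mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stdio.h>

    #define N 4

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending[N] = { 1, 1, 1, 1 };

    /* Drain a shared queue, releasing the lock around the expensive
     * per-item work so other CPUs are not held off the whole time. */
    static void drain(void)
    {
        pthread_mutex_lock(&lock);
        for (int i = 0; i < N; ++i) {
            if (!pending[i])
                continue;
            pending[i] = 0;                  /* detach under the lock */
            pthread_mutex_unlock(&lock);

            printf("freeing item %d\n", i);  /* "slow" work, unlocked */

            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        drain();
        return 0;
    }
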
23452+
23453+/*
23454+ * Check if any of the tasks in the queues is using a scene.
23455+ * In that case we know the TA memory buffer objects are
23456+ * fenced and will not be evicted until that fence is signaled.
23457+ */
23458+
23459+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
23460+{
23461+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23462+ unsigned long irq_flags;
23463+ struct psb_task *task;
23464+ struct psb_task *next_task;
23465+
23466+ dev_priv->force_ta_mem_load = 1;
23467+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23468+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue,
23469+ head) {
23470+ if (task->scene) {
23471+ dev_priv->force_ta_mem_load = 0;
23472+ break;
23473+ }
23474+ }
23475+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
23476+ head) {
23477+ if (task->scene) {
23478+ dev_priv->force_ta_mem_load = 0;
23479+ break;
23480+ }
23481+ }
23482+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23483+}
23484+
23485+void psb_scheduler_reset(struct drm_psb_private *dev_priv,
23486+ int error_condition)
23487+{
23488+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23489+ unsigned long wait_jiffies;
23490+ unsigned long cur_jiffies;
23491+ struct psb_task *task;
23492+ struct psb_task *next_task;
23493+ unsigned long irq_flags;
23494+
23495+ psb_scheduler_pause(dev_priv);
23496+ if (!psb_scheduler_idle(dev_priv)) {
23497+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23498+
23499+ cur_jiffies = jiffies;
23500+ wait_jiffies = cur_jiffies;
23501+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
23502+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
23503+ wait_jiffies = scheduler->ta_end_jiffies;
23504+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
23505+ time_after_eq(scheduler->raster_end_jiffies,
23506+ wait_jiffies))
23507+ wait_jiffies = scheduler->raster_end_jiffies;
23508+
23509+ wait_jiffies -= cur_jiffies;
23510+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23511+
23512+ (void) wait_event_timeout(scheduler->idle_queue,
23513+ psb_scheduler_idle(dev_priv),
23514+ wait_jiffies);
23515+ }
23516+
23517+ if (!psb_scheduler_idle(dev_priv)) {
23518+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23519+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
23520+ if (task) {
23521+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
23522+ if (task->engine == PSB_ENGINE_HPRAST) {
23523+ psb_fence_error(scheduler->dev,
23524+ PSB_ENGINE_HPRAST,
23525+ task->sequence,
23526+ _PSB_FENCE_TYPE_RASTER_DONE,
23527+ error_condition);
23528+
23529+ list_del(&task->head);
23530+ psb_xhw_clean_buf(dev_priv, &task->buf);
23531+ list_add_tail(&task->head,
23532+ &scheduler->task_done_queue);
23533+ } else {
23534+ list_add(&task->head,
23535+ &scheduler->raster_queue);
23536+ }
23537+ }
23538+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
23539+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
23540+ if (task) {
23541+ DRM_ERROR("Detected Poulsbo ta lockup.\n");
23542+ list_add_tail(&task->head,
23543+ &scheduler->raster_queue);
23544+#ifdef FIX_TG_16
23545+ psb_2d_atomic_unlock(dev_priv);
23546+#endif
23547+ }
23548+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
23549+ scheduler->ta_state = 0;
23550+
23551+#ifdef FIX_TG_16
23552+ atomic_set(&dev_priv->ta_wait_2d, 0);
23553+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
23554+ wake_up(&dev_priv->queue_2d);
23555+#endif
23556+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23557+ }
23558+
23559+ /*
23560+ * Empty raster queue.
23561+ */
23562+
23563+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23564+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
23565+ head) {
23566+ struct psb_scene *scene = task->scene;
23567+
23568+ DRM_INFO("Signaling fence sequence %u\n",
23569+ task->sequence);
23570+
23571+ psb_fence_error(scheduler->dev,
23572+ task->engine,
23573+ task->sequence,
23574+ _PSB_FENCE_TYPE_TA_DONE |
23575+ _PSB_FENCE_TYPE_RASTER_DONE |
23576+ _PSB_FENCE_TYPE_SCENE_DONE |
23577+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
23578+ if (scene) {
23579+ scene->flags = 0;
23580+ if (scene->hw_scene) {
23581+ list_add_tail(&scene->hw_scene->head,
23582+ &scheduler->hw_scenes);
23583+ scene->hw_scene = NULL;
23584+ }
23585+ }
23586+
23587+ psb_xhw_clean_buf(dev_priv, &task->buf);
23588+ list_del(&task->head);
23589+ list_add_tail(&task->head, &scheduler->task_done_queue);
23590+ }
23591+
23592+ schedule_delayed_work(&scheduler->wq, 1);
23593+ scheduler->idle = 1;
23594+ wake_up(&scheduler->idle_queue);
23595+
23596+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23597+ psb_scheduler_restart(dev_priv);
23598+
23599+}
23600+
23601+int psb_scheduler_init(struct drm_device *dev,
23602+ struct psb_scheduler *scheduler)
23603+{
23604+ struct psb_hw_scene *hw_scene;
23605+ int i;
23606+
23607+ memset(scheduler, 0, sizeof(*scheduler));
23608+ scheduler->dev = dev;
23609+ mutex_init(&scheduler->task_wq_mutex);
23610+ spin_lock_init(&scheduler->lock);
23611+ scheduler->idle = 1;
23612+
23613+ INIT_LIST_HEAD(&scheduler->ta_queue);
23614+ INIT_LIST_HEAD(&scheduler->raster_queue);
23615+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
23616+ INIT_LIST_HEAD(&scheduler->hw_scenes);
23617+ INIT_LIST_HEAD(&scheduler->task_done_queue);
23618+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
23619+ init_waitqueue_head(&scheduler->idle_queue);
23620+
23621+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
23622+ hw_scene = &scheduler->hs[i];
23623+ hw_scene->context_number = i;
23624+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
23625+ }
23626+
23627+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i)
23628+ scheduler->seq[i].reported = 0;
23629+ return 0;
23630+}
23631+
23632+/*
23633+ * Scene references maintained by the scheduler are not refcounted.
23634+ * Remove all references to a particular scene here.
23635+ */
23636+
23637+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
23638+{
23639+ struct drm_psb_private *dev_priv =
23640+ (struct drm_psb_private *) scene->dev->dev_private;
23641+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23642+ struct psb_hw_scene *hw_scene;
23643+ unsigned long irq_flags;
23644+ unsigned int i;
23645+
23646+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23647+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
23648+ hw_scene = &scheduler->hs[i];
23649+ if (hw_scene->last_scene == scene) {
23650+ BUG_ON(list_empty(&hw_scene->head));
23651+ hw_scene->last_scene = NULL;
23652+ }
23653+ }
23654+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23655+}
23656+
23657+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
23658+{
23659+ flush_scheduled_work();
23660+}
23661+
23662+static int psb_setup_task(struct drm_device *dev,
23663+ struct drm_psb_cmdbuf_arg *arg,
23664+ struct ttm_buffer_object *raster_cmd_buffer,
23665+ struct ttm_buffer_object *ta_cmd_buffer,
23666+ struct ttm_buffer_object *oom_cmd_buffer,
23667+ struct psb_scene *scene,
23668+ enum psb_task_type task_type,
23669+ uint32_t engine,
23670+ uint32_t flags, struct psb_task **task_p)
23671+{
23672+ struct psb_task *task;
23673+ int ret;
23674+
23675+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
23676+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
23677+ return -EINVAL;
23678+ }
23679+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
23680+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
23681+ return -EINVAL;
23682+ }
23683+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
23684+ DRM_ERROR("Too many oom cmds %d.\n", arg->oom_size);
23685+ return -EINVAL;
23686+ }
23687+
23688+ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
23689+ if (!task)
23690+ return -ENOMEM;
23691+
23692+ atomic_set(&task->buf.done, 1);
23693+ task->engine = engine;
23694+ INIT_LIST_HEAD(&task->head);
23695+ INIT_LIST_HEAD(&task->buf.head);
23696+ if (ta_cmd_buffer && arg->ta_size != 0) {
23697+ task->ta_cmd_size = arg->ta_size;
23698+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
23699+ arg->ta_offset,
23700+ arg->ta_size,
23701+ PSB_ENGINE_TA, task->ta_cmds);
23702+ if (ret)
23703+ goto out_err;
23704+ }
23705+ if (raster_cmd_buffer) {
23706+ task->raster_cmd_size = arg->cmdbuf_size;
23707+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
23708+ arg->cmdbuf_offset,
23709+ arg->cmdbuf_size,
23710+ PSB_ENGINE_TA,
23711+ task->raster_cmds);
23712+ if (ret)
23713+ goto out_err;
23714+ }
23715+ if (oom_cmd_buffer && arg->oom_size != 0) {
23716+ task->oom_cmd_size = arg->oom_size;
23717+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
23718+ arg->oom_offset,
23719+ arg->oom_size,
23720+ PSB_ENGINE_TA,
23721+ task->oom_cmds);
23722+ if (ret)
23723+ goto out_err;
23724+ }
23725+ task->task_type = task_type;
23726+ task->flags = flags;
23727+ if (scene)
23728+ task->scene = psb_scene_ref(scene);
23729+
23730+ *task_p = task;
23731+ return 0;
23732+out_err:
23733+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
23734+ *task_p = NULL;
23735+ return ret;
23736+}
23737+
23738+int psb_cmdbuf_ta(struct drm_file *priv,
23739+ struct psb_context *context,
23740+ struct drm_psb_cmdbuf_arg *arg,
23741+ struct ttm_buffer_object *cmd_buffer,
23742+ struct ttm_buffer_object *ta_buffer,
23743+ struct ttm_buffer_object *oom_buffer,
23744+ struct psb_scene *scene,
23745+ struct psb_feedback_info *feedback,
23746+ struct psb_ttm_fence_rep *fence_arg)
23747+{
23748+ struct drm_device *dev = priv->minor->dev;
23749+ struct drm_psb_private *dev_priv = dev->dev_private;
23750+ struct ttm_fence_object *fence = NULL;
23751+ struct psb_task *task = NULL;
23752+ int ret;
23753+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23754+ uint32_t sequence;
23755+
23756+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
23757+
23758+ ret = psb_setup_task(dev, arg, cmd_buffer, ta_buffer,
23759+ oom_buffer, scene,
23760+ psb_ta_task, PSB_ENGINE_TA,
23761+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
23762+
23763+ if (ret)
23764+ goto out_err;
23765+
23766+ task->feedback = *feedback;
23767+ mutex_lock(&dev_priv->reset_mutex);
23768+
23769+ /*
23770+ * Hand the task over to the scheduler.
23771+ */
23772+
23773+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
23774+
23775+ task->ta_complete_action = PSB_RASTER;
23776+ task->raster_complete_action = PSB_RETURN;
23777+ sequence = task->sequence;
23778+
23779+ spin_lock_irq(&scheduler->lock);
23780+
23781+ list_add_tail(&task->head, &scheduler->ta_queue);
23782+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
23783+
23784+ psb_schedule_ta(dev_priv, scheduler);
23785+
23786+ /**
23787+ * From this point we may no longer dereference task,
23788+ * as the object it points to may be freed by another thread.
23789+ */
23790+
23791+ task = NULL;
23792+ spin_unlock_irq(&scheduler->lock);
23793+ mutex_unlock(&dev_priv->reset_mutex);
23794+
23795+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
23796+ arg->fence_flags,
23797+ &context->validate_list, fence_arg, &fence);
23798+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
23799+
23800+ if (fence) {
23801+ spin_lock_irq(&scheduler->lock);
23802+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA,
23803+ sequence, _PSB_FENCE_EXE_SHIFT, 1);
23804+ spin_unlock_irq(&scheduler->lock);
23805+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
23806+ }
23807+
23808+out_err:
23809+ if (ret && ret != -ERESTART)
23810+ DRM_ERROR("TA task queue job failed.\n");
23811+
23812+ if (fence) {
23813+#ifdef PSB_WAIT_FOR_TA_COMPLETION
23814+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
23815+ _PSB_FENCE_TYPE_TA_DONE);
23816+#ifdef PSB_BE_PARANOID
23817+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
23818+ _PSB_FENCE_TYPE_SCENE_DONE);
23819+#endif
23820+#endif
23821+ ttm_fence_object_unref(&fence);
23822+ }
23823+ return ret;
23824+}
23825+
23826+int psb_cmdbuf_raster(struct drm_file *priv,
23827+ struct psb_context *context,
23828+ struct drm_psb_cmdbuf_arg *arg,
23829+ struct ttm_buffer_object *cmd_buffer,
23830+ struct psb_ttm_fence_rep *fence_arg)
23831+{
23832+ struct drm_device *dev = priv->minor->dev;
23833+ struct drm_psb_private *dev_priv = dev->dev_private;
23834+ struct ttm_fence_object *fence = NULL;
23835+ struct psb_task *task = NULL;
23836+ int ret;
23837+ uint32_t sequence;
23838+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23839+
23840+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
23841+
23842+ ret = psb_setup_task(dev, arg, cmd_buffer, NULL, NULL,
23843+ NULL, psb_raster_task,
23844+ PSB_ENGINE_TA, 0, &task);
23845+
23846+ if (ret)
23847+ goto out_err;
23848+
23849+ /*
23850+ * Hand the task over to the scheduler.
23851+ */
23852+
23853+ mutex_lock(&dev_priv->reset_mutex);
23854+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
23855+ task->ta_complete_action = PSB_RASTER;
23856+ task->raster_complete_action = PSB_RETURN;
23857+ sequence = task->sequence;
23858+
23859+ spin_lock_irq(&scheduler->lock);
23860+ list_add_tail(&task->head, &scheduler->ta_queue);
23861+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
23862+ psb_schedule_ta(dev_priv, scheduler);
23863+
23864+ /**
23865+ * From this point we may no longer dereference task,
23866+ * as the object it points to may be freed by another thread.
23867+ */
23868+
23869+ task = NULL;
23870+ spin_unlock_irq(&scheduler->lock);
23871+ mutex_unlock(&dev_priv->reset_mutex);
23872+
23873+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
23874+ arg->fence_flags,
23875+ &context->validate_list, fence_arg, &fence);
23876+
23877+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
23878+ if (fence) {
23879+ spin_lock_irq(&scheduler->lock);
23880+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, sequence,
23881+ _PSB_FENCE_EXE_SHIFT, 1);
23882+ spin_unlock_irq(&scheduler->lock);
23883+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
23884+ }
23885+out_err:
23886+ if (ret && ret != -ERESTART)
23887+ DRM_ERROR("Raster task queue job failed.\n");
23888+
23889+ if (fence) {
23890+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
23891+ ttm_fence_object_wait(fence, 1, 1, fence->type);
23892+#endif
23893+ ttm_fence_object_unref(&fence);
23894+ }
23895+
23896+ return ret;
23897+}
23898+
23899+#ifdef FIX_TG_16
23900+
23901+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
23902+{
23903+ if (psb_2d_trylock(dev_priv)) {
23904+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
23905+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
23906+ _PSB_C2B_STATUS_BUSY))) {
23907+ return 0;
23908+ }
23909+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
23910+ psb_2D_irq_on(dev_priv);
23911+
23912+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
23913+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
23914+ (void) PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
23915+
23916+ psb_2d_atomic_unlock(dev_priv);
23917+ }
23918+
23919+ atomic_set(&dev_priv->ta_wait_2d, 1);
23920+ return -EBUSY;
23921+}
23922+
23923+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
23924+{
23925+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23926+
23927+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
23928+ psb_schedule_ta(dev_priv, scheduler);
23929+ if (atomic_read(&dev_priv->waiters_2d) != 0)
23930+ wake_up(&dev_priv->queue_2d);
23931+ }
23932+}
23933+
23934+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
23935+{
23936+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23937+ unsigned long irq_flags;
23938+
23939+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23940+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
23941+ atomic_set(&dev_priv->ta_wait_2d, 0);
23942+ psb_2D_irq_off(dev_priv);
23943+ psb_schedule_ta(dev_priv, scheduler);
23944+ if (atomic_read(&dev_priv->waiters_2d) != 0)
23945+ wake_up(&dev_priv->queue_2d);
23946+ }
23947+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23948+}
23949+
23950+/*
23951+ * 2D locking functions. Can't use a mutex since the trylock() and
23952+ * unlock() methods need to be accessible from interrupt context.
23953+ */
23954+
23955+int psb_2d_trylock(struct drm_psb_private *dev_priv)
23956+{
23957+ return atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0;
23958+}
23959+
23960+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
23961+{
23962+ atomic_set(&dev_priv->lock_2d, 0);
23963+ if (atomic_read(&dev_priv->waiters_2d) != 0)
23964+ wake_up(&dev_priv->queue_2d);
23965+}
23966+
23967+void psb_2d_unlock(struct drm_psb_private *dev_priv)
23968+{
23969+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
23970+ unsigned long irq_flags;
23971+
23972+ spin_lock_irqsave(&scheduler->lock, irq_flags);
23973+ psb_2d_atomic_unlock(dev_priv);
23974+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
23975+ psb_atomic_resume_ta_2d_idle(dev_priv);
23976+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
23977+}
23978+
23979+void psb_2d_lock(struct drm_psb_private *dev_priv)
23980+{
23981+ atomic_inc(&dev_priv->waiters_2d);
23982+ wait_event(dev_priv->queue_2d,
23983+ atomic_read(&dev_priv->ta_wait_2d) == 0);
23984+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
23985+ atomic_dec(&dev_priv->waiters_2d);
23986+}
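
The 2D lock is just a compare-and-swap on an atomic word, which is why, unlike a mutex, its trylock/unlock halves are safe from interrupt context: nothing ever sleeps while flipping the bit, and blocking callers wait on a separate waitqueue. An equivalent sketch in user-space C11 atomics (busy-waiting where the driver sleeps):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int lock_2d;

    /* Succeeds only for the caller that flips 0 -> 1. */
    static int trylock_2d(void)
    {
        int expected = 0;

        return atomic_compare_exchange_strong(&lock_2d, &expected, 1);
    }

    static void unlock_2d(void)
    {
        atomic_store(&lock_2d, 0);  /* the driver also wakes waiters */
    }

    static void lock_2d_blocking(void)
    {
        while (!trylock_2d())
            ;                       /* the driver sleeps on a waitqueue */
    }

    int main(void)
    {
        lock_2d_blocking();
        printf("2d engine owned, trylock now fails: %d\n", trylock_2d());
        unlock_2d();
        return 0;
    }
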
23987+
23988+#endif
23989diff -uNr a/drivers/gpu/drm/psb/psb_schedule.h b/drivers/gpu/drm/psb/psb_schedule.h
23990--- a/drivers/gpu/drm/psb/psb_schedule.h 1969-12-31 16:00:00.000000000 -0800
23991+++ b/drivers/gpu/drm/psb/psb_schedule.h 2009-04-07 13:28:38.000000000 -0700
23992@@ -0,0 +1,176 @@
23993+/**************************************************************************
23994+ * Copyright (c) 2007, Intel Corporation.
23995+ * All Rights Reserved.
23996+ *
23997+ * This program is free software; you can redistribute it and/or modify it
23998+ * under the terms and conditions of the GNU General Public License,
23999+ * version 2, as published by the Free Software Foundation.
24000+ *
24001+ * This program is distributed in the hope it will be useful, but WITHOUT
24002+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24003+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24004+ * more details.
24005+ *
24006+ * You should have received a copy of the GNU General Public License along with
24007+ * this program; if not, write to the Free Software Foundation, Inc.,
24008+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24009+ *
24010+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
24011+ * develop this driver.
24012+ *
24013+ **************************************************************************/
24014+/*
24015+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
24016+ */
24017+
24018+#ifndef _PSB_SCHEDULE_H_
24019+#define _PSB_SCHEDULE_H_
24020+
24021+#include <drm/drmP.h>
24022+
24023+struct psb_context;
24024+
24025+enum psb_task_type {
24026+ psb_ta_midscene_task,
24027+ psb_ta_task,
24028+ psb_raster_task,
24029+ psb_freescene_task
24030+};
24031+
24032+#define PSB_MAX_TA_CMDS 60
24033+#define PSB_MAX_RASTER_CMDS 60
24034+#define PSB_MAX_OOM_CMDS (DRM_PSB_NUM_RASTER_USE_REG * 2 + 6)
24035+
24036+struct psb_xhw_buf {
24037+ struct list_head head;
24038+ int copy_back;
24039+ atomic_t done;
24040+ struct drm_psb_xhw_arg arg;
24041+
24042+};
24043+
24044+struct psb_feedback_info {
24045+ struct ttm_buffer_object *bo;
24046+ struct page *page;
24047+ uint32_t offset;
24048+};
24049+
24050+struct psb_task {
24051+ struct list_head head;
24052+ struct psb_scene *scene;
24053+ struct psb_feedback_info feedback;
24054+ enum psb_task_type task_type;
24055+ uint32_t engine;
24056+ uint32_t sequence;
24057+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
24058+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
24059+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
24060+ uint32_t ta_cmd_size;
24061+ uint32_t raster_cmd_size;
24062+ uint32_t oom_cmd_size;
24063+ uint32_t feedback_offset;
24064+ uint32_t ta_complete_action;
24065+ uint32_t raster_complete_action;
24066+ uint32_t hw_cookie;
24067+ uint32_t flags;
24068+ uint32_t reply_flags;
24069+ uint32_t aborting;
24070+ struct psb_xhw_buf buf;
24071+};
24072+
24073+struct psb_hw_scene {
24074+ struct list_head head;
24075+ uint32_t context_number;
24076+
24077+ /*
24078+ * This pointer does not refcount the scene it points to,
24079+ * so we must make sure it is set to NULL before the
24080+ * corresponding scene is destroyed.
24081+ */
24082+
24083+ struct psb_scene *last_scene;
24084+};
24085+
24086+struct psb_scene;
24087+struct drm_psb_private;
24088+
24089+struct psb_scheduler_seq {
24090+ uint32_t sequence;
24091+ int reported;
24092+};
24093+
24094+struct psb_scheduler {
24095+ struct drm_device *dev;
24096+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
24097+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
24098+ struct mutex task_wq_mutex;
24099+ spinlock_t lock;
24100+ struct list_head hw_scenes;
24101+ struct list_head ta_queue;
24102+ struct list_head raster_queue;
24103+ struct list_head hp_raster_queue;
24104+ struct list_head task_done_queue;
24105+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
24106+ struct psb_task *feedback_task;
24107+ int ta_state;
24108+ struct psb_hw_scene *pending_hw_scene;
24109+ uint32_t pending_hw_scene_seq;
24110+ struct delayed_work wq;
24111+ struct psb_scene_pool *pool;
24112+ uint32_t idle_count;
24113+ int idle;
24114+ wait_queue_head_t idle_queue;
24115+ unsigned long ta_end_jiffies;
24116+ unsigned long total_ta_jiffies;
24117+ unsigned long raster_end_jiffies;
24118+ unsigned long total_raster_jiffies;
24119+};
24120+
24121+#define PSB_RF_FIRE_TA (1 << 0)
24122+#define PSB_RF_OOM (1 << 1)
24123+#define PSB_RF_OOM_REPLY (1 << 2)
24124+#define PSB_RF_TERMINATE (1 << 3)
24125+#define PSB_RF_TA_DONE (1 << 4)
24126+#define PSB_RF_FIRE_RASTER (1 << 5)
24127+#define PSB_RF_RASTER_DONE (1 << 6)
24128+#define PSB_RF_DEALLOC (1 << 7)
24129+
24130+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
24131+ int shareable,
24132+ uint32_t w, uint32_t h);
24133+extern uint32_t psb_scene_handle(struct psb_scene *scene);
24134+extern int psb_scheduler_init(struct drm_device *dev,
24135+ struct psb_scheduler *scheduler);
24136+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
24137+extern int psb_cmdbuf_ta(struct drm_file *priv,
24138+ struct psb_context *context,
24139+ struct drm_psb_cmdbuf_arg *arg,
24140+ struct ttm_buffer_object *cmd_buffer,
24141+ struct ttm_buffer_object *ta_buffer,
24142+ struct ttm_buffer_object *oom_buffer,
24143+ struct psb_scene *scene,
24144+ struct psb_feedback_info *feedback,
24145+ struct psb_ttm_fence_rep *fence_arg);
24146+extern int psb_cmdbuf_raster(struct drm_file *priv,
24147+ struct psb_context *context,
24148+ struct drm_psb_cmdbuf_arg *arg,
24149+ struct ttm_buffer_object *cmd_buffer,
24150+ struct psb_ttm_fence_rep *fence_arg);
24151+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
24152+ uint32_t status);
24153+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
24154+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
24155+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
24156+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
24157+
24158+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
24159+ int *lockup, int *idle);
24160+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
24161+ int error_condition);
24162+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
24163+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
24164+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
24165+extern int psb_extend_timeout(struct drm_psb_private *dev_priv,
24166+ uint32_t xhw_lockup);
24167+
24168+#endif
24169diff -uNr a/drivers/gpu/drm/psb/psb_setup.c b/drivers/gpu/drm/psb/psb_setup.c
24170--- a/drivers/gpu/drm/psb/psb_setup.c 1969-12-31 16:00:00.000000000 -0800
24171+++ b/drivers/gpu/drm/psb/psb_setup.c 2009-04-07 13:28:38.000000000 -0700
24172@@ -0,0 +1,18 @@
24173+#include <drm/drmP.h>
24174+#include <drm/drm.h>
24175+#include <drm/drm_crtc.h>
24176+#include <drm/drm_edid.h>
24177+#include "psb_intel_drv.h"
24178+#include "psb_drv.h"
24179+#include "psb_intel_reg.h"
24180+
24181+/* Fixed name */
24182+#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
24183+#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
24184+
24185+#include "psb_intel_i2c.c"
24186+#include "psb_intel_sdvo.c"
24187+#include "psb_intel_modes.c"
24188+#include "psb_intel_lvds.c"
24189+#include "psb_intel_dsi.c"
24190+#include "psb_intel_display.c"
24191diff -uNr a/drivers/gpu/drm/psb/psb_sgx.c b/drivers/gpu/drm/psb/psb_sgx.c
24192--- a/drivers/gpu/drm/psb/psb_sgx.c 1969-12-31 16:00:00.000000000 -0800
24193+++ b/drivers/gpu/drm/psb/psb_sgx.c 2009-04-07 13:28:38.000000000 -0700
24194@@ -0,0 +1,1869 @@
24195+/**************************************************************************
24196+ * Copyright (c) 2007, Intel Corporation.
24197+ * All Rights Reserved.
24198+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
24199+ * All Rights Reserved.
24200+ *
24201+ * This program is free software; you can redistribute it and/or modify it
24202+ * under the terms and conditions of the GNU General Public License,
24203+ * version 2, as published by the Free Software Foundation.
24204+ *
24205+ * This program is distributed in the hope it will be useful, but WITHOUT
24206+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24207+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24208+ * more details.
24209+ *
24210+ * You should have received a copy of the GNU General Public License along with
24211+ * this program; if not, write to the Free Software Foundation, Inc.,
24212+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24213+ *
24214+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
24215+ * develop this driver.
24216+ *
24217+ **************************************************************************/
24218+/*
24219+ */
24220+
24221+#include <drm/drmP.h>
24222+#include "psb_drv.h"
24223+#include "psb_drm.h"
24224+#include "psb_reg.h"
24225+#include "psb_scene.h"
24226+#include "psb_msvdx.h"
24227+#include "lnc_topaz.h"
24228+#include "ttm/ttm_bo_api.h"
24229+#include "ttm/ttm_execbuf_util.h"
24230+#include "ttm/ttm_userobj_api.h"
24231+#include "ttm/ttm_placement_common.h"
24232+#include "psb_sgx.h"
24233+
24234+static inline int psb_same_page(unsigned long offset,
24235+ unsigned long offset2)
24236+{
24237+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
24238+}
24239+
24240+static inline unsigned long psb_offset_end(unsigned long offset,
24241+ unsigned long end)
24242+{
24243+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
24244+ return (end < offset) ? end : offset;
24245+}
24246+
24247+static void psb_idle_engine(struct drm_device *dev, int engine);
24248+
24249+struct psb_dstbuf_cache {
24250+ unsigned int dst;
24251+ struct ttm_buffer_object *dst_buf;
24252+ unsigned long dst_offset;
24253+ uint32_t *dst_page;
24254+ unsigned int dst_page_offset;
24255+ struct ttm_bo_kmap_obj dst_kmap;
24256+ bool dst_is_iomem;
24257+};
24258+
24259+struct psb_validate_buffer {
24260+ struct ttm_validate_buffer base;
24261+ struct psb_validate_req req;
24262+ int ret;
24263+ struct psb_validate_arg __user *user_val_arg;
24264+ uint32_t flags;
24265+ uint32_t offset;
24266+ int po_correct;
24267+};
24268+
24269+
24270+
24271+#define PSB_REG_GRAN_SHIFT 2
24272+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
24273+#define PSB_MAX_REG 0x1000
24274+
24275+static const uint32_t disallowed_ranges[][2] = {
24276+ {0x0000, 0x0200},
24277+ {0x0208, 0x0214},
24278+ {0x021C, 0x0224},
24279+ {0x0230, 0x0234},
24280+ {0x0248, 0x024C},
24281+ {0x0254, 0x0358},
24282+ {0x0428, 0x0428},
24283+ {0x0430, 0x043C},
24284+ {0x0498, 0x04B4},
24285+ {0x04CC, 0x04D8},
24286+ {0x04E0, 0x07FC},
24287+ {0x0804, 0x0A14},
24288+ {0x0A4C, 0x0A58},
24289+ {0x0A68, 0x0A80},
24290+ {0x0AA0, 0x0B1C},
24291+ {0x0B2C, 0x0CAC},
24292+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
24293+};
24294+
24295+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
24296+ (PSB_REG_GRANULARITY *
24297+ (sizeof(uint32_t) << 3))];
24298+
24299+static inline int psb_disallowed(uint32_t reg)
24300+{
24301+ reg >>= PSB_REG_GRAN_SHIFT;
24302+ return (psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0;
24303+}
24304+
24305+void psb_init_disallowed(void)
24306+{
24307+ int i;
24308+ uint32_t reg, tmp;
24309+ static int initialized;
24310+
24311+ if (initialized)
24312+ return;
24313+
24314+ initialized = 1;
24315+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
24316+
24317+ for (i = 0;
24318+ i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
24319+ ++i) {
24320+ for (reg = disallowed_ranges[i][0];
24321+ reg <= disallowed_ranges[i][1]; reg += 4) {
24322+ tmp = reg >> 2;
24323+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
24324+ }
24325+ }
24326+}
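+
+/*
+ * Worked example: offset 0x0208 lies in a disallowed range above.
+ * 0x0208 >> PSB_REG_GRAN_SHIFT = 0x82, so psb_init_disallowed() sets
+ * bit (0x82 & 31) = 2 of psb_disallowed_regs[0x82 >> 5] = [4], and
+ * psb_disallowed(0x0208) returns non-zero.
+ */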
24327+
24328+static int psb_memcpy_check(uint32_t *dst, const uint32_t *src,
24329+ uint32_t size)
24330+{
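+ /*
+ * size is in bytes; each command is an (offset, value) dword pair,
+ * hence the shift by 3 to get the pair count.
+ */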
24331+ size >>= 3;
24332+ while (size--) {
24333+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
24334+ DRM_ERROR("Forbidden SGX register access: "
24335+ "0x%04x.\n", *src);
24336+ return -EPERM;
24337+ }
24338+ *dst++ = *src++;
24339+ *dst++ = *src++;
24340+ }
24341+ return 0;
24342+}
24343+
24344+int psb_2d_wait_available(struct drm_psb_private *dev_priv,
24345+ unsigned size)
24346+{
24347+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
24348+ int ret = 0;
24349+
24350+retry:
24351+ if (avail < size) {
24352+#if 0
24353+ /*
24354+ * We'd ideally like to have an IRQ-driven event here.
24355+ */
24356+
24357+ psb_2D_irq_on(dev_priv);
24358+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
24359+ ((avail =
24360+ PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
24361+ psb_2D_irq_off(dev_priv);
24362+ if (ret == 0)
24363+ return 0;
24364+ if (ret == -EINTR) {
24365+ ret = 0;
24366+ goto retry;
24367+ }
24368+#else
24369+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
24370+ goto retry;
24371+#endif
24372+ }
24373+ return ret;
24374+}
24375+
24376+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
24377+ unsigned size)
24378+{
24379+ int ret = 0;
24380+ int i;
24381+ unsigned submit_size;
24382+
24383+ while (size > 0) {
24384+ submit_size = (size < 0x60) ? size : 0x60;
24385+ size -= submit_size;
24386+ ret = psb_2d_wait_available(dev_priv, submit_size);
24387+ if (ret)
24388+ return ret;
24389+
24390+ submit_size <<= 2;
24391+ mutex_lock(&dev_priv->reset_mutex);
24392+ for (i = 0; i < submit_size; i += 4) {
24393+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
24394+ }
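+ /*
+ * Read back the last dword written; the read-back is presumably
+ * there to flush posted writes before releasing the lock.
+ */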
24395+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
24396+ mutex_unlock(&dev_priv->reset_mutex);
24397+ }
24398+ return 0;
24399+}
24400+
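+/*
+ * Emit a fence value: the sequence number is written as the fill
+ * colour of a 1x1 solid-fill blit into the communication page,
+ * bracketed by a fence header and a flush.
+ */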
24401+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
24402+{
24403+ uint32_t buffer[8];
24404+ uint32_t *bufp = buffer;
24405+ int ret;
24406+
24407+ *bufp++ = PSB_2D_FENCE_BH;
24408+
24409+ *bufp++ = PSB_2D_DST_SURF_BH |
24410+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
24411+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
24412+
24413+ *bufp++ = PSB_2D_BLIT_BH |
24414+ PSB_2D_ROT_NONE |
24415+ PSB_2D_COPYORDER_TL2BR |
24416+ PSB_2D_DSTCK_DISABLE |
24417+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
24418+
24419+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
24420+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
24421+ (0 << PSB_2D_DST_YSTART_SHIFT);
24422+ *bufp++ =
24423+ (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
24424+
24425+ *bufp++ = PSB_2D_FLUSH_BH;
24426+
24427+ psb_2d_lock(dev_priv);
24428+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
24429+ psb_2d_unlock(dev_priv);
24430+
24431+ if (!ret)
24432+ psb_schedule_watchdog(dev_priv);
24433+ return ret;
24434+}
24435+
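+/*
+ * Copy "pages" pages from src_offset to dst_offset with the 2D
+ * blitter, treating each page as one PAGE_SIZE-stride line. A
+ * non-zero direction copies from the last page, bottom-right to
+ * top-left, presumably so that overlapping ranges copy safely.
+ */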
24436+int psb_emit_2d_copy_blit(struct drm_device *dev,
24437+ uint32_t src_offset,
24438+ uint32_t dst_offset, uint32_t pages,
24439+ int direction)
24440+{
24441+ uint32_t cur_pages;
24442+ struct drm_psb_private *dev_priv = dev->dev_private;
24443+ uint32_t buf[10];
24444+ uint32_t *bufp;
24445+ uint32_t xstart;
24446+ uint32_t ystart;
24447+ uint32_t blit_cmd;
24448+ uint32_t pg_add;
24449+ int ret = 0;
24450+
24451+ if (!dev_priv)
24452+ return 0;
24453+
24454+ if (direction) {
24455+ pg_add = (pages - 1) << PAGE_SHIFT;
24456+ src_offset += pg_add;
24457+ dst_offset += pg_add;
24458+ }
24459+
24460+ blit_cmd = PSB_2D_BLIT_BH |
24461+ PSB_2D_ROT_NONE |
24462+ PSB_2D_DSTCK_DISABLE |
24463+ PSB_2D_SRCCK_DISABLE |
24464+ PSB_2D_USE_PAT |
24465+ PSB_2D_ROP3_SRCCOPY |
24466+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
24467+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
24468+
24469+ psb_2d_lock(dev_priv);
24470+ while (pages > 0) {
24471+ cur_pages = pages;
24472+ if (cur_pages > 2048)
24473+ cur_pages = 2048;
24474+ pages -= cur_pages;
24475+ ystart = (direction) ? cur_pages - 1 : 0;
24476+
24477+ bufp = buf;
24478+ *bufp++ = PSB_2D_FENCE_BH;
24479+
24480+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
24481+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
24482+ *bufp++ = dst_offset;
24483+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
24484+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
24485+ *bufp++ = src_offset;
24486+ *bufp++ =
24487+ PSB_2D_SRC_OFF_BH | (xstart <<
24488+ PSB_2D_SRCOFF_XSTART_SHIFT) |
24489+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
24490+ *bufp++ = blit_cmd;
24491+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
24492+ (ystart << PSB_2D_DST_YSTART_SHIFT);
24493+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
24494+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
24495+
24496+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
24497+ if (ret)
24498+ goto out;
24499+ pg_add =
24500+ (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
24501+ src_offset += pg_add;
24502+ dst_offset += pg_add;
24503+ }
24504+out:
24505+ psb_2d_unlock(dev_priv);
24506+ return ret;
24507+}
24508+
24509+void psb_init_2d(struct drm_psb_private *dev_priv)
24510+{
24511+ spin_lock_init(&dev_priv->sequence_lock);
24512+ psb_reset(dev_priv, 1);
24513+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
24514+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
24515+ (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
24516+}
24517+
24518+int psb_idle_2d(struct drm_device *dev)
24519+{
24520+ struct drm_psb_private *dev_priv = dev->dev_private;
24521+ unsigned long _end = jiffies + DRM_HZ;
24522+ int busy = 0;
24523+
24524+ /*
24525+ * Idle the 2D engine: drain the FIFO, then wait for the blitter.
24526+ */
24527+
24528+ if (dev_priv->engine_lockup_2d)
24529+ return -EBUSY;
24530+
24531+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
24532+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) ==
24533+ 0))
24534+ goto out;
24535+
24536+ do {
24537+ busy =
24538+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
24539+ } while (busy && !time_after_eq(jiffies, _end));
24540+
24541+ if (busy)
24542+ busy =
24543+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
24544+ if (busy)
24545+ goto out;
24546+
24547+ do {
24548+ busy =
24549+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
24550+ _PSB_C2B_STATUS_BUSY)
24551+ != 0);
24552+ } while (busy && !time_after_eq(jiffies, _end));
24553+ if (busy)
24554+ busy =
24555+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
24556+ _PSB_C2B_STATUS_BUSY)
24557+ != 0);
24558+
24559+out:
24560+ if (busy)
24561+ dev_priv->engine_lockup_2d = 1;
24562+
24563+ return (busy) ? -EBUSY : 0;
24564+}
24565+
24566+int psb_idle_3d(struct drm_device *dev)
24567+{
24568+ struct drm_psb_private *dev_priv = dev->dev_private;
24569+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
24570+ int ret;
24571+
24572+ ret = wait_event_timeout(scheduler->idle_queue,
24573+ psb_scheduler_finished(dev_priv),
24574+ DRM_HZ * 10);
24575+
24576+ return (ret < 1) ? -EBUSY : 0;
24577+}
24578+
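+/*
+ * If user-space presumed the buffer's GPU offset correctly (or the
+ * buffer is in system memory), its relocations can be skipped;
+ * otherwise clear PSB_USE_PRESUMED in the user's validate arg,
+ * apparently so the stale guess is not reused.
+ */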
24579+static int psb_check_presumed(struct psb_validate_req *req,
24580+ struct ttm_buffer_object *bo,
24581+ struct psb_validate_arg __user *data,
24582+ int *presumed_ok)
24583+{
24584+ struct psb_validate_req __user *user_req = &(data->d.req);
24585+
24586+ *presumed_ok = 0;
24587+
24588+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
24589+ *presumed_ok = 1;
24590+ return 0;
24591+ }
24592+
24593+ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
24594+ return 0;
24595+
24596+ if (bo->offset == req->presumed_gpu_offset) {
24597+ *presumed_ok = 1;
24598+ return 0;
24599+ }
24600+
24601+ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
24602+ &user_req->presumed_flags);
24603+}
24604+
24605+
24606+static void psb_unreference_buffers(struct psb_context *context)
24607+{
24608+ struct ttm_validate_buffer *entry, *next;
24609+ struct psb_validate_buffer *vbuf;
24610+ struct list_head *list = &context->validate_list;
24611+
24612+ list_for_each_entry_safe(entry, next, list, head) {
24613+ vbuf =
24614+ container_of(entry, struct psb_validate_buffer, base);
24615+ list_del(&entry->head);
24616+ ttm_bo_unref(&entry->bo);
24617+ }
24618+
24619+ list = &context->kern_validate_list;
24620+
24621+ list_for_each_entry_safe(entry, next, list, head) {
24622+ vbuf =
24623+ container_of(entry, struct psb_validate_buffer, base);
24624+ list_del(&entry->head);
24625+ ttm_bo_unref(&entry->bo);
24626+ }
24627+}
24628+
24629+
24630+static int psb_lookup_validate_buffer(struct drm_file *file_priv,
24631+ uint64_t data,
24632+ struct psb_validate_buffer *item)
24633+{
24634+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
24635+
24636+ item->user_val_arg =
24637+ (struct psb_validate_arg __user *) (unsigned long) data;
24638+
24639+ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
24640+ sizeof(item->req)) != 0)) {
24641+ DRM_ERROR("Lookup copy fault.\n");
24642+ return -EFAULT;
24643+ }
24644+
24645+ item->base.bo =
24646+ ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
24647+
24648+ if (unlikely(item->base.bo == NULL)) {
24649+ DRM_ERROR("Bo lookup fault.\n");
24650+ return -EINVAL;
24651+ }
24652+
24653+ return 0;
24654+}
24655+
24656+static int psb_reference_buffers(struct drm_file *file_priv,
24657+ uint64_t data,
24658+ struct psb_context *context)
24659+{
24660+ struct psb_validate_buffer *item;
24661+ int ret;
24662+
24663+ while (likely(data != 0)) {
24664+ if (unlikely(context->used_buffers >=
24665+ PSB_NUM_VALIDATE_BUFFERS)) {
24666+ DRM_ERROR("Too many buffers "
24667+ "on validate list.\n");
24668+ ret = -EINVAL;
24669+ goto out_err0;
24670+ }
24671+
24672+ item = &context->buffers[context->used_buffers];
24673+
24674+ ret = psb_lookup_validate_buffer(file_priv, data, item);
24675+ if (unlikely(ret != 0))
24676+ goto out_err0;
24677+
24678+ item->base.reserved = 0;
24679+ list_add_tail(&item->base.head, &context->validate_list);
24680+ context->used_buffers++;
24681+ data = item->req.next;
24682+ }
24683+ return 0;
24684+
24685+out_err0:
24686+ psb_unreference_buffers(context);
24687+ return ret;
24688+}
24689+
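+/*
+ * Compute the fence types this buffer must carry from its GPU access
+ * flags. If the buffer already carries a fence of another class, or
+ * one with types the new fence will not signal, wait for it before
+ * updating the proposed placement flags.
+ */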
24690+static int
24691+psb_placement_fence_type(struct ttm_buffer_object *bo,
24692+ uint64_t set_val_flags,
24693+ uint64_t clr_val_flags,
24694+ uint32_t new_fence_class,
24695+ uint32_t *new_fence_type)
24696+{
24697+ int ret;
24698+ uint32_t n_fence_type;
24699+ uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
24700+ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
24701+ struct ttm_fence_object *old_fence;
24702+ uint32_t old_fence_type;
24703+
24704+ if (unlikely
24705+ (!(set_val_flags &
24706+ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
24707+ DRM_ERROR
24708+ ("GPU access type (read / write) is not indicated.\n");
24709+ return -EINVAL;
24710+ }
24711+
24712+ ret = ttm_bo_check_placement(bo, set_flags, clr_flags);
24713+ if (unlikely(ret != 0))
24714+ return ret;
24715+
24716+ switch (new_fence_class) {
24717+ case PSB_ENGINE_TA:
24718+ n_fence_type = _PSB_FENCE_TYPE_EXE |
24719+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
24720+ if (set_val_flags & PSB_BO_FLAG_TA)
24721+ n_fence_type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
24722+ if (set_val_flags & PSB_BO_FLAG_COMMAND)
24723+ n_fence_type &=
24724+ ~(_PSB_FENCE_TYPE_RASTER_DONE |
24725+ _PSB_FENCE_TYPE_TA_DONE);
24726+ if (set_val_flags & PSB_BO_FLAG_SCENE)
24727+ n_fence_type |= _PSB_FENCE_TYPE_SCENE_DONE;
24728+ if (set_val_flags & PSB_BO_FLAG_FEEDBACK)
24729+ n_fence_type |= _PSB_FENCE_TYPE_FEEDBACK;
24730+ break;
24731+ default:
24732+ n_fence_type = _PSB_FENCE_TYPE_EXE;
24733+ }
24734+
24735+ *new_fence_type = n_fence_type;
24736+ old_fence = (struct ttm_fence_object *) bo->sync_obj;
24737+ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;
24738+
24739+ if (old_fence && ((new_fence_class != old_fence->fence_class) ||
24740+ ((n_fence_type ^ old_fence_type) &
24741+ old_fence_type))) {
24742+ ret = ttm_bo_wait(bo, 0, 1, 0);
24743+ if (unlikely(ret != 0))
24744+ return ret;
24745+ }
24746+
24747+ bo->proposed_flags = (bo->proposed_flags | set_flags)
24748+ & ~clr_flags & TTM_PL_MASK_MEMTYPE;
24749+
24750+ return 0;
24751+}
24752+
24753+int psb_validate_kernel_buffer(struct psb_context *context,
24754+ struct ttm_buffer_object *bo,
24755+ uint32_t fence_class,
24756+ uint64_t set_flags, uint64_t clr_flags)
24757+{
24758+ struct psb_validate_buffer *item;
24759+ uint32_t cur_fence_type;
24760+ int ret;
24761+
24762+ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
24763+ DRM_ERROR("Out of free validation buffer entries for "
24764+ "kernel buffer validation.\n");
24765+ return -ENOMEM;
24766+ }
24767+
24768+ item = &context->buffers[context->used_buffers];
24769+ item->user_val_arg = NULL;
24770+ item->base.reserved = 0;
24771+
24772+ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
24773+ if (unlikely(ret != 0))
24774+ goto out_unlock;
24775+
24776+ mutex_lock(&bo->mutex);
24777+ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
24778+ &cur_fence_type);
24779+ if (unlikely(ret != 0)) {
24780+ ttm_bo_unreserve(bo);
24781+ goto out_unlock;
24782+ }
24783+
24784+ item->base.bo = ttm_bo_reference(bo);
24785+ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
24786+ item->base.reserved = 1;
24787+
24788+ list_add_tail(&item->base.head, &context->kern_validate_list);
24789+ context->used_buffers++;
24790+
24791+ ret = ttm_buffer_object_validate(bo, 1, 0);
24792+ if (unlikely(ret != 0))
24793+ goto out_unlock;
24794+
24795+ item->offset = bo->offset;
24796+ item->flags = bo->mem.flags;
24797+ context->fence_types |= cur_fence_type;
24798+
24799+out_unlock:
24800+ mutex_unlock(&bo->mutex);
24801+ return ret;
24802+}
24803+
24804+
24805+static int psb_validate_buffer_list(struct drm_file *file_priv,
24806+ uint32_t fence_class,
24807+ struct psb_context *context,
24808+ int *po_correct)
24809+{
24810+ struct psb_validate_buffer *item;
24811+ struct ttm_buffer_object *bo;
24812+ int ret;
24813+ struct psb_validate_req *req;
24814+ uint32_t fence_types = 0;
24815+ uint32_t cur_fence_type;
24816+ struct ttm_validate_buffer *entry;
24817+ struct list_head *list = &context->validate_list;
24818+
24819+ *po_correct = 1;
24820+
24821+ list_for_each_entry(entry, list, head) {
24822+ item =
24823+ container_of(entry, struct psb_validate_buffer, base);
24824+ bo = entry->bo;
24825+ item->ret = 0;
24826+ req = &item->req;
24827+
24828+ mutex_lock(&bo->mutex);
24829+ ret = psb_placement_fence_type(bo,
24830+ req->set_flags,
24831+ req->clear_flags,
24832+ fence_class,
24833+ &cur_fence_type);
24834+ if (unlikely(ret != 0))
24835+ goto out_err;
24836+
24837+ ret = ttm_buffer_object_validate(bo, 1, 0);
24838+
24839+ if (unlikely(ret != 0))
24840+ goto out_err;
24841+
24842+ fence_types |= cur_fence_type;
24843+ entry->new_sync_obj_arg = (void *)
24844+ (unsigned long) cur_fence_type;
24845+
24846+ item->offset = bo->offset;
24847+ item->flags = bo->mem.flags;
24848+ mutex_unlock(&bo->mutex);
24849+
24850+ ret =
24851+ psb_check_presumed(&item->req, bo, item->user_val_arg,
24852+ &item->po_correct);
24853+ if (unlikely(ret != 0))
24854+ goto out_err;
24855+
24856+ if (unlikely(!item->po_correct))
24857+ *po_correct = 0;
24858+
24859+ item++;
24860+ }
24861+
24862+ context->fence_types |= fence_types;
24863+
24864+ return 0;
24865+out_err:
24866+ mutex_unlock(&bo->mutex);
24867+ item->ret = ret;
24868+ return ret;
24869+}
24870+
24871+
24872+int
24873+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t *regs,
24874+ unsigned int cmds)
24875+{
24876+ int i;
24877+
24878+ /*
24879+ * cmds is in 32-bit words; each write is an (offset, value) pair.
24880+ */
24881+
24882+ cmds >>= 1;
24883+ for (i = 0; i < cmds; ++i) {
24884+ PSB_WSGX32(regs[1], regs[0]);
24885+ regs += 2;
24886+ }
24887+ wmb();
24888+ return 0;
24889+}
24890+
24891+/*
24892+ * Security: Block user-space writing to MMU mapping registers.
24893+ * This is important for security and brings Poulsbo DRM
24894+ * up to par with the other DRM drivers. With this in place,
24895+ * user-space cannot map arbitrary memory pages to graphics
24896+ * memory. Note, however, that all user-space processes still
24897+ * have access to every buffer object mapped to graphics
24898+ * memory.
24899+ */
24900+
24901+int
24902+psb_submit_copy_cmdbuf(struct drm_device *dev,
24903+ struct ttm_buffer_object *cmd_buffer,
24904+ unsigned long cmd_offset,
24905+ unsigned long cmd_size,
24906+ int engine, uint32_t *copy_buffer)
24907+{
24908+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
24909+ struct drm_psb_private *dev_priv = dev->dev_private;
24910+ unsigned long cmd_page_offset =
24911+ cmd_offset - (cmd_offset & PAGE_MASK);
24912+ unsigned long cmd_next;
24913+ struct ttm_bo_kmap_obj cmd_kmap;
24914+ uint32_t *cmd_page;
24915+ unsigned cmds;
24916+ bool is_iomem;
24917+ int ret = 0;
24918+
24919+ if (cmd_size == 0)
24920+ return 0;
24921+
24922+ if (engine == PSB_ENGINE_2D)
24923+ psb_2d_lock(dev_priv);
24924+
24925+ do {
24926+ cmd_next = psb_offset_end(cmd_offset, cmd_end);
24927+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
24928+ 1, &cmd_kmap);
24929+
24930+ if (ret) {
24931+ if (engine == PSB_ENGINE_2D)
24932+ psb_2d_unlock(dev_priv);
24933+ return ret;
24934+ }
24935+ cmd_page = ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem);
24936+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
24937+ cmds = (cmd_next - cmd_offset) >> 2;
24938+
24939+ switch (engine) {
24940+ case PSB_ENGINE_2D:
24941+ ret =
24942+ psb_2d_submit(dev_priv,
24943+ cmd_page + cmd_page_offset,
24944+ cmds);
24945+ break;
24946+ case PSB_ENGINE_RASTERIZER:
24947+ case PSB_ENGINE_TA:
24948+ case PSB_ENGINE_HPRAST:
24949+ PSB_DEBUG_GENERAL("Reg copy.\n");
24950+ ret = psb_memcpy_check(copy_buffer,
24951+ cmd_page + cmd_page_offset,
24952+ cmds * sizeof(uint32_t));
24953+ copy_buffer += cmds;
24954+ break;
24955+ default:
24956+ ret = -EINVAL;
24957+ }
24958+ ttm_bo_kunmap(&cmd_kmap);
24959+ if (ret)
24960+ break;
24961+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
24962+
24963+ if (engine == PSB_ENGINE_2D)
24964+ psb_2d_unlock(dev_priv);
24965+
24966+ return ret;
24967+}
24968+
24969+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
24970+{
24971+ if (dst_cache->dst_page) {
24972+ ttm_bo_kunmap(&dst_cache->dst_kmap);
24973+ dst_cache->dst_page = NULL;
24974+ }
24975+ dst_cache->dst_buf = NULL;
24976+ dst_cache->dst = ~0;
24977+}
24978+
24979+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
24980+ struct psb_validate_buffer *buffers,
24981+ unsigned int dst,
24982+ unsigned long dst_offset)
24983+{
24984+ int ret;
24985+
24986+ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
24987+
24988+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
24989+ psb_clear_dstbuf_cache(dst_cache);
24990+ dst_cache->dst = dst;
24991+ dst_cache->dst_buf = buffers[dst].base.bo;
24992+ }
24993+
24994+ if (unlikely
24995+ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
24996+ DRM_ERROR("Relocation destination out of bounds.\n");
24997+ return -EINVAL;
24998+ }
24999+
25000+ if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
25001+ NULL == dst_cache->dst_page) {
25002+ if (NULL != dst_cache->dst_page) {
25003+ ttm_bo_kunmap(&dst_cache->dst_kmap);
25004+ dst_cache->dst_page = NULL;
25005+ }
25006+
25007+ ret =
25008+ ttm_bo_kmap(dst_cache->dst_buf,
25009+ dst_offset >> PAGE_SHIFT, 1,
25010+ &dst_cache->dst_kmap);
25011+ if (ret) {
25012+ DRM_ERROR("Could not map destination buffer for "
25013+ "relocation.\n");
25014+ return ret;
25015+ }
25016+
25017+ dst_cache->dst_page =
25018+ ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
25019+ &dst_cache->dst_is_iomem);
25020+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
25021+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
25022+ }
25023+ return 0;
25024+}
25025+
25026+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
25027+ uint32_t fence_class,
25028+ const struct drm_psb_reloc *reloc,
25029+ struct psb_validate_buffer *buffers,
25030+ int num_buffers,
25031+ struct psb_dstbuf_cache *dst_cache,
25032+ int no_wait, int interruptible)
25033+{
25034+ uint32_t val;
25035+ uint32_t background;
25036+ unsigned int index;
25037+ int ret;
25038+ unsigned int shift;
25039+ unsigned int align_shift;
25040+ struct ttm_buffer_object *reloc_bo;
25041+
25042+
25043+ PSB_DEBUG_GENERAL("Reloc type %d\n"
25044+ "\t where 0x%04x\n"
25045+ "\t buffer 0x%04x\n"
25046+ "\t mask 0x%08x\n"
25047+ "\t shift 0x%08x\n"
25048+ "\t pre_add 0x%08x\n"
25049+ "\t background 0x%08x\n"
25050+ "\t dst_buffer 0x%08x\n"
25051+ "\t arg0 0x%08x\n"
25052+ "\t arg1 0x%08x\n",
25053+ reloc->reloc_op,
25054+ reloc->where,
25055+ reloc->buffer,
25056+ reloc->mask,
25057+ reloc->shift,
25058+ reloc->pre_add,
25059+ reloc->background,
25060+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
25061+
25062+ if (unlikely(reloc->buffer >= num_buffers)) {
25063+ DRM_ERROR("Illegal relocation buffer %d.\n",
25064+ reloc->buffer);
25065+ return -EINVAL;
25066+ }
25067+
25068+ if (buffers[reloc->buffer].po_correct)
25069+ return 0;
25070+
25071+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
25072+ DRM_ERROR
25073+ ("Illegal destination buffer for relocation %d.\n",
25074+ reloc->dst_buffer);
25075+ return -EINVAL;
25076+ }
25077+
25078+ ret =
25079+ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
25080+ reloc->where << 2);
25081+ if (ret)
25082+ return ret;
25083+
25084+ reloc_bo = buffers[reloc->buffer].base.bo;
25085+
25086+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
25087+ DRM_ERROR("Illegal relocation offset add.\n");
25088+ return -EINVAL;
25089+ }
25090+
25091+ switch (reloc->reloc_op) {
25092+ case PSB_RELOC_OP_OFFSET:
25093+ val = reloc_bo->offset + reloc->pre_add;
25094+ break;
25095+ case PSB_RELOC_OP_2D_OFFSET:
25096+ val = reloc_bo->offset + reloc->pre_add -
25097+ dev_priv->mmu_2d_offset;
25098+ if (unlikely(val >= PSB_2D_SIZE)) {
25099+ DRM_ERROR("2D relocation out of bounds\n");
25100+ return -EINVAL;
25101+ }
25102+ break;
25103+ case PSB_RELOC_OP_PDS_OFFSET:
25104+ val =
25105+ reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
25106+ if (unlikely
25107+ (val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
25108+ DRM_ERROR("PDS relocation out of bounds\n");
25109+ return -EINVAL;
25110+ }
25111+ break;
25112+ default:
25113+ DRM_ERROR("Unimplemented relocation.\n");
25114+ return -EINVAL;
25115+ }
25116+
25117+ shift =
25118+ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
25119+ align_shift =
25120+ (reloc->shift
25121+ & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
25122+
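+ /*
+ * Shift the offset into field position and merge it into the
+ * background dword under the relocation mask.
+ */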
25123+ val = ((val >> align_shift) << shift);
25124+ index = reloc->where - dst_cache->dst_page_offset;
25125+
25126+ background = reloc->background;
25127+ val = (background & ~reloc->mask) | (val & reloc->mask);
25128+ dst_cache->dst_page[index] = val;
25129+
25130+ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
25131+ reloc->dst_buffer, index,
25132+ dst_cache->dst_page[index]);
25133+
25134+ return 0;
25135+}
25136+
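+/*
+ * Throttle the number of simultaneously mapped relocation pages:
+ * accounts the pages and returns non-zero only if the total stays
+ * under PSB_MAX_RELOC_PAGES; psb_fixup_relocs() waits on
+ * rel_mapped_queue until this succeeds.
+ */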
25137+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
25138+ unsigned int num_pages)
25139+{
25140+ int ret = 0;
25141+
25142+ spin_lock(&dev_priv->reloc_lock);
25143+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
25144+ dev_priv->rel_mapped_pages += num_pages;
25145+ ret = 1;
25146+ }
25147+ spin_unlock(&dev_priv->reloc_lock);
25148+ return ret;
25149+}
25150+
25151+static int psb_fixup_relocs(struct drm_file *file_priv,
25152+ uint32_t fence_class,
25153+ unsigned int num_relocs,
25154+ unsigned int reloc_offset,
25155+ uint32_t reloc_handle,
25156+ struct psb_context *context,
25157+ int no_wait, int interruptible)
25158+{
25159+ struct drm_device *dev = file_priv->minor->dev;
25160+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
25161+ struct drm_psb_private *dev_priv =
25162+ (struct drm_psb_private *) dev->dev_private;
25163+ struct ttm_buffer_object *reloc_buffer = NULL;
25164+ unsigned int reloc_num_pages;
25165+ unsigned int reloc_first_page;
25166+ unsigned int reloc_last_page;
25167+ struct psb_dstbuf_cache dst_cache;
25168+ struct drm_psb_reloc *reloc;
25169+ struct ttm_bo_kmap_obj reloc_kmap;
25170+ bool reloc_is_iomem;
25171+ int count;
25172+ int ret = 0;
25173+ int registered = 0;
25174+ uint32_t num_buffers = context->used_buffers;
25175+
25176+ if (num_relocs == 0)
25177+ return 0;
25178+
25179+ memset(&dst_cache, 0, sizeof(dst_cache));
25180+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
25181+
25182+ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
25183+ if (!reloc_buffer)
25184+ goto out;
25185+
25186+ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
25187+ DRM_ERROR("Relocation buffer was not on validate list.\n");
25188+ ret = -EINVAL;
25189+ goto out;
25190+ }
25191+
25192+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
25193+ reloc_last_page =
25194+ (reloc_offset +
25195+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
25196+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
25197+ reloc_offset &= ~PAGE_MASK;
25198+
25199+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
25200+ DRM_ERROR("Relocation buffer is too large\n");
25201+ ret = -EINVAL;
25202+ goto out;
25203+ }
25204+
25205+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
25206+ (registered =
25207+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
25208+
25209+ if (ret == -EINTR) {
25210+ ret = -ERESTART;
25211+ goto out;
25212+ }
25213+ if (ret) {
25214+ DRM_ERROR("Error waiting for space to map "
25215+ "relocation buffer.\n");
25216+ goto out;
25217+ }
25218+
25219+ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
25220+ reloc_num_pages, &reloc_kmap);
25221+
25222+ if (ret) {
25223+ DRM_ERROR("Could not map relocation buffer.\n"
25224+ "\tReloc buffer id 0x%08x.\n"
25225+ "\tReloc first page %d.\n"
25226+ "\tReloc num pages %d.\n",
25227+ reloc_handle, reloc_first_page, reloc_num_pages);
25228+ goto out;
25229+ }
25230+
25231+ reloc = (struct drm_psb_reloc *)
25232+ ((unsigned long)
25233+ ttm_kmap_obj_virtual(&reloc_kmap,
25234+ &reloc_is_iomem) + reloc_offset);
25235+
25236+ for (count = 0; count < num_relocs; ++count) {
25237+ ret = psb_apply_reloc(dev_priv, fence_class,
25238+ reloc, context->buffers,
25239+ num_buffers, &dst_cache,
25240+ no_wait, interruptible);
25241+ if (ret)
25242+ goto out1;
25243+ reloc++;
25244+ }
25245+
25246+out1:
25247+ ttm_bo_kunmap(&reloc_kmap);
25248+out:
25249+ if (registered) {
25250+ spin_lock(&dev_priv->reloc_lock);
25251+ dev_priv->rel_mapped_pages -= reloc_num_pages;
25252+ spin_unlock(&dev_priv->reloc_lock);
25253+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
25254+ }
25255+
25256+ psb_clear_dstbuf_cache(&dst_cache);
25257+ if (reloc_buffer)
25258+ ttm_bo_unref(&reloc_buffer);
25259+ return ret;
25260+}
25261+
25262+void psb_fence_or_sync(struct drm_file *file_priv,
25263+ uint32_t engine,
25264+ uint32_t fence_types,
25265+ uint32_t fence_flags,
25266+ struct list_head *list,
25267+ struct psb_ttm_fence_rep *fence_arg,
25268+ struct ttm_fence_object **fence_p)
25269+{
25270+ struct drm_device *dev = file_priv->minor->dev;
25271+ struct drm_psb_private *dev_priv = psb_priv(dev);
25272+ struct ttm_fence_device *fdev = &dev_priv->fdev;
25273+ int ret;
25274+ struct ttm_fence_object *fence;
25275+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
25276+ uint32_t handle;
25277+
25278+ ret = ttm_fence_user_create(fdev, tfile,
25279+ engine, fence_types,
25280+ TTM_FENCE_FLAG_EMIT, &fence, &handle);
25281+ if (ret) {
25282+
25283+ /*
25284+ * Fence creation failed.
25285+ * Fall back to synchronous operation and idle the engine.
25286+ */
25287+
25288+ psb_idle_engine(dev, engine);
25289+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
25290+
25291+ /*
25292+ * Communicate to user-space that
25293+ * fence creation has failed and that
25294+ * the engine is idle.
25295+ */
25296+
25297+ fence_arg->handle = ~0;
25298+ fence_arg->error = ret;
25299+ }
25300+
25301+ ttm_eu_backoff_reservation(list);
25302+ if (fence_p)
25303+ *fence_p = NULL;
25304+ return;
25305+ }
25306+
25307+ ttm_eu_fence_buffer_objects(list, fence);
25308+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
25309+ struct ttm_fence_info info = ttm_fence_get_info(fence);
25310+ fence_arg->handle = handle;
25311+ fence_arg->fence_class = ttm_fence_class(fence);
25312+ fence_arg->fence_type = ttm_fence_types(fence);
25313+ fence_arg->signaled_types = info.signaled_types;
25314+ fence_arg->error = 0;
25315+ } else {
25316+ ret =
25317+ ttm_ref_object_base_unref(tfile, handle,
25318+ ttm_fence_type);
25319+ BUG_ON(ret);
25320+ }
25321+
25322+ if (fence_p)
25323+ *fence_p = fence;
25324+ else if (fence)
25325+ ttm_fence_object_unref(&fence);
25326+}
25327+
25328+
25329+
25330+static int psb_cmdbuf_2d(struct drm_file *priv,
25331+ struct list_head *validate_list,
25332+ uint32_t fence_type,
25333+ struct drm_psb_cmdbuf_arg *arg,
25334+ struct ttm_buffer_object *cmd_buffer,
25335+ struct psb_ttm_fence_rep *fence_arg)
25336+{
25337+ struct drm_device *dev = priv->minor->dev;
25338+ int ret;
25339+
25340+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
25341+ arg->cmdbuf_size, PSB_ENGINE_2D,
25342+ NULL);
25343+ if (ret)
25344+ goto out_unlock;
25345+
25346+ psb_fence_or_sync(priv, PSB_ENGINE_2D, fence_type,
25347+ arg->fence_flags, validate_list, fence_arg,
25348+ NULL);
25349+
25350+ mutex_lock(&cmd_buffer->mutex);
25351+ if (cmd_buffer->sync_obj != NULL)
25352+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
25353+ mutex_unlock(&cmd_buffer->mutex);
25354+out_unlock:
25355+ return ret;
25356+}
25357+
25358+#if 0
25359+static int psb_dump_page(struct ttm_buffer_object *bo,
25360+ unsigned int page_offset, unsigned int num)
25361+{
25362+ struct ttm_bo_kmap_obj kmobj;
25363+ int is_iomem;
25364+ uint32_t *p;
25365+ int ret;
25366+ unsigned int i;
25367+
25368+ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
25369+ if (ret)
25370+ return ret;
25371+
25372+ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
25373+ for (i = 0; i < num; ++i)
25374+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
25375+
25376+ ttm_bo_kunmap(&kmobj);
25377+ return 0;
25378+}
25379+#endif
25380+
25381+static void psb_idle_engine(struct drm_device *dev, int engine)
25382+{
25383+ struct drm_psb_private *dev_priv =
25384+ (struct drm_psb_private *) dev->dev_private;
25385+ uint32_t dummy;
25386+ unsigned long dummy2;
25387+
25388+ switch (engine) {
25389+ case PSB_ENGINE_2D:
25390+
25391+ /*
25392+ * Make sure we flush 2D properly using a dummy
25393+ * fence sequence emit.
25394+ */
25395+
25396+ (void) psb_fence_emit_sequence(&dev_priv->fdev,
25397+ PSB_ENGINE_2D, 0,
25398+ &dummy, &dummy2);
25399+ psb_2d_lock(dev_priv);
25400+ (void) psb_idle_2d(dev);
25401+ psb_2d_unlock(dev_priv);
25402+ break;
25403+ case PSB_ENGINE_TA:
25404+ case PSB_ENGINE_RASTERIZER:
25405+ case PSB_ENGINE_HPRAST:
25406+ (void) psb_idle_3d(dev);
25407+ break;
25408+ default:
25409+
25410+ /*
25411+ * FIXME: Insert video engine idle command here.
25412+ */
25413+
25414+ break;
25415+ }
25416+}
25417+
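+/*
+ * Copy per-buffer validation results back to user-space. On error,
+ * first back off all reservations; for the retryable errors -EAGAIN,
+ * -EINTR and -ERESTART nothing is copied back.
+ */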
25418+static int psb_handle_copyback(struct drm_device *dev,
25419+ struct psb_context *context,
25420+ int ret)
25421+{
25422+ int err = ret;
25423+ struct ttm_validate_buffer *entry;
25424+ struct psb_validate_arg arg;
25425+ struct list_head *list = &context->validate_list;
25426+
25427+ if (ret) {
25428+ ttm_eu_backoff_reservation(list);
25429+ ttm_eu_backoff_reservation(&context->kern_validate_list);
25430+ }
25431+
25432+
25433+ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
25434+ list_for_each_entry(entry, list, head) {
25435+ struct psb_validate_buffer *vbuf =
25436+ container_of(entry, struct psb_validate_buffer,
25437+ base);
25438+ arg.handled = 1;
25439+ arg.ret = vbuf->ret;
25440+ if (!arg.ret) {
25441+ struct ttm_buffer_object *bo = entry->bo;
25442+ mutex_lock(&bo->mutex);
25443+ arg.d.rep.gpu_offset = bo->offset;
25444+ arg.d.rep.placement = bo->mem.flags;
25445+ arg.d.rep.fence_type_mask =
25446+ (uint32_t) (unsigned long)
25447+ entry->new_sync_obj_arg;
25448+ mutex_unlock(&bo->mutex);
25449+ }
25450+
25451+ if (__copy_to_user(vbuf->user_val_arg,
25452+ &arg, sizeof(arg)))
25453+ err = -EFAULT;
25454+
25455+ if (arg.ret)
25456+ break;
25457+ }
25458+ }
25459+
25460+ return err;
25461+}
25462+
25463+
25464+static int psb_cmdbuf_video(struct drm_file *priv,
25465+ struct list_head *validate_list,
25466+ uint32_t fence_type,
25467+ struct drm_psb_cmdbuf_arg *arg,
25468+ struct ttm_buffer_object *cmd_buffer,
25469+ struct psb_ttm_fence_rep *fence_arg)
25470+{
25471+ struct drm_device *dev = priv->minor->dev;
25472+ struct ttm_fence_object *fence;
25473+ int ret;
25474+
25475+ /*
25476+ * FIXME: Check this; it doesn't seem right. Fencing should happen AFTER
25477+ * command submission, and drm_psb_idle should idle the MSVDX completely.
25478+ */
25479+ ret =
25480+ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
25481+ arg->cmdbuf_size, NULL);
25482+ if (ret)
25483+ return ret;
25484+
25485+
25486+ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */
25487+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
25488+ arg->fence_flags, validate_list, fence_arg,
25489+ &fence);
25490+
25491+
25492+ ttm_fence_object_unref(&fence);
25493+ mutex_lock(&cmd_buffer->mutex);
25494+ if (cmd_buffer->sync_obj != NULL)
25495+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
25496+ mutex_unlock(&cmd_buffer->mutex);
25497+ return 0;
25498+}
25499+
25500+static int psb_feedback_buf(struct ttm_object_file *tfile,
25501+ struct psb_context *context,
25502+ uint32_t feedback_ops,
25503+ uint32_t handle,
25504+ uint32_t offset,
25505+ uint32_t feedback_breakpoints,
25506+ uint32_t feedback_size,
25507+ struct psb_feedback_info *feedback)
25508+{
25509+ struct ttm_buffer_object *bo;
25510+ struct page *page;
25511+ uint32_t page_no;
25512+ uint32_t page_offset;
25513+ int ret;
25514+
25515+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
25516+ DRM_ERROR("Illegal feedback op.\n");
25517+ return -EINVAL;
25518+ }
25519+
25520+ if (feedback_breakpoints != 0) {
25521+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
25522+ return -EINVAL;
25523+ }
25524+
25525+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
25526+ DRM_ERROR("Feedback buffer size too small.\n");
25527+ return -EINVAL;
25528+ }
25529+
25530+ page_offset = offset & ~PAGE_MASK;
25531+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
25532+ < page_offset) {
25533+ DRM_ERROR("Illegal feedback buffer alignment.\n");
25534+ return -EINVAL;
25535+ }
25536+
25537+ bo = ttm_buffer_object_lookup(tfile, handle);
25538+ if (unlikely(bo == NULL)) {
25539+ DRM_ERROR("Failed looking up feedback buffer.\n");
25540+ return -EINVAL;
25541+ }
25542+
25543+
25544+ ret = psb_validate_kernel_buffer(context, bo,
25545+ PSB_ENGINE_TA,
25546+ TTM_PL_FLAG_SYSTEM |
25547+ TTM_PL_FLAG_CACHED |
25548+ PSB_GPU_ACCESS_WRITE |
25549+ PSB_BO_FLAG_FEEDBACK,
25550+ TTM_PL_MASK_MEM &
25551+ ~(TTM_PL_FLAG_SYSTEM |
25552+ TTM_PL_FLAG_CACHED));
25553+ if (unlikely(ret != 0))
25554+ goto out_unref;
25555+
25556+ page_no = offset >> PAGE_SHIFT;
25557+ if (unlikely(page_no >= bo->num_pages)) {
25558+ ret = -EINVAL;
25559+ DRM_ERROR("Illegal feedback buffer offset.\n");
25560+ goto out_unref;
25561+ }
25562+
25563+ if (unlikely(bo->ttm == NULL)) {
25564+ ret = -EINVAL;
25565+ DRM_ERROR("Vistest buffer without TTM.\n");
25566+ goto out_unref;
25567+ }
25568+
25569+ page = ttm_tt_get_page(bo->ttm, page_no);
25570+ if (unlikely(page == NULL)) {
25571+ ret = -ENOMEM;
25572+ goto out_unref;
25573+ }
25574+
25575+ feedback->page = page;
25576+ feedback->offset = page_offset;
25577+
25578+ /*
25579+ * Note: bo reference transferred.
25580+ */
25581+
25582+ feedback->bo = bo;
25583+ return 0;
25584+
25585+out_unref:
25586+ ttm_bo_unref(&bo);
25587+ return ret;
25588+}
25589+
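+/*
+ * Gate power to the requested islands via the punit PSB_PWRGT_CNT
+ * register. The bit groups written below (0x3 graphics, 0x30 video
+ * encode, 0xc video decode) appear to mirror the PSB_PWRGT_*_MASK
+ * values that psb_up_island_power() clears and then polls in
+ * PSB_PWRGT_STS until the islands report powered up.
+ */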
25590+void psb_down_island_power(struct drm_device *dev, int islands)
25591+{
25592+ u32 pwr_cnt = 0;
25593+ pwr_cnt = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_CNT);
25594+ if (islands & PSB_GRAPHICS_ISLAND)
25595+ pwr_cnt |= 0x3;
25596+ if (islands & PSB_VIDEO_ENC_ISLAND)
25597+ pwr_cnt |= 0x30;
25598+ if (islands & PSB_VIDEO_DEC_ISLAND)
25599+ pwr_cnt |= 0xc;
25600+ MSG_WRITE32(PSB_PUNIT_PORT, PSB_PWRGT_CNT, pwr_cnt);
25601+}
25602+void psb_up_island_power(struct drm_device *dev, int islands)
25603+{
25604+ u32 pwr_cnt = 0;
25605+ u32 count = 5;
25606+ u32 pwr_sts = 0;
25607+ u32 pwr_mask = 0;
25608+ pwr_cnt = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_CNT);
25609+ if (islands & PSB_GRAPHICS_ISLAND) {
25610+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
25611+ pwr_mask |= PSB_PWRGT_GFX_MASK;
25612+ }
25613+ if (islands & PSB_VIDEO_ENC_ISLAND) {
25614+ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
25615+ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
25616+ }
25617+ if (islands & PSB_VIDEO_DEC_ISLAND) {
25618+ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
25619+ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
25620+ }
25621+ MSG_WRITE32(PSB_PUNIT_PORT, PSB_PWRGT_CNT, pwr_cnt);
25622+ while (count--) {
25623+ pwr_sts = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_STS);
25624+ if ((pwr_sts & pwr_mask) == 0)
25625+ break;
25626+ else
25627+ udelay(10);
25628+ }
25629+}
25630+
25631+static int psb_power_down_sgx(struct drm_device *dev)
25632+{
25633+ struct drm_psb_private *dev_priv =
25634+ (struct drm_psb_private *)dev->dev_private;
25635+
25636+ PSB_DEBUG_PM("power down sgx\n");
25637+
25638+#ifdef OSPM_STAT
25639+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
25640+ dev_priv->gfx_d0i0_time += jiffies - dev_priv->gfx_last_mode_change;
25641+ else
25642+ PSB_DEBUG_PM("power down: illegal previous power state\n");
25643+ dev_priv->gfx_last_mode_change = jiffies;
25644+ dev_priv->gfx_d0i3_cnt++;
25645+#endif
25646+
25647+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
25648+ dev_priv->graphics_state = PSB_PWR_STATE_D0i3;
25649+ psb_down_island_power(dev, PSB_GRAPHICS_ISLAND);
25650+ return 0;
25651+}
25652+static int psb_power_up_sgx(struct drm_device *dev)
25653+{
25654+ struct drm_psb_private *dev_priv =
25655+ (struct drm_psb_private *)dev->dev_private;
25656+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) !=
25657+ PSB_PWR_STATE_D0i3)
25658+ return -EINVAL;
25659+
25660+ PSB_DEBUG_PM("power up sgx\n");
25661+ if (unlikely(PSB_D_PM & drm_psb_debug))
25662+ dump_stack();
25663+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
25664+
25665+ psb_up_island_power(dev, PSB_GRAPHICS_ISLAND);
25666+
25667+ /*
25668+ * The SGX loses its register contents.
25669+ * Restore BIF registers. The MMU page tables are
25670+ * "normal" pages, so their contents should be kept.
25671+ */
25672+
25673+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
25674+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
25675+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
25676+ PSB_RSGX32(PSB_CR_BIF_BANK1);
25677+
25678+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
25679+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
25680+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
25681+
25682+ /*
25683+ * 2D base registers.
25684+ */
25685+ psb_init_2d(dev_priv);
25686+ /*
25687+ * Persistent 3D base registers and USSE base registers.
25688+ */
25689+
25690+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
25691+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
25692+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
25693+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
25694+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
25695+ /*
25696+ * Now, re-initialize the 3D engine.
25697+ */
25698+ if (dev_priv->xhw_on)
25699+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
25700+
25701+ psb_scheduler_ta_mem_check(dev_priv);
25702+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
25703+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
25704+ PSB_TA_MEM_FLAG_TA |
25705+ PSB_TA_MEM_FLAG_RASTER |
25706+ PSB_TA_MEM_FLAG_HOSTA |
25707+ PSB_TA_MEM_FLAG_HOSTD |
25708+ PSB_TA_MEM_FLAG_INIT,
25709+ dev_priv->ta_mem->ta_memory->offset,
25710+ dev_priv->ta_mem->hw_data->offset,
25711+ dev_priv->ta_mem->hw_cookie);
25712+ }
25713+
25714+#ifdef OSPM_STAT
25715+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
25716+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
25717+ else
25718+ PSB_DEBUG_PM("power up: illegal previous power state\n");
25719+ dev_priv->gfx_last_mode_change = jiffies;
25720+ dev_priv->gfx_d0i0_cnt++;
25721+#endif
25722+
25723+ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
25724+
25725+ return 0;
25726+}
25727+
25728+int psb_try_power_down_sgx(struct drm_device *dev)
25729+{
25730+ struct drm_psb_private *dev_priv =
25731+ (struct drm_psb_private *)dev->dev_private;
25732+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
25733+ int ret;
25734+ if (!down_write_trylock(&dev_priv->sgx_sem))
25735+ return -EBUSY;
25736+ /* Try to lock the 2D engine first, since the FB driver usually uses it. */
25737+ if (!psb_2d_trylock(dev_priv)) {
25738+ ret = -EBUSY;
25739+ goto out_err0;
25740+ }
25741+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) !=
25742+ PSB_PWR_STATE_D0i0) {
25743+ ret = -EINVAL;
25744+ goto out_err1;
25745+ }
25746+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY) ||
25747+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) != 0)) {
25748+ ret = -EBUSY;
25749+ goto out_err1;
25750+ }
25751+ if (!scheduler->idle ||
25752+ !list_empty(&scheduler->raster_queue) ||
25753+ !list_empty(&scheduler->ta_queue) ||
25754+ !list_empty(&scheduler->hp_raster_queue)) {
25755+ ret = -EBUSY;
25756+ goto out_err1;
25757+ }
25758+ /*flush_scheduled_work();*/
25759+ ret = psb_power_down_sgx(dev);
25760+out_err1:
25761+ psb_2d_atomic_unlock(dev_priv);
25762+out_err0:
25763+ up_write(&dev_priv->sgx_sem);
25764+ return ret;
25765+}
25766+/* Check the power state; if a device is asleep, wake it up. */
25767+void psb_check_power_state(struct drm_device *dev, int devices)
25768+{
25769+ struct pci_dev *pdev = dev->pdev;
25770+ struct drm_psb_private *dev_priv = dev->dev_private;
25771+ down(&dev_priv->pm_sem);
25772+ switch (pdev->current_state) {
25773+ case PCI_D3hot:
25774+ dev->driver->pci_driver.resume(pdev);
25775+ break;
25776+ default:
25777+
25778+ if (devices & PSB_DEVICE_SGX) {
25779+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) ==
25780+ PSB_PWR_STATE_D0i3) {
25781+ /* power up the SGX */
25782+ psb_power_up_sgx(dev);
25783+ }
25784+ } else if (devices & PSB_DEVICE_MSVDX) {
25785+ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) ==
25786+ PSB_PWR_STATE_D0i3) {
25787+ psb_power_up_msvdx(dev);
25788+ } else {
25789+ dev_priv->msvdx_last_action = jiffies;
25790+ }
25791+ }
25792+ break;
25793+ }
25794+ up(&dev_priv->pm_sem);
25795+}
25796+
25797+void psb_init_ospm(struct drm_psb_private *dev_priv)
25798+{
25799+ static int init;
25800+ if (!init) {
25801+ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
25802+ init_rwsem(&dev_priv->sgx_sem);
25803+ sema_init(&dev_priv->pm_sem, 1);
25804+#ifdef OSPM_STAT
25805+ dev_priv->gfx_last_mode_change = jiffies;
25806+ dev_priv->gfx_d0i0_time = 0;
25807+ dev_priv->gfx_d0i3_time = 0;
25808+ dev_priv->gfx_d3_time = 0;
25809+#endif
25810+ init = 1;
25811+ }
25812+}
25813+
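+/*
+ * Main command submission ioctl. In outline: take the TTM read lock,
+ * wake the SGX when a 2D/TA/rasterizer engine is targeted, reference
+ * and reserve the user's buffer list, validate placements, fix up
+ * relocations if any presumed offset was wrong, dispatch to the
+ * engine-specific submit path, then copy the fence and per-buffer
+ * results back to user-space and drop all references.
+ */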
25814+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
25815+ struct drm_file *file_priv)
25816+{
25817+ struct drm_psb_cmdbuf_arg *arg = data;
25818+ int ret = 0;
25819+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
25820+ struct ttm_buffer_object *cmd_buffer = NULL;
25821+ struct ttm_buffer_object *ta_buffer = NULL;
25822+ struct ttm_buffer_object *oom_buffer = NULL;
25823+ struct psb_ttm_fence_rep fence_arg;
25824+ struct drm_psb_scene user_scene;
25825+ struct psb_scene_pool *pool = NULL;
25826+ struct psb_scene *scene = NULL;
25827+ struct drm_psb_private *dev_priv =
25828+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
25829+ int engine;
25830+ struct psb_feedback_info feedback;
25831+ int po_correct;
25832+ struct psb_context *context;
25833+ unsigned num_buffers;
25834+
25835+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
25836+
25837+ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
25838+ if (unlikely(ret != 0))
25839+ return ret;
25840+
25841+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA)
25842+ || (arg->engine == PSB_ENGINE_RASTERIZER)) {
25843+ down_read(&dev_priv->sgx_sem);
25844+ psb_check_power_state(dev, PSB_DEVICE_SGX);
25845+ }
25846+
25847+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
25848+ if (unlikely(ret != 0))
25849+ goto out_err0;
25850+
25851+
25852+ context = &dev_priv->context;
25853+ context->used_buffers = 0;
25854+ context->fence_types = 0;
25855+ BUG_ON(!list_empty(&context->validate_list));
25856+ BUG_ON(!list_empty(&context->kern_validate_list));
25857+
25858+ if (unlikely(context->buffers == NULL)) {
25859+ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
25860+ sizeof(*context->buffers));
25861+ if (unlikely(context->buffers == NULL)) {
25862+ ret = -ENOMEM;
25863+ goto out_err1;
25864+ }
25865+ }
25866+
25867+ ret = psb_reference_buffers(file_priv,
25868+ arg->buffer_list,
25869+ context);
25870+
25871+ if (unlikely(ret != 0))
25872+ goto out_err1;
25873+
25874+ context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
25875+
25876+ ret = ttm_eu_reserve_buffers(&context->validate_list,
25877+ context->val_seq);
25878+ if (unlikely(ret != 0)) {
25879+ goto out_err2;
25880+ }
25881+
25882+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
25883+ PSB_ENGINE_TA : arg->engine;
25884+
25885+ ret = psb_validate_buffer_list(file_priv, engine,
25886+ context, &po_correct);
25887+ if (unlikely(ret != 0))
25888+ goto out_err3;
25889+
25890+ if (!po_correct) {
25891+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
25892+ arg->reloc_offset,
25893+ arg->reloc_handle, context, 0, 1);
25894+ if (unlikely(ret != 0))
25895+ goto out_err3;
25896+
25897+ }
25898+
25899+ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
25900+ if (unlikely(cmd_buffer == NULL)) {
25901+ ret = -EINVAL;
25902+ goto out_err4;
25903+ }
25904+
25905+ switch (arg->engine) {
25906+ case PSB_ENGINE_2D:
25907+ ret = psb_cmdbuf_2d(file_priv, &context->validate_list,
25908+ context->fence_types, arg, cmd_buffer,
25909+ &fence_arg);
25910+ if (unlikely(ret != 0))
25911+ goto out_err4;
25912+ break;
25913+ case PSB_ENGINE_VIDEO:
25914+ psb_check_power_state(dev, PSB_DEVICE_MSVDX);
25915+ ret = psb_cmdbuf_video(file_priv, &context->validate_list,
25916+ context->fence_types, arg,
25917+ cmd_buffer, &fence_arg);
25918+
25919+ if (unlikely(ret != 0))
25920+ goto out_err4;
25921+ break;
25922+ case LNC_ENGINE_ENCODE:
25923+ psb_check_power_state(dev, PSB_DEVICE_TOPAZ);
25924+ ret = lnc_cmdbuf_video(file_priv, &context->validate_list,
25925+ context->fence_types, arg,
25926+ cmd_buffer, &fence_arg);
25927+ if (unlikely(ret != 0))
25928+ goto out_err4;
25929+ break;
25930+ case PSB_ENGINE_RASTERIZER:
25931+ ret = psb_cmdbuf_raster(file_priv, context,
25932+ arg, cmd_buffer, &fence_arg);
25933+ if (unlikely(ret != 0))
25934+ goto out_err4;
25935+ break;
25936+ case PSB_ENGINE_TA:
25937+ if (arg->ta_handle == arg->cmdbuf_handle) {
25938+ ta_buffer = ttm_bo_reference(cmd_buffer);
25939+ } else {
25940+ ta_buffer =
25941+ ttm_buffer_object_lookup(tfile,
25942+ arg->ta_handle);
25943+ if (!ta_buffer) {
25944+ ret = -EINVAL;
25945+ goto out_err4;
25946+ }
25947+ }
25948+ if (arg->oom_size != 0) {
25949+ if (arg->oom_handle == arg->cmdbuf_handle) {
25950+ oom_buffer = ttm_bo_reference(cmd_buffer);
25951+ } else {
25952+ oom_buffer =
25953+ ttm_buffer_object_lookup(
25954+ tfile,
25955+ arg->oom_handle);
25956+ if (!oom_buffer) {
25957+ ret = -EINVAL;
25958+ goto out_err4;
25959+ }
25960+ }
25961+ }
25962+
25963+ ret = copy_from_user(&user_scene, (void __user *)
25964+ ((unsigned long) arg->scene_arg),
25965+ sizeof(user_scene));
25966+ if (ret)
25967+ goto out_err4;
25968+
25969+ if (!user_scene.handle_valid) {
25970+ pool = psb_scene_pool_alloc(file_priv, 0,
25971+ user_scene.num_buffers,
25972+ user_scene.w,
25973+ user_scene.h);
25974+ if (!pool) {
25975+ ret = -ENOMEM;
25976+ goto out_err4; /* unwind fully; out_err0 would leak cmdbuf_mutex */
25977+ }
25978+
25979+ user_scene.handle = psb_scene_pool_handle(pool);
25980+ user_scene.handle_valid = 1;
25981+ ret = copy_to_user((void __user *)
25982+ ((unsigned long) arg->scene_arg),
25983+ &user_scene,
25984+ sizeof(user_scene));
25985+
25986+ if (ret)
25987+ goto out_err4;
25988+ } else {
25989+ pool =
25990+ psb_scene_pool_lookup(file_priv,
25991+ user_scene.handle, 1);
25992+ if (!pool) {
25993+ ret = -EINVAL;
25994+ goto out_err4;
25995+ }
25996+ }
25997+
25998+ ret = psb_validate_scene_pool(context, pool,
25999+ user_scene.w,
26000+ user_scene.h,
26001+ arg->ta_flags &
26002+ PSB_TA_FLAG_LASTPASS, &scene);
26003+ if (ret)
26004+ goto out_err4;
26005+
26006+ memset(&feedback, 0, sizeof(feedback));
26007+ if (arg->feedback_ops) {
26008+ ret = psb_feedback_buf(tfile,
26009+ context,
26010+ arg->feedback_ops,
26011+ arg->feedback_handle,
26012+ arg->feedback_offset,
26013+ arg->feedback_breakpoints,
26014+ arg->feedback_size,
26015+ &feedback);
26016+ if (ret)
26017+ goto out_err4;
26018+ }
26019+ ret = psb_cmdbuf_ta(file_priv, context,
26020+ arg, cmd_buffer, ta_buffer,
26021+ oom_buffer, scene, &feedback,
26022+ &fence_arg);
26023+ if (ret)
26024+ goto out_err4;
26025+ break;
26026+ default:
 26027+		DRM_ERROR("Unimplemented command submission mechanism (%x).\n",
 26028+			  arg->engine);
26030+ ret = -EINVAL;
26031+ goto out_err4;
26032+ }
26033+
26034+ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
26035+ ret = copy_to_user((void __user *)
26036+ ((unsigned long) arg->fence_arg),
26037+ &fence_arg, sizeof(fence_arg));
26038+ }
26039+
26040+out_err4:
26041+ if (scene)
26042+ psb_scene_unref(&scene);
26043+ if (pool)
26044+ psb_scene_pool_unref(&pool);
26045+ if (cmd_buffer)
26046+ ttm_bo_unref(&cmd_buffer);
26047+ if (ta_buffer)
26048+ ttm_bo_unref(&ta_buffer);
26049+ if (oom_buffer)
26050+ ttm_bo_unref(&oom_buffer);
26051+out_err3:
26052+ ret = psb_handle_copyback(dev, context, ret);
26053+out_err2:
26054+ psb_unreference_buffers(context);
26055+out_err1:
26056+ mutex_unlock(&dev_priv->cmdbuf_mutex);
26057+out_err0:
26058+ ttm_read_unlock(&dev_priv->ttm_lock);
26059+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA)
26060+ || (arg->engine == PSB_ENGINE_RASTERIZER))
26061+ up_read(&dev_priv->sgx_sem);
26062+ return ret;
26063+}
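/*
 * A minimal sketch of the staged-unwind pattern the ioctl above relies on:
 * each out_err label releases exactly what was acquired before the failure
 * point, so every engine branch can bail with a single goto. All names in
 * this sketch are illustrative, not part of the driver.
 */
#include <linux/slab.h>

static int example_staged_submit(void)
{
	void *stage1, *stage2;
	int ret = 0;

	stage1 = kmalloc(64, GFP_KERNEL);
	if (!stage1)
		return -ENOMEM;

	stage2 = kmalloc(64, GFP_KERNEL);
	if (!stage2) {
		ret = -ENOMEM;
		goto out_err0;		/* undo stage 1 only */
	}

	/* ... work that may also 'goto out_err1' on failure ... */

out_err1:
	kfree(stage2);
out_err0:
	kfree(stage1);
	return ret;
}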
26064diff -uNr a/drivers/gpu/drm/psb/psb_sgx.h b/drivers/gpu/drm/psb/psb_sgx.h
26065--- a/drivers/gpu/drm/psb/psb_sgx.h 1969-12-31 16:00:00.000000000 -0800
26066+++ b/drivers/gpu/drm/psb/psb_sgx.h 2009-04-07 13:28:38.000000000 -0700
26067@@ -0,0 +1,41 @@
26068+/*
26069+ * Copyright (c) 2008, Intel Corporation
26070+ *
26071+ * Permission is hereby granted, free of charge, to any person obtaining a
26072+ * copy of this software and associated documentation files (the "Software"),
26073+ * to deal in the Software without restriction, including without limitation
26074+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
26075+ * and/or sell copies of the Software, and to permit persons to whom the
26076+ * Software is furnished to do so, subject to the following conditions:
26077+ *
26078+ * The above copyright notice and this permission notice (including the next
26079+ * paragraph) shall be included in all copies or substantial portions of the
26080+ * Software.
26081+ *
26082+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26083+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26084+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26085+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26086+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26087+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26088+ * SOFTWARE.
26089+ *
26090+ * Authors:
26091+ * Eric Anholt <eric@anholt.net>
26092+ *
26093+ **/
26094+#ifndef _PSB_SGX_H_
26095+#define _PSB_SGX_H_
26096+
26097+extern int psb_submit_video_cmdbuf(struct drm_device *dev,
26098+ struct ttm_buffer_object *cmd_buffer,
26099+ unsigned long cmd_offset,
26100+ unsigned long cmd_size,
26101+ struct ttm_fence_object *fence);
26102+
26103+extern int psb_2d_wait_available(struct drm_psb_private *dev_priv,
26104+ unsigned size);
26105+extern int drm_idle_check_interval;
26106+extern int drm_psb_ospm;
26107+
26108+#endif
26109diff -uNr a/drivers/gpu/drm/psb/psb_ttm_glue.c b/drivers/gpu/drm/psb/psb_ttm_glue.c
26110--- a/drivers/gpu/drm/psb/psb_ttm_glue.c 1969-12-31 16:00:00.000000000 -0800
26111+++ b/drivers/gpu/drm/psb/psb_ttm_glue.c 2009-04-07 13:28:38.000000000 -0700
26112@@ -0,0 +1,345 @@
26113+/**************************************************************************
26114+ * Copyright (c) 2008, Intel Corporation.
26115+ * All Rights Reserved.
26116+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
26117+ * All Rights Reserved.
26118+ *
26119+ * This program is free software; you can redistribute it and/or modify it
26120+ * under the terms and conditions of the GNU General Public License,
26121+ * version 2, as published by the Free Software Foundation.
26122+ *
26123+ * This program is distributed in the hope it will be useful, but WITHOUT
26124+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26125+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
26126+ * more details.
26127+ *
26128+ * You should have received a copy of the GNU General Public License along with
26129+ * this program; if not, write to the Free Software Foundation, Inc.,
26130+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
26131+ *
26132+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
26133+ * develop this driver.
26134+ *
26135+ **************************************************************************/
26138+
26139+#include <drm/drmP.h>
26140+#include "psb_drv.h"
26141+#include "ttm/ttm_userobj_api.h"
26142+
26143+static struct vm_operations_struct psb_ttm_vm_ops;
26144+
26145+int psb_open(struct inode *inode, struct file *filp)
26146+{
26147+ struct drm_file *file_priv;
26148+ struct drm_psb_private *dev_priv;
26149+ struct psb_fpriv *psb_fp;
26150+ int ret;
26151+
26152+ ret = drm_open(inode, filp);
26153+ if (unlikely(ret))
26154+ return ret;
26155+
26156+ psb_fp = drm_calloc(1, sizeof(*psb_fp), DRM_MEM_FILES);
26157+
 26158+	if (unlikely(psb_fp == NULL)) {
 26159+		ret = -ENOMEM;
 26160+		goto out_err0;
 26161+	}
26160+
26161+ file_priv = (struct drm_file *) filp->private_data;
26162+ dev_priv = psb_priv(file_priv->minor->dev);
 26163+
26165+ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
26166+ PSB_FILE_OBJECT_HASH_ORDER);
 26167+	if (unlikely(psb_fp->tfile == NULL)) {
 26168+		ret = -ENOMEM;
 26169+		goto out_err1;
 26170+	}
26169+
26170+ file_priv->driver_priv = psb_fp;
26171+
26172+ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
26173+ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
26174+
26175+ return 0;
26176+
26177+out_err1:
26178+ drm_free(psb_fp, sizeof(*psb_fp), DRM_MEM_FILES);
26179+out_err0:
26180+ (void) drm_release(inode, filp);
26181+ return ret;
26182+}
26183+
26184+int psb_release(struct inode *inode, struct file *filp)
26185+{
26186+ struct drm_file *file_priv;
26187+ struct psb_fpriv *psb_fp;
26188+ struct drm_psb_private *dev_priv;
26189+ int ret;
26190+
26191+ file_priv = (struct drm_file *) filp->private_data;
26192+ psb_fp = psb_fpriv(file_priv);
26193+ dev_priv = psb_priv(file_priv->minor->dev);
26194+
26195+ down_read(&dev_priv->sgx_sem);
26196+ psb_check_power_state(file_priv->minor->dev, PSB_DEVICE_SGX);
26197+
26198+ ttm_object_file_release(&psb_fp->tfile);
26199+ drm_free(psb_fp, sizeof(*psb_fp), DRM_MEM_FILES);
26200+
26201+ if (dev_priv && dev_priv->xhw_file)
26202+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
26203+
26204+ ret = drm_release(inode, filp);
26205+ up_read(&dev_priv->sgx_sem);
26206+ if (drm_psb_ospm && IS_MRST(dev_priv->dev))
26207+ schedule_delayed_work(&dev_priv->scheduler.wq, 0);
26208+ return ret;
26209+}
26210+
26211+int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
26212+ struct drm_file *file_priv)
26213+{
26214+ int ret;
26215+ struct drm_psb_private *dev_priv = psb_priv(dev);
26216+ down_read(&dev_priv->sgx_sem);
26217+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26218+ ret = ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
26219+ up_read(&dev_priv->sgx_sem);
26220+ if (drm_psb_ospm && IS_MRST(dev))
26221+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26222+ return ret;
26223+}
26224+
26225+int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
26226+ struct drm_file *file_priv)
26227+{
26228+ int ret;
26229+ struct drm_psb_private *dev_priv = psb_priv(dev);
26230+ down_read(&dev_priv->sgx_sem);
26231+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26232+ ret = ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
26233+ up_read(&dev_priv->sgx_sem);
26234+ if (drm_psb_ospm && IS_MRST(dev))
26235+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26236+ return ret;
26237+}
26238+
26239+int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
26240+ struct drm_file *file_priv)
26241+{
26242+ int ret;
26243+ struct drm_psb_private *dev_priv = psb_priv(dev);
26244+ down_read(&dev_priv->sgx_sem);
26245+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26246+ ret = ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
26247+ up_read(&dev_priv->sgx_sem);
26248+ if (drm_psb_ospm && IS_MRST(dev))
26249+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26250+ return ret;
26251+}
26252+
26253+int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
26254+ struct drm_file *file_priv)
26255+{
 26256+	PSB_DEBUG_PM("ioctl: psb_pl_waitidle\n");
26257+ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
26258+}
26259+
26260+int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
26261+ struct drm_file *file_priv)
26262+{
26263+ int ret;
26264+ struct drm_psb_private *dev_priv = psb_priv(dev);
26265+ down_read(&dev_priv->sgx_sem);
26266+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26267+ ret = ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
26268+ &psb_priv(dev)->ttm_lock, data);
26269+ up_read(&dev_priv->sgx_sem);
26270+ if (drm_psb_ospm && IS_MRST(dev))
26271+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26272+ return ret;
26273+}
26274+
26275+int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
26276+ struct drm_file *file_priv)
26277+{
26278+ int ret;
26279+ struct drm_psb_private *dev_priv = psb_priv(dev);
26280+ down_read(&dev_priv->sgx_sem);
26281+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26282+ ret = ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
26283+ up_read(&dev_priv->sgx_sem);
26284+ if (drm_psb_ospm && IS_MRST(dev))
26285+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26286+ return ret;
26287+}
26288+
26289+int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
26290+ struct drm_file *file_priv)
26291+{
26292+ struct drm_psb_private *dev_priv = psb_priv(dev);
26293+ int ret;
26294+ down_read(&dev_priv->sgx_sem);
26295+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26296+ ret = ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
26297+ up_read(&dev_priv->sgx_sem);
26298+ if (drm_psb_ospm && IS_MRST(dev))
26299+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26300+ return ret;
26301+}
26302+
26303+int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
26304+ struct drm_file *file_priv)
26305+{
26306+ struct drm_psb_private *dev_priv = psb_priv(dev);
26307+ int ret;
26308+ down_read(&dev_priv->sgx_sem);
26309+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26310+ ret = ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
26311+ up_read(&dev_priv->sgx_sem);
26312+ if (drm_psb_ospm && IS_MRST(dev))
26313+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26314+ return ret;
26315+}
26316+
26317+int psb_pl_create_ioctl(struct drm_device *dev, void *data,
26318+ struct drm_file *file_priv)
26319+{
26320+ struct drm_psb_private *dev_priv = psb_priv(dev);
26321+ int ret;
26322+ down_read(&dev_priv->sgx_sem);
26323+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26324+ ret = ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
26325+ &dev_priv->bdev, &dev_priv->ttm_lock, data);
26326+ up_read(&dev_priv->sgx_sem);
26327+ if (drm_psb_ospm && IS_MRST(dev))
26328+ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
26329+ return ret;
26330+}
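/*
 * Every fence/placement wrapper above repeats the same bracket: take
 * sgx_sem shared, make sure the SGX island is powered, call into TTM,
 * drop the lock, and on Moorestown kick the OSPM delayed work. A hedged
 * sketch of how the two-argument ioctls could share that boilerplate
 * (the helper and typedef are hypothetical; setstatus/create pass extra
 * lock arguments and would need their own variants):
 */
typedef int (*psb_tfile_ioctl_fn)(struct ttm_object_file *tfile, void *data);

static int psb_power_bracketed_ioctl(struct drm_device *dev,
				     struct drm_file *file_priv, void *data,
				     psb_tfile_ioctl_fn fn)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	int ret;

	down_read(&dev_priv->sgx_sem);
	psb_check_power_state(dev, PSB_DEVICE_SGX);
	ret = fn(psb_fpriv(file_priv)->tfile, data);
	up_read(&dev_priv->sgx_sem);
	if (drm_psb_ospm && IS_MRST(dev))
		schedule_delayed_work(&dev_priv->scheduler.wq, 1);
	return ret;
}

/* e.g.: return psb_power_bracketed_ioctl(dev, file_priv, data,
 *					  ttm_fence_unref_ioctl); */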
26331+
26332+/**
26333+ * psb_ttm_fault - Wrapper around the ttm fault method.
26334+ *
26335+ * @vma: The struct vm_area_struct as in the vm fault() method.
26336+ * @vmf: The struct vm_fault as in the vm fault() method.
26337+ *
26338+ * Since ttm_fault() will reserve buffers while faulting,
26339+ * we need to take the ttm read lock around it, as this driver
26340+ * relies on the ttm_lock in write mode to exclude all threads from
26341+ * reserving and thus validating buffers in aperture- and memory shortage
26342+ * situations.
26343+ */
26344+
26345+static int psb_ttm_fault(struct vm_area_struct *vma,
26346+ struct vm_fault *vmf)
26347+{
26348+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
26349+ vma->vm_private_data;
26350+ struct drm_psb_private *dev_priv =
26351+ container_of(bo->bdev, struct drm_psb_private, bdev);
26352+ int ret;
26353+
26354+ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
26355+ if (unlikely(ret != 0))
26356+ return VM_FAULT_NOPAGE;
26357+
26358+ ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
26359+
26360+ ttm_read_unlock(&dev_priv->ttm_lock);
26361+ return ret;
26362+}
 26363+
26365+int psb_mmap(struct file *filp, struct vm_area_struct *vma)
26366+{
26367+ struct drm_file *file_priv;
26368+ struct drm_psb_private *dev_priv;
26369+ int ret;
26370+
26371+ if (unlikely(vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET))
26372+ return drm_mmap(filp, vma);
26373+
26374+ file_priv = (struct drm_file *) filp->private_data;
26375+ dev_priv = psb_priv(file_priv->minor->dev);
26376+
26377+ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
26378+ if (unlikely(ret != 0))
26379+ return ret;
26380+
26381+ if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
26382+ dev_priv->ttm_vm_ops = vma->vm_ops;
26383+ psb_ttm_vm_ops = *vma->vm_ops;
26384+ psb_ttm_vm_ops.fault = &psb_ttm_fault;
26385+ }
26386+
26387+ vma->vm_ops = &psb_ttm_vm_ops;
26388+
26389+ return 0;
26390+}
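/*
 * From user space the TTM range is reached with a plain mmap() on the DRM
 * fd; offsets below DRM_PSB_FILE_PAGE_OFFSET still take the legacy
 * drm_mmap() path. A hedged user-space sketch (the map offset would come
 * from a buffer-creation ioctl; names are illustrative):
 */
#include <sys/mman.h>

static void *map_psb_bo(int drm_fd, unsigned long map_offset, size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, drm_fd, (off_t) map_offset);

	return (ptr == MAP_FAILED) ? NULL : ptr;
}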
26391+
26392+ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
26393+ size_t count, loff_t *f_pos)
26394+{
26395+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
26396+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
26397+
26398+ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
26399+}
26400+
26401+ssize_t psb_ttm_read(struct file *filp, char __user *buf,
26402+ size_t count, loff_t *f_pos)
26403+{
26404+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
26405+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
26406+
26407+ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
26408+}
26409+
26410+int psb_verify_access(struct ttm_buffer_object *bo,
26411+ struct file *filp)
26412+{
26413+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
26414+
26415+ if (capable(CAP_SYS_ADMIN))
26416+ return 0;
26417+
26418+ if (unlikely(!file_priv->authenticated))
26419+ return -EPERM;
26420+
26421+ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
26422+}
26423+
26424+static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
26425+{
26426+ return ttm_mem_global_init(ref->object);
26427+}
26428+
26429+static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
26430+{
26431+ ttm_mem_global_release(ref->object);
26432+}
26433+
26434+int psb_ttm_global_init(struct drm_psb_private *dev_priv)
26435+{
26436+ struct drm_global_reference *global_ref;
26437+ int ret;
26438+
26439+ global_ref = &dev_priv->mem_global_ref;
26440+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
26441+ global_ref->size = sizeof(struct ttm_mem_global);
26442+ global_ref->init = &psb_ttm_mem_global_init;
26443+ global_ref->release = &psb_ttm_mem_global_release;
26444+
26445+ ret = drm_global_item_ref(global_ref);
26446+ if (unlikely(ret != 0)) {
26447+ DRM_ERROR("Failed referencing a global TTM memory object.\n");
26448+ return ret;
26449+ }
26450+
26451+ return 0;
26452+}
26453+
26454+void psb_ttm_global_release(struct drm_psb_private *dev_priv)
26455+{
26456+ drm_global_item_unref(&dev_priv->mem_global_ref);
26457+}
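/*
 * The global reference is meant to be taken once at device load and
 * dropped at unload; a short hedged sketch of the pairing (function name
 * is illustrative):
 */
static int example_load_ttm_globals(struct drm_psb_private *dev_priv)
{
	int ret = psb_ttm_global_init(dev_priv);

	if (unlikely(ret != 0))
		return ret;
	/* ... remaining device setup; any later failure must call
	 * psb_ttm_global_release(dev_priv) before bailing out. */
	return 0;
}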
26458diff -uNr a/drivers/gpu/drm/psb/psb_xhw.c b/drivers/gpu/drm/psb/psb_xhw.c
26459--- a/drivers/gpu/drm/psb/psb_xhw.c 1969-12-31 16:00:00.000000000 -0800
26460+++ b/drivers/gpu/drm/psb/psb_xhw.c 2009-04-07 13:28:38.000000000 -0700
26461@@ -0,0 +1,629 @@
26462+/**************************************************************************
26463+ *Copyright (c) 2007-2008, Intel Corporation.
26464+ *All Rights Reserved.
26465+ *
26466+ *This program is free software; you can redistribute it and/or modify it
26467+ *under the terms and conditions of the GNU General Public License,
26468+ *version 2, as published by the Free Software Foundation.
26469+ *
26470+ *This program is distributed in the hope it will be useful, but WITHOUT
26471+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26472+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
26473+ *more details.
26474+ *
26475+ *You should have received a copy of the GNU General Public License along with
26476+ *this program; if not, write to the Free Software Foundation, Inc.,
26477+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
26478+ *
26479+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
26480+ *develop this driver.
26481+ *
26482+ **************************************************************************/
26483+/*
26484+ *Make calls into closed source X server code.
26485+ */
26486+
26487+#include <drm/drmP.h>
26488+#include "psb_drv.h"
26489+#include "ttm/ttm_userobj_api.h"
26490+
26491+void
26492+psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
26493+ struct psb_xhw_buf *buf)
26494+{
26495+ unsigned long irq_flags;
26496+
26497+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26498+ list_del_init(&buf->head);
26499+ if (dev_priv->xhw_cur_buf == buf)
26500+ dev_priv->xhw_cur_buf = NULL;
26501+ atomic_set(&buf->done, 1);
26502+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26503+}
26504+
26505+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
26506+ struct psb_xhw_buf *buf)
26507+{
26508+ unsigned long irq_flags;
26509+
26510+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26511+ atomic_set(&buf->done, 0);
26512+ if (unlikely(!dev_priv->xhw_submit_ok)) {
26513+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26514+ DRM_ERROR("No Xpsb 3D extension available.\n");
26515+ return -EINVAL;
26516+ }
26517+ if (!list_empty(&buf->head)) {
26518+ DRM_ERROR("Recursive list adding.\n");
26519+ goto out;
26520+ }
26521+ list_add_tail(&buf->head, &dev_priv->xhw_in);
26522+ wake_up_interruptible(&dev_priv->xhw_queue);
26523+out:
26524+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26525+ return 0;
26526+}
26527+
26528+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
26529+ struct psb_xhw_buf *buf,
26530+ uint32_t w,
26531+ uint32_t h,
26532+ uint32_t *hw_cookie,
26533+ uint32_t *bo_size,
26534+ uint32_t *clear_p_start,
26535+ uint32_t *clear_num_pages)
26536+{
26537+ struct drm_psb_xhw_arg *xa = &buf->arg;
26538+ int ret;
26539+
26540+ buf->copy_back = 1;
26541+ xa->op = PSB_XHW_SCENE_INFO;
26542+ xa->irq_op = 0;
26543+ xa->issue_irq = 0;
26544+ xa->arg.si.w = w;
26545+ xa->arg.si.h = h;
26546+
26547+ ret = psb_xhw_add(dev_priv, buf);
26548+ if (ret)
26549+ return ret;
26550+
26551+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26552+ atomic_read(&buf->done), DRM_HZ);
26553+
26554+ if (!atomic_read(&buf->done)) {
26555+ psb_xhw_clean_buf(dev_priv, buf);
26556+ return -EBUSY;
26557+ }
26558+
26559+ if (!xa->ret) {
26560+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
26561+ *bo_size = xa->arg.si.size;
26562+ *clear_p_start = xa->arg.si.clear_p_start;
26563+ *clear_num_pages = xa->arg.si.clear_num_pages;
26564+ }
26565+ return xa->ret;
26566+}
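/*
 * Every synchronous request in this file follows the protocol shown in
 * psb_xhw_scene_info() above: mark the buffer copy_back, queue it with
 * psb_xhw_add(), sleep on xhw_caller_queue until the X server's reply
 * flips buf->done, and reclaim the buffer on timeout. A hedged generic
 * sketch (the helper name is hypothetical):
 */
static int psb_xhw_submit_sync(struct drm_psb_private *dev_priv,
			       struct psb_xhw_buf *buf,
			       unsigned long timeout)
{
	int ret;

	buf->copy_back = 1;
	ret = psb_xhw_add(dev_priv, buf);
	if (ret)
		return ret;

	(void) wait_event_timeout(dev_priv->xhw_caller_queue,
				  atomic_read(&buf->done), timeout);
	if (!atomic_read(&buf->done)) {
		psb_xhw_clean_buf(dev_priv, buf);
		return -EBUSY;
	}
	return buf->arg.ret;
}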
26567+
26568+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
26569+ struct psb_xhw_buf *buf, uint32_t fire_flags)
26570+{
26571+ struct drm_psb_xhw_arg *xa = &buf->arg;
26572+
26573+ buf->copy_back = 0;
26574+ xa->op = PSB_XHW_FIRE_RASTER;
26575+ xa->issue_irq = 0;
 26576+	xa->arg.sb.fire_flags = fire_flags;
26577+
26578+ return psb_xhw_add(dev_priv, buf);
26579+}
26580+
26581+int psb_xhw_vistest(struct drm_psb_private *dev_priv,
26582+ struct psb_xhw_buf *buf)
26583+{
26584+ struct drm_psb_xhw_arg *xa = &buf->arg;
26585+
26586+ buf->copy_back = 1;
26587+ xa->op = PSB_XHW_VISTEST;
26588+ /*
26589+ *Could perhaps decrease latency somewhat by
26590+ *issuing an irq in this case.
26591+ */
26592+ xa->issue_irq = 0;
26593+ xa->irq_op = PSB_UIRQ_VISTEST;
26594+ return psb_xhw_add(dev_priv, buf);
26595+}
26596+
26597+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
26598+ struct psb_xhw_buf *buf,
26599+ uint32_t fire_flags,
26600+ uint32_t hw_context,
26601+ uint32_t *cookie,
26602+ uint32_t *oom_cmds,
26603+ uint32_t num_oom_cmds,
26604+ uint32_t offset, uint32_t engine,
26605+ uint32_t flags)
26606+{
26607+ struct drm_psb_xhw_arg *xa = &buf->arg;
26608+
26609+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
26610+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
26611+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
26612+ if (unlikely(buf->copy_back))
26613+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
26614+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
26615+ else
26616+ xa->irq_op = 0;
26617+ xa->arg.sb.fire_flags = fire_flags;
26618+ xa->arg.sb.hw_context = hw_context;
26619+ xa->arg.sb.offset = offset;
26620+ xa->arg.sb.engine = engine;
26621+ xa->arg.sb.flags = flags;
26622+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
26623+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
26624+ if (num_oom_cmds)
26625+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
26626+ sizeof(uint32_t) * num_oom_cmds);
26627+ return psb_xhw_add(dev_priv, buf);
26628+}
26629+
26630+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
26631+ struct psb_xhw_buf *buf)
26632+{
26633+ struct drm_psb_xhw_arg *xa = &buf->arg;
26634+ int ret;
26635+
26636+ buf->copy_back = 1;
26637+ xa->op = PSB_XHW_RESET_DPM;
26638+ xa->issue_irq = 0;
26639+ xa->irq_op = 0;
26640+
26641+ ret = psb_xhw_add(dev_priv, buf);
26642+ if (ret)
26643+ return ret;
26644+
26645+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26646+ atomic_read(&buf->done), 3 * DRM_HZ);
26647+
26648+ if (!atomic_read(&buf->done)) {
26649+ psb_xhw_clean_buf(dev_priv, buf);
26650+ return -EBUSY;
26651+ }
26652+
26653+ return xa->ret;
26654+}
26655+
26656+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
26657+ struct psb_xhw_buf *buf, uint32_t *value)
26658+{
26659+ struct drm_psb_xhw_arg *xa = &buf->arg;
26660+ int ret;
26661+
26662+ *value = 0;
26663+
26664+ buf->copy_back = 1;
26665+ xa->op = PSB_XHW_CHECK_LOCKUP;
26666+ xa->issue_irq = 0;
26667+ xa->irq_op = 0;
26668+
26669+ ret = psb_xhw_add(dev_priv, buf);
26670+ if (ret)
26671+ return ret;
26672+
26673+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26674+ atomic_read(&buf->done), DRM_HZ * 3);
26675+
26676+ if (!atomic_read(&buf->done)) {
26677+ psb_xhw_clean_buf(dev_priv, buf);
26678+ return -EBUSY;
26679+ }
26680+
26681+ if (!xa->ret)
26682+ *value = xa->arg.cl.value;
26683+
26684+ return xa->ret;
26685+}
26686+
26687+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
26688+ struct psb_xhw_buf *buf)
26689+{
26690+ struct drm_psb_xhw_arg *xa = &buf->arg;
26691+ unsigned long irq_flags;
26692+
26693+ buf->copy_back = 0;
26694+ xa->op = PSB_XHW_TERMINATE;
26695+ xa->issue_irq = 0;
26696+
26697+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26698+ dev_priv->xhw_submit_ok = 0;
26699+ atomic_set(&buf->done, 0);
26700+ if (!list_empty(&buf->head)) {
26701+ DRM_ERROR("Recursive list adding.\n");
26702+ goto out;
26703+ }
26704+ list_add_tail(&buf->head, &dev_priv->xhw_in);
26705+out:
26706+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26707+ wake_up_interruptible(&dev_priv->xhw_queue);
26708+
26709+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26710+ atomic_read(&buf->done), DRM_HZ / 10);
26711+
26712+ if (!atomic_read(&buf->done)) {
26713+ DRM_ERROR("Xpsb terminate timeout.\n");
26714+ psb_xhw_clean_buf(dev_priv, buf);
26715+ return -EBUSY;
26716+ }
26717+
26718+ return 0;
26719+}
26720+
26721+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
26722+ struct psb_xhw_buf *buf,
 26723+			uint32_t pages, uint32_t *hw_cookie,
 26724+			uint32_t *size,
 26725+			uint32_t *ta_min_size)
26726+{
26727+ struct drm_psb_xhw_arg *xa = &buf->arg;
26728+ int ret;
26729+
26730+ buf->copy_back = 1;
26731+ xa->op = PSB_XHW_TA_MEM_INFO;
26732+ xa->issue_irq = 0;
26733+ xa->irq_op = 0;
26734+ xa->arg.bi.pages = pages;
26735+
26736+ ret = psb_xhw_add(dev_priv, buf);
26737+ if (ret)
26738+ return ret;
26739+
26740+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26741+ atomic_read(&buf->done), DRM_HZ);
26742+
26743+ if (!atomic_read(&buf->done)) {
26744+ psb_xhw_clean_buf(dev_priv, buf);
26745+ return -EBUSY;
26746+ }
26747+
26748+ if (!xa->ret)
26749+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
26750+
26751+ *size = xa->arg.bi.size;
26752+ *ta_min_size = xa->arg.bi.ta_min_size;
26753+ return xa->ret;
26754+}
26755+
26756+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
26757+ struct psb_xhw_buf *buf,
26758+ uint32_t flags,
26759+ uint32_t param_offset,
26760+ uint32_t pt_offset, uint32_t *hw_cookie)
26761+{
26762+ struct drm_psb_xhw_arg *xa = &buf->arg;
26763+ int ret;
26764+
26765+ buf->copy_back = 1;
26766+ xa->op = PSB_XHW_TA_MEM_LOAD;
26767+ xa->issue_irq = 0;
26768+ xa->irq_op = 0;
26769+ xa->arg.bl.flags = flags;
26770+ xa->arg.bl.param_offset = param_offset;
26771+ xa->arg.bl.pt_offset = pt_offset;
26772+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
26773+
26774+ ret = psb_xhw_add(dev_priv, buf);
26775+ if (ret)
26776+ return ret;
26777+
26778+ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
26779+ atomic_read(&buf->done), 3 * DRM_HZ);
26780+
26781+ if (!atomic_read(&buf->done)) {
26782+ psb_xhw_clean_buf(dev_priv, buf);
26783+ return -EBUSY;
26784+ }
26785+
26786+ if (!xa->ret)
26787+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
26788+
26789+ return xa->ret;
26790+}
26791+
26792+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
26793+ struct psb_xhw_buf *buf, uint32_t *cookie)
26794+{
26795+ struct drm_psb_xhw_arg *xa = &buf->arg;
26796+
26797+ /*
26798+ *This calls the extensive closed source
26799+ *OOM handler, which resolves the condition and
26800+ *sends a reply telling the scheduler what to do
26801+ *with the task.
26802+ */
26803+
26804+ buf->copy_back = 1;
26805+ xa->op = PSB_XHW_OOM;
26806+ xa->issue_irq = 1;
26807+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
26808+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
26809+
26810+ return psb_xhw_add(dev_priv, buf);
26811+}
26812+
26813+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
26814+ struct psb_xhw_buf *buf,
26815+ uint32_t *cookie,
26816+ uint32_t *bca, uint32_t *rca, uint32_t *flags)
26817+{
26818+ struct drm_psb_xhw_arg *xa = &buf->arg;
26819+
26820+ /*
26821+ *Get info about how to schedule an OOM task.
26822+ */
26823+
26824+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
26825+ *bca = xa->arg.oom.bca;
26826+ *rca = xa->arg.oom.rca;
26827+ *flags = xa->arg.oom.flags;
26828+}
26829+
26830+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
26831+ struct psb_xhw_buf *buf, uint32_t *cookie)
26832+{
26833+ struct drm_psb_xhw_arg *xa = &buf->arg;
26834+
26835+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
26836+}
26837+
26838+int psb_xhw_resume(struct drm_psb_private *dev_priv,
26839+ struct psb_xhw_buf *buf)
26840+{
26841+ struct drm_psb_xhw_arg *xa = &buf->arg;
26842+
26843+ buf->copy_back = 0;
26844+ xa->op = PSB_XHW_RESUME;
26845+ xa->issue_irq = 0;
26846+ xa->irq_op = 0;
26847+ return psb_xhw_add(dev_priv, buf);
26848+}
26849+
26850+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
26851+{
26852+}
26853+
26854+int psb_xhw_init(struct drm_device *dev)
26855+{
26856+ struct drm_psb_private *dev_priv =
26857+ (struct drm_psb_private *) dev->dev_private;
26858+ unsigned long irq_flags;
26859+
26860+ INIT_LIST_HEAD(&dev_priv->xhw_in);
26861+ spin_lock_init(&dev_priv->xhw_lock);
26862+ atomic_set(&dev_priv->xhw_client, 0);
26863+ init_waitqueue_head(&dev_priv->xhw_queue);
26864+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
26865+ mutex_init(&dev_priv->xhw_mutex);
26866+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26867+ dev_priv->xhw_on = 0;
26868+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26869+
26870+ return 0;
26871+}
26872+
26873+static int psb_xhw_init_init(struct drm_device *dev,
26874+ struct drm_file *file_priv,
26875+ struct drm_psb_xhw_init_arg *arg)
26876+{
26877+ struct drm_psb_private *dev_priv =
26878+ (struct drm_psb_private *) dev->dev_private;
26879+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
26880+ int ret;
26881+ bool is_iomem;
26882+
26883+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
26884+ unsigned long irq_flags;
26885+
26886+ dev_priv->xhw_bo =
26887+ ttm_buffer_object_lookup(tfile, arg->buffer_handle);
26888+ if (!dev_priv->xhw_bo) {
26889+ ret = -EINVAL;
26890+ goto out_err;
26891+ }
26892+ ret = ttm_bo_kmap(dev_priv->xhw_bo, 0,
26893+ dev_priv->xhw_bo->num_pages,
26894+ &dev_priv->xhw_kmap);
26895+ if (ret) {
26896+ DRM_ERROR("Failed mapping X server "
26897+ "communications buffer.\n");
26898+ goto out_err0;
26899+ }
26900+ dev_priv->xhw =
26901+ ttm_kmap_obj_virtual(&dev_priv->xhw_kmap, &is_iomem);
26902+ if (is_iomem) {
 26903+			DRM_ERROR("X server communications buffer "
 26904+				  "is in device memory.\n");
26905+ ret = -EINVAL;
26906+ goto out_err1;
26907+ }
26908+ dev_priv->xhw_file = file_priv;
26909+
26910+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26911+ dev_priv->xhw_on = 1;
26912+ dev_priv->xhw_submit_ok = 1;
26913+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26914+ return 0;
26915+ } else {
26916+ DRM_ERROR("Xhw is already initialized.\n");
26917+ return -EBUSY;
26918+ }
26919+out_err1:
26920+ dev_priv->xhw = NULL;
26921+ ttm_bo_kunmap(&dev_priv->xhw_kmap);
26922+out_err0:
26923+ ttm_bo_unref(&dev_priv->xhw_bo);
26924+out_err:
26925+ atomic_dec(&dev_priv->xhw_client);
26926+ return ret;
26927+}
26928+
26929+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
26930+{
26931+ struct psb_xhw_buf *cur_buf, *next;
26932+ unsigned long irq_flags;
26933+
26934+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26935+ dev_priv->xhw_submit_ok = 0;
26936+
26937+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
26938+ list_del_init(&cur_buf->head);
26939+ if (cur_buf->copy_back)
26940+ cur_buf->arg.ret = -EINVAL;
26941+ atomic_set(&cur_buf->done, 1);
26942+ }
26943+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
26944+ wake_up(&dev_priv->xhw_caller_queue);
26945+}
26946+
26947+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
26948+ struct drm_file *file_priv, int closing)
26949+{
26950+
26951+ if (dev_priv->xhw_file == file_priv &&
26952+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
26953+
 26954+		if (closing) {
 26955+			psb_xhw_queue_empty(dev_priv);
 26956+		} else {
26957+ struct psb_xhw_buf buf;
26958+ INIT_LIST_HEAD(&buf.head);
26959+
26960+ psb_xhw_terminate(dev_priv, &buf);
26961+ psb_xhw_queue_empty(dev_priv);
26962+ }
26963+
26964+ dev_priv->xhw = NULL;
26965+ ttm_bo_kunmap(&dev_priv->xhw_kmap);
26966+ ttm_bo_unref(&dev_priv->xhw_bo);
26967+ dev_priv->xhw_file = NULL;
26968+ }
26969+}
26970+
26971+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
26972+ struct drm_file *file_priv)
26973+{
26974+ struct drm_psb_xhw_init_arg *arg =
26975+ (struct drm_psb_xhw_init_arg *) data;
26976+ struct drm_psb_private *dev_priv =
26977+ (struct drm_psb_private *) dev->dev_private;
26978+ int ret = 0;
26979+ down_read(&dev_priv->sgx_sem);
26980+ psb_check_power_state(dev, PSB_DEVICE_SGX);
26981+ switch (arg->operation) {
26982+ case PSB_XHW_INIT:
26983+ ret = psb_xhw_init_init(dev, file_priv, arg);
26984+ break;
26985+ case PSB_XHW_TAKEDOWN:
26986+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
26987+ break;
26988+ }
26989+ up_read(&dev_priv->sgx_sem);
26990+ return ret;
26991+}
26992+
26993+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
26994+{
26995+ int empty;
26996+ unsigned long irq_flags;
26997+
26998+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
26999+ empty = list_empty(&dev_priv->xhw_in);
27000+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27001+ return empty;
27002+}
27003+
27004+int psb_xhw_handler(struct drm_psb_private *dev_priv)
27005+{
27006+ unsigned long irq_flags;
27007+ struct drm_psb_xhw_arg *xa;
27008+ struct psb_xhw_buf *buf;
27009+
27010+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
27011+
27012+ if (!dev_priv->xhw_on) {
27013+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27014+ return -EINVAL;
27015+ }
27016+
27017+ buf = dev_priv->xhw_cur_buf;
27018+ if (buf && buf->copy_back) {
27019+ xa = &buf->arg;
27020+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
27021+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
27022+ atomic_set(&buf->done, 1);
27023+ wake_up(&dev_priv->xhw_caller_queue);
27024+ } else
27025+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
27026+
 27027+	dev_priv->xhw_cur_buf = NULL;
27028+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27029+ return 0;
27030+}
27031+
27032+int psb_xhw_ioctl(struct drm_device *dev, void *data,
27033+ struct drm_file *file_priv)
27034+{
27035+ struct drm_psb_private *dev_priv =
27036+ (struct drm_psb_private *) dev->dev_private;
27037+ unsigned long irq_flags;
27038+ struct drm_psb_xhw_arg *xa;
27039+ int ret;
27040+ struct list_head *list;
27041+ struct psb_xhw_buf *buf;
27042+
27043+ if (!dev_priv)
27044+ return -EINVAL;
27045+
27046+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
27047+ return -ERESTART;
27048+
27049+ if (psb_forced_user_interrupt(dev_priv)) {
27050+ mutex_unlock(&dev_priv->xhw_mutex);
27051+ return -EINVAL;
27052+ }
27053+
27054+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
27055+ while (list_empty(&dev_priv->xhw_in)) {
27056+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27057+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
27058+ !psb_xhw_in_empty
27059+ (dev_priv), DRM_HZ);
27060+ if (ret == -ERESTARTSYS || ret == 0) {
27061+ mutex_unlock(&dev_priv->xhw_mutex);
27062+ return -ERESTART;
27063+ }
27064+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
27065+ }
27066+
27067+ list = dev_priv->xhw_in.next;
27068+ list_del_init(list);
27069+
27070+ buf = list_entry(list, struct psb_xhw_buf, head);
27071+ xa = &buf->arg;
27072+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
27073+
27074+ if (unlikely(buf->copy_back))
27075+ dev_priv->xhw_cur_buf = buf;
27076+ else {
27077+ atomic_set(&buf->done, 1);
27078+ dev_priv->xhw_cur_buf = NULL;
27079+ }
27080+
27081+ if (xa->op == PSB_XHW_TERMINATE) {
27082+ dev_priv->xhw_on = 0;
27083+ wake_up(&dev_priv->xhw_caller_queue);
27084+ }
27085+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
27086+
27087+ mutex_unlock(&dev_priv->xhw_mutex);
27088+
27089+ return 0;
27090+}
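/*
 * psb_xhw_ioctl() above is the consumer half of the queue: the
 * closed-source X extension calls it in a loop, each call blocking until
 * the kernel has copied a request into the shared communication page. A
 * heavily hedged user-space sketch of that daemon loop (the command index
 * DRM_PSB_XHW and the request handler are assumptions, not taken from
 * this patch):
 */
#include <xf86drm.h>

static void xhw_daemon_loop(int drm_fd, void *shared_page)
{
	for (;;) {
		/* Blocks in psb_xhw_ioctl() until a request is queued. */
		if (drmCommandNone(drm_fd, DRM_PSB_XHW) != 0)
			continue;	/* -ERESTART: simply retry */
		handle_xhw_request(shared_page);	/* hypothetical */
	}
}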
27091diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
27092--- a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c 1969-12-31 16:00:00.000000000 -0800
27093+++ b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c 2009-04-07 13:28:38.000000000 -0700
27094@@ -0,0 +1,149 @@
27095+/**************************************************************************
27096+ *
27097+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
27098+ * All Rights Reserved.
27099+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
27100+ * All Rights Reserved.
27101+ *
27102+ * Permission is hereby granted, free of charge, to any person obtaining a
27103+ * copy of this software and associated documentation files (the
27104+ * "Software"), to deal in the Software without restriction, including
27105+ * without limitation the rights to use, copy, modify, merge, publish,
27106+ * distribute, sub license, and/or sell copies of the Software, and to
27107+ * permit persons to whom the Software is furnished to do so, subject to
27108+ * the following conditions:
27109+ *
27110+ * The above copyright notice and this permission notice (including the
27111+ * next paragraph) shall be included in all copies or substantial portions
27112+ * of the Software.
27113+ *
27114+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27115+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27116+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
27117+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27118+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27119+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
27120+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
27121+ *
27122+ **************************************************************************/
27123+/*
27124+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
27125+ * Keith Packard.
27126+ */
27127+
27128+#include "ttm/ttm_bo_driver.h"
27129+#ifdef TTM_HAS_AGP
27130+#include "ttm/ttm_placement_common.h"
27131+#include <linux/agp_backend.h>
27132+#include <asm/agp.h>
27133+#include <asm/io.h>
27134+
27135+struct ttm_agp_backend {
27136+ struct ttm_backend backend;
27137+ struct agp_memory *mem;
27138+ struct agp_bridge_data *bridge;
27139+};
27140+
27141+static int ttm_agp_populate(struct ttm_backend *backend,
27142+ unsigned long num_pages, struct page **pages,
27143+ struct page *dummy_read_page)
27144+{
27145+ struct ttm_agp_backend *agp_be =
27146+ container_of(backend, struct ttm_agp_backend, backend);
27147+ struct page **cur_page, **last_page = pages + num_pages;
27148+ struct agp_memory *mem;
27149+
27150+ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
27151+ if (unlikely(mem == NULL))
27152+ return -ENOMEM;
27153+
27154+ mem->page_count = 0;
27155+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
27156+ struct page *page = *cur_page;
 27157+		if (!page)
 27158+			page = dummy_read_page;
27160+ mem->memory[mem->page_count++] =
27161+ phys_to_gart(page_to_phys(page));
27162+ }
27163+ agp_be->mem = mem;
27164+ return 0;
27165+}
27166+
27167+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
27168+{
27169+ struct ttm_agp_backend *agp_be =
27170+ container_of(backend, struct ttm_agp_backend, backend);
27171+ struct agp_memory *mem = agp_be->mem;
27172+ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED);
27173+ int ret;
27174+
27175+ mem->is_flushed = 1;
27176+ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
27177+
27178+ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
27179+ if (ret)
27180+ printk(KERN_ERR "AGP Bind memory failed.\n");
27181+
27182+ return ret;
27183+}
27184+
27185+static int ttm_agp_unbind(struct ttm_backend *backend)
27186+{
27187+ struct ttm_agp_backend *agp_be =
27188+ container_of(backend, struct ttm_agp_backend, backend);
27189+
27190+ if (agp_be->mem->is_bound)
27191+ return agp_unbind_memory(agp_be->mem);
27192+ else
27193+ return 0;
27194+}
27195+
27196+static void ttm_agp_clear(struct ttm_backend *backend)
27197+{
27198+ struct ttm_agp_backend *agp_be =
27199+ container_of(backend, struct ttm_agp_backend, backend);
27200+ struct agp_memory *mem = agp_be->mem;
27201+
27202+ if (mem) {
27203+ ttm_agp_unbind(backend);
27204+ agp_free_memory(mem);
27205+ }
27206+ agp_be->mem = NULL;
27207+}
27208+
27209+static void ttm_agp_destroy(struct ttm_backend *backend)
27210+{
27211+ struct ttm_agp_backend *agp_be =
27212+ container_of(backend, struct ttm_agp_backend, backend);
27213+
27214+ if (agp_be->mem)
27215+ ttm_agp_clear(backend);
27216+ kfree(agp_be);
27217+}
27218+
27219+static struct ttm_backend_func ttm_agp_func = {
27220+ .populate = ttm_agp_populate,
27221+ .clear = ttm_agp_clear,
27222+ .bind = ttm_agp_bind,
27223+ .unbind = ttm_agp_unbind,
27224+ .destroy = ttm_agp_destroy,
27225+};
27226+
27227+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
27228+ struct agp_bridge_data *bridge)
27229+{
27230+ struct ttm_agp_backend *agp_be;
27231+
27232+ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
27233+ if (!agp_be)
27234+ return NULL;
27235+
27236+ agp_be->mem = NULL;
27237+ agp_be->bridge = bridge;
27238+ agp_be->backend.func = &ttm_agp_func;
27239+ agp_be->backend.bdev = bdev;
27240+ return &agp_be->backend;
27241+}
27242+
27243+#endif
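/*
 * ttm_agp_backend_init() is intended to be returned from the driver's
 * backend-creation callback; a hedged sketch assuming the classic TTM
 * create_ttm_backend_entry hook of this era (the hook wiring is an
 * assumption, not taken from this patch):
 */
static struct ttm_backend *
example_create_ttm_backend_entry(struct ttm_bo_device *bdev,
				 struct agp_bridge_data *bridge)
{
	return ttm_agp_backend_init(bdev, bridge);
}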
27244diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h
27245--- a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h 1969-12-31 16:00:00.000000000 -0800
27246+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h 2009-04-07 13:28:38.000000000 -0700
27247@@ -0,0 +1,578 @@
27248+/**************************************************************************
27249+ *
27250+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
27251+ * All Rights Reserved.
27252+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
27253+ * All Rights Reserved.
27254+ *
27255+ * Permission is hereby granted, free of charge, to any person obtaining a
27256+ * copy of this software and associated documentation files (the
27257+ * "Software"), to deal in the Software without restriction, including
27258+ * without limitation the rights to use, copy, modify, merge, publish,
27259+ * distribute, sub license, and/or sell copies of the Software, and to
27260+ * permit persons to whom the Software is furnished to do so, subject to
27261+ * the following conditions:
27262+ *
27263+ * The above copyright notice and this permission notice (including the
27264+ * next paragraph) shall be included in all copies or substantial portions
27265+ * of the Software.
27266+ *
27267+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27268+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27269+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
27270+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27271+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27272+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
27273+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
27274+ *
27275+ **************************************************************************/
27276+/*
27277+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
27278+ */
27279+
27280+#ifndef _TTM_BO_API_H_
27281+#define _TTM_BO_API_H_
27282+
27283+#include <drm/drm_hashtab.h>
27284+#include <linux/kref.h>
27285+#include <linux/list.h>
27286+#include <linux/wait.h>
27287+#include <linux/mutex.h>
27288+#include <linux/mm.h>
27289+#include <linux/rbtree.h>
27290+
27291+struct ttm_bo_device;
27292+
27293+struct drm_mm_node;
27294+
27295+/**
27296+ * struct ttm_mem_reg
27297+ *
27298+ * @mm_node: Memory manager node.
27299+ * @size: Requested size of memory region.
27300+ * @num_pages: Actual size of memory region in pages.
27301+ * @page_alignment: Page alignment.
27302+ * @flags: Placement flags.
27303+ * @proposed_flags: Proposed placement flags.
27304+ *
27305+ * Structure indicating the placement and space resources used by a
27306+ * buffer object.
27307+ */
27308+
27309+struct ttm_mem_reg {
27310+ struct drm_mm_node *mm_node;
27311+ unsigned long size;
27312+ unsigned long num_pages;
27313+ uint32_t page_alignment;
27314+ uint32_t mem_type;
27315+ uint32_t flags;
27316+ uint32_t proposed_flags;
27317+};
27318+
27319+/**
27320+ * enum ttm_bo_type
27321+ *
27322+ * @ttm_bo_type_device: These are 'normal' buffers that can
 27323+ * be mmapped by user space. Each of these bos occupies a slot in the
27324+ * device address space, that can be used for normal vm operations.
27325+ *
27326+ * @ttm_bo_type_user: These are user-space memory areas that are made
27327+ * available to the GPU by mapping the buffer pages into the GPU aperture
 27328+ * space. These buffers cannot be mmapped from the device address space.
27329+ *
27330+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
27331+ * but they cannot be accessed from user-space. For kernel-only use.
27332+ */
27333+
27334+enum ttm_bo_type {
27335+ ttm_bo_type_device,
27336+ ttm_bo_type_user,
27337+ ttm_bo_type_kernel
27338+};
27339+
27340+struct ttm_tt;
27341+
27342+/**
27343+ * struct ttm_buffer_object
27344+ *
27345+ * @bdev: Pointer to the buffer object device structure.
27346+ * @kref: Reference count of this buffer object. When this refcount reaches
27347+ * zero, the object is put on the delayed delete list.
27348+ * @list_kref: List reference count of this buffer object. This member is
27349+ * used to avoid destruction while the buffer object is still on a list.
 27350+ * The lru lists and the delayed delete list may each keep one refcount,
 27351+ * and kref != 0 keeps one refcount. When this refcount reaches zero,
27352+ * the object is destroyed.
27353+ * @proposed_flags: Proposed placement for the buffer. Changed only by the
27354+ * creator prior to validation as opposed to bo->mem.proposed_flags which is
27355+ * changed by the implementation prior to a buffer move if it wants to outsmart
 27356+ * the buffer creator / user. The latter happens, for example, at eviction.
27357+ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
27358+ * buffers.
27359+ * @type: The bo type.
27360+ * @offset: The current GPU offset, which can have different meanings
27361+ * depending on the memory type. For SYSTEM type memory, it should be 0.
27362+ * @mem: structure describing current placement.
27363+ * @val_seq: Sequence of the validation holding the @reserved lock.
27364+ * Used to avoid starvation when many processes compete to validate the
27365+ * buffer. This member is protected by the bo_device::lru_lock.
27366+ * @seq_valid: The value of @val_seq is valid. This value is protected by
27367+ * the bo_device::lru_lock.
27368+ * @lru: List head for the lru list.
27369+ * @ddestroy: List head for the delayed destroy list.
27370+ * @swap: List head for swap LRU list.
27371+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
27372+ * pinned in physical memory. If this behaviour is not desired, this member
 27373+ * holds a pointer to a persistent shmem object.
27374+ * @destroy: Destruction function. If NULL, kfree is used.
27375+ * @sync_obj_arg: Opaque argument to synchronization object function.
27376+ * @sync_obj: Pointer to a synchronization object.
27377+ * @priv_flags: Flags describing buffer object internal state.
27378+ * @event_queue: Queue for processes waiting on buffer object status change.
27379+ * @mutex: Lock protecting all members with the exception of constant members
27380+ * and list heads. We should really use a spinlock here.
27381+ * @num_pages: Actual number of pages.
27382+ * @ttm: TTM structure holding system pages.
 27383+ * @vm_rb: Rb-tree node for fast address space lookup.
27385+ * @vm_node: Address space manager node.
27386+ * @addr_space_offset: Address space offset.
27387+ * @cpu_writes: For synchronization. Number of cpu writers.
27388+ * @reserved: Deadlock-free lock used for synchronization state transitions.
27389+ * @acc_size: Accounted size for this object.
27390+ *
27391+ * Base class for TTM buffer object, that deals with data placement and CPU
27392+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
27393+ * the driver can usually use the placement offset @offset directly as the
27394+ * GPU virtual address. For drivers implementing multiple
27395+ * GPU memory manager contexts, the driver should manage the address space
27396+ * in these contexts separately and use these objects to get the correct
27397+ * placement and caching for these GPU maps. This makes it possible to use
27398+ * these objects for even quite elaborate memory management schemes.
 27399+ * The destroy member, together with the API visibility of this object,
 27400+ * makes it possible to derive driver-specific types.
27401+ */
27402+
27403+struct ttm_buffer_object {
27404+ struct ttm_bo_device *bdev;
27405+ struct kref kref;
27406+ struct kref list_kref;
27407+
27408+ /*
27409+ * If there is a possibility that the usage variable is zero,
27410+ * then dev->struct_mutex should be locked before incrementing it.
27411+ */
27412+
27413+ uint32_t proposed_flags;
27414+ unsigned long buffer_start;
27415+ enum ttm_bo_type type;
27416+ unsigned long offset;
27417+ struct ttm_mem_reg mem;
27418+ uint32_t val_seq;
27419+ bool seq_valid;
27420+
27421+ struct list_head lru;
27422+ struct list_head ddestroy;
27423+ struct list_head swap;
27424+
27425+ struct file *persistant_swap_storage;
27426+
27427+ void (*destroy) (struct ttm_buffer_object *);
27428+
27429+ void *sync_obj_arg;
27430+ void *sync_obj;
27431+
27432+ uint32_t priv_flags;
27433+ wait_queue_head_t event_queue;
27434+ struct mutex mutex;
27435+ unsigned long num_pages;
27436+
27437+ struct ttm_tt *ttm;
27438+ struct rb_node vm_rb;
27439+ struct drm_mm_node *vm_node;
27440+ uint64_t addr_space_offset;
27441+
27442+ atomic_t cpu_writers;
27443+ atomic_t reserved;
27444+
27445+ size_t acc_size;
27446+};
27447+
27448+/**
27449+ * struct ttm_bo_kmap_obj
27450+ *
27451+ * @virtual: The current kernel virtual address.
27452+ * @page: The page when kmap'ing a single page.
27453+ * @bo_kmap_type: Type of bo_kmap.
27454+ *
27455+ * Object describing a kernel mapping. Since a TTM bo may be located
27456+ * in various memory types with various caching policies, the
27457+ * mapping can either be an ioremap, a vmap, a kmap or part of a
27458+ * premapped region.
27459+ */
27460+
27461+struct ttm_bo_kmap_obj {
27462+ void *virtual;
27463+ struct page *page;
27464+ enum {
27465+ ttm_bo_map_iomap,
27466+ ttm_bo_map_vmap,
27467+ ttm_bo_map_kmap,
27468+ ttm_bo_map_premapped,
27469+ } bo_kmap_type;
27470+};
27471+
27472+/**
27473+ * ttm_bo_reference - reference a struct ttm_buffer_object
27474+ *
27475+ * @bo: The buffer object.
27476+ *
27477+ * Returns a refcounted pointer to a buffer object.
27478+ */
27479+
 27480+static inline struct ttm_buffer_object *
 27481+ttm_bo_reference(struct ttm_buffer_object *bo)
27482+{
27483+ kref_get(&bo->kref);
27484+ return bo;
27485+}
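/*
 * A short hedged sketch of the get/put pairing: ttm_bo_unref() both drops
 * the refcount taken here and clears the caller's pointer.
 */
static void example_use_bo(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ref = ttm_bo_reference(bo);

	/* ... safe to use 'ref' while the extra refcount is held ... */

	ttm_bo_unref(&ref);	/* 'ref' is NULL afterwards */
}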
27486+
27487+/**
27488+ * ttm_bo_wait - wait for buffer idle.
27489+ *
27490+ * @bo: The buffer object.
27491+ * @interruptible: Use interruptible wait.
27492+ * @no_wait: Return immediately if buffer is busy.
27493+ *
27494+ * This function must be called with the bo::mutex held, and makes
27495+ * sure any previous rendering to the buffer is completed.
27496+ * Note: It might be necessary to block validations before the
27497+ * wait by reserving the buffer.
27498+ * Returns -EBUSY if no_wait is true and the buffer is busy.
27499+ * Returns -ERESTART if interrupted by a signal.
27500+ */
27501+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
27502+ bool interruptible, bool no_wait);
27503+/**
27504+ * ttm_buffer_object_validate
27505+ *
27506+ * @bo: The buffer object.
27507+ * @interruptible: Sleep interruptible if sleeping.
27508+ * @no_wait: Return immediately if the buffer is busy.
27509+ *
27510+ * Changes placement and caching policy of the buffer object
27511+ * according to bo::proposed_flags.
27512+ * Returns
27513+ * -EINVAL on invalid proposed_flags.
27514+ * -ENOMEM on out-of-memory condition.
27515+ * -EBUSY if no_wait is true and buffer busy.
27516+ * -ERESTART if interrupted by a signal.
27517+ */
27518+extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
27519+ bool interruptible, bool no_wait);
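/*
 * Validation consumes bo->proposed_flags; a hedged sketch of moving a
 * buffer to a new placement and retrying on signal interruption (which
 * TTM_PL_FLAG_* names are available here is an assumption):
 */
static int example_move_buffer(struct ttm_buffer_object *bo,
			       uint32_t placement)
{
	int ret;

	bo->proposed_flags = placement | TTM_PL_FLAG_CACHED;
	do {
		ret = ttm_buffer_object_validate(bo, true, false);
	} while (ret == -ERESTART);	/* interrupted by a signal: retry */
	return ret;
}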
27520+/**
27521+ * ttm_bo_unref
27522+ *
27523+ * @bo: The buffer object.
27524+ *
27525+ * Unreference and clear a pointer to a buffer object.
27526+ */
27527+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
27528+
27529+/**
27530+ * ttm_bo_synccpu_write_grab
27531+ *
27532+ * @bo: The buffer object:
27533+ * @no_wait: Return immediately if buffer is busy.
27534+ *
27535+ * Synchronizes a buffer object for CPU RW access. This means
27536+ * blocking command submission that affects the buffer and
27537+ * waiting for buffer idle. This lock is recursive.
27538+ * Returns
27539+ * -EBUSY if the buffer is busy and no_wait is true.
27540+ * -ERESTART if interrupted by a signal.
27541+ */
27542+
27543+extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
27544+/**
27545+ * ttm_bo_synccpu_write_release:
27546+ *
27547+ * @bo : The buffer object.
27548+ *
27549+ * Releases a synccpu lock.
27550+ */
27551+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
27552+
27553+/**
27554+ * ttm_buffer_object_init
27555+ *
27556+ * @bdev: Pointer to a ttm_bo_device struct.
27557+ * @bo: Pointer to a ttm_buffer_object to be initialized.
27558+ * @size: Requested size of buffer object.
27559+ * @type: Requested type of buffer object.
27560+ * @flags: Initial placement flags.
27561+ * @page_alignment: Data alignment in pages.
27562+ * @buffer_start: Virtual address of user space data backing a
27563+ * user buffer object.
27564+ * @interruptible: If needing to sleep to wait for GPU resources,
27565+ * sleep interruptible.
27566+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
27567+ * pinned in physical memory. If this behaviour is not desired, this member
 27568+ * holds a pointer to a persistent shmem object. Typically, this would
27569+ * point to the shmem object backing a GEM object if TTM is used to back a
27570+ * GEM user interface.
27571+ * @acc_size: Accounted size for this object.
27572+ * @destroy: Destroy function. Use NULL for kfree().
27573+ *
27574+ * This function initializes a pre-allocated struct ttm_buffer_object.
27575+ * As this object may be part of a larger structure, this function,
27576+ * together with the @destroy function,
27577+ * enables driver-specific objects derived from a ttm_buffer_object.
27578+ * On successful return, the object kref and list_kref are set to 1.
27579+ * Returns
27580+ * -ENOMEM: Out of memory.
27581+ * -EINVAL: Invalid placement flags.
27582+ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
27583+ */
27584+
27585+extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
27586+ struct ttm_buffer_object *bo,
27587+ unsigned long size,
27588+ enum ttm_bo_type type,
27589+ uint32_t flags,
27590+ uint32_t page_alignment,
27591+ unsigned long buffer_start,
 27592+				  bool interruptible,
27593+ struct file *persistant_swap_storage,
27594+ size_t acc_size,
27595+ void (*destroy) (struct ttm_buffer_object *));
27596+/**
 27597+ * ttm_buffer_object_create
27598+ *
27599+ * @bdev: Pointer to a ttm_bo_device struct.
27600+ * @bo: Pointer to a ttm_buffer_object to be initialized.
27601+ * @size: Requested size of buffer object.
27602+ * @type: Requested type of buffer object.
27603+ * @flags: Initial placement flags.
27604+ * @page_alignment: Data alignment in pages.
27605+ * @buffer_start: Virtual address of user space data backing a
27606+ * user buffer object.
27607+ * @interruptible: If needing to sleep while waiting for GPU resources,
27608+ * sleep interruptible.
27609+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
27610+ * pinned in physical memory. If this behaviour is not desired, this member
 27611+ * holds a pointer to a persistent shmem object. Typically, this would
27612+ * point to the shmem object backing a GEM object if TTM is used to back a
27613+ * GEM user interface.
27614+ * @p_bo: On successful completion *p_bo points to the created object.
27615+ *
27616+ * This function allocates a ttm_buffer_object, and then calls
27617+ * ttm_buffer_object_init on that object.
27618+ * The destroy function is set to kfree().
27619+ * Returns
27620+ * -ENOMEM: Out of memory.
27621+ * -EINVAL: Invalid placement flags.
27622+ * -ERESTART: Interrupted by signal while waiting for resources.
27623+ */
27624+
27625+extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
27626+ unsigned long size,
27627+ enum ttm_bo_type type,
27628+ uint32_t flags,
27629+ uint32_t page_alignment,
27630+ unsigned long buffer_start,
27631+ bool interruptible,
27632+ struct file *persistant_swap_storage,
27633+ struct ttm_buffer_object **p_bo);
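/*
 * A hedged sketch of creating a kernel-private buffer with the wrapper
 * declared above (the TTM_PL_FLAG_SYSTEM placement flag is assumed from
 * the TTM_PL_FLAG_* family):
 */
static int example_create_bo(struct ttm_bo_device *bdev,
			     struct ttm_buffer_object **p_bo)
{
	return ttm_buffer_object_create(bdev, 4 * PAGE_SIZE,
					ttm_bo_type_kernel,
					TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
					0,     /* page_alignment: default */
					0,     /* buffer_start: not a user bo */
					true,  /* interruptible */
					NULL,  /* no persistant swap storage */
					p_bo);
}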
27634+
27635+/**
27636+ * ttm_bo_check_placement
27637+ *
27638+ * @bo: the buffer object.
27639+ * @set_flags: placement flags to set.
27640+ * @clr_flags: placement flags to clear.
27641+ *
27642+ * Performs minimal validity checking on an intended change of
27643+ * placement flags.
27644+ * Returns
27645+ * -EINVAL: Intended change is invalid or not allowed.
27646+ */
27647+
27648+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
27649+ uint32_t set_flags, uint32_t clr_flags);
27650+
27651+/**
27652+ * ttm_bo_init_mm
27653+ *
27654+ * @bdev: Pointer to a ttm_bo_device struct.
27655+ * @mem_type: The memory type.
27656+ * @p_offset: offset for managed area in pages.
 27657+ * @p_size: size of the managed area in pages.
27658+ *
27659+ * Initialize a manager for a given memory type.
27660+ * Note: if this call is part of driver firstopen, it must be protected
27661+ * from a potentially racing lastclose.
27662+ * Returns:
27663+ * -EINVAL: invalid size or memory type.
27664+ * -ENOMEM: Not enough memory.
27665+ * May also return driver-specified errors.
27666+ */
27667+
27668+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
27669+ unsigned long p_offset, unsigned long p_size);
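
As a sketch, a driver would typically bring up its non-system managers at
load time; TTM_PL_TT and the aperture size below are driver assumptions:

	/* Offset and size are in pages. */
	ret = ttm_bo_init_mm(bdev, TTM_PL_TT, 0,
			     aperture_size >> PAGE_SHIFT);
	if (unlikely(ret != 0))
		goto out_err;
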
27670+/**
27671+ * ttm_bo_clean_mm
27672+ *
27673+ * @bdev: Pointer to a ttm_bo_device struct.
27674+ * @mem_type: The memory type.
27675+ *
27676+ * Take down a manager for a given memory type after first walking
27677+ * the LRU list to evict any buffers left alive.
27678+ *
27679+ * Normally, this function is part of lastclose() or unload(), and at that
27680+ * point there shouldn't be any buffers left created by user-space, since
27681+ * they should have been removed by the file descriptor release() method.
27682+ * However, before this function is run, make sure to signal all sync objects,
27683+ * and verify that the delayed delete queue is empty. The driver must also
27684+ * make sure that there are no NO_EVICT buffers present in this memory type
27685+ * when the call is made.
27686+ *
27687+ * If this function is part of a VT switch, the caller must make sure that
27688+ * there are no applications currently validating buffers before this
27689+ * function is called. The caller can do that by first taking the
27690+ * struct ttm_bo_device::ttm_lock in write mode.
27691+ *
27692+ * Returns:
27693+ * -EINVAL: invalid or uninitialized memory type.
27694+ * -EBUSY: There are still buffers left in this memory type.
27695+ */
27696+
27697+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
27698+
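A plausible unload-time sequence, assuming sync objects have already been
signaled and the delayed delete queue drained (TTM_PL_TT is a driver
assumption):

	ret = ttm_bo_clean_mm(bdev, TTM_PL_TT);
	if (ret == -EBUSY)
		printk(KERN_ERR "TT manager not clean; leaking it.\n");
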
27699+/**
27700+ * ttm_bo_evict_mm
27701+ *
27702+ * @bdev: Pointer to a ttm_bo_device struct.
27703+ * @mem_type: The memory type.
27704+ *
27705+ * Evicts all buffers on the lru list of the memory type.
27706+ * This is normally part of a VT switch or an
27707+ * out-of-memory-space-due-to-fragmentation handler.
27708+ * The caller must make sure that there are no other processes
27709+ * currently validating buffers, and can do that by taking the
27710+ * struct ttm_bo_device::ttm_lock in write mode.
27711+ *
27712+ * Returns:
27713+ * -EINVAL: Invalid or uninitialized memory type.
27714+ * -ERESTART: The call was interrupted by a signal while waiting to
27715+ * evict a buffer.
27716+ */
27717+
27718+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
27719+
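In a VT-switch path this could look like the sketch below; the caller is
assumed to already hold struct ttm_bo_device::ttm_lock in write mode, and
TTM_PL_VRAM is a driver assumption:

	ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
	if (ret == -ERESTART)
		return ret;	/* interrupted by a signal; may be retried */
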
27720+/**
27721+ * ttm_kmap_obj_virtual
27722+ *
27723+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
27724+ * @is_iomem: Pointer to a boolean that on return indicates true if the
27725+ * virtual map is io memory, false if normal memory.
27726+ *
27727+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
27728+ * If *is_iomem is true on return, the virtual address points to an io memory
27729+ * area that should be accessed strictly with iowriteXX() and similar functions.
27730+ */
27731+
27732+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
27733+ bool *is_iomem)
27734+{
27735+ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
27736+ map->bo_kmap_type == ttm_bo_map_premapped);
27737+ return map->virtual;
27738+}
27739+
27740+/**
27741+ * ttm_bo_kmap
27742+ *
27743+ * @bo: The buffer object.
27744+ * @start_page: The first page to map.
27745+ * @num_pages: Number of pages to map.
27746+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
27747+ *
27748+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
27749+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
27750+ * used to obtain a virtual address to the data.
27751+ *
27752+ * Returns:
27753+ * -ENOMEM: Out of memory.
27754+ * -EINVAL: Invalid range.
27755+ */
27756+
27757+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
27758+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
27759+
27760+/**
27761+ * ttm_bo_kunmap
27762+ *
27763+ * @map: Object describing the map to unmap.
27764+ *
27765+ * Unmaps a kernel map set up by ttm_bo_kmap.
27766+ */
27767+
27768+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
27769+
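A sketch of the map / access / unmap cycle; my_zero_first_page is a
hypothetical helper and the bo is assumed to be idle and safe to touch:

	static int my_zero_first_page(struct ttm_buffer_object *bo)
	{
		struct ttm_bo_kmap_obj map;
		bool is_iomem;
		void *virtual;
		int ret;

		ret = ttm_bo_kmap(bo, 0, 1, &map);
		if (unlikely(ret != 0))
			return ret;

		virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
		if (is_iomem)
			memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
		else
			memset(virtual, 0, PAGE_SIZE);

		ttm_bo_kunmap(&map);
		return 0;
	}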
27772+
27773+/**
27774+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
27775+ *
27776+ * @vma: vma as input from the fbdev mmap method.
27777+ * @bo: The bo backing the address space. The address space will
27778+ * have the same size as the bo, and start at offset 0.
27779+ *
27780+ * This function is intended to be called by the fbdev mmap method
27781+ * if the fbdev address space is to be backed by a bo.
27782+ */
27783+
27784+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
27785+ struct ttm_buffer_object *bo);
27786+
27787+/**
27788+ * ttm_bo_mmap - mmap out of the ttm device address space.
27789+ *
27790+ * @filp: filp as input from the mmap method.
27791+ * @vma: vma as input from the mmap method.
27792+ * @bdev: Pointer to the ttm_bo_device with the address space manager.
27793+ *
27794+ * This function is intended to be called by the device mmap method
27795+ * if the device address space is to be backed by the bo manager.
27796+ */
27797+
27798+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
27799+ struct ttm_bo_device *bdev);
27800+
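A sketch of such a device mmap method; struct my_dev_priv and the drm_file
plumbing are assumptions:

	static int my_drm_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		struct drm_file *file_priv = filp->private_data;
		struct my_dev_priv *dev_priv =
		    file_priv->minor->dev->dev_private;

		/* Hand offsets in the bo address space over to TTM. */
		return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
	}
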
27801+/**
27802+ * ttm_bo_io
27803+ *
27804+ * @bdev: Pointer to the struct ttm_bo_device.
27805+ * @filp: Pointer to the struct file attempting to read / write.
27806+ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
27807+ * @rbuf: User-space pointer to address of buffer to read into. Null on write.
27808+ * @count: Number of bytes to read / write.
27809+ * @f_pos: Pointer to current file position.
27810+ * @write: 1 for write, 0 for read.
27811+ *
27812+ * This function implements read / write into ttm buffer objects, and is
27813+ * intended to be called from the fops::read and fops::write methods.
27814+ * Returns:
27815+ * See man(2) write, man(2) read. In particular, the function may return -EINTR if
27816+ * interrupted by a signal.
27817+ */
27818+
27819+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
27820+ const char __user * wbuf, char __user * rbuf,
27821+ size_t count, loff_t * f_pos, bool write);
27822+
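The read-side fops wrapper can then be as thin as this sketch (my_dev_priv
is an assumption; note @wbuf is NULL and @write is false on the read path):

	static ssize_t my_drm_read(struct file *filp, char __user *buf,
				   size_t count, loff_t *f_pos)
	{
		struct drm_file *file_priv = filp->private_data;
		struct my_dev_priv *dev_priv =
		    file_priv->minor->dev->dev_private;

		return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf,
				 count, f_pos, false);
	}
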
27823+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
27824+
27825+#endif
27826diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo.c b/drivers/gpu/drm/psb/ttm/ttm_bo.c
27827--- a/drivers/gpu/drm/psb/ttm/ttm_bo.c 1969-12-31 16:00:00.000000000 -0800
27828+++ b/drivers/gpu/drm/psb/ttm/ttm_bo.c 2009-04-07 13:28:38.000000000 -0700
27829@@ -0,0 +1,1716 @@
27830+/**************************************************************************
27831+ *
27832+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
27833+ * All Rights Reserved.
27834+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
27835+ * All Rights Reserved.
27836+ *
27837+ * Permission is hereby granted, free of charge, to any person obtaining a
27838+ * copy of this software and associated documentation files (the
27839+ * "Software"), to deal in the Software without restriction, including
27840+ * without limitation the rights to use, copy, modify, merge, publish,
27841+ * distribute, sub license, and/or sell copies of the Software, and to
27842+ * permit persons to whom the Software is furnished to do so, subject to
27843+ * the following conditions:
27844+ *
27845+ * The above copyright notice and this permission notice (including the
27846+ * next paragraph) shall be included in all copies or substantial portions
27847+ * of the Software.
27848+ *
27849+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27850+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27851+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
27852+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27853+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
27854+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
27855+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
27856+ *
27857+ **************************************************************************/
27858+/*
27859+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
27860+ */
27861+
27862+#include "ttm/ttm_bo_driver.h"
27863+#include "ttm/ttm_placement_common.h"
27864+#include <linux/jiffies.h>
27865+#include <linux/slab.h>
27866+#include <linux/sched.h>
27867+#include <linux/mm.h>
27868+#include <linux/file.h>
27869+
27870+#define TTM_ASSERT_LOCKED(param)
27871+#define TTM_DEBUG(fmt, arg...)
27872+#define TTM_BO_HASH_ORDER 13
27873+
27874+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
27875+static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
27876+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
27877+
27878+static inline uint32_t ttm_bo_type_flags(unsigned type)
27879+{
27880+ return (1 << (type));
27881+}
27882+
27883+static void ttm_bo_release_list(struct kref *list_kref)
27884+{
27885+ struct ttm_buffer_object *bo =
27886+ container_of(list_kref, struct ttm_buffer_object, list_kref);
27887+ struct ttm_bo_device *bdev = bo->bdev;
27888+
27889+ BUG_ON(atomic_read(&bo->list_kref.refcount));
27890+ BUG_ON(atomic_read(&bo->kref.refcount));
27891+ BUG_ON(atomic_read(&bo->cpu_writers));
27892+ BUG_ON(bo->sync_obj != NULL);
27893+ BUG_ON(bo->mem.mm_node != NULL);
27894+ BUG_ON(!list_empty(&bo->lru));
27895+ BUG_ON(!list_empty(&bo->ddestroy));
27896+
27897+ if (bo->ttm)
27898+ ttm_tt_destroy(bo->ttm);
27899+ if (bo->destroy)
27900+ bo->destroy(bo);
27901+ else {
27902+ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
27903+ kfree(bo);
27904+ }
27905+}
27906+
27907+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
27908+{
27909+
27910+ if (interruptible) {
27911+ int ret = 0;
27912+
27913+ ret = wait_event_interruptible(bo->event_queue,
27914+ atomic_read(&bo->reserved) == 0);
27915+ if (unlikely(ret != 0))
27916+ return -ERESTART;
27917+ } else {
27918+ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
27919+ }
27920+ return 0;
27921+}
27922+
27923+static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
27924+{
27925+ struct ttm_bo_device *bdev = bo->bdev;
27926+ struct ttm_mem_type_manager *man;
27927+
27928+ BUG_ON(!atomic_read(&bo->reserved));
27929+
27930+ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
27931+
27932+ BUG_ON(!list_empty(&bo->lru));
27933+
27934+ man = &bdev->man[bo->mem.mem_type];
27935+ list_add_tail(&bo->lru, &man->lru);
27936+ kref_get(&bo->list_kref);
27937+
27938+ if (bo->ttm != NULL) {
27939+ list_add_tail(&bo->swap, &bdev->swap_lru);
27940+ kref_get(&bo->list_kref);
27941+ }
27942+ }
27943+}
27944+
27945+/*
27946+ * Call with bdev->lru_lock and bdev->global->swap_lock held..
27947+ */
27948+
27949+static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
27950+{
27951+ int put_count = 0;
27952+
27953+ if (!list_empty(&bo->swap)) {
27954+ list_del_init(&bo->swap);
27955+ ++put_count;
27956+ }
27957+ if (!list_empty(&bo->lru)) {
27958+ list_del_init(&bo->lru);
27959+ ++put_count;
27960+ }
27961+
27962+ /*
27963+ * TODO: Add a driver hook to delete from
27964+ * driver-specific LRU's here.
27965+ */
27966+
27967+ return put_count;
27968+}
27969+
27970+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
27971+ bool interruptible,
27972+ bool no_wait, bool use_sequence, uint32_t sequence)
27973+{
27974+ struct ttm_bo_device *bdev = bo->bdev;
27975+ int ret;
27976+
27977+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
27978+ if (use_sequence && bo->seq_valid &&
27979+ (sequence - bo->val_seq < (1 << 31))) {
27980+ return -EAGAIN;
27981+ }
27982+
27983+ if (no_wait)
27984+ return -EBUSY;
27985+
27986+ spin_unlock(&bdev->lru_lock);
27987+ ret = ttm_bo_wait_unreserved(bo, interruptible);
27988+ spin_lock(&bdev->lru_lock);
27989+
27990+ if (unlikely(ret))
27991+ return ret;
27992+ }
27993+
27994+ if (use_sequence) {
27995+ bo->val_seq = sequence;
27996+ bo->seq_valid = true;
27997+ } else {
27998+ bo->seq_valid = false;
27999+ }
28000+
28001+ return 0;
28002+}
28003+
28004+static void ttm_bo_ref_bug(struct kref *list_kref)
28005+{
28006+ BUG();
28007+}
28008+
28009+int ttm_bo_reserve(struct ttm_buffer_object *bo,
28010+ bool interruptible,
28011+ bool no_wait, bool use_sequence, uint32_t sequence)
28012+{
28013+ struct ttm_bo_device *bdev = bo->bdev;
28014+ int put_count = 0;
28015+ int ret;
28016+
28017+ spin_lock(&bdev->lru_lock);
28018+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
28019+ sequence);
28020+ if (likely(ret == 0))
28021+ put_count = ttm_bo_del_from_lru(bo);
28022+ spin_unlock(&bdev->lru_lock);
28023+
28024+ while (put_count--)
28025+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
28026+
28027+ return ret;
28028+}
28029+
28030+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
28031+{
28032+ struct ttm_bo_device *bdev = bo->bdev;
28033+
28034+ spin_lock(&bdev->lru_lock);
28035+ ttm_bo_add_to_lru(bo);
28036+ atomic_set(&bo->reserved, 0);
28037+ wake_up_all(&bo->event_queue);
28038+ spin_unlock(&bdev->lru_lock);
28039+}
28040+
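The typical locking bracket around buffer manipulation, as a sketch (no
sequence number, interruptible wait):

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTART if interrupted by a signal */

	/*
	 * bo is now reserved and off the LRU lists: safe against
	 * concurrent validation and eviction until unreserved.
	 */

	ttm_bo_unreserve(bo);	/* puts bo back on the LRU lists */
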
28041+/*
28042+ * Call bo->mutex locked.
28043+ */
28044+
28045+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
28046+{
28047+ struct ttm_bo_device *bdev = bo->bdev;
28048+ int ret = 0;
28049+ uint32_t page_flags = 0;
28050+
28051+ TTM_ASSERT_LOCKED(&bo->mutex);
28052+ bo->ttm = NULL;
28053+
28054+ switch (bo->type) {
28055+ case ttm_bo_type_device:
28056+ case ttm_bo_type_kernel:
28057+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
28058+ page_flags, bdev->dummy_read_page);
28059+ if (unlikely(bo->ttm == NULL))
28060+ ret = -ENOMEM;
28061+ break;
28062+ case ttm_bo_type_user:
28063+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
28064+ page_flags | TTM_PAGE_FLAG_USER,
28065+ bdev->dummy_read_page);
28066+ if (unlikely(bo->ttm == NULL)) {
28067+ ret = -ENOMEM;
28068+ break;
28069+ }
28070+ ret = ttm_tt_set_user(bo->ttm, current,
28071+ bo->buffer_start, bo->num_pages);
28072+ if (unlikely(ret != 0))
28073+ ttm_tt_destroy(bo->ttm);
28074+ break;
28075+ default:
28076+ printk(KERN_ERR "Illegal buffer object type\n");
28077+ ret = -EINVAL;
28078+ break;
28079+ }
28080+
28081+ return ret;
28082+}
28083+
28084+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
28085+ struct ttm_mem_reg *mem,
28086+ bool evict, bool interruptible, bool no_wait)
28087+{
28088+ struct ttm_bo_device *bdev = bo->bdev;
28089+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
28090+ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
28091+ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
28092+ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
28093+ int ret = 0;
28094+
28095+ if (old_is_pci || new_is_pci ||
28096+ ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
28097+ ttm_bo_unmap_virtual(bo);
28098+
28099+ /*
28100+ * Create and bind a ttm if required.
28101+ */
28102+
28103+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
28104+ ret = ttm_bo_add_ttm(bo);
28105+ if (ret)
28106+ goto out_err;
28107+
28108+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags);
28109+ if (ret)
28110+ return ret;
28111+
28112+ if (mem->mem_type != TTM_PL_SYSTEM) {
28113+ ret = ttm_tt_bind(bo->ttm, mem);
28114+ if (ret)
28115+ goto out_err;
28116+ }
28117+
28118+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
28119+
28120+ struct ttm_mem_reg *old_mem = &bo->mem;
28121+ uint32_t save_flags = old_mem->flags;
28122+ uint32_t save_proposed_flags = old_mem->proposed_flags;
28123+
28124+ *old_mem = *mem;
28125+ mem->mm_node = NULL;
28126+ old_mem->proposed_flags = save_proposed_flags;
28127+ ttm_flag_masked(&save_flags, mem->flags,
28128+ TTM_PL_MASK_MEMTYPE);
28129+ goto moved;
28130+ }
28131+
28132+ }
28133+
28134+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
28135+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
28136+ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
28137+ else if (bdev->driver->move)
28138+ ret = bdev->driver->move(bo, evict, interruptible,
28139+ no_wait, mem);
28140+ else
28141+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
28142+
28143+ if (ret)
28144+ goto out_err;
28145+
28146+ moved:
28147+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) {
28148+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags);
28149+ if (ret)
28150+ printk(KERN_ERR "Can not flush read caches\n");
28151+ }
28152+
28153+ ttm_flag_masked(&bo->priv_flags,
28154+ (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0,
28155+ TTM_BO_PRIV_FLAG_EVICTED);
28156+
28157+ if (bo->mem.mm_node)
28158+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
28159+ bdev->man[bo->mem.mem_type].gpu_offset;
28160+
28161+ return 0;
28162+
28163+ out_err:
28164+ new_man = &bdev->man[bo->mem.mem_type];
28165+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
28166+ ttm_tt_unbind(bo->ttm);
28167+ ttm_tt_destroy(bo->ttm);
28168+ bo->ttm = NULL;
28169+ }
28170+
28171+ return ret;
28172+}
28173+
28174+static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
28175+ bool allow_errors)
28176+{
28177+ struct ttm_bo_device *bdev = bo->bdev;
28178+ struct ttm_bo_driver *driver = bdev->driver;
28179+
28180+ if (bo->sync_obj) {
28181+ if (bdev->nice_mode) {
28182+ unsigned long _end = jiffies + 3 * HZ;
28183+ int ret;
28184+ do {
28185+ ret = ttm_bo_wait(bo, false, false, false);
28186+ if (ret && allow_errors)
28187+ return ret;
28188+
28189+ } while (ret && !time_after_eq(jiffies, _end));
28190+
28191+ if (bo->sync_obj) {
28192+ bdev->nice_mode = false;
28193+ printk(KERN_ERR "Detected probable GPU lockup. "
28194+ "Evicting buffer.\n");
28195+ }
28196+ }
28197+ if (bo->sync_obj) {
28198+ driver->sync_obj_unref(&bo->sync_obj);
28199+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
28200+ }
28201+ }
28202+ return 0;
28203+}
28204+
28205+/**
28206+ * If bo idle, remove from delayed- and lru lists, and unref.
28207+ * If not idle, and already on delayed list, do nothing.
28208+ * If not idle, and not on delayed list, put on delayed list,
28209+ * up the list_kref and schedule a delayed list check.
28210+ */
28211+
28212+static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
28213+{
28214+ struct ttm_bo_device *bdev = bo->bdev;
28215+ struct ttm_bo_driver *driver = bdev->driver;
28216+
28217+ mutex_lock(&bo->mutex);
28218+
28219+ if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
28220+ bo->sync_obj_arg)) {
28221+ driver->sync_obj_unref(&bo->sync_obj);
28222+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
28223+ }
28224+
28225+ if (bo->sync_obj && remove_all)
28226+ (void)ttm_bo_expire_sync_obj(bo, false);
28227+
28228+ if (!bo->sync_obj) {
28229+ int put_count;
28230+
28231+ if (bo->ttm)
28232+ ttm_tt_unbind(bo->ttm);
28233+ spin_lock(&bdev->lru_lock);
28234+ if (!list_empty(&bo->ddestroy)) {
28235+ list_del_init(&bo->ddestroy);
28236+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
28237+ }
28238+ if (bo->mem.mm_node) {
28239+ drm_mm_put_block(bo->mem.mm_node);
28240+ bo->mem.mm_node = NULL;
28241+ }
28242+ put_count = ttm_bo_del_from_lru(bo);
28243+ spin_unlock(&bdev->lru_lock);
28244+ mutex_unlock(&bo->mutex);
28245+ while (put_count--)
28246+ kref_put(&bo->list_kref, ttm_bo_release_list);
28247+
28248+ return;
28249+ }
28250+
28251+ spin_lock(&bdev->lru_lock);
28252+ if (list_empty(&bo->ddestroy)) {
28253+ spin_unlock(&bdev->lru_lock);
28254+ driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
28255+ spin_lock(&bdev->lru_lock);
28256+ if (list_empty(&bo->ddestroy)) {
28257+ kref_get(&bo->list_kref);
28258+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
28259+ }
28260+ spin_unlock(&bdev->lru_lock);
28261+ schedule_delayed_work(&bdev->wq,
28262+ ((HZ / 100) < 1) ? 1 : HZ / 100);
28263+ } else
28264+ spin_unlock(&bdev->lru_lock);
28265+
28266+ mutex_unlock(&bo->mutex);
28267+ return;
28268+}
28269+
28270+/**
28271+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
28272+ * encountered buffers.
28273+ */
28274+
28275+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
28276+{
28277+ struct ttm_buffer_object *entry, *nentry;
28278+ struct list_head *list, *next;
28279+ int ret;
28280+
28281+ spin_lock(&bdev->lru_lock);
28282+ list_for_each_safe(list, next, &bdev->ddestroy) {
28283+ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
28284+ nentry = NULL;
28285+
28286+ /*
28287+ * Protect the next list entry from destruction while we
28288+ * unlock the lru_lock.
28289+ */
28290+
28291+ if (next != &bdev->ddestroy) {
28292+ nentry = list_entry(next, struct ttm_buffer_object,
28293+ ddestroy);
28294+ kref_get(&nentry->list_kref);
28295+ }
28296+ kref_get(&entry->list_kref);
28297+
28298+ spin_unlock(&bdev->lru_lock);
28299+ ttm_bo_cleanup_refs(entry, remove_all);
28300+ kref_put(&entry->list_kref, ttm_bo_release_list);
28301+ spin_lock(&bdev->lru_lock);
28302+
28303+ if (nentry) {
28304+ bool next_onlist = !list_empty(next);
28305+ kref_put(&nentry->list_kref, ttm_bo_release_list);
28306+
28307+ /*
28308+ * Someone might have raced us and removed the
28309+ * next entry from the list. We don't bother restarting
28310+ * list traversal.
28311+ */
28312+
28313+ if (!next_onlist)
28314+ break;
28315+ }
28316+ }
28317+ ret = !list_empty(&bdev->ddestroy);
28318+ spin_unlock(&bdev->lru_lock);
28319+
28320+ return ret;
28321+}
28322+
28323+static void ttm_bo_delayed_workqueue(struct work_struct *work)
28324+{
28325+ struct ttm_bo_device *bdev =
28326+ container_of(work, struct ttm_bo_device, wq.work);
28327+
28328+ if (ttm_bo_delayed_delete(bdev, false)) {
28329+ schedule_delayed_work(&bdev->wq,
28330+ ((HZ / 100) < 1) ? 1 : HZ / 100);
28331+ }
28332+}
28333+
28334+static void ttm_bo_release(struct kref *kref)
28335+{
28336+ struct ttm_buffer_object *bo =
28337+ container_of(kref, struct ttm_buffer_object, kref);
28338+ struct ttm_bo_device *bdev = bo->bdev;
28339+
28340+ if (likely(bo->vm_node != NULL)) {
28341+ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
28342+ drm_mm_put_block(bo->vm_node);
28343+ }
28344+ write_unlock(&bdev->vm_lock);
28345+ ttm_bo_cleanup_refs(bo, false);
28346+ kref_put(&bo->list_kref, ttm_bo_release_list);
28347+ write_lock(&bdev->vm_lock);
28348+}
28349+
28350+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
28351+{
28352+ struct ttm_buffer_object *bo = *p_bo;
28353+ struct ttm_bo_device *bdev = bo->bdev;
28354+
28355+ *p_bo = NULL;
28356+ write_lock(&bdev->vm_lock);
28357+ kref_put(&bo->kref, ttm_bo_release);
28358+ write_unlock(&bdev->vm_lock);
28359+}
28360+
28361+static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
28362+ bool interruptible, bool no_wait)
28363+{
28364+ int ret = 0;
28365+ struct ttm_bo_device *bdev = bo->bdev;
28366+ struct ttm_mem_reg evict_mem;
28367+
28368+ if (bo->mem.mem_type != mem_type)
28369+ goto out;
28370+
28371+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
28372+ if (ret && ret != -ERESTART) {
28373+ printk(KERN_ERR "Failed to expire sync object before "
28374+ "buffer eviction.\n");
28375+ goto out;
28376+ }
28377+
28378+ BUG_ON(!atomic_read(&bo->reserved));
28379+
28380+ evict_mem = bo->mem;
28381+ evict_mem.mm_node = NULL;
28382+
28383+ evict_mem.proposed_flags = bdev->driver->evict_flags(bo);
28384+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
28385+
28386+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
28387+ if (unlikely(ret != 0 && ret != -ERESTART)) {
28388+ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM;
28389+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
28390+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
28391+ }
28392+
28393+ if (ret) {
28394+ if (ret != -ERESTART)
28395+ printk(KERN_ERR "Failed to find memory space for "
28396+ "buffer 0x%p eviction.\n", bo);
28397+ goto out;
28398+ }
28399+
28400+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait);
28401+ if (ret) {
28402+ if (ret != -ERESTART)
28403+ printk(KERN_ERR "Buffer eviction failed\n");
28404+ goto out;
28405+ }
28406+
28407+ spin_lock(&bdev->lru_lock);
28408+ if (evict_mem.mm_node) {
28409+ drm_mm_put_block(evict_mem.mm_node);
28410+ evict_mem.mm_node = NULL;
28411+ }
28412+ spin_unlock(&bdev->lru_lock);
28413+
28414+ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED,
28415+ TTM_BO_PRIV_FLAG_EVICTED);
28416+
28417+ out:
28418+ return ret;
28419+}
28420+
28421+/**
28422+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
28423+ * space, or we've evicted everything and there isn't enough space.
28424+ */
28425+static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
28426+ struct ttm_mem_reg *mem,
28427+ uint32_t mem_type,
28428+ bool interruptible, bool no_wait)
28429+{
28430+ struct drm_mm_node *node;
28431+ struct ttm_buffer_object *entry;
28432+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
28433+ struct list_head *lru;
28434+ unsigned long num_pages = mem->num_pages;
28435+ int put_count = 0;
28436+ int ret;
28437+
28438+ retry_pre_get:
28439+ ret = drm_mm_pre_get(&man->manager);
28440+ if (unlikely(ret != 0))
28441+ return ret;
28442+
28443+ spin_lock(&bdev->lru_lock);
28444+ do {
28445+ node = drm_mm_search_free(&man->manager, num_pages,
28446+ mem->page_alignment, 1);
28447+ if (node)
28448+ break;
28449+
28450+ lru = &man->lru;
28451+ if (list_empty(lru))
28452+ break;
28453+
28454+ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
28455+ kref_get(&entry->list_kref);
28456+
28457+ ret =
28458+ ttm_bo_reserve_locked(entry, interruptible, no_wait, false, 0);
28459+
28460+ if (likely(ret == 0))
28461+ put_count = ttm_bo_del_from_lru(entry);
28462+
28463+ spin_unlock(&bdev->lru_lock);
28464+
28465+ if (unlikely(ret != 0))
28466+ return ret;
28467+
28468+ while (put_count--)
28469+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
28470+
28471+ mutex_lock(&entry->mutex);
28472+ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
28473+ mutex_unlock(&entry->mutex);
28474+
28475+ ttm_bo_unreserve(entry);
28476+
28477+ kref_put(&entry->list_kref, ttm_bo_release_list);
28478+ if (ret)
28479+ return ret;
28480+
28481+ spin_lock(&bdev->lru_lock);
28482+ } while (1);
28483+
28484+ if (!node) {
28485+ spin_unlock(&bdev->lru_lock);
28486+ return -ENOMEM;
28487+ }
28488+
28489+ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
28490+ if (unlikely(!node)) {
28491+ spin_unlock(&bdev->lru_lock);
28492+ goto retry_pre_get;
28493+ }
28494+
28495+ spin_unlock(&bdev->lru_lock);
28496+ mem->mm_node = node;
28497+ mem->mem_type = mem_type;
28498+ return 0;
28499+}
28500+
28501+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
28502+ bool disallow_fixed,
28503+ uint32_t mem_type,
28504+ uint32_t mask, uint32_t * res_mask)
28505+{
28506+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
28507+
28508+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
28509+ return false;
28510+
28511+ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
28512+ return false;
28513+
28514+ if ((mask & man->available_caching) == 0)
28515+ return false;
28516+ if (mask & man->default_caching)
28517+ cur_flags |= man->default_caching;
28518+ else if (mask & TTM_PL_FLAG_CACHED)
28519+ cur_flags |= TTM_PL_FLAG_CACHED;
28520+ else if (mask & TTM_PL_FLAG_WC)
28521+ cur_flags |= TTM_PL_FLAG_WC;
28522+ else
28523+ cur_flags |= TTM_PL_FLAG_UNCACHED;
28524+
28525+ *res_mask = cur_flags;
28526+ return true;
28527+}
28528+
28529+/**
28530+ * Creates space for memory region @mem according to its type.
28531+ *
28532+ * This function first searches for free space in compatible memory types in
28533+ * the priority order defined by the driver. If free space isn't found, then
28534+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
28535+ * space.
28536+ */
28537+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
28538+ struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
28539+{
28540+ struct ttm_bo_device *bdev = bo->bdev;
28541+ struct ttm_mem_type_manager *man;
28542+
28543+ uint32_t num_prios = bdev->driver->num_mem_type_prio;
28544+ const uint32_t *prios = bdev->driver->mem_type_prio;
28545+ uint32_t i;
28546+ uint32_t mem_type = TTM_PL_SYSTEM;
28547+ uint32_t cur_flags = 0;
28548+ bool type_found = false;
28549+ bool type_ok = false;
28550+ bool has_eagain = false;
28551+ struct drm_mm_node *node = NULL;
28552+ int ret;
28553+
28554+ mem->mm_node = NULL;
28555+ for (i = 0; i < num_prios; ++i) {
28556+ mem_type = prios[i];
28557+ man = &bdev->man[mem_type];
28558+
28559+ type_ok = ttm_bo_mt_compatible(man,
28560+ bo->type == ttm_bo_type_user,
28561+ mem_type, mem->proposed_flags,
28562+ &cur_flags);
28563+
28564+ if (!type_ok)
28565+ continue;
28566+
28567+ if (mem_type == TTM_PL_SYSTEM)
28568+ break;
28569+
28570+ if (man->has_type && man->use_type) {
28571+ type_found = true;
28572+ do {
28573+ ret = drm_mm_pre_get(&man->manager);
28574+ if (unlikely(ret))
28575+ return ret;
28576+
28577+ spin_lock(&bdev->lru_lock);
28578+ node = drm_mm_search_free(&man->manager,
28579+ mem->num_pages,
28580+ mem->page_alignment,
28581+ 1);
28582+ if (unlikely(!node)) {
28583+ spin_unlock(&bdev->lru_lock);
28584+ break;
28585+ }
28586+ node = drm_mm_get_block_atomic(node,
28587+ mem->num_pages,
28588+ mem->page_alignment);
28590+ spin_unlock(&bdev->lru_lock);
28591+ } while (!node);
28592+ }
28593+ if (node)
28594+ break;
28595+ }
28596+
28597+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
28598+ mem->mm_node = node;
28599+ mem->mem_type = mem_type;
28600+ mem->flags = cur_flags;
28601+ return 0;
28602+ }
28603+
28604+ if (!type_found)
28605+ return -EINVAL;
28606+
28607+ num_prios = bdev->driver->num_mem_busy_prio;
28608+ prios = bdev->driver->mem_busy_prio;
28609+
28610+ for (i = 0; i < num_prios; ++i) {
28611+ mem_type = prios[i];
28612+ man = &bdev->man[mem_type];
28613+
28614+ if (!man->has_type)
28615+ continue;
28616+
28617+ if (!ttm_bo_mt_compatible(man,
28618+ bo->type == ttm_bo_type_user,
28619+ mem_type,
28620+ mem->proposed_flags, &cur_flags))
28621+ continue;
28622+
28623+ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
28624+ interruptible, no_wait);
28625+
28626+ if (ret == 0 && mem->mm_node) {
28627+ mem->flags = cur_flags;
28628+ return 0;
28629+ }
28630+
28631+ if (ret == -ERESTART)
28632+ has_eagain = true;
28633+ }
28634+
28635+ ret = (has_eagain) ? -ERESTART : -ENOMEM;
28636+ return ret;
28637+}
28638+
28639+/*
28640+ * Call bo->mutex locked.
28641+ * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
28642+ */
28643+
28644+static int ttm_bo_busy(struct ttm_buffer_object *bo)
28645+{
28646+ void *sync_obj = bo->sync_obj;
28647+ struct ttm_bo_driver *driver = bo->bdev->driver;
28648+
28649+ if (sync_obj) {
28650+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
28651+ driver->sync_obj_unref(&bo->sync_obj);
28652+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
28653+ return 0;
28654+ }
28655+ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
28656+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
28657+ driver->sync_obj_unref(&bo->sync_obj);
28658+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
28659+ return 0;
28660+ }
28661+ return 1;
28662+ }
28663+ return 0;
28664+}
28665+
28666+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
28667+{
28668+ int ret = 0;
28669+
28670+ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
28671+ return -EBUSY;
28672+
28673+ ret = wait_event_interruptible(bo->event_queue,
28674+ atomic_read(&bo->cpu_writers) == 0);
28675+
28676+ if (ret == -ERESTARTSYS)
28677+ ret = -ERESTART;
28678+
28679+ return ret;
28680+}
28681+
28682+/*
28683+ * bo->mutex locked.
28684+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
28685+ */
28686+
28687+int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
28688+ bool interruptible, bool no_wait)
28689+{
28690+ struct ttm_bo_device *bdev = bo->bdev;
28691+ int ret = 0;
28692+ struct ttm_mem_reg mem;
28693+
28694+ BUG_ON(!atomic_read(&bo->reserved));
28695+
28696+ /*
28697+ * FIXME: It's possible to pipeline buffer moves.
28698+ * Have the driver move function wait for idle when necessary,
28699+ * instead of doing it here.
28700+ */
28701+
28702+ ttm_bo_busy(bo);
28703+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
28704+ if (ret)
28705+ return ret;
28706+
28707+ mem.num_pages = bo->num_pages;
28708+ mem.size = mem.num_pages << PAGE_SHIFT;
28709+ mem.proposed_flags = new_mem_flags;
28710+ mem.page_alignment = bo->mem.page_alignment;
28711+
28712+ /*
28713+ * Determine where to move the buffer.
28714+ */
28715+
28716+ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
28717+ if (ret)
28718+ goto out_unlock;
28719+
28720+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
28721+
28722+ out_unlock:
28723+ if (ret && mem.mm_node) {
28724+ spin_lock(&bdev->lru_lock);
28725+ drm_mm_put_block(mem.mm_node);
28726+ spin_unlock(&bdev->lru_lock);
28727+ }
28728+ return ret;
28729+}
28730+
28731+static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
28732+{
28733+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0)
28734+ return 0;
28735+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0)
28736+ return 0;
28737+
28738+ return 1;
28739+}
28740+
28741+int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
28742+ bool interruptible, bool no_wait)
28743+{
28744+ int ret;
28745+
28746+ BUG_ON(!atomic_read(&bo->reserved));
28747+ bo->mem.proposed_flags = bo->proposed_flags;
28748+
28749+ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n",
28750+ (unsigned long)bo->mem.proposed_flags,
28751+ (unsigned long)bo->mem.flags);
28752+
28753+ /*
28754+ * Check whether we need to move buffer.
28755+ */
28756+
28757+ if (!ttm_bo_mem_compat(&bo->mem)) {
28758+ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags,
28759+ interruptible, no_wait);
28760+ if (ret) {
28761+ if (ret != -ERESTART)
28762+ printk(KERN_ERR "Failed moving buffer. "
28763+ "Proposed placement 0x%08x\n",
28764+ bo->mem.proposed_flags);
28765+ if (ret == -ENOMEM)
28766+ printk(KERN_ERR "Out of aperture space or "
28767+ "DRM memory quota.\n");
28768+ return ret;
28769+ }
28770+ }
28771+
28772+ /*
28773+ * We might need to add a TTM.
28774+ */
28775+
28776+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
28777+ ret = ttm_bo_add_ttm(bo);
28778+ if (ret)
28779+ return ret;
28780+ }
28781+ /*
28782+ * Validation has succeeded, move the access and other
28783+ * non-mapping-related flag bits from the proposed flags to
28784+ * the active flags
28785+ */
28786+
28787+ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags,
28788+ ~TTM_PL_MASK_MEMTYPE);
28789+
28790+ return 0;
28791+}
28792+
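Putting the pieces together, a minimal sketch of moving a buffer back to
cached system memory by re-validating it under reservation:

	static int my_bo_move_to_system(struct ttm_buffer_object *bo)
	{
		int ret;

		ret = ttm_bo_reserve(bo, true, false, false, 0);
		if (unlikely(ret != 0))
			return ret;

		mutex_lock(&bo->mutex);
		bo->proposed_flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		ret = ttm_buffer_object_validate(bo, true, false);
		mutex_unlock(&bo->mutex);

		ttm_bo_unreserve(bo);
		return ret;
	}
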
28793+int
28794+ttm_bo_check_placement(struct ttm_buffer_object *bo,
28795+ uint32_t set_flags, uint32_t clr_flags)
28796+{
28797+ uint32_t new_mask = set_flags | clr_flags;
28798+
28799+ if ((bo->type == ttm_bo_type_user) && (clr_flags & TTM_PL_FLAG_CACHED)) {
28800+ printk(KERN_ERR
28801+ "User buffers require cache-coherent memory.\n");
28802+ return -EINVAL;
28803+ }
28804+
28805+ if (!capable(CAP_SYS_ADMIN)) {
28806+ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
28807+ printk(KERN_ERR "Need to be root to modify"
28808+ " NO_EVICT status.\n");
28809+ return -EINVAL;
28810+ }
28811+
28812+ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) &&
28813+ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
28814+ printk(KERN_ERR "Incompatible memory specification"
28815+ " for NO_EVICT buffer.\n");
28816+ return -EINVAL;
28817+ }
28818+ }
28819+ return 0;
28820+}
28821+
28822+int ttm_buffer_object_init(struct ttm_bo_device *bdev,
28823+ struct ttm_buffer_object *bo,
28824+ unsigned long size,
28825+ enum ttm_bo_type type,
28826+ uint32_t flags,
28827+ uint32_t page_alignment,
28828+ unsigned long buffer_start,
28829+ bool interruptible,
28830+ struct file *persistant_swap_storage,
28831+ size_t acc_size,
28832+ void (*destroy) (struct ttm_buffer_object *))
28833+{
28834+ int ret = 0;
28835+ unsigned long num_pages;
28836+
28837+ size += buffer_start & ~PAGE_MASK;
28838+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
28839+ if (num_pages == 0) {
28840+ printk(KERN_ERR "Illegal buffer object size.\n");
28841+ return -EINVAL;
28842+ }
28843+ bo->destroy = destroy;
28844+
28845+ mutex_init(&bo->mutex);
28846+ mutex_lock(&bo->mutex);
28847+ kref_init(&bo->kref);
28848+ kref_init(&bo->list_kref);
28849+ atomic_set(&bo->cpu_writers, 0);
28850+ atomic_set(&bo->reserved, 1);
28851+ init_waitqueue_head(&bo->event_queue);
28852+ INIT_LIST_HEAD(&bo->lru);
28853+ INIT_LIST_HEAD(&bo->ddestroy);
28854+ INIT_LIST_HEAD(&bo->swap);
28855+ bo->bdev = bdev;
28856+ bo->type = type;
28857+ bo->num_pages = num_pages;
28858+ bo->mem.mem_type = TTM_PL_SYSTEM;
28859+ bo->mem.num_pages = bo->num_pages;
28860+ bo->mem.mm_node = NULL;
28861+ bo->mem.page_alignment = page_alignment;
28862+ bo->buffer_start = buffer_start & PAGE_MASK;
28863+ bo->priv_flags = 0;
28864+ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
28865+ bo->seq_valid = false;
28866+ bo->persistant_swap_storage = persistant_swap_storage;
28867+ bo->acc_size = acc_size;
28868+
28869+ ret = ttm_bo_check_placement(bo, flags, 0ULL);
28870+ if (unlikely(ret != 0))
28871+ goto out_err;
28872+
28873+ /*
28874+ * If no caching attributes are set, accept any form of caching.
28875+ */
28876+
28877+ if ((flags & TTM_PL_MASK_CACHING) == 0)
28878+ flags |= TTM_PL_MASK_CACHING;
28879+
28880+ bo->proposed_flags = flags;
28881+ bo->mem.proposed_flags = flags;
28882+
28883+ /*
28884+ * For ttm_bo_type_device buffers, allocate
28885+ * address space from the device.
28886+ */
28887+
28888+ if (bo->type == ttm_bo_type_device) {
28889+ ret = ttm_bo_setup_vm(bo);
28890+ if (ret)
28891+ goto out_err;
28892+ }
28893+
28894+ ret = ttm_buffer_object_validate(bo, interruptible, false);
28895+ if (ret)
28896+ goto out_err;
28897+
28898+ mutex_unlock(&bo->mutex);
28899+ ttm_bo_unreserve(bo);
28900+ return 0;
28901+
28902+ out_err:
28903+ mutex_unlock(&bo->mutex);
28904+ ttm_bo_unreserve(bo);
28905+ ttm_bo_unref(&bo);
28906+
28907+ return ret;
28908+}
28909+
28910+static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
28911+ unsigned long num_pages)
28912+{
28913+ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
28914+ PAGE_MASK;
28915+
28916+ return bdev->ttm_bo_size + 2 * page_array_size;
28917+}
28918+
28919+int ttm_buffer_object_create(struct ttm_bo_device *bdev,
28920+ unsigned long size,
28921+ enum ttm_bo_type type,
28922+ uint32_t flags,
28923+ uint32_t page_alignment,
28924+ unsigned long buffer_start,
28925+ bool interruptible,
28926+ struct file *persistant_swap_storage,
28927+ struct ttm_buffer_object **p_bo)
28928+{
28929+ struct ttm_buffer_object *bo;
28930+ int ret;
28931+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
28932+
28933+ size_t acc_size =
28934+ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
28935+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
28936+ if (unlikely(ret != 0))
28937+ return ret;
28938+
28939+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
28940+
28941+ if (unlikely(bo == NULL)) {
28942+ ttm_mem_global_free(mem_glob, acc_size, false);
28943+ return -ENOMEM;
28944+ }
28945+
28946+ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
28947+ page_alignment, buffer_start,
28948+ interruptible,
28949+ persistant_swap_storage, acc_size, NULL);
28950+ if (likely(ret == 0))
28951+ *p_bo = bo;
28952+
28953+ return ret;
28954+}
28955+
28956+static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
28957+ uint32_t mem_type, bool allow_errors)
28958+{
28959+ int ret;
28960+
28961+ mutex_lock(&bo->mutex);
28962+
28963+ ret = ttm_bo_expire_sync_obj(bo, allow_errors);
28964+ if (ret)
28965+ goto out;
28966+
28967+ if (bo->mem.mem_type == mem_type)
28968+ ret = ttm_bo_evict(bo, mem_type, false, false);
28969+
28970+ if (ret) {
28971+ if (allow_errors) {
28972+ goto out;
28973+ } else {
28974+ ret = 0;
28975+ printk(KERN_ERR "Cleanup eviction failed\n");
28976+ }
28977+ }
28978+
28979+ out:
28980+ mutex_unlock(&bo->mutex);
28981+ return ret;
28982+}
28983+
28984+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
28985+ struct list_head *head,
28986+ unsigned mem_type, bool allow_errors)
28987+{
28988+ struct ttm_buffer_object *entry;
28989+ int ret;
28990+ int put_count;
28991+
28992+ /*
28993+ * Can't use standard list traversal since we're unlocking.
28994+ */
28995+
28996+ spin_lock(&bdev->lru_lock);
28997+
28998+ while (!list_empty(head)) {
28999+ entry = list_first_entry(head, struct ttm_buffer_object, lru);
29000+ kref_get(&entry->list_kref);
29001+ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
29002+ put_count = ttm_bo_del_from_lru(entry);
29003+ spin_unlock(&bdev->lru_lock);
29004+ while (put_count--)
29005+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
29006+ BUG_ON(ret);
29007+ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
29008+ ttm_bo_unreserve(entry);
29009+ kref_put(&entry->list_kref, ttm_bo_release_list);
29010+ spin_lock(&bdev->lru_lock);
29011+ }
29012+
29013+ spin_unlock(&bdev->lru_lock);
29014+
29015+ return 0;
29016+}
29017+
29018+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
29019+{
29020+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
29021+ int ret = -EINVAL;
29022+
29023+ if (mem_type >= TTM_NUM_MEM_TYPES) {
29024+ printk(KERN_ERR "Illegal memory type %d\n", mem_type);
29025+ return ret;
29026+ }
29027+
29028+ if (!man->has_type) {
29029+ printk(KERN_ERR "Trying to take down uninitialized "
29030+ "memory manager type %u\n", mem_type);
29031+ return ret;
29032+ }
29033+
29034+ man->use_type = false;
29035+ man->has_type = false;
29036+
29037+ ret = 0;
29038+ if (mem_type > 0) {
29039+ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
29040+
29041+ spin_lock(&bdev->lru_lock);
29042+ if (drm_mm_clean(&man->manager)) {
29043+ drm_mm_takedown(&man->manager);
29044+ } else {
29045+ ret = -EBUSY;
29046+ }
29047+ spin_unlock(&bdev->lru_lock);
29048+ }
29049+
29050+ return ret;
29051+}
29052+
29053+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
29054+{
29055+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
29056+
29057+ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
29058+ printk(KERN_ERR "Illegal memory manager memory type %u.\n",
29059+ mem_type);
29060+ return -EINVAL;
29061+ }
29062+
29063+ if (!man->has_type) {
29064+ printk(KERN_ERR "Memory type %u has not been initialized.\n",
29065+ mem_type);
29066+ return 0;
29067+ }
29068+
29069+ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
29070+}
29071+
29072+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
29073+ unsigned long p_offset, unsigned long p_size)
29074+{
29075+ int ret = -EINVAL;
29076+ struct ttm_mem_type_manager *man;
29077+
29078+ if (type >= TTM_NUM_MEM_TYPES) {
29079+ printk(KERN_ERR "Illegal memory type %d\n", type);
29080+ return ret;
29081+ }
29082+
29083+ man = &bdev->man[type];
29084+ if (man->has_type) {
29085+ printk(KERN_ERR
29086+ "Memory manager already initialized for type %d\n",
29087+ type);
29088+ return ret;
29089+ }
29090+
29091+ ret = bdev->driver->init_mem_type(bdev, type, man);
29092+ if (ret)
29093+ return ret;
29094+
29095+ ret = 0;
29096+ if (type != TTM_PL_SYSTEM) {
29097+ if (!p_size) {
29098+ printk(KERN_ERR "Zero size memory manager type %d\n",
29099+ type);
29100+ return ret;
29101+ }
29102+ ret = drm_mm_init(&man->manager, p_offset, p_size);
29103+ if (ret)
29104+ return ret;
29105+ }
29106+ man->has_type = true;
29107+ man->use_type = true;
29108+ man->size = p_size;
29109+
29110+ INIT_LIST_HEAD(&man->lru);
29111+
29112+ return 0;
29113+}
29114+
29115+int ttm_bo_device_release(struct ttm_bo_device *bdev)
29116+{
29117+ int ret = 0;
29118+ unsigned i = TTM_NUM_MEM_TYPES;
29119+ struct ttm_mem_type_manager *man;
29120+
29121+ while (i--) {
29122+ man = &bdev->man[i];
29123+ if (man->has_type) {
29124+ man->use_type = false;
29125+ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
29126+ ret = -EBUSY;
29127+ printk(KERN_ERR "DRM memory manager type %d "
29128+ "is not clean.\n", i);
29129+ }
29130+ man->has_type = false;
29131+ }
29132+ }
29133+
29134+ if (!cancel_delayed_work(&bdev->wq))
29135+ flush_scheduled_work();
29136+
29137+ while (ttm_bo_delayed_delete(bdev, true)) ;
29138+
29139+ spin_lock(&bdev->lru_lock);
29140+ if (list_empty(&bdev->ddestroy))
29141+ TTM_DEBUG("Delayed destroy list was clean\n");
29142+
29143+ if (list_empty(&bdev->man[0].lru))
29144+ TTM_DEBUG("Swap list was clean\n");
29145+ spin_unlock(&bdev->lru_lock);
29146+
29147+ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
29148+ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
29149+ write_lock(&bdev->vm_lock);
29150+ drm_mm_takedown(&bdev->addr_space_mm);
29151+ write_unlock(&bdev->vm_lock);
29152+
29153+ __free_page(bdev->dummy_read_page);
29154+ return ret;
29155+}
29156+
29157+/*
29158+ * This function is intended to be called on drm driver load.
29159+ * If you decide to call it from firstopen, you must protect the call
29160+ * from a potentially racing ttm_bo_device_release in lastclose.
29161+ * (This may happen on X server restart).
29162+ */
29163+
29164+int ttm_bo_device_init(struct ttm_bo_device *bdev,
29165+ struct ttm_mem_global *mem_glob,
29166+ struct ttm_bo_driver *driver, uint64_t file_page_offset)
29167+{
29168+ int ret = -EINVAL;
29169+
29170+ bdev->dummy_read_page = NULL;
29171+ rwlock_init(&bdev->vm_lock);
29172+ spin_lock_init(&bdev->lru_lock);
29173+
29174+ bdev->driver = driver;
29175+ bdev->mem_glob = mem_glob;
29176+
29177+ memset(bdev->man, 0, sizeof(bdev->man));
29178+
29179+ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
29180+ if (unlikely(bdev->dummy_read_page == NULL)) {
29181+ ret = -ENOMEM;
29182+ goto out_err0;
29183+ }
29184+
29185+ /*
29186+ * Initialize the system memory buffer type.
29187+ * Other types need to be driver / IOCTL initialized.
29188+ */
29189+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
29190+ if (unlikely(ret != 0))
29191+ goto out_err1;
29192+
29193+ bdev->addr_space_rb = RB_ROOT;
29194+ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
29195+ if (unlikely(ret != 0))
29196+ goto out_err2;
29197+
29198+ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
29199+ bdev->nice_mode = true;
29200+ INIT_LIST_HEAD(&bdev->ddestroy);
29201+ INIT_LIST_HEAD(&bdev->swap_lru);
29202+ bdev->dev_mapping = NULL;
29203+ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
29204+ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
29205+ if (unlikely(ret != 0)) {
29206+ printk(KERN_ERR "Could not register buffer object swapout.\n");
29207+ goto out_err2;
29208+ }
29209+ return 0;
29210+ out_err2:
29211+ ttm_bo_clean_mm(bdev, 0);
29212+ out_err1:
29213+ __free_page(bdev->dummy_read_page);
29214+ out_err0:
29215+ return ret;
29216+}
29217+
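A load-time sketch; my_dev_priv, my_bo_driver, DRM_FILE_PAGE_OFFSET,
TTM_PL_TT and tt_pages are driver-side assumptions:

	static int my_ttm_init(struct my_dev_priv *dev_priv)
	{
		int ret;

		ret = ttm_bo_device_init(&dev_priv->bdev, dev_priv->mem_glob,
					 &my_bo_driver, DRM_FILE_PAGE_OFFSET);
		if (unlikely(ret != 0))
			return ret;

		/*
		 * The system type is set up by ttm_bo_device_init;
		 * all other memory types are the driver's job.
		 */
		ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_TT, 0,
				     dev_priv->tt_pages);
		if (unlikely(ret != 0))
			ttm_bo_device_release(&dev_priv->bdev);
		return ret;
	}
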
29218+/*
29219+ * buffer object vm functions.
29220+ */
29221+
29222+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
29223+{
29224+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
29225+
29226+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
29227+ if (mem->mem_type == TTM_PL_SYSTEM)
29228+ return false;
29229+
29230+ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
29231+ return false;
29232+
29233+ if (mem->flags & TTM_PL_FLAG_CACHED)
29234+ return false;
29235+ }
29236+ return true;
29237+}
29238+
29239+int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
29240+ struct ttm_mem_reg *mem,
29241+ unsigned long *bus_base,
29242+ unsigned long *bus_offset, unsigned long *bus_size)
29243+{
29244+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
29245+
29246+ *bus_size = 0;
29247+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
29248+ return -EINVAL;
29249+
29250+ if (ttm_mem_reg_is_pci(bdev, mem)) {
29251+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
29252+ *bus_size = mem->num_pages << PAGE_SHIFT;
29253+ *bus_base = man->io_offset;
29254+ }
29255+
29256+ return 0;
29257+}
29258+
29259+/**
29260+ * \c Kill all user-space virtual mappings of this buffer object.
29261+ *
29262+ * \param bo The buffer object.
29263+ *
29264+ * Call bo->mutex locked.
29265+ */
29266+
29267+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
29268+{
29269+ struct ttm_bo_device *bdev = bo->bdev;
29270+ loff_t offset = (loff_t) bo->addr_space_offset;
29271+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
29272+
29273+ if (!bdev->dev_mapping)
29274+ return;
29275+
29276+ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
29277+}
29278+
29279+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
29280+{
29281+ struct ttm_bo_device *bdev = bo->bdev;
29282+ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
29283+ struct rb_node *parent = NULL;
29284+ struct ttm_buffer_object *cur_bo;
29285+ unsigned long offset = bo->vm_node->start;
29286+ unsigned long cur_offset;
29287+
29288+ while (*cur) {
29289+ parent = *cur;
29290+ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
29291+ cur_offset = cur_bo->vm_node->start;
29292+ if (offset < cur_offset)
29293+ cur = &parent->rb_left;
29294+ else if (offset > cur_offset)
29295+ cur = &parent->rb_right;
29296+ else
29297+ BUG();
29298+ }
29299+
29300+ rb_link_node(&bo->vm_rb, parent, cur);
29301+ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
29302+}
29303+
29304+/**
29305+ * ttm_bo_setup_vm:
29306+ *
29307+ * @bo: the buffer to allocate address space for
29308+ *
29309+ * Allocate address space in the drm device so that applications
29310+ * can mmap the buffer and access the contents. This only
29311+ * applies to ttm_bo_type_device objects as others are not
29312+ * placed in the drm device address space.
29313+ */
29314+
29315+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
29316+{
29317+ struct ttm_bo_device *bdev = bo->bdev;
29318+ int ret;
29319+
29320+ retry_pre_get:
29321+ ret = drm_mm_pre_get(&bdev->addr_space_mm);
29322+ if (unlikely(ret != 0))
29323+ return ret;
29324+
29325+ write_lock(&bdev->vm_lock);
29326+ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
29327+ bo->mem.num_pages, 0, 0);
29328+
29329+ if (unlikely(bo->vm_node == NULL)) {
29330+ ret = -ENOMEM;
29331+ goto out_unlock;
29332+ }
29333+
29334+ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
29335+ bo->mem.num_pages, 0);
29336+
29337+ if (unlikely(bo->vm_node == NULL)) {
29338+ write_unlock(&bdev->vm_lock);
29339+ goto retry_pre_get;
29340+ }
29341+
29342+ ttm_bo_vm_insert_rb(bo);
29343+ write_unlock(&bdev->vm_lock);
29344+ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
29345+
29346+ return 0;
29347+ out_unlock:
29348+ write_unlock(&bdev->vm_lock);
29349+ return ret;
29350+}
29351+
29352+int ttm_bo_wait(struct ttm_buffer_object *bo,
29353+ bool lazy, bool interruptible, bool no_wait)
29354+{
29355+ struct ttm_bo_driver *driver = bo->bdev->driver;
29356+ void *sync_obj;
29357+ void *sync_obj_arg;
29358+ int ret = 0;
29359+
29360+ while (bo->sync_obj) {
29361+ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
29362+ driver->sync_obj_unref(&bo->sync_obj);
29363+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
29364+ goto out;
29365+ }
29366+ if (no_wait) {
29367+ ret = -EBUSY;
29368+ goto out;
29369+ }
29370+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
29371+ sync_obj_arg = bo->sync_obj_arg;
29372+ mutex_unlock(&bo->mutex);
29373+ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
29374+ lazy, interruptible);
29375+
29376+ mutex_lock(&bo->mutex);
29377+ if (unlikely(ret != 0)) {
29378+ driver->sync_obj_unref(&sync_obj);
29379+ return ret;
29380+ }
29381+
29382+ if (bo->sync_obj == sync_obj) {
29383+ driver->sync_obj_unref(&bo->sync_obj);
29384+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
29385+ }
29386+ driver->sync_obj_unref(&sync_obj);
29387+ }
29388+ out:
29389+ return ret;
29390+}
29391+
29392+void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
29393+{
29394+ atomic_set(&bo->reserved, 0);
29395+ wake_up_all(&bo->event_queue);
29396+}
29397+
29398+int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
29399+ bool no_wait)
29400+{
29401+ int ret;
29402+
29403+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
29404+ if (no_wait)
29405+ return -EBUSY;
29406+ else if (interruptible) {
29407+ ret = wait_event_interruptible
29408+ (bo->event_queue, atomic_read(&bo->reserved) == 0);
29409+ if (unlikely(ret != 0))
29410+ return -ERESTART;
29411+ } else {
29412+ wait_event(bo->event_queue,
29413+ atomic_read(&bo->reserved) == 0);
29414+ }
29415+ }
29416+ return 0;
29417+}
29418+
29419+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
29420+{
29421+ int ret = 0;
29422+
29423+ /*
29424+ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
29425+ * makes sure the lru lists are updated.
29426+ */
29427+
29428+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
29429+ if (unlikely(ret != 0))
29430+ return ret;
29431+ mutex_lock(&bo->mutex);
29432+ ret = ttm_bo_wait(bo, false, true, no_wait);
29433+ if (unlikely(ret != 0))
29434+ goto out_err0;
29435+ atomic_inc(&bo->cpu_writers);
29436+ out_err0:
29437+ mutex_unlock(&bo->mutex);
29438+ ttm_bo_unreserve(bo);
29439+ return ret;
29440+}
29441+
29442+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
29443+{
29444+ if (atomic_dec_and_test(&bo->cpu_writers))
29445+ wake_up_all(&bo->event_queue);
29446+}
29447+
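A sketch of bracketing CPU writes with the two calls above; validators are
expected to wait for cpu_writers to drop via ttm_bo_wait_cpu():

	ret = ttm_bo_synccpu_write_grab(bo, false);	/* waits for GPU idle */
	if (unlikely(ret != 0))
		return ret;

	/* CPU writes to the buffer contents go here. */

	ttm_bo_synccpu_write_release(bo);
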
29448+/**
29449+ * A buffer object shrink method that tries to swap out the first
29450+ * buffer object on the bo_global::swap_lru list.
29451+ */
29452+
29453+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
29454+{
29455+ struct ttm_bo_device *bdev =
29456+ container_of(shrink, struct ttm_bo_device, shrink);
29457+ struct ttm_buffer_object *bo;
29458+ int ret = -EBUSY;
29459+ int put_count;
29460+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
29461+
29462+ spin_lock(&bdev->lru_lock);
29463+ while (ret == -EBUSY) {
29464+ if (unlikely(list_empty(&bdev->swap_lru))) {
29465+ spin_unlock(&bdev->lru_lock);
29466+ return -EBUSY;
29467+ }
29468+
29469+ bo = list_first_entry(&bdev->swap_lru,
29470+ struct ttm_buffer_object, swap);
29471+ kref_get(&bo->list_kref);
29472+
29473+ /**
29474+ * Reserve buffer. Since we unlock while sleeping, we need
29475+ * to re-check that nobody removed us from the swap-list while
29476+ * we slept.
29477+ */
29478+
29479+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
29480+ if (unlikely(ret == -EBUSY)) {
29481+ spin_unlock(&bdev->lru_lock);
29482+ ttm_bo_wait_unreserved(bo, false);
29483+ kref_put(&bo->list_kref, ttm_bo_release_list);
29484+ spin_lock(&bdev->lru_lock);
29485+ }
29486+ }
29487+
29488+ BUG_ON(ret != 0);
29489+ put_count = ttm_bo_del_from_lru(bo);
29490+ spin_unlock(&bdev->lru_lock);
29491+
29492+ while (put_count--)
29493+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
29494+
29495+ /**
29496+ * Wait for GPU, then move to system cached.
29497+ */
29498+
29499+ mutex_lock(&bo->mutex);
29500+ ret = ttm_bo_wait(bo, false, false, false);
29501+ if (unlikely(ret != 0))
29502+ goto out;
29503+
29504+ if ((bo->mem.flags & swap_placement) != swap_placement) {
29505+ struct ttm_mem_reg evict_mem;
29506+
29507+ evict_mem = bo->mem;
29508+ evict_mem.mm_node = NULL;
29509+ evict_mem.proposed_flags =
29510+ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
29511+ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
29512+ evict_mem.mem_type = TTM_PL_SYSTEM;
29513+
29514+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false);
29515+ if (unlikely(ret != 0))
29516+ goto out;
29517+ }
29518+
29519+ ttm_bo_unmap_virtual(bo);
29520+
29521+ /**
29522+ * Swap out. Buffer will be swapped in again as soon as
29523+ * anyone tries to access a ttm page.
29524+ */
29525+
29526+ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
29527+ out:
29528+ mutex_unlock(&bo->mutex);
29529+
29530+ /**
29531+ *
29532+ * Unreserve without putting on LRU to avoid swapping out an
29533+ * already swapped buffer.
29534+ */
29535+
29536+ atomic_set(&bo->reserved, 0);
29537+ wake_up_all(&bo->event_queue);
29538+ kref_put(&bo->list_kref, ttm_bo_release_list);
29539+ return ret;
29540+}
29541+
29542+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
29543+{
29544+ while (ttm_bo_swapout(&bdev->shrink) == 0) ;
29545+}
29546diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
29547--- a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h 1969-12-31 16:00:00.000000000 -0800
29548+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h 2009-04-07 13:28:38.000000000 -0700
29549@@ -0,0 +1,859 @@
29550+/**************************************************************************
29551+ *
29552+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
29553+ * All Rights Reserved.
29554+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
29555+ * All Rights Reserved.
29556+ *
29557+ * Permission is hereby granted, free of charge, to any person obtaining a
29558+ * copy of this software and associated documentation files (the
29559+ * "Software"), to deal in the Software without restriction, including
29560+ * without limitation the rights to use, copy, modify, merge, publish,
29561+ * distribute, sub license, and/or sell copies of the Software, and to
29562+ * permit persons to whom the Software is furnished to do so, subject to
29563+ * the following conditions:
29564+ *
29565+ * The above copyright notice and this permission notice (including the
29566+ * next paragraph) shall be included in all copies or substantial portions
29567+ * of the Software.
29568+ *
29569+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29570+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29571+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
29572+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
29573+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
29574+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
29575+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
29576+ *
29577+ **************************************************************************/
29578+/*
29579+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
29580+ */
29581+#ifndef _TTM_BO_DRIVER_H_
29582+#define _TTM_BO_DRIVER_H_
29583+
29584+#include "ttm/ttm_bo_api.h"
29585+#include "ttm/ttm_memory.h"
29586+#include <drm/drm_mm.h>
29587+#include "linux/workqueue.h"
29588+#include "linux/fs.h"
29589+#include "linux/spinlock.h"
29590+
29591+struct ttm_backend;
29592+
29593+struct ttm_backend_func {
29594+ /**
29595+ * struct ttm_backend_func member populate
29596+ *
29597+ * @backend: Pointer to a struct ttm_backend.
29598+ * @num_pages: Number of pages to populate.
29599+ * @pages: Array of pointers to ttm pages.
29600+ * @dummy_read_page: Page to be used instead of NULL pages in the
29601+ * array @pages.
29602+ *
29603+ * Populate the backend with ttm pages. Depending on the backend,
29604+ * it may or may not copy the @pages array.
29605+ */
29606+ int (*populate) (struct ttm_backend * backend,
29607+ unsigned long num_pages, struct page ** pages,
29608+ struct page * dummy_read_page);
29609+ /**
29610+ * struct ttm_backend_func member clear
29611+ *
29612+ * @backend: Pointer to a struct ttm_backend.
29613+ *
29614+ * This is an "unpopulate" function. Release all resources
29615+ * allocated with populate.
29616+ */
29617+ void (*clear) (struct ttm_backend * backend);
29618+
29619+ /**
29620+ * struct ttm_backend_func member bind
29621+ *
29622+ * @backend: Pointer to a struct ttm_backend.
29623+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
29624+ * memory type and location for binding.
29625+ *
29626+ * Bind the backend pages into the aperture in the location
29627+ * indicated by @bo_mem. This function should be able to handle
29628+ * differences between aperture- and system page sizes.
29629+ */
29630+ int (*bind) (struct ttm_backend * backend, struct ttm_mem_reg * bo_mem);
29631+
29632+ /**
29633+ * struct ttm_backend_func member unbind
29634+ *
29635+ * @backend: Pointer to a struct ttm_backend.
29636+ *
29637+ * Unbind previously bound backend pages. This function should be
29638+ * able to handle differences between aperture- and system page sizes.
29639+ */
29640+ int (*unbind) (struct ttm_backend * backend);
29641+
29642+ /**
29643+ * struct ttm_backend_func member destroy
29644+ *
29645+ * @backend: Pointer to a struct ttm_backend.
29646+ *
29647+ * Destroy the backend.
29648+ */
29649+ void (*destroy) (struct ttm_backend * backend);
29650+};
29651+
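/*
 * Illustrative sketch, not part of the original patch: a driver
 * implements these hooks with a static function table; every psb_*
 * identifier below is a hypothetical placeholder.
 *
 *	static struct ttm_backend_func psb_backend_func = {
 *		.populate = psb_backend_populate,
 *		.clear = psb_backend_clear,
 *		.bind = psb_backend_bind,
 *		.unbind = psb_backend_unbind,
 *		.destroy = psb_backend_destroy,
 *	};
 */
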
29652+/**
29653+ * struct ttm_backend
29654+ *
29655+ * @bdev: Pointer to a struct ttm_bo_device.
29656+ * @flags: For driver use.
29657+ * @func: Pointer to a struct ttm_backend_func that describes
29658+ * the backend methods.
29659+ *
29660+ */
29661+
29662+struct ttm_backend {
29663+ struct ttm_bo_device *bdev;
29664+ uint32_t flags;
29665+ struct ttm_backend_func *func;
29666+};
29667+
29668+#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
29669+#define TTM_PAGE_FLAG_USER (1 << 1)
29670+#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
29671+#define TTM_PAGE_FLAG_WRITE (1 << 3)
29672+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
29673+#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
29674+
29675+enum ttm_caching_state {
29676+ tt_uncached,
29677+ tt_wc,
29678+ tt_cached
29679+};
29680+
29681+/**
29682+ * struct ttm_tt
29683+ *
29684+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
29685+ * pointer.
29686+ * @pages: Array of pages backing the data.
29687+ * @first_himem_page: Himem pages are put last in the page array, which
29688+ * enables us to run caching attribute changes on only the first part
29689+ * of the page array containing lomem pages. This is the index of the
29690+ * first himem page.
29691+ * @last_lomem_page: Index of the last lomem page in the page array.
29692+ * @num_pages: Number of pages in the page array.
29693+ * @bdev: Pointer to the current struct ttm_bo_device.
29694+ * @be: Pointer to the ttm backend.
29695+ * @tsk: The task for user ttm.
29696+ * @start: virtual address for user ttm.
29697+ * @swap_storage: Pointer to shmem struct file for swap storage.
29698+ * @caching_state: The current caching state of the pages.
29699+ * @state: The current binding state of the pages.
29700+ *
29701+ * This is a structure holding the pages, caching- and aperture binding
29702+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
29703+ * memory.
29704+ */
29705+
29706+struct ttm_tt {
29707+ struct page *dummy_read_page;
29708+ struct page **pages;
29709+ long first_himem_page;
29710+ long last_lomem_page;
29711+ uint32_t page_flags;
29712+ unsigned long num_pages;
29713+ struct ttm_bo_device *bdev;
29714+ struct ttm_backend *be;
29715+ struct task_struct *tsk;
29716+ unsigned long start;
29717+ struct file *swap_storage;
29718+ enum ttm_caching_state caching_state;
29719+ enum {
29720+ tt_bound,
29721+ tt_unbound,
29722+ tt_unpopulated,
29723+ } state;
29724+};
29725+
29726+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
29727+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
29728+#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
29729+ before kernel access. */
29730+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
29731+
29732+/**
29733+ * struct ttm_mem_type_manager
29734+ *
29735+ * @has_type: The memory type has been initialized.
29736+ * @use_type: The memory type is enabled.
29737+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
29738+ * managed by this memory type.
29739+ * @gpu_offset: If used, the GPU offset of the first managed page of
29740+ * fixed memory or the first managed location in an aperture.
29741+ * @io_offset: The io_offset of the first managed page of IO memory or
29742+ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
29743+ * memory, this should be set to zero.
29744+ * @io_size: The size of a managed IO region (fixed memory or aperture).
29745+ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
29746+ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
29747+ * @io_addr should be set to NULL.
29748+ * @size: Size of the managed region.
29749+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
29750+ * as defined in ttm_placement_common.h
29751+ * @default_caching: The default caching policy used for a buffer object
29752+ * placed in this memory type if the user doesn't provide one.
29753+ * @manager: The range manager used for this memory type. FIXME: If the aperture
29754+ * has a page size different from the underlying system, the granularity
29755+ * of this manager should take care of this. But the range allocating code
29756+ * in ttm_bo.c needs to be modified for this.
29757+ * @lru: The lru list for this memory type.
29758+ *
29759+ * This structure is used to identify and manage memory types for a device.
29760+ * It's set up by the ttm_bo_driver::init_mem_type method.
29761+ */
29762+
29763+struct ttm_mem_type_manager {
29764+
29765+ /*
29766+ * No protection. Constant from start.
29767+ */
29768+
29769+ bool has_type;
29770+ bool use_type;
29771+ uint32_t flags;
29772+ unsigned long gpu_offset;
29773+ unsigned long io_offset;
29774+ unsigned long io_size;
29775+ void *io_addr;
29776+ uint64_t size;
29777+ uint32_t available_caching;
29778+ uint32_t default_caching;
29779+
29780+ /*
29781+ * Protected by the bdev->lru_lock.
29782+ * TODO: Consider one lru_lock per ttm_mem_type_manager.
29783+ * Plays ill with list removal, though.
29784+ */
29785+
29786+ struct drm_mm manager;
29787+ struct list_head lru;
29788+};
29789+
29790+/**
29791+ * struct ttm_bo_driver
29792+ *
29793+ * @mem_type_prio: Priority array of memory types to place a buffer object in
29794+ * if it fits without evicting buffers from any of these memory types.
29795+ * @mem_busy_prio: Priority array of memory types to place a buffer object in
29796+ * if it needs to evict buffers to make room.
29797+ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
29798+ * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
29799+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
29800+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
29801+ * has been evicted.
29802+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager structure.
29803+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
29804+ * @move: Callback for a driver to hook in accelerated functions to move a buffer.
29805+ * If set to NULL, a potentially slow memcpy() move is used.
29806+ * @sync_obj_signaled: See ttm_fence_api.h
29807+ * @sync_obj_wait: See ttm_fence_api.h
29808+ * @sync_obj_flush: See ttm_fence_api.h
29809+ * @sync_obj_unref: See ttm_fence_api.h
29810+ * @sync_obj_ref: See ttm_fence_api.h
29811+ */
29812+
29813+struct ttm_bo_driver {
29814+ const uint32_t *mem_type_prio;
29815+ const uint32_t *mem_busy_prio;
29816+ uint32_t num_mem_type_prio;
29817+ uint32_t num_mem_busy_prio;
29818+
29819+ /**
29820+ * struct ttm_bo_driver member create_ttm_backend_entry
29821+ *
29822+ * @bdev: The buffer object device.
29823+ *
29824+ * Create a driver specific struct ttm_backend.
29825+ */
29826+
29827+ struct ttm_backend *(*create_ttm_backend_entry)
29828+ (struct ttm_bo_device * bdev);
29829+
29830+ /**
29831+ * struct ttm_bo_driver member invalidate_caches
29832+ *
29833+ * @bdev: the buffer object device.
29834+ * @flags: new placement of the rebound buffer object.
29835+ *
29836+ * A previously evicted buffer has been rebound in a
29837+ * potentially new location. Tell the driver that it might
29838+ * consider invalidating read (texture) caches on the next command
29839+ * submission as a consequence.
29840+ */
29841+
29842+ int (*invalidate_caches) (struct ttm_bo_device * bdev, uint32_t flags);
29843+ int (*init_mem_type) (struct ttm_bo_device * bdev, uint32_t type,
29844+ struct ttm_mem_type_manager * man);
29845+ /**
29846+ * struct ttm_bo_driver member evict_flags:
29847+ *
29848+ * @bo: the buffer object to be evicted
29849+ *
29850+ * Return the bo flags for a buffer which is not mapped to the hardware.
29851+ * These will be placed in proposed_flags so that when the move is
29852+ * finished, they'll end up in bo->mem.flags
29853+ */
29854+
29855+ uint32_t(*evict_flags) (struct ttm_buffer_object * bo);
29856+ /**
29857+ * struct ttm_bo_driver member move:
29858+ *
29859+ * @bo: the buffer to move
29860+ * @evict: whether this motion is evicting the buffer from
29861+ * the graphics address space
29862+ * @interruptible: Use interruptible sleeps if possible when sleeping.
29863+ * @no_wait: whether this should give up and return -EBUSY
29864+ * if this move would require sleeping
29865+ * @new_mem: the new memory region receiving the buffer
29866+ *
29867+ * Move a buffer between two memory regions.
29868+ */
29869+ int (*move) (struct ttm_buffer_object * bo,
29870+ bool evict, bool interruptible,
29871+ bool no_wait, struct ttm_mem_reg * new_mem);
29872+
29873+ /**
29874+ * struct ttm_bo_driver_member verify_access
29875+ *
29876+ * @bo: Pointer to a buffer object.
29877+ * @filp: Pointer to a struct file trying to access the object.
29878+ *
29879+ * Called from the map / write / read methods to verify that the
29880+ * caller is permitted to access the buffer object.
29881+ * This member may be set to NULL, which will refuse this kind of
29882+ * access for all buffer objects.
29883+ * This function should return 0 if access is granted, -EPERM otherwise.
29884+ */
29885+ int (*verify_access) (struct ttm_buffer_object * bo,
29886+ struct file * filp);
29887+
29888+ /**
29889+ * In case a driver writer dislikes the TTM fence objects,
29890+ * the driver writer can replace those with sync objects of
29891+ * his / her own. If it turns out that no driver writer is
29892+ * using these, I suggest we remove these hooks and plug in
29893+ * fences directly. The bo driver needs the following functionality:
29894+ * See the corresponding functions in the fence object API
29895+ * documentation.
29896+ */
29897+
29898+ bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
29899+ int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
29900+ bool lazy, bool interruptible);
29901+ int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
29902+ void (*sync_obj_unref) (void **sync_obj);
29903+ void *(*sync_obj_ref) (void *sync_obj);
29904+};
29905+
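/*
 * Illustrative sketch, not part of the original patch: a driver wires
 * the priority arrays so TTM tries placements in order. The psb_*
 * names, and the TTM_PL_VRAM / TTM_PL_TT indices, are assumptions
 * borrowed from the usual TTM placement conventions.
 *
 *	static uint32_t psb_mem_prios[] =
 *		{ TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
 *
 *	static struct ttm_bo_driver psb_bo_driver = {
 *		.mem_type_prio = psb_mem_prios,
 *		.mem_busy_prio = psb_mem_prios,
 *		.num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
 *		.num_mem_busy_prio = ARRAY_SIZE(psb_mem_prios),
 *		.create_ttm_backend_entry = psb_create_ttm_backend_entry,
 *		.init_mem_type = psb_init_mem_type,
 *		.evict_flags = psb_evict_flags,
 *		.move = NULL,	(NULL falls back to memcpy moves)
 *	};
 */
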
29906+#define TTM_NUM_MEM_TYPES 10
29907+
29908+#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0) /* Buffer object is evicted. */
29909+#define TTM_BO_PRIV_FLAG_MOVING (1 << 1) /* Buffer object is moving and needs
29910+ idling before CPU mapping */
29911+/**
29912+ * struct ttm_bo_device - Buffer object driver device-specific data.
29913+ *
29914+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
29915+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
29916+ * @count: Current number of buffer objects.
29917+ * @pages: Current number of pinned pages.
29918+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
29919+ * of unpopulated pages.
29920+ * @shrink: A shrink callback object used for buffer object swap.
29921+ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
29922+ * used by a buffer object. This is excluding page arrays and backing pages.
29923+ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
29924+ * @man: An array of mem_type_managers.
29925+ * @addr_space_mm: Range manager for the device address space.
29926+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
29927+ * ddestroy lists.
29928+ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
29929+ * If a GPU lockup has been detected, this is forced to 0.
29930+ * @dev_mapping: A pointer to the struct address_space representing the
29931+ * device address space.
29932+ * @wq: Work queue structure for the delayed delete workqueue.
29933+ *
29934+ */
29935+
29936+struct ttm_bo_device {
29937+
29938+ /*
29939+ * Constant after bo device init / atomic.
29940+ */
29941+
29942+ struct ttm_mem_global *mem_glob;
29943+ struct ttm_bo_driver *driver;
29944+ struct page *dummy_read_page;
29945+ struct ttm_mem_shrink shrink;
29946+
29947+ size_t ttm_bo_extra_size;
29948+ size_t ttm_bo_size;
29949+
29950+ rwlock_t vm_lock;
29951+ /*
29952+ * Protected by the vm lock.
29953+ */
29954+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
29955+ struct rb_root addr_space_rb;
29956+ struct drm_mm addr_space_mm;
29957+
29958+ /*
29959+ * Might want to change this to one lock per manager.
29960+ */
29961+ spinlock_t lru_lock;
29962+ /*
29963+ * Protected by the lru lock.
29964+ */
29965+ struct list_head ddestroy;
29966+ struct list_head swap_lru;
29967+
29968+ /*
29969+ * Protected by load / firstopen / lastclose /unload sync.
29970+ */
29971+
29972+ bool nice_mode;
29973+ struct address_space *dev_mapping;
29974+
29975+ /*
29976+ * Internal protection.
29977+ */
29978+
29979+ struct delayed_work wq;
29980+};
29981+
29982+/**
29983+ * ttm_flag_masked
29984+ *
29985+ * @old: Pointer to the result and original value.
29986+ * @new: New value of bits.
29987+ * @mask: Mask of bits to change.
29988+ *
29989+ * Convenience function to change a number of bits identified by a mask.
29990+ */
29991+
29992+static inline uint32_t
29993+ttm_flag_masked(uint32_t * old, uint32_t new, uint32_t mask)
29994+{
29995+ *old ^= (*old ^ new) & mask;
29996+ return *old;
29997+}
29998+
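/*
 * Worked example (editorial, not part of the original patch): with
 * *old == 0x0f, new == 0x30 and mask == 0xf0, the masked bits take
 * the new value while the rest keep the old one:
 * (0x0f ^ 0x30) & 0xf0 == 0x30, so *old becomes 0x0f ^ 0x30 == 0x3f.
 */
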
29999+/**
30000+ * ttm_tt_create
30001+ *
30002+ * @bdev: Pointer to a struct ttm_bo_device.
30003+ * @size: Size of the data that needs backing.
30004+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
30005+ * @dummy_read_page: See struct ttm_bo_device.
30006+ *
30007+ * Create a struct ttm_tt to back data with system memory pages.
30008+ * No pages are actually allocated.
30009+ * Returns:
30010+ * NULL: Out of memory.
30011+ */
30012+extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
30013+ unsigned long size,
30014+ uint32_t page_flags,
30015+ struct page *dummy_read_page);
30016+
30017+/**
30018+ * ttm_tt_set_user:
30019+ *
30020+ * @ttm: The struct ttm_tt to populate.
30021+ * @tsk: A struct task_struct for which @start is a valid user-space address.
30022+ * @start: A valid user-space address.
30023+ * @num_pages: Size in pages of the user memory area.
30024+ *
30025+ * Populate a struct ttm_tt with a user-space memory area after first pinning
30026+ * the pages backing it.
30027+ * Returns:
30028+ * !0: Error.
30029+ */
30030+
30031+extern int ttm_tt_set_user(struct ttm_tt *ttm,
30032+ struct task_struct *tsk,
30033+ unsigned long start, unsigned long num_pages);
30034+
30035+/**
30036+ * ttm_tt_bind:
30037+ *
30038+ * @ttm: The struct ttm_tt containing backing pages.
30039+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
30040+ *
30041+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
30042+ */
30043+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
30044+
30045+/**
30046+ * ttm_tt_destroy:
30047+ *
30048+ * @ttm: The struct ttm_tt.
30049+ *
30050+ * Unbind, unpopulate and destroy a struct ttm_tt.
30051+ */
30052+extern void ttm_tt_destroy(struct ttm_tt *ttm);
30053+
30054+/**
30055+ * ttm_tt_unbind:
30056+ *
30057+ * @ttm: The struct ttm_tt.
30058+ *
30059+ * Unbind a struct ttm_tt.
30060+ */
30061+extern void ttm_tt_unbind(struct ttm_tt *ttm);
30062+
30063+/**
30064+ * ttm_tt_get_page:
30065+ *
30066+ * @ttm: The struct ttm_tt.
30067+ * @index: Index of the desired page.
30068+ *
30069+ * Return a pointer to the struct page backing @ttm at page
30070+ * index @index. If the page is unpopulated, one will be allocated to
30071+ * populate that index.
30072+ *
30073+ * Returns:
30074+ * NULL on OOM.
30075+ */
30076+extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
30077+
30078+/**
30079+ * ttm_tt_cache_flush:
30080+ *
30081+ * @pages: An array of pointers to struct page to flush.
30082+ * @num_pages: Number of pages to flush.
30083+ *
30084+ * Flush the data of the indicated pages from the cpu caches.
30085+ * This is used when changing caching attributes of the pages from
30086+ * cache-coherent.
30087+ */
30088+extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
30089+
30090+/**
30091+ * ttm_tt_set_placement_caching:
30092+ *
30093+ * @ttm: A struct ttm_tt whose backing pages will change caching policy.
30094+ * @placement: Flag indicating the desired caching policy.
30095+ *
30096+ * This function will change caching policy of any default kernel mappings of
30097+ * the pages backing @ttm. If changing from cached to uncached or write-combined,
30098+ * all CPU caches will first be flushed to make sure the data of the pages
30099+ * hit RAM. This function may be very costly as it involves global TLB
30100+ * and cache flushes and potential page splitting / combining.
30101+ */
30102+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
30103+extern int ttm_tt_swapout(struct ttm_tt *ttm,
30104+ struct file *persistant_swap_storage);
30105+
30106+/*
30107+ * ttm_bo.c
30108+ */
30109+
30110+/**
30111+ * ttm_mem_reg_is_pci
30112+ *
30113+ * @bdev: Pointer to a struct ttm_bo_device.
30114+ * @mem: A valid struct ttm_mem_reg.
30115+ *
30116+ * Returns true if the memory described by @mem is PCI memory,
30117+ * false otherwise.
30118+ */
30119+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
30120+ struct ttm_mem_reg *mem);
30121+
30122+/**
30123+ * ttm_bo_mem_space
30124+ *
30125+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
30126+ * we want to allocate space for.
30127+ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
30128+ * up.
30129+ * @interruptible: Sleep interruptible while waiting.
30130+ * @no_wait: Don't sleep waiting for space to become available.
30131+ *
30132+ * Allocate memory space for the buffer object pointed to by @bo, using
30133+ * the placement flags in @mem, potentially evicting other idle buffer objects.
30134+ * This function may sleep while waiting for space to become available.
30135+ * Returns:
30136+ * -EBUSY: No space available (only if no_wait == 1).
30137+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
30138+ * fragmentation or concurrent allocators.
30139+ * -ERESTART: An interruptible sleep was interrupted by a signal.
30140+ */
30141+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
30142+ struct ttm_mem_reg *mem,
30143+ bool interruptible, bool no_wait);
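
/*
 * Illustrative call sequence, not part of the original patch; it
 * mirrors the eviction pattern used by ttm_bo_swapout() in ttm_bo.c:
 *
 *	struct ttm_mem_reg mem = bo->mem;
 *
 *	mem.mm_node = NULL;
 *	mem.proposed_flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *	ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
 */
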
30144+/**
30145+ * ttm_bo_wait_cpu
30146+ *
30147+ * @bo: Pointer to a struct ttm_buffer_object.
30148+ * @no_wait: Don't sleep while waiting.
30149+ *
30150+ * Wait until a buffer object is no longer sync'ed for CPU access.
30151+ * Returns:
30152+ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
30153+ * -ERESTART: An interruptible sleep was interrupted by a signal.
30154+ */
30155+
30156+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
30157+
30158+/**
30159+ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
30160+ *
30161+ * @bdev: Pointer to a struct ttm_bo_device.
30162+ * @mem: A valid struct ttm_mem_reg.
30163+ * @bus_base: On return the base of the PCI region.
30164+ * @bus_offset: On return the byte offset into the PCI region.
30165+ * @bus_size: On return the byte size of the buffer object, or zero if
 * the buffer object memory is not accessible through a PCI region.
30166+ *
30167+ * Returns:
30168+ * -EINVAL if the buffer object is currently not mappable.
30169+ * 0 otherwise.
30170+ */
30171+
30172+extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
30173+ struct ttm_mem_reg *mem,
30174+ unsigned long *bus_base,
30175+ unsigned long *bus_offset,
30176+ unsigned long *bus_size);
30177+
30178+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
30179+
30180+/**
30181+ * ttm_bo_device_init
30182+ *
30183+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
30184+ * @mem_glob: A pointer to an initialized struct ttm_mem_global.
30185+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
30186+ * @file_page_offset: Offset into the device address space that is available
30187+ * for buffer data. This ensures compatibility with other users of the
30188+ * address space.
30189+ *
30190+ * Initializes a struct ttm_bo_device.
30191+ * Returns:
30192+ * !0: Failure.
30193+ */
30194+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
30195+ struct ttm_mem_global *mem_glob,
30196+ struct ttm_bo_driver *driver,
30197+ uint64_t file_page_offset);
30198+
30199+/**
30200+ * ttm_bo_reserve:
30201+ *
30202+ * @bo: A pointer to a struct ttm_buffer_object.
30203+ * @interruptible: Sleep interruptible if waiting.
30204+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
30205+ * @use_sequence: If @bo is already reserved, only sleep waiting for
30206+ * it to become unreserved if @sequence < (@bo)->sequence.
30207+ *
30208+ * Locks a buffer object for validation (or prevents other processes from
30209+ * locking it for validation) and removes it from lru lists, while taking
30210+ * a number of measures to prevent deadlocks.
30211+ *
30212+ * Deadlocks may occur when two processes try to reserve multiple buffers in
30213+ * different order, either by will or as a result of a buffer being evicted
30214+ * to make room for a buffer already reserved. (Buffers are reserved before
30215+ * they are evicted). The following algorithm prevents such deadlocks from
30216+ * occurring:
30217+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
30218+ * reservation they are removed from the lru list. This stops a reserved buffer
30219+ * from being evicted. However the lru spinlock is released between the time
30220+ * a buffer is selected for eviction and the time it is reserved.
30221+ * Therefore a check is made when a buffer is reserved for eviction, that it
30222+ * is still the first buffer in the lru list, before it is removed from the
30223+ * list. @check_lru == 1 forces this check. If it fails, the function returns
30224+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
30225+ * the procedure.
30226+ * 2) Processes attempting to reserve multiple buffers other than for eviction,
30227+ * (typically execbuf), should first obtain a unique 32-bit
30228+ * validation sequence number,
30229+ * and call this function with @use_sequence == 1 and @sequence == the unique
30230+ * sequence number. If upon call of this function, the buffer object is already
30231+ * reserved, the validation sequence is checked against the validation
30232+ * sequence of the process currently reserving the buffer,
30233+ * and if the current validation sequence is greater than that of the process
30234+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
30235+ * waiting for the buffer to become unreserved, after which it retries reserving.
30236+ * The caller should, when receiving an -EAGAIN error
30237+ * release all its buffer reservations, wait for @bo to become unreserved, and
30238+ * then rerun the validation with the same validation sequence. This procedure
30239+ * will always guarantee that the process with the lowest validation sequence
30240+ * will eventually succeed, preventing both deadlocks and starvation.
30241+ *
30242+ * Returns:
30243+ * -EAGAIN: The reservation may cause a deadlock. Release all buffer reservations,
30244+ * wait for @bo to become unreserved and try again. (only if use_sequence == 1).
30245+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
30246+ * a signal. Release all buffer reservations and return to user-space.
30247+ */
30248+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
30249+ bool interruptible,
30250+ bool no_wait, bool use_sequence, uint32_t sequence);
30251+
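/*
 * Editorial sketch, not part of the original patch, of the execbuf
 * retry protocol described above; val_seq, val_list, entry and the
 * unreserve_all() helper are hypothetical:
 *
 * retry:
 *	list_for_each_entry(entry, &val_list, head) {
 *		ret = ttm_bo_reserve(entry->bo, true, false, true, val_seq);
 *		if (ret == -EAGAIN) {
 *			unreserve_all(&val_list);
 *			ret = ttm_bo_wait_unreserved(entry->bo, true);
 *			if (unlikely(ret != 0))
 *				return ret;
 *			goto retry;
 *		}
 *	}
 */
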
30252+/**
30253+ * ttm_bo_unreserve
30254+ *
30255+ * @bo: A pointer to a struct ttm_buffer_object.
30256+ *
30257+ * Unreserve a previous reservation of @bo.
30258+ */
30259+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
30260+
30261+/**
30262+ * ttm_bo_wait_unreserved
30263+ *
30264+ * @bo: A pointer to a struct ttm_buffer_object.
30265+ *
30266+ * Wait for a struct ttm_buffer_object to become unreserved.
30267+ * This is typically used in the execbuf code to relax cpu usage when
30268+ * backing off from a potential deadlock condition.
30269+ */
30270+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
30271+ bool interruptible);
30272+
30273+/**
30274+ * ttm_bo_block_reservation
30275+ *
30276+ * @bo: A pointer to a struct ttm_buffer_object.
30277+ * @interruptible: Use interruptible sleep when waiting.
30278+ * @no_wait: Don't sleep, but rather return -EBUSY.
30279+ *
30280+ * Block reservation for validation by simply reserving the buffer. This is intended
30281+ * for single buffer use only without eviction, and thus needs no deadlock protection.
30282+ *
30283+ * Returns:
30284+ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
30285+ * -ERESTART: If interruptible == 1 and the process received a signal while sleeping.
30286+ */
30287+extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
30288+ bool interruptible, bool no_wait);
30289+
30290+/**
30291+ * ttm_bo_unblock_reservation
30292+ *
30293+ * @bo: A pointer to a struct ttm_buffer_object.
30294+ *
30295+ * Unblocks reservation leaving lru lists untouched.
30296+ */
30297+extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
30298+
30299+/*
30300+ * ttm_bo_util.c
30301+ */
30302+
30303+/**
30304+ * ttm_bo_move_ttm
30305+ *
30306+ * @bo: A pointer to a struct ttm_buffer_object.
30307+ * @evict: 1: This is an eviction. Don't try to pipeline.
30308+ * @no_wait: Never sleep, but rather return with -EBUSY.
30309+ * @new_mem: struct ttm_mem_reg indicating where to move.
30310+ *
30311+ * Optimized move function for a buffer object with both old and
30312+ * new placement backed by a TTM. The function will, if successful,
30313+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
30314+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
30315+ * data remains untouched, and it's up to the caller to free the
30316+ * memory space indicated by @new_mem.
30317+ * Returns:
30318+ * !0: Failure.
30319+ */
30320+
30321+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
30322+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem);
30323+
30324+/**
30325+ * ttm_bo_move_memcpy
30326+ *
30327+ * @bo: A pointer to a struct ttm_buffer_object.
30328+ * @evict: 1: This is an eviction. Don't try to pipeline.
30329+ * @no_wait: Never sleep, but rather return with -EBUSY.
30330+ * @new_mem: struct ttm_mem_reg indicating where to move.
30331+ *
30332+ * Fallback move function for a mappable buffer object in mappable memory.
30333+ * The function will, if successful,
30334+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
30335+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
30336+ * data remains untouched, and it's up to the caller to free the
30337+ * memory space indicated by @new_mem.
30338+ * Returns:
30339+ * !0: Failure.
30340+ */
30341+
30342+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
30343+ bool evict,
30344+ bool no_wait, struct ttm_mem_reg *new_mem);
30345+
30346+/**
30347+ * ttm_bo_free_old_node
30348+ *
30349+ * @bo: A pointer to a struct ttm_buffer_object.
30350+ *
30351+ * Utility function to free an old placement after a successful move.
30352+ */
30353+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
30354+
30355+/**
30356+ * ttm_bo_move_accel_cleanup
30357+ *
30358+ * @bo: A pointer to a struct ttm_buffer_object.
30359+ * @sync_obj: A sync object that signals when moving is complete.
30360+ * @sync_obj_arg: An argument to pass to the sync object idle / wait
30361+ * functions.
30362+ * @evict: This is an evict move. Don't return until the buffer is idle.
30363+ * @no_wait: Never sleep, but rather return with -EBUSY.
30364+ * @new_mem: struct ttm_mem_reg indicating where to move.
30365+ *
30366+ * Accelerated move function to be called when an accelerated move
30367+ * has been scheduled. The function will create a new temporary buffer object
30368+ * representing the old placement, and put the sync object on both buffer
30369+ * objects. After that the newly created buffer object is unref'd to be
30370+ * destroyed when the move is complete. This will help pipeline
30371+ * buffer moves.
30372+ */
30373+
30374+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
30375+ void *sync_obj,
30376+ void *sync_obj_arg,
30377+ bool evict, bool no_wait,
30378+ struct ttm_mem_reg *new_mem);
30379+/**
30380+ * ttm_io_prot
30381+ *
30382+ * @caching_flags: Caching flags as TTM_PL_FLAG_XX values.
30383+ * @tmp: Page protection flag for a normal, cached mapping.
30384+ *
30385+ * Utility function that returns the pgprot_t that should be used for
30386+ * setting up a PTE with the caching model indicated by @caching_flags.
30387+ */
30388+extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
30389+
30390+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
30391+#define TTM_HAS_AGP
30392+#include <linux/agp_backend.h>
30393+
30394+/**
30395+ * ttm_agp_backend_init
30396+ *
30397+ * @bdev: Pointer to a struct ttm_bo_device.
30398+ * @bridge: The agp bridge this device is sitting on.
30399+ *
30400+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
30401+ * for TT memory. This function uses the linux agpgart interface to
30402+ * bind and unbind memory backing a ttm_tt.
30403+ */
30404+extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
30405+ struct agp_bridge_data *bridge);
30406+#endif
30407+
30408+#endif
30409diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
30410--- a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c 1969-12-31 16:00:00.000000000 -0800
30411+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c 2009-04-07 13:28:38.000000000 -0700
30412@@ -0,0 +1,529 @@
30413+/**************************************************************************
30414+ *
30415+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
30416+ * All Rights Reserved.
30417+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
30418+ * All Rights Reserved.
30419+ *
30420+ * Permission is hereby granted, free of charge, to any person obtaining a
30421+ * copy of this software and associated documentation files (the
30422+ * "Software"), to deal in the Software without restriction, including
30423+ * without limitation the rights to use, copy, modify, merge, publish,
30424+ * distribute, sub license, and/or sell copies of the Software, and to
30425+ * permit persons to whom the Software is furnished to do so, subject to
30426+ * the following conditions:
30427+ *
30428+ * The above copyright notice and this permission notice (including the
30429+ * next paragraph) shall be included in all copies or substantial portions
30430+ * of the Software.
30431+ *
30432+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30433+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30434+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
30435+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
30436+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
30437+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
30438+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
30439+ *
30440+ **************************************************************************/
30441+/*
30442+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
30443+ */
30444+
30445+#include "ttm/ttm_bo_driver.h"
30446+#include "ttm/ttm_placement_common.h"
30447+#include "ttm/ttm_pat_compat.h"
30448+#include <linux/io.h>
30449+#include <linux/highmem.h>
30450+#include <linux/wait.h>
30451+#include <linux/version.h>
30452+
30453+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
30454+{
30455+ struct ttm_mem_reg *old_mem = &bo->mem;
30456+
30457+ if (old_mem->mm_node) {
30458+ spin_lock(&bo->bdev->lru_lock);
30459+ drm_mm_put_block(old_mem->mm_node);
30460+ spin_unlock(&bo->bdev->lru_lock);
30461+ }
30462+ old_mem->mm_node = NULL;
30463+}
30464+
30465+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
30466+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
30467+{
30468+ struct ttm_tt *ttm = bo->ttm;
30469+ struct ttm_mem_reg *old_mem = &bo->mem;
30470+ uint32_t save_flags = old_mem->flags;
30471+ uint32_t save_proposed_flags = old_mem->proposed_flags;
30472+ int ret;
30473+
30474+ if (old_mem->mem_type != TTM_PL_SYSTEM) {
30475+ ttm_tt_unbind(ttm);
30476+ ttm_bo_free_old_node(bo);
30477+ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
30478+ TTM_PL_MASK_MEM);
30479+ old_mem->mem_type = TTM_PL_SYSTEM;
30480+ save_flags = old_mem->flags;
30481+ }
30482+
30483+ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
30484+ if (unlikely(ret != 0))
30485+ return ret;
30486+
30487+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
30488+ ret = ttm_tt_bind(ttm, new_mem);
30489+ if (unlikely(ret != 0))
30490+ return ret;
30491+ }
30492+
30493+ *old_mem = *new_mem;
30494+ new_mem->mm_node = NULL;
30495+ old_mem->proposed_flags = save_proposed_flags;
30496+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
30497+ return 0;
30498+}
30499+
30500+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
30501+ void **virtual)
30502+{
30503+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
30504+ unsigned long bus_offset;
30505+ unsigned long bus_size;
30506+ unsigned long bus_base;
30507+ int ret;
30508+ void *addr;
30509+
30510+ *virtual = NULL;
30511+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
30512+ if (ret || bus_size == 0)
30513+ return ret;
30514+
30515+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
30516+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
30517+ else {
30518+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
30519+ if (mem->flags & TTM_PL_FLAG_WC)
30520+ addr = ioremap_wc(bus_base + bus_offset, bus_size);
30521+ else
30522+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
30523+#else
30524+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
30525+#endif
30526+ if (!addr)
30527+ return -ENOMEM;
30528+ }
30529+ *virtual = addr;
30530+ return 0;
30531+}
30532+
30533+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
30534+ void *virtual)
30535+{
30536+ struct ttm_mem_type_manager *man;
30537+
30538+ man = &bdev->man[mem->mem_type];
30539+
30540+ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
30541+ iounmap(virtual);
30542+}
30543+
30544+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
30545+{
30546+ uint32_t *dstP =
30547+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
30548+ uint32_t *srcP =
30549+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
30550+
30551+ int i;
30552+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
30553+ iowrite32(ioread32(srcP++), dstP++);
30554+ return 0;
30555+}
30556+
30557+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
30558+ unsigned long page)
30559+{
30560+ struct page *d = ttm_tt_get_page(ttm, page);
30561+ void *dst;
30562+
30563+ if (!d)
30564+ return -ENOMEM;
30565+
30566+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
30567+ dst = kmap(d);
30568+ if (!dst)
30569+ return -ENOMEM;
30570+
30571+ memcpy_fromio(dst, src, PAGE_SIZE);
30572+ kunmap(d);
30573+ return 0;
30574+}
30575+
30576+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
30577+ unsigned long page)
30578+{
30579+ struct page *s = ttm_tt_get_page(ttm, page);
30580+ void *src;
30581+
30582+ if (!s)
30583+ return -ENOMEM;
30584+
30585+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
30586+ src = kmap(s);
30587+ if (!src)
30588+ return -ENOMEM;
30589+
30590+ memcpy_toio(dst, src, PAGE_SIZE);
30591+ kunmap(s);
30592+ return 0;
30593+}
30594+
30595+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
30596+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
30597+{
30598+ struct ttm_bo_device *bdev = bo->bdev;
30599+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
30600+ struct ttm_tt *ttm = bo->ttm;
30601+ struct ttm_mem_reg *old_mem = &bo->mem;
30602+ struct ttm_mem_reg old_copy = *old_mem;
30603+ void *old_iomap;
30604+ void *new_iomap;
30605+ int ret;
30606+ uint32_t save_flags = old_mem->flags;
30607+ uint32_t save_proposed_flags = old_mem->proposed_flags;
30608+ unsigned long i;
30609+ unsigned long page;
30610+ unsigned long add = 0;
30611+ int dir;
30612+
30613+ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
30614+ if (ret)
30615+ return ret;
30616+ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
30617+ if (ret)
30618+ goto out;
30619+
30620+ if (old_iomap == NULL && new_iomap == NULL)
30621+ goto out2;
30622+ if (old_iomap == NULL && ttm == NULL)
30623+ goto out2;
30624+
30625+ add = 0;
30626+ dir = 1;
30627+
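 /*
 * Editorial note: when the move stays within one memory type and the
 * ranges may overlap, the pages are copied in reverse order so the
 * destination does not overwrite source pages before they are read.
 */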
30628+ if ((old_mem->mem_type == new_mem->mem_type) &&
30629+ (new_mem->mm_node->start <
30630+ old_mem->mm_node->start + old_mem->mm_node->size)) {
30631+ dir = -1;
30632+ add = new_mem->num_pages - 1;
30633+ }
30634+
30635+ for (i = 0; i < new_mem->num_pages; ++i) {
30636+ page = i * dir + add;
30637+ if (old_iomap == NULL)
30638+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
30639+ else if (new_iomap == NULL)
30640+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
30641+ else
30642+ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
30643+ if (ret)
30644+ goto out1;
30645+ }
30646+ mb();
30647+ out2:
30648+ ttm_bo_free_old_node(bo);
30649+
30650+ *old_mem = *new_mem;
30651+ new_mem->mm_node = NULL;
30652+ old_mem->proposed_flags = save_proposed_flags;
30653+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
30654+
30655+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
30656+ ttm_tt_unbind(ttm);
30657+ ttm_tt_destroy(ttm);
30658+ bo->ttm = NULL;
30659+ }
30660+
30661+ out1:
30662+ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
30663+ out:
30664+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
30665+ return ret;
30666+}
30667+
30668+/**
30669+ * ttm_buffer_object_transfer
30670+ *
30671+ * @bo: A pointer to a struct ttm_buffer_object.
30672+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
30673+ * holding the data of @bo with the old placement.
30674+ *
30675+ * This is a utility function that may be called after an accelerated move
30676+ * has been scheduled. A new buffer object is created as a placeholder for
30677+ * the old data while it's being copied. When that buffer object is idle,
30678+ * it can be destroyed, releasing the space of the old placement.
30679+ * Returns:
30680+ * !0: Failure.
30681+ */
30682+
30683+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
30684+ struct ttm_buffer_object **new_obj)
30685+{
30686+ struct ttm_buffer_object *fbo;
30687+ struct ttm_bo_device *bdev = bo->bdev;
30688+ struct ttm_bo_driver *driver = bdev->driver;
30689+
30690+ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
30691+ if (!fbo)
30692+ return -ENOMEM;
30693+
30694+ *fbo = *bo;
30695+ mutex_init(&fbo->mutex);
30696+ mutex_lock(&fbo->mutex);
30697+
30698+ init_waitqueue_head(&fbo->event_queue);
30699+ INIT_LIST_HEAD(&fbo->ddestroy);
30700+ INIT_LIST_HEAD(&fbo->lru);
30701+
30702+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
30703+ if (fbo->mem.mm_node)
30704+ fbo->mem.mm_node->private = (void *)fbo;
30705+ kref_init(&fbo->list_kref);
30706+ kref_init(&fbo->kref);
30707+
30708+ mutex_unlock(&fbo->mutex);
30709+
30710+ *new_obj = fbo;
30711+ return 0;
30712+}
30713+
30714+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
30715+{
30716+#if defined(__i386__) || defined(__x86_64__)
30717+ if (caching_flags & TTM_PL_FLAG_WC) {
30718+ tmp = pgprot_ttm_x86_wc(tmp);
30719+ } else if (boot_cpu_data.x86 > 3) {
30720+ tmp = pgprot_noncached(tmp);
30721+ }
30722+#elif defined(__powerpc__)
30723+ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
30724+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
30725+ if (caching_flags & TTM_PL_FLAG_UNCACHED)
30726+ pgprot_val(tmp) |= _PAGE_GUARDED;
30727+ }
30728+#endif
30729+#if defined(__ia64__)
30730+ if (caching_flags & TTM_PL_FLAG_WC)
30731+ tmp = pgprot_writecombine(tmp);
30732+ else
30733+ tmp = pgprot_noncached(tmp);
30734+#endif
30735+#if defined(__sparc__)
30736+ if (!(caching_flags & TTM_PL_FLAG_CACHED))
30737+ tmp = pgprot_noncached(tmp);
30738+#endif
30739+ return tmp;
30740+}
30741+
30742+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
30743+ unsigned long bus_base,
30744+ unsigned long bus_offset,
30745+ unsigned long bus_size,
30746+ struct ttm_bo_kmap_obj *map)
30747+{
30748+ struct ttm_bo_device * bdev = bo->bdev;
30749+ struct ttm_mem_reg * mem = &bo->mem;
30750+ struct ttm_mem_type_manager * man = &bdev->man[mem->mem_type];
30751+
30752+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
30753+ map->bo_kmap_type = ttm_bo_map_premapped;
30754+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
 } else {
30755+ map->bo_kmap_type = ttm_bo_map_iomap;
30756+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
30757+ if (mem->flags & TTM_PL_FLAG_WC)
30758+ map->virtual = ioremap_wc(bus_base + bus_offset, bus_size);
30759+ else
30760+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
30761+#else
30762+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
30763+#endif
30764+ }
30765+ return (!map->virtual) ? -ENOMEM : 0;
30766+}
30767+
30768+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
30769+ unsigned long start_page,
30770+ unsigned long num_pages,
30771+ struct ttm_bo_kmap_obj *map)
30772+{
30773+ struct ttm_mem_reg * mem = &bo->mem;
 pgprot_t prot;
30774+ struct ttm_tt * ttm = bo->ttm;
30775+ struct page * d;
30776+ int i;
30777+ BUG_ON(!ttm);
30778+ if (num_pages == 1 && (mem->flags & TTM_PL_FLAG_CACHED)) {
30779+ /*
30780+ * We're mapping a single page, and the desired
30781+ * page protection is consistent with the bo.
30782+ */
30783+ map->bo_kmap_type = ttm_bo_map_kmap;
30784+ map->page = ttm_tt_get_page(ttm, start_page);
30785+ map->virtual = kmap(map->page);
30786+ } else {
30787+ /*
30788+ * Populate the part we're mapping.
30789+ */
30790+ for (i = start_page; i < start_page + num_pages; ++i) {
30791+ d = ttm_tt_get_page(ttm, i);
 if (!d)
30792+ return -ENOMEM;
30793+ }
30794+
30795+ /*
30796+ * We need to use vmap to get the desired page protection
30797+ * or to make the buffer object look contiguous.
30798+ */
30799+ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
30800+ PAGE_KERNEL :
30801+ ttm_io_prot(mem->flags, PAGE_KERNEL);
30802+ map->bo_kmap_type = ttm_bo_map_vmap;
30803+ map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot);
30804+ }
30805+ return (!map->virtual) ? -ENOMEM : 0;
30806+}
30807+
30808+int ttm_bo_kmap(struct ttm_buffer_object *bo,
30809+ unsigned long start_page, unsigned long num_pages,
30810+ struct ttm_bo_kmap_obj *map)
30811+{
30812+ int ret;
30813+ unsigned long bus_base;
30814+ unsigned long bus_offset;
30815+ unsigned long bus_size;
30816+ BUG_ON(!list_empty(&bo->swap));
30817+ map->virtual = NULL;
30818+ if (num_pages > bo->num_pages)
30819+ return -EINVAL;
30820+ if (start_page > bo->num_pages)
30821+ return -EINVAL;
30822+#if 0
30823+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
30824+ return -EPERM;
30825+#endif
30826+ ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
30827+ &bus_offset, &bus_size);
30828+ if (ret)
30829+ return ret;
30830+ if (bus_size == 0) {
30831+ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
30832+ } else {
30833+ bus_offset += start_page << PAGE_SHIFT;
30834+ bus_size = num_pages << PAGE_SHIFT;
30835+ return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
30836+ }
30837+}
30838+
30839+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
30840+{
30841+ if (!map->virtual)
30842+ return;
30843+ switch (map->bo_kmap_type) {
30844+ case ttm_bo_map_iomap:
30845+ iounmap(map->virtual);
30846+ break;
30847+ case ttm_bo_map_vmap:
30848+ vunmap(map->virtual);
30849+ break;
30850+ case ttm_bo_map_kmap:
30851+ kunmap(map->page);
30852+ break;
30853+ case ttm_bo_map_premapped:
30854+ break;
30855+ default:
30856+ BUG();
30857+ }
30858+ map->virtual = NULL;
30859+ map->page = NULL;
30860+}
30861+
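/*
 * Illustrative usage, not part of the original patch: map the first
 * page of a cached buffer object, clear it from the CPU and unmap:
 *
 *	struct ttm_bo_kmap_obj map;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	memset(map.virtual, 0, PAGE_SIZE);
 *	ttm_bo_kunmap(&map);
 */
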
30862+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
30863+ unsigned long dst_offset,
30864+ unsigned long *pfn, pgprot_t * prot)
30865+{
30866+ struct ttm_mem_reg * mem = &bo->mem;
30867+ struct ttm_bo_device * bdev = bo->bdev;
30868+ unsigned long bus_offset;
30869+ unsigned long bus_size;
30870+ unsigned long bus_base;
30871+ int ret;
30872+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
30873+ &bus_size);
30874+ if (ret)
30875+ return -EINVAL;
30876+ if (bus_size != 0)
30877+ *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
30878+ else if (!bo->ttm)
30880+ return -EINVAL;
30881+ else
30882+ *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
30883+ dst_offset >> PAGE_SHIFT));
30884+ *prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
30885+ PAGE_KERNEL : ttm_io_prot(mem->flags, PAGE_KERNEL);
30888+ return 0;
30889+}
30890+
30891+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
30892+ void *sync_obj,
30893+ void *sync_obj_arg,
30894+ bool evict, bool no_wait,
30895+ struct ttm_mem_reg *new_mem)
30896+{
30897+ struct ttm_bo_device * bdev = bo->bdev;
30898+ struct ttm_bo_driver * driver = bdev->driver;
30899+ struct ttm_mem_type_manager * man = &bdev->man[new_mem->mem_type];
30900+ struct ttm_mem_reg * old_mem = &bo->mem;
30901+ int ret;
30902+ uint32_t save_flags = old_mem->flags;
30903+ uint32_t save_proposed_flags = old_mem->proposed_flags;
30904+ struct ttm_buffer_object * old_obj;
30905+ if (bo->sync_obj)
30906+ driver->sync_obj_unref(&bo->sync_obj);
30907+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
30908+ bo->sync_obj_arg = sync_obj_arg;
30909+ if (evict) {
30910+ ret = ttm_bo_wait(bo, false, false, false);
30911+ if (ret)
30912+ return ret;
30913+ ttm_bo_free_old_node(bo);
30914+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) {
30915+ ttm_tt_unbind(bo->ttm);
 ttm_tt_destroy(bo->ttm);
 bo->ttm = NULL;
30916+ }
30917+ } else {
30918+
30919+ /* This should help pipeline ordinary buffer moves.
30920+ *
30921+ * Hang old buffer memory on a new buffer object,
30922+ * and leave it to be released when the GPU
30923+ * operation has completed.
30924+ */
30925+ ret = ttm_buffer_object_transfer(bo, &old_obj);
30926+ if (ret)
30927+ return ret;
30928+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
30929+ old_obj->ttm = NULL;
30930+ else
30931+ bo->ttm = NULL;
30932+ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
30933+ ttm_bo_unreserve(old_obj);
30934+ }
30935+
30936+ *old_mem = *new_mem;
30937+ new_mem->mm_node = NULL;
30938+ old_mem->proposed_flags = save_proposed_flags;
30939+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
30940+ return 0;
30941+}
30942diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
30943--- a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c 1969-12-31 16:00:00.000000000 -0800
30944+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c 2009-04-07 13:28:38.000000000 -0700
30945@@ -0,0 +1,596 @@
30946+/**************************************************************************
30947+ *
30948+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
30949+ * All Rights Reserved.
30950+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
30951+ * All Rights Reserved.
30952+ *
30953+ * Permission is hereby granted, free of charge, to any person obtaining a
30954+ * copy of this software and associated documentation files (the
30955+ * "Software"), to deal in the Software without restriction, including
30956+ * without limitation the rights to use, copy, modify, merge, publish,
30957+ * distribute, sub license, and/or sell copies of the Software, and to
30958+ * permit persons to whom the Software is furnished to do so, subject to
30959+ * the following conditions:
30960+ *
30961+ * The above copyright notice and this permission notice (including the
30962+ * next paragraph) shall be included in all copies or substantial portions
30963+ * of the Software.
30964+ *
30965+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30966+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30967+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
30968+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
30969+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
30970+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
30971+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
30972+ *
30973+ **************************************************************************/
30974+/*
30975+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30976+ */
30977+
30978+
30979+#include "ttm/ttm_bo_driver.h"
30980+#include "ttm/ttm_placement_common.h"
30981+#include <linux/mm.h>
30982+#include <linux/version.h>
30983+#include <linux/rbtree.h>
30984+#include <asm/uaccess.h>
30985+
30986+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
30987+#error "TTM doesn't build on kernel versions below 2.6.25."
30988+#endif
30989+
30990+#define TTM_BO_VM_NUM_PREFAULT 16
30991+
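/*
 * Editorial note: the helper below walks the address-space rb-tree for
 * the buffer object whose vm_node starts at or below @page_start, then
 * verifies that the candidate actually spans the whole
 * [page_start, page_start + num_pages) range.
 */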
30992+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
30993+ unsigned long page_start,
30994+ unsigned long num_pages)
30995+{
30996+ struct rb_node *cur = bdev->addr_space_rb.rb_node;
30997+ unsigned long cur_offset;
30998+ struct ttm_buffer_object *bo;
30999+ struct ttm_buffer_object *best_bo = NULL;
31000+
31001+ while (likely(cur != NULL)) {
31002+ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
31003+ cur_offset = bo->vm_node->start;
31004+ if (page_start >= cur_offset) {
31005+ cur = cur->rb_right;
31006+ best_bo = bo;
31007+ if (page_start == cur_offset)
31008+ break;
31009+ } else
31010+ cur = cur->rb_left;
31011+ }
31012+
31013+ if (unlikely(best_bo == NULL))
31014+ return NULL;
31015+
31016+ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
31017+ (page_start + num_pages)))
31018+ return NULL;
31019+
31020+ return best_bo;
31021+}
31022+
31023+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
31024+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
31025+{
31026+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
31027+ vma->vm_private_data;
31028+ struct ttm_bo_device *bdev = bo->bdev;
31029+ unsigned long bus_base;
31030+ unsigned long bus_offset;
31031+ unsigned long bus_size;
31032+ unsigned long page_offset;
31033+ unsigned long page_last;
31034+ unsigned long pfn;
31035+ struct ttm_tt *ttm = NULL;
31036+ struct page *page;
31037+ int ret;
31038+ int i;
31039+ bool is_iomem;
31040+ unsigned long address = (unsigned long)vmf->virtual_address;
31041+ int retval = VM_FAULT_NOPAGE;
31042+
31043+ ret = ttm_bo_reserve(bo, true, false, false, 0);
31044+ if (unlikely(ret != 0))
31045+ return VM_FAULT_NOPAGE;
31046+
31047+ mutex_lock(&bo->mutex);
31048+
31049+ /*
31050+ * Wait for buffer data in transit, due to a pipelined
31051+ * move.
31052+ */
31053+
31054+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
31055+ ret = ttm_bo_wait(bo, false, true, false);
31056+ if (unlikely(ret != 0)) {
31057+ retval = (ret != -ERESTART) ?
31058+ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
31059+ goto out_unlock;
31060+ }
31061+ }
31062+
31063+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
31064+ &bus_size);
31065+ if (unlikely(ret != 0)) {
31066+ retval = VM_FAULT_SIGBUS;
31067+ goto out_unlock;
31068+ }
31069+
31070+ is_iomem = (bus_size != 0);
31071+
31072+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
31073+ bo->vm_node->start - vma->vm_pgoff;
31074+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
31075+ bo->vm_node->start - vma->vm_pgoff;
31076+
31077+ if (unlikely(page_offset >= bo->num_pages)) {
31078+ retval = VM_FAULT_SIGBUS;
31079+ goto out_unlock;
31080+ }
31081+
31082+ /*
31083+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
31084+ * since the mmap_sem is only held in read mode. However, we
31085+ * modify only the caching bits of vma->vm_page_prot and
31086+ * consider those bits protected by
31087+ * the bo->mutex, as we should be the only writers.
31088+ * There shouldn't really be any readers of these bits except
31089+ * within vm_insert_mixed()? fork?
31090+ *
31091+ * TODO: Add a list of vmas to the bo, and change the
31092+ * vma->vm_page_prot when the object changes caching policy, with
31093+ * the correct locks held.
31094+ */
31095+
31096+ if (is_iomem) {
31097+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
31098+ vma->vm_page_prot);
31099+ } else {
31100+ ttm = bo->ttm;
31101+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
31102+ vm_get_page_prot(vma->vm_flags) :
31103+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
31104+ }
31105+
31106+ /*
31107+ * Speculatively prefault a number of pages. Only error on
31108+ * first page.
31109+ */
31110+
31111+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
31112+
31113+ if (is_iomem)
31114+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
31115+ page_offset;
31116+ else {
31117+ page = ttm_tt_get_page(ttm, page_offset);
31118+ if (unlikely(!page && i == 0)) {
31119+ retval = VM_FAULT_OOM;
31120+ goto out_unlock;
31121+ } else if (unlikely(!page)) {
31122+ break;
31123+ }
31124+ pfn = page_to_pfn(page);
31125+ }
31126+
31127+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
31128+ ret = vm_insert_mixed(vma, address, pfn);
31129+#else
31130+ ret = vm_insert_pfn(vma, address, pfn);
31131+#endif
31132+ /*
31133+ * Somebody beat us to this PTE or prefaulting to
31134+ * an already populated PTE, or prefaulting error.
31135+ */
31136+
31137+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
31138+ break;
31139+ else if (unlikely(ret != 0)) {
31140+ retval =
31141+ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
31142+ goto out_unlock;
31143+ }
31145+
31146+ address += PAGE_SIZE;
31147+ if (unlikely(++page_offset >= page_last))
31148+ break;
31149+ }
31150+
31151+ out_unlock:
31152+ mutex_unlock(&bo->mutex);
31153+ ttm_bo_unreserve(bo);
31154+ return retval;
31155+}
31156+
31157+#else
31158+
31159+static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
31160+ unsigned long address)
31161+{
31162+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
31163+ vma->vm_private_data;
31164+ struct ttm_bo_device *bdev = bo->bdev;
31165+ unsigned long bus_base;
31166+ unsigned long bus_offset;
31167+ unsigned long bus_size;
31168+ unsigned long page_offset;
31169+ unsigned long page_last;
31170+ unsigned long pfn;
31171+ struct ttm_tt *ttm = NULL;
31172+ struct page *page;
31173+ int ret;
31174+ int i;
31175+ bool is_iomem;
31176+ unsigned long retval = NOPFN_REFAULT;
31177+
31178+ ret = ttm_bo_reserve(bo, true, false, false, 0);
31179+ if (unlikely(ret != 0))
31180+ return NOPFN_REFAULT;
31181+
31182+ mutex_lock(&bo->mutex);
31183+
31184+ /*
31185+ * Wait for buffer data in transit, due to a pipelined
31186+ * move.
31187+ */
31188+
31189+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
31190+ ret = ttm_bo_wait(bo, false, true, false);
31191+ if (unlikely(ret != 0)) {
31192+ retval = (ret != -ERESTART) ?
31193+ NOPFN_SIGBUS : NOPFN_REFAULT;
31194+ goto out_unlock;
31195+ }
31196+ }
31197+
31198+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
31199+ &bus_size);
31200+ if (unlikely(ret != 0)) {
31201+ printk(KERN_ERR "Attempted buffer object access "
31202+ "of unmappable object.\n");
31203+ retval = NOPFN_SIGBUS;
31204+ goto out_unlock;
31205+ }
31206+
31207+ is_iomem = (bus_size != 0);
31208+
31209+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
31210+ bo->vm_node->start - vma->vm_pgoff;
31211+
31212+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
31213+ bo->vm_node->start - vma->vm_pgoff;
31214+
31215+ if (unlikely(page_offset >= bo->num_pages)) {
31216+ printk(KERN_ERR "Attempted buffer object access "
31217+ "outside object.\n");
31218+ retval = NOPFN_SIGBUS;
31219+ goto out_unlock;
31220+ }
31221+
31222+ /*
31223+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
31224+ * since the mmap_sem is only held in read mode. However, we
31225+ * modify only the caching bits of vma->vm_page_prot and
31226+ * consider those bits protected by
31227+ * the bo->mutex, as we should be the only writers.
31228+ * There shouldn't really be any readers of these bits except
31229+ * within vm_insert_mixed()? fork?
31230+ *
31231+ * TODO: Add a list of vmas to the bo, and change the
31232+ * vma->vm_page_prot when the object changes caching policy, with
31233+ * the correct locks held.
31234+ */
31235+
31236+ if (is_iomem) {
31237+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
31238+ vma->vm_page_prot);
31239+ } else {
31240+ ttm = bo->ttm;
31241+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
31242+ vm_get_page_prot(vma->vm_flags) :
31243+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
31244+ }
31245+
31246+ /*
31247+ * Speculatively prefault a number of pages. Only error on
31248+ * first page.
31249+ */
31250+
31251+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
31252+
31253+ if (is_iomem)
31254+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
31255+ page_offset;
31256+ else {
31257+ page = ttm_tt_get_page(ttm, page_offset);
31258+ if (unlikely(!page && i == 0)) {
31259+ retval = NOPFN_OOM;
31260+ goto out_unlock;
31261+ } else if (unlikely(!page)) {
31262+ break;
31263+ }
31264+ pfn = page_to_pfn(page);
31265+ }
31266+
31267+ ret = vm_insert_pfn(vma, address, pfn);
31268+
31269+ /*
31270+ * Somebody beat us to this PTE or prefaulting to
31271+ * an already populated PTE, or prefaulting error.
31272+ */
31273+
31274+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
31275+ break;
31276+ else if (unlikely(ret != 0)) {
31277+ retval =
31278+ (ret == -ENOMEM) ? NOPFN_OOM : NOPFN_SIGBUS;
31279+ goto out_unlock;
31280+ }
31283+
31284+ address += PAGE_SIZE;
31285+ if (unlikely(++page_offset >= page_last))
31286+ break;
31287+ }
31288+
31289+ out_unlock:
31290+ mutex_unlock(&bo->mutex);
31291+ ttm_bo_unreserve(bo);
31292+ return retval;
31293+}
31294+#endif
31295+
31296+static void ttm_bo_vm_open(struct vm_area_struct *vma)
31297+{
31298+ struct ttm_buffer_object *bo =
31299+ (struct ttm_buffer_object *)vma->vm_private_data;
31300+
31301+ (void)ttm_bo_reference(bo);
31302+}
31303+
31304+static void ttm_bo_vm_close(struct vm_area_struct *vma)
31305+{
31306+ struct ttm_buffer_object *bo =
31307+ (struct ttm_buffer_object *)vma->vm_private_data;
31308+
31309+ ttm_bo_unref(&bo);
31310+ vma->vm_private_data = NULL;
31311+}
31312+
31313+static struct vm_operations_struct ttm_bo_vm_ops = {
31314+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
31315+ .fault = ttm_bo_vm_fault,
31316+#else
31317+ .nopfn = ttm_bo_vm_nopfn,
31318+#endif
31319+ .open = ttm_bo_vm_open,
31320+ .close = ttm_bo_vm_close
31321+};
31322+
31323+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
31324+ struct ttm_bo_device *bdev)
31325+{
31326+ struct ttm_bo_driver *driver;
31327+ struct ttm_buffer_object *bo;
31328+ int ret;
31329+
31330+ read_lock(&bdev->vm_lock);
31331+ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
31332+ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
31333+ if (likely(bo != NULL))
31334+ ttm_bo_reference(bo);
31335+ read_unlock(&bdev->vm_lock);
31336+
31337+ if (unlikely(bo == NULL)) {
31338+ printk(KERN_ERR "Could not find buffer object to map.\n");
31339+ ret = -EINVAL;
31340+ goto out_unref;
31341+ }
31342+
31343+ driver = bo->bdev->driver;
31344+ if (unlikely(!driver->verify_access)) {
31345+ ret = -EPERM;
31346+ goto out_unref;
31347+ }
31348+ ret = driver->verify_access(bo, filp);
31349+ if (unlikely(ret != 0))
31350+ goto out_unref;
31351+
31352+ vma->vm_ops = &ttm_bo_vm_ops;
31353+
31354+ /*
31355+ * Note: We're transferring the bo reference to
31356+ * vma->vm_private_data here.
31357+ */
31358+
31359+ vma->vm_private_data = bo;
31360+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
31361+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
31362+#else
31363+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
31364+#endif
31365+ return 0;
31366+ out_unref:
31367+ ttm_bo_unref(&bo);
31368+ return ret;
31369+}
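/*
 * A minimal sketch of how a driver might route its mmap() file
 * operation through ttm_bo_mmap(). The psb_mmap name and the use of
 * filp->private_data to recover the ttm_bo_device are illustrative
 * assumptions, not part of this patch.
 */
static int psb_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ttm_bo_device *bdev = filp->private_data;

	/*
	 * ttm_bo_mmap() looks the bo up by vma->vm_pgoff, checks access
	 * via driver->verify_access() and installs ttm_bo_vm_ops.
	 */
	return ttm_bo_mmap(filp, vma, bdev);
}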
31370+
31371+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
31372+{
31373+ if (vma->vm_pgoff != 0)
31374+ return -EACCES;
31375+
31376+ vma->vm_ops = &ttm_bo_vm_ops;
31377+ vma->vm_private_data = ttm_bo_reference(bo);
31378+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
31379+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
31380+#else
31381+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
31382+#endif
31383+ return 0;
31384+}
31385+
31386+ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,
31387+ const char __user * wbuf, char __user * rbuf, size_t count,
31388+ loff_t * f_pos, bool write)
31389+{
31390+ struct ttm_buffer_object *bo;
31391+ struct ttm_bo_driver *driver;
31392+ struct ttm_bo_kmap_obj map;
31393+ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
31394+ unsigned long kmap_offset;
31395+ unsigned long kmap_end;
31396+ unsigned long kmap_num;
31397+ size_t io_size;
31398+ unsigned int page_offset;
31399+ char *virtual;
31400+ int ret;
31401+ bool no_wait = false;
31402+ bool dummy;
31403+
31404+ read_lock(&bdev->vm_lock);
31405+ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
31406+ if (likely(bo != NULL))
31407+ ttm_bo_reference(bo);
31408+ read_unlock(&bdev->vm_lock);
31409+
31410+ if (unlikely(bo == NULL))
31411+ return -EFAULT;
31412+
31413+ driver = bo->bdev->driver;
31414+ if (unlikely(!driver->verify_access)) {
31415+ ret = -EPERM;
31416+ goto out_unref;
31417+ }
31418+
31419+ ret = driver->verify_access(bo, filp);
31420+ if (unlikely(ret != 0))
31421+ goto out_unref;
31422+
31423+ kmap_offset = dev_offset - bo->vm_node->start;
31424+ if (unlikely(kmap_offset >= bo->num_pages)) {
31425+ ret = -EFBIG;
31426+ goto out_unref;
31427+ }
31426+
31427+ page_offset = *f_pos & ~PAGE_MASK;
31428+ io_size = bo->num_pages - kmap_offset;
31429+ io_size = (io_size << PAGE_SHIFT) - page_offset;
31430+ if (count < io_size)
31431+ io_size = count;
31432+
31433+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
31434+ kmap_num = kmap_end - kmap_offset + 1;
31435+
31436+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
31437+
31438+ switch (ret) {
31439+ case 0:
31440+ break;
31441+ case -ERESTART:
31442+ ret = -EINTR;
31443+ goto out_unref;
31444+ case -EBUSY:
31445+ ret = -EAGAIN;
31446+ goto out_unref;
31447+ default:
31448+ goto out_unref;
31449+ }
31450+
31451+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
31452+ if (unlikely(ret != 0))
31453+ goto out_unref;
31454+
31455+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
31456+ virtual += page_offset;
31457+
31458+ if (write)
31459+ ret = copy_from_user(virtual, wbuf, io_size);
31460+ else
31461+ ret = copy_to_user(rbuf, virtual, io_size);
31462+
31463+ ttm_bo_kunmap(&map);
31464+ ttm_bo_unreserve(bo);
31465+ ttm_bo_unref(&bo);
31466+
31467+ if (unlikely(ret != 0))
31468+ return -EFBIG;
31469+
31470+ *f_pos += io_size;
31471+
31472+ return io_size;
31473+ out_unref:
31474+ ttm_bo_unref(&bo);
31475+ return ret;
31476+}
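/*
 * ttm_bo_io() is correspondingly meant to back a character-device
 * read()/write() pair. A sketch under the same private_data assumption
 * as above; psb_read is an illustrative name.
 */
static ssize_t psb_read(struct file *filp, char __user *buf,
			size_t count, loff_t *f_pos)
{
	struct ttm_bo_device *bdev = filp->private_data;

	/* wbuf is NULL for a read; data is copied out of the kmapped bo. */
	return ttm_bo_io(bdev, filp, NULL, buf, count, f_pos, false);
}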
31477+
31478+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object * bo, const char __user * wbuf,
31479+ char __user * rbuf, size_t count, loff_t * f_pos,
31480+ bool write)
31481+{
31482+ struct ttm_bo_kmap_obj map;
31483+ unsigned long kmap_offset;
31484+ unsigned long kmap_end;
31485+ unsigned long kmap_num;
31486+ size_t io_size;
31487+ unsigned int page_offset;
31488+ char *virtual;
31489+ int ret;
31490+ bool no_wait = false;
31491+ bool dummy;
31492+
31493+ kmap_offset = (*f_pos >> PAGE_SHIFT);
31494+ if (unlikely(kmap_offset >= bo->num_pages))
31495+ return -EFBIG;
31496+
31497+ page_offset = *f_pos & ~PAGE_MASK;
31498+ io_size = bo->num_pages - kmap_offset;
31499+ io_size = (io_size << PAGE_SHIFT) - page_offset;
31500+ if (count < io_size)
31501+ io_size = count;
31502+
31503+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
31504+ kmap_num = kmap_end - kmap_offset + 1;
31505+
31506+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
31507+
31508+ switch (ret) {
31509+ case 0:
31510+ break;
31511+ case -ERESTART:
31512+ return -EINTR;
31513+ case -EBUSY:
31514+ return -EAGAIN;
31515+ default:
31516+ return ret;
31517+ }
31518+
31519+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
31520+ if (unlikely(ret != 0))
31521+ return ret;
31522+
31523+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
31524+ virtual += page_offset;
31525+
31526+ if (write)
31527+ ret = copy_from_user(virtual, wbuf, io_size);
31528+ else
31529+ ret = copy_to_user(rbuf, virtual, io_size);
31530+
31531+ ttm_bo_kunmap(&map);
31532+ ttm_bo_unreserve(bo);
31533+ ttm_bo_unref(&bo);
31534+
31535+ if (unlikely(ret != 0))
31536+ return ret;
31537+
31538+ *f_pos += io_size;
31539+
31540+ return io_size;
31541+}
31542diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
31543--- a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c 1969-12-31 16:00:00.000000000 -0800
31544+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c 2009-04-07 13:28:38.000000000 -0700
31545@@ -0,0 +1,115 @@
31546+/**************************************************************************
31547+ *
31548+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
31549+ * All Rights Reserved.
31550+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
31551+ * All Rights Reserved.
31552+ *
31553+ * Permission is hereby granted, free of charge, to any person obtaining a
31554+ * copy of this software and associated documentation files (the
31555+ * "Software"), to deal in the Software without restriction, including
31556+ * without limitation the rights to use, copy, modify, merge, publish,
31557+ * distribute, sub license, and/or sell copies of the Software, and to
31558+ * permit persons to whom the Software is furnished to do so, subject to
31559+ * the following conditions:
31560+ *
31561+ * The above copyright notice and this permission notice (including the
31562+ * next paragraph) shall be included in all copies or substantial portions
31563+ * of the Software.
31564+ *
31565+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
31566+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31567+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
31568+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
31569+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
31570+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
31571+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
31572+ *
31573+ **************************************************************************/
31574+
31575+#include "ttm/ttm_execbuf_util.h"
31576+#include "ttm/ttm_bo_driver.h"
31577+#include "ttm/ttm_placement_common.h"
31578+#include <linux/wait.h>
31579+#include <linux/sched.h>
31580+
31581+void ttm_eu_backoff_reservation(struct list_head *list)
31582+{
31583+ struct ttm_validate_buffer *entry;
31584+
31585+ list_for_each_entry(entry, list, head) {
31586+ struct ttm_buffer_object *bo = entry->bo;
31587+ if (!entry->reserved)
31588+ continue;
31589+
31590+ entry->reserved = false;
31591+ ttm_bo_unreserve(bo);
31592+ }
31593+}
31594+
31595+/*
31596+ * Reserve buffers for validation.
31597+ *
31598+ * If a buffer in the list is marked for CPU access, we back off and
31599+ * wait for that buffer to become free for GPU access.
31600+ *
31601+ * If a buffer is reserved for another validation, the validator with
31602+ * the highest validation sequence backs off and waits for that buffer
31603+ * to become unreserved. This prevents deadlocks when validating multiple
31604+ * buffers in different orders.
31605+ */
31606+
31607+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
31608+{
31609+ struct ttm_validate_buffer *entry;
31610+ int ret;
31611+
31612+ retry:
31613+ list_for_each_entry(entry, list, head) {
31614+ struct ttm_buffer_object *bo = entry->bo;
31615+
31616+ entry->reserved = false;
31617+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
31618+ if (ret != 0) {
31619+ ttm_eu_backoff_reservation(list);
31620+ if (ret == -EAGAIN) {
31621+ ret = ttm_bo_wait_unreserved(bo, true);
31622+ if (unlikely(ret != 0))
31623+ return ret;
31624+ goto retry;
31625+ } else
31626+ return ret;
31627+ }
31628+
31629+ entry->reserved = true;
31630+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
31631+ ttm_eu_backoff_reservation(list);
31632+ ret = ttm_bo_wait_cpu(bo, false);
31633+ if (ret)
31634+ return ret;
31635+ goto retry;
31636+ }
31637+ }
31638+ return 0;
31639+}
31640+
31641+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
31642+{
31643+ struct ttm_validate_buffer *entry;
31644+
31645+ list_for_each_entry(entry, list, head) {
31646+ struct ttm_buffer_object *bo = entry->bo;
31647+ struct ttm_bo_driver *driver = bo->bdev->driver;
31648+ void *old_sync_obj;
31649+
31650+ mutex_lock(&bo->mutex);
31651+ old_sync_obj = bo->sync_obj;
31652+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
31653+ bo->sync_obj_arg = entry->new_sync_obj_arg;
31654+ mutex_unlock(&bo->mutex);
31655+ ttm_bo_unreserve(bo);
31656+ entry->reserved = false;
31657+ if (old_sync_obj)
31658+ driver->sync_obj_unref(&old_sync_obj);
31659+ }
31660+}
31661diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
31662--- a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h 1969-12-31 16:00:00.000000000 -0800
31663+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h 2009-04-07 13:28:38.000000000 -0700
31664@@ -0,0 +1,110 @@
31665+/**************************************************************************
31666+ *
31667+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
31668+ * All Rights Reserved.
31669+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
31670+ * All Rights Reserved.
31671+ *
31672+ * Permission is hereby granted, free of charge, to any person obtaining a
31673+ * copy of this software and associated documentation files (the
31674+ * "Software"), to deal in the Software without restriction, including
31675+ * without limitation the rights to use, copy, modify, merge, publish,
31676+ * distribute, sub license, and/or sell copies of the Software, and to
31677+ * permit persons to whom the Software is furnished to do so, subject to
31678+ * the following conditions:
31679+ *
31680+ * The above copyright notice and this permission notice (including the
31681+ * next paragraph) shall be included in all copies or substantial portions
31682+ * of the Software.
31683+ *
31684+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
31685+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31686+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
31687+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
31688+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
31689+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
31690+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
31691+ *
31692+ **************************************************************************/
31693+/*
31694+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
31695+ */
31696+
31697+#ifndef _TTM_EXECBUF_UTIL_H_
31698+#define _TTM_EXECBUF_UTIL_H_
31699+
31700+#include "ttm/ttm_bo_api.h"
31701+#include "ttm/ttm_fence_api.h"
31702+#include <linux/list.h>
31703+
31704+/**
31705+ * struct ttm_validate_buffer
31706+ *
31707+ * @head: list head for thread-private list.
31708+ * @bo: refcounted buffer object pointer.
31709+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used when
31710+ * adding a new sync object.
31711+ * @reserved: Indicates whether @bo has been reserved for validation.
31712+ */
31713+
31714+struct ttm_validate_buffer {
31715+ struct list_head head;
31716+ struct ttm_buffer_object *bo;
31717+ void *new_sync_obj_arg;
31718+ bool reserved;
31719+};
31720+
31721+/**
31722+ * function ttm_eu_backoff_reservation
31723+ *
31724+ * @list: thread private list of ttm_validate_buffer structs.
31725+ *
31726+ * Undoes all buffer validation reservations for bos pointed to by
31727+ * the list entries.
31728+ */
31729+
31730+extern void ttm_eu_backoff_reservation(struct list_head *list);
31731+
31732+/**
31733+ * function ttm_eu_reserve_buffers
31734+ *
31735+ * @list: thread private list of ttm_validate_buffer structs.
31736+ * @val_seq: A unique sequence number.
31737+ *
31738+ * Tries to reserve bos pointed to by the list entries for validation.
31739+ * If the function returns 0, all buffers are marked as "unfenced",
31740+ * taken off the lru lists and are not synced for write CPU usage.
31741+ *
31742+ * If the function detects a deadlock due to multiple threads trying to
31743+ * reserve the same buffers in reverse order, all threads except one will
31744+ * back off and retry. This function may sleep while waiting for
31745+ * CPU write reservations to be cleared, and for other threads to
31746+ * unreserve their buffers.
31747+ *
31748+ * This function may return -ERESTART or -EAGAIN if the calling process
31749+ * receives a signal while waiting. In that case, no buffers on the list
31750+ * will be reserved upon return.
31751+ *
31752+ * Buffers reserved by this function should be unreserved by
31753+ * a call to either ttm_eu_backoff_reservation() or
31754+ * ttm_eu_fence_buffer_objects() when command submission is complete or
31755+ * has failed.
31756+ */
31757+
31758+extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
31759+
31760+/**
31761+ * function ttm_eu_fence_buffer_objects.
31762+ *
31763+ * @list: thread private list of ttm_validate_buffer structs.
31764+ * @sync_obj: The new sync object for the buffers.
31765+ *
31766+ * This function should be called when command submission is complete, and
31767+ * it will add a new sync object to bos pointed to by entries on @list.
31768+ * It also unreserves all buffers, putting them on lru lists.
31769+ *
31770+ */
31771+
31772+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
31773+
31774+#endif
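Taken together, these helpers give a driver the usual reserve/validate/fence
submission pattern. A minimal sketch, assuming the validation list has
already been built with new_sync_obj_arg set on each entry and that fence
class 0 with an EXE-type fence is appropriate; example_submit is an
illustrative name, not part of this patch:

#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_fence_api.h"

static int example_submit(struct list_head *val_list,
			  struct ttm_fence_device *fdev, uint32_t val_seq)
{
	struct ttm_fence_object *fence;
	int ret;

	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (unlikely(ret != 0))
		return ret;	/* Nothing is left reserved on error. */

	/* ... validate placements and emit the command stream here ... */

	ret = ttm_fence_object_create(fdev, 0, TTM_FENCE_TYPE_EXE,
				      TTM_FENCE_FLAG_EMIT, &fence);
	if (unlikely(ret != 0)) {
		ttm_eu_backoff_reservation(val_list);
		return ret;
	}

	/* Attach the fence as sync object and unreserve in one step. */
	ttm_eu_fence_buffer_objects(val_list, fence);
	ttm_fence_object_unref(&fence);
	return 0;
}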
31775diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h
31776--- a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h 1969-12-31 16:00:00.000000000 -0800
31777+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h 2009-04-07 13:28:38.000000000 -0700
31778@@ -0,0 +1,277 @@
31779+/**************************************************************************
31780+ *
31781+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
31782+ * All Rights Reserved.
31783+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
31784+ * All Rights Reserved.
31785+ *
31786+ * Permission is hereby granted, free of charge, to any person obtaining a
31787+ * copy of this software and associated documentation files (the
31788+ * "Software"), to deal in the Software without restriction, including
31789+ * without limitation the rights to use, copy, modify, merge, publish,
31790+ * distribute, sub license, and/or sell copies of the Software, and to
31791+ * permit persons to whom the Software is furnished to do so, subject to
31792+ * the following conditions:
31793+ *
31794+ * The above copyright notice and this permission notice (including the
31795+ * next paragraph) shall be included in all copies or substantial portions
31796+ * of the Software.
31797+ *
31798+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
31799+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31800+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
31801+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
31802+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
31803+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
31804+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
31805+ *
31806+ **************************************************************************/
31807+/*
31808+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
31809+ */
31810+#ifndef _TTM_FENCE_API_H_
31811+#define _TTM_FENCE_API_H_
31812+
31813+#include <linux/list.h>
31814+#include <linux/kref.h>
31815+
31816+#define TTM_FENCE_FLAG_EMIT (1 << 0)
31817+#define TTM_FENCE_TYPE_EXE (1 << 0)
31818+
31819+struct ttm_fence_device;
31820+
31821+/**
31822+ * struct ttm_fence_info
31823+ *
31824+ * @signaled_types: Bitfield indicating which fence types are signaled.
31825+ * @error: Last error reported from the device.
31826+ *
31827+ * Used as output from ttm_fence_get_info(); the fence class and types
31828+ * live on the fence object itself and are not duplicated here.
31830+ */
31831+
31832+struct ttm_fence_info {
31833+ uint32_t signaled_types;
31834+ uint32_t error;
31835+};
31836+
31837+/**
31838+ * struct ttm_fence_object
31839+ *
31840+ * @fdev: Pointer to the fence device struct.
31841+ * @kref: Holds the reference count of this fence object.
31842+ * @ring: List head used for the circular list of not-completely
31843+ * signaled fences.
31844+ * @info: Data for fast retrieval using the ttm_fence_get_info()
31845+ * function.
31846+ * @timeout_jiffies: Absolute jiffies value indicating when this fence
31847+ * object times out and, if waited on, calls ttm_fence_lockup
31848+ * to check for and resolve a GPU lockup.
31849+ * @sequence: Fence sequence number.
31850+ * @waiting_types: Types currently waited on.
31851+ * @destroy: Called to free the fence object, when its refcount has
31852+ * reached zero. If NULL, kfree is used.
31853+ *
31854+ * This struct is provided in the driver interface so that drivers can
31855+ * derive from it and create their own fence implementation. All members
31856+ * are private to the fence implementation and the fence driver callbacks.
31857+ * Otherwise a driver may access the derived object using container_of().
31858+ */
31859+
31860+struct ttm_fence_object {
31861+ struct ttm_fence_device *fdev;
31862+ struct kref kref;
31863+ uint32_t fence_class;
31864+ uint32_t fence_type;
31865+
31866+ /*
31867+ * The below fields are protected by the fence class
31868+ * manager spinlock.
31869+ */
31870+
31871+ struct list_head ring;
31872+ struct ttm_fence_info info;
31873+ unsigned long timeout_jiffies;
31874+ uint32_t sequence;
31875+ uint32_t waiting_types;
31876+ void (*destroy) (struct ttm_fence_object *);
31877+};
31878+
31879+/**
31880+ * ttm_fence_object_init
31881+ *
31882+ * @fdev: Pointer to a struct ttm_fence_device.
31883+ * @fence_class: Fence class for this fence.
31884+ * @type: Fence type for this fence.
31885+ * @create_flags: Flags indicating various actions at init time. At this point
31886+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
31887+ * the command stream.
31888+ * @destroy: Destroy function. If NULL, kfree() is used.
31889+ * @fence: The struct ttm_fence_object to initialize.
31890+ *
31891+ * Initialize a pre-allocated fence object. This function, together with the
31892+ * destroy function makes it possible to derive driver-specific fence objects.
31893+ */
31894+
31895+extern int
31896+ttm_fence_object_init(struct ttm_fence_device *fdev,
31897+ uint32_t fence_class,
31898+ uint32_t type,
31899+ uint32_t create_flags,
31900+ void (*destroy) (struct ttm_fence_object * fence),
31901+ struct ttm_fence_object *fence);
31902+
31903+/**
31904+ * ttm_fence_object_create
31905+ *
31906+ * @fdev: Pointer to a struct ttm_fence_device.
31907+ * @fence_class: Fence class for this fence.
31908+ * @type: Fence type for this fence.
31909+ * @create_flags: Flags indicating various actions at init time. At this point
31910+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
31911+ * the command stream.
31912+ * @c_fence: On successful termination, *(@c_fence) will point to the created
31913+ * fence object.
31914+ *
31915+ * Create and initialize a struct ttm_fence_object. The destroy function will
31916+ * be set to kfree().
31917+ */
31918+
31919+extern int
31920+ttm_fence_object_create(struct ttm_fence_device *fdev,
31921+ uint32_t fence_class,
31922+ uint32_t type,
31923+ uint32_t create_flags,
31924+ struct ttm_fence_object **c_fence);
31925+
31926+/**
31927+ * ttm_fence_object_wait
31928+ *
31929+ * @fence: The fence object to wait on.
31930+ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
31931+ * @interruptible: Sleep interruptible when waiting.
31932+ * @type_mask: Wait for the given type_mask to signal.
31933+ *
31934+ * Wait for a fence to signal the given type_mask. The function will
31935+ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
31936+ *
31937+ * Returns
31938+ * -ERESTART if interrupted by a signal.
31939+ * May return driver-specific error codes if timed out.
31940+ */
31941+
31942+extern int
31943+ttm_fence_object_wait(struct ttm_fence_object *fence,
31944+ bool lazy, bool interruptible, uint32_t type_mask);
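/*
 * Typical use, assuming an EXE-type fence: a call like
 * ttm_fence_object_wait(fence, true, true, TTM_FENCE_TYPE_EXE)
 * returns 0 once the execution type has signaled, or -ERESTART if a
 * signal arrived first.
 */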
31945+
31946+/**
31947+ * ttm_fence_object_flush
31948+ *
31949+ * @fence: The fence object to flush.
31950+ * @flush_mask: Fence types to flush.
31951+ *
31952+ * Make sure that the given fence eventually signals the
31953+ * types indicated by @flush_mask. Note that this may or may not
31954+ * map to a CPU or GPU flush.
31955+ */
31956+
31957+extern int
31958+ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
31959+
31960+/**
31961+ * ttm_fence_get_info
31962+ *
31963+ * @fence: The fence object.
31964+ *
31965+ * Copy the info block from the fence while holding relevant locks.
31966+ */
31967+
31968+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
31969+
31970+/**
31971+ * ttm_fence_object_ref
31972+ *
31973+ * @fence: The fence object.
31974+ *
31975+ * Return a ref-counted pointer to the fence object indicated by @fence.
31976+ */
31977+
31978+static inline struct ttm_fence_object *ttm_fence_object_ref(struct
31979+ ttm_fence_object
31980+ *fence)
31981+{
31982+ kref_get(&fence->kref);
31983+ return fence;
31984+}
31985+
31986+/**
31987+ * ttm_fence_object_unref
31988+ *
31989+ * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object.
31990+ *
31991+ * Unreference the fence object pointed to by *(@p_fence), clearing
31992+ * *(p_fence).
31993+ */
31994+
31995+extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
31996+
31997+/**
31998+ * ttm_fence_object_signaled
31999+ *
32000+ * @fence: Pointer to the struct ttm_fence_object.
32001+ * @mask: Type mask to check whether signaled.
32002+ *
32003+ * This function checks (without waiting) whether the fence object
32004+ * pointed to by @fence has signaled the types indicated by @mask,
32005+ * and returns 1 if true, 0 if false. This function does NOT perform
32006+ * an implicit fence flush.
32007+ */
32008+
32009+extern bool
32010+ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
32011+
32012+/**
32013+ * ttm_fence_class
32014+ *
32015+ * @fence: Pointer to the struct ttm_fence_object.
32016+ *
32017+ * Convenience function that returns the fence class of a struct ttm_fence_object.
32018+ */
32019+
32020+static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
32021+{
32022+ return fence->fence_class;
32023+}
32024+
32025+/**
32026+ * ttm_fence_types
32027+ *
32028+ * @fence: Pointer to the struct ttm_fence_object.
32029+ *
32030+ * Convenience function that returns the fence types of a struct ttm_fence_object.
32031+ */
32032+
32033+static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
32034+{
32035+ return fence->fence_type;
32036+}
32037+
32038+/*
32039+ * The functions below are wrappers to the above functions, with
32040+ * similar names but with sync_obj omitted. These wrappers are intended
32041+ * to be plugged directly into the buffer object driver's sync object
32042+ * API, if the driver chooses to use ttm_fence_objects as buffer object
32043+ * sync objects. In the prototypes below, a sync_obj is cast to a
32044+ * struct ttm_fence_object, whereas a sync_arg is cast to a uint32_t representing
32045+ * a fence_type argument.
32046+ */
32047+
32048+extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
32049+extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
32050+ bool lazy, bool interruptible);
32051+extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
32052+extern void ttm_fence_sync_obj_unref(void **sync_obj);
32053+extern void *ttm_fence_sync_obj_ref(void *sync_obj);
32054+
32055+#endif
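As a sketch of this glue in use, a buffer object driver that adopts
ttm_fence_objects as its sync objects would point the corresponding
ttm_bo_driver hooks at the wrappers. The surrounding driver struct and its
remaining callbacks are assumed here:

static struct ttm_bo_driver example_bo_driver = {
	/* ... backend, placement and move callbacks ... */
	.sync_obj_signaled = ttm_fence_sync_obj_signaled,
	.sync_obj_wait = ttm_fence_sync_obj_wait,
	.sync_obj_flush = ttm_fence_sync_obj_flush,
	.sync_obj_unref = ttm_fence_sync_obj_unref,
	.sync_obj_ref = ttm_fence_sync_obj_ref,
};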
32056diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence.c b/drivers/gpu/drm/psb/ttm/ttm_fence.c
32057--- a/drivers/gpu/drm/psb/ttm/ttm_fence.c 1969-12-31 16:00:00.000000000 -0800
32058+++ b/drivers/gpu/drm/psb/ttm/ttm_fence.c 2009-04-07 13:28:38.000000000 -0700
32059@@ -0,0 +1,607 @@
32060+/**************************************************************************
32061+ *
32062+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
32063+ * All Rights Reserved.
32064+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
32065+ * All Rights Reserved.
32066+ *
32067+ * Permission is hereby granted, free of charge, to any person obtaining a
32068+ * copy of this software and associated documentation files (the
32069+ * "Software"), to deal in the Software without restriction, including
32070+ * without limitation the rights to use, copy, modify, merge, publish,
32071+ * distribute, sub license, and/or sell copies of the Software, and to
32072+ * permit persons to whom the Software is furnished to do so, subject to
32073+ * the following conditions:
32074+ *
32075+ * The above copyright notice and this permission notice (including the
32076+ * next paragraph) shall be included in all copies or substantial portions
32077+ * of the Software.
32078+ *
32079+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32080+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
32081+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
32082+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
32083+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
32084+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
32085+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
32086+ *
32087+ **************************************************************************/
32088+/*
32089+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
32090+ */
32091+
32092+#include "ttm/ttm_fence_api.h"
32093+#include "ttm/ttm_fence_driver.h"
32094+#include <linux/wait.h>
32095+#include <linux/sched.h>
32096+
32097+#include <drm/drmP.h>
32098+
32099+/*
32100+ * Simple implementation for now.
32101+ */
32102+
32103+static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
32104+{
32105+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32106+
32107+ printk(KERN_ERR "GPU lockup detected on engine %u "
32108+ "fence type 0x%08x\n",
32109+ (unsigned int)fence->fence_class, (unsigned int)mask);
32110+ /*
32111+ * Give engines some time to idle?
32112+ */
32113+
32114+ write_lock(&fc->lock);
32115+ ttm_fence_handler(fence->fdev, fence->fence_class,
32116+ fence->sequence, mask, -EBUSY);
32117+ write_unlock(&fc->lock);
32118+}
32119+
32120+/*
32121+ * Convenience function to be called by fence::wait methods that
32122+ * need polling.
32123+ */
32124+
32125+int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
32126+ bool interruptible, uint32_t mask)
32127+{
32128+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32129+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32130+ uint32_t count = 0;
32131+ int ret;
32132+ unsigned long end_jiffies = fence->timeout_jiffies;
32133+
32134+ DECLARE_WAITQUEUE(entry, current);
32135+ add_wait_queue(&fc->fence_queue, &entry);
32136+
32137+ ret = 0;
32138+
32139+ for (;;) {
32140+ __set_current_state((interruptible) ?
32141+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
32142+ if (ttm_fence_object_signaled(fence, mask))
32143+ break;
32144+ if (time_after_eq(jiffies, end_jiffies)) {
32145+ if (driver->lockup)
32146+ driver->lockup(fence, mask);
32147+ else
32148+ ttm_fence_lockup(fence, mask);
32149+ continue;
32150+ }
32151+ if (lazy)
32152+ schedule_timeout(1);
32153+ else if ((++count & 0x0F) == 0) {
32154+ __set_current_state(TASK_RUNNING);
32155+ schedule();
32156+ __set_current_state((interruptible) ?
32157+ TASK_INTERRUPTIBLE :
32158+ TASK_UNINTERRUPTIBLE);
32159+ }
32160+ if (interruptible && signal_pending(current)) {
32161+ ret = -ERESTART;
32162+ break;
32163+ }
32164+ }
32165+ __set_current_state(TASK_RUNNING);
32166+ remove_wait_queue(&fc->fence_queue, &entry);
32167+ return ret;
32168+}
32169+
32170+/*
32171+ * Typically called by the IRQ handler.
32172+ */
32173+
32174+void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
32175+ uint32_t sequence, uint32_t type, uint32_t error)
32176+{
32177+ int wake = 0;
32178+ uint32_t diff;
32179+ uint32_t relevant_type;
32180+ uint32_t new_type;
32181+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
32182+ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
32183+ struct list_head *head;
32184+ struct ttm_fence_object *fence, *next;
32185+ bool found = false;
32186+
32187+ if (list_empty(&fc->ring))
32188+ return;
32189+
32190+ list_for_each_entry(fence, &fc->ring, ring) {
32191+ diff = (sequence - fence->sequence) & fc->sequence_mask;
32192+ if (diff > fc->wrap_diff) {
32193+ found = true;
32194+ break;
32195+ }
32196+ }
32197+
32198+ fc->waiting_types &= ~type;
32199+ head = (found) ? &fence->ring : &fc->ring;
32200+
32201+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
32202+ if (&fence->ring == &fc->ring)
32203+ break;
32204+
32205+ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
32206+ (unsigned long)fence, fence->sequence,
32207+ fence->fence_type);
32208+
32209+ if (error) {
32210+ fence->info.error = error;
32211+ fence->info.signaled_types = fence->fence_type;
32212+ list_del_init(&fence->ring);
32213+ wake = 1;
32214+ break;
32215+ }
32216+
32217+ relevant_type = type & fence->fence_type;
32218+ new_type = (fence->info.signaled_types | relevant_type) ^
32219+ fence->info.signaled_types;
32220+
32221+ if (new_type) {
32222+ fence->info.signaled_types |= new_type;
32223+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
32224+ (unsigned long)fence,
32225+ fence->info.signaled_types);
32226+
32227+ if (unlikely(driver->signaled))
32228+ driver->signaled(fence);
32229+
32230+ if (driver->needed_flush)
32231+ fc->pending_flush |=
32232+ driver->needed_flush(fence);
32233+
32234+ if (new_type & fence->waiting_types)
32235+ wake = 1;
32236+ }
32237+
32238+ fc->waiting_types |=
32239+ fence->waiting_types & ~fence->info.signaled_types;
32240+
32241+ if (!(fence->fence_type & ~fence->info.signaled_types)) {
32242+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
32243+ (unsigned long)fence);
32244+ list_del_init(&fence->ring);
32245+ }
32246+ }
32247+
32248+ /*
32249+ * Reinstate lost waiting types.
32250+ */
32251+
32252+ if ((fc->waiting_types & type) != type) {
32253+ head = head->prev;
32254+ list_for_each_entry(fence, head, ring) {
32255+ if (&fence->ring == &fc->ring)
32256+ break;
32257+ diff =
32258+ (fc->highest_waiting_sequence -
32259+ fence->sequence) & fc->sequence_mask;
32260+ if (diff > fc->wrap_diff)
32261+ break;
32262+
32263+ fc->waiting_types |=
32264+ fence->waiting_types & ~fence->info.signaled_types;
32265+ }
32266+ }
32267+
32268+ if (wake)
32269+ wake_up_all(&fc->fence_queue);
32270+}
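/*
 * Sketch of the typical caller: an interrupt handler that reads the
 * last breadcrumb sequence the engine wrote back and reports it for
 * fence class 0. The example_dev_priv layout and
 * example_read_breadcrumb() are assumptions; only the locking rule
 * (fc->lock held in write mode around ttm_fence_handler()) comes from
 * this file.
 */
static irqreturn_t example_fence_irq(int irq, void *arg)
{
	struct example_dev_priv *dev_priv = arg;
	struct ttm_fence_class_manager *fc = &dev_priv->fdev.fence_class[0];
	uint32_t sequence = example_read_breadcrumb(dev_priv);
	unsigned long irq_flags;

	write_lock_irqsave(&fc->lock, irq_flags);
	ttm_fence_handler(&dev_priv->fdev, 0, sequence,
			  TTM_FENCE_TYPE_EXE, 0);
	write_unlock_irqrestore(&fc->lock, irq_flags);

	return IRQ_HANDLED;
}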
32271+
32272+static void ttm_fence_unring(struct ttm_fence_object *fence)
32273+{
32274+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32275+ unsigned long irq_flags;
32276+
32277+ write_lock_irqsave(&fc->lock, irq_flags);
32278+ list_del_init(&fence->ring);
32279+ write_unlock_irqrestore(&fc->lock, irq_flags);
32280+}
32281+
32282+bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
32283+{
32284+ unsigned long flags;
32285+ bool signaled;
32286+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32287+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32288+
32289+ mask &= fence->fence_type;
32290+ read_lock_irqsave(&fc->lock, flags);
32291+ signaled = (mask & fence->info.signaled_types) == mask;
32292+ read_unlock_irqrestore(&fc->lock, flags);
32293+ if (!signaled && driver->poll) {
32294+ write_lock_irqsave(&fc->lock, flags);
32295+ driver->poll(fence->fdev, fence->fence_class, mask);
32296+ signaled = (mask & fence->info.signaled_types) == mask;
32297+ write_unlock_irqrestore(&fc->lock, flags);
32298+ }
32299+ return signaled;
32300+}
32301+
32302+int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
32303+{
32304+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32305+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32306+ unsigned long irq_flags;
32307+ uint32_t saved_pending_flush;
32308+ uint32_t diff;
32309+ bool call_flush;
32310+
32311+ if (type & ~fence->fence_type) {
32312+ DRM_ERROR("Flush trying to extend fence type, "
32313+ "0x%x, 0x%x\n", type, fence->fence_type);
32314+ return -EINVAL;
32315+ }
32316+
32317+ write_lock_irqsave(&fc->lock, irq_flags);
32318+ fence->waiting_types |= type;
32319+ fc->waiting_types |= fence->waiting_types;
32320+ diff = (fence->sequence - fc->highest_waiting_sequence) &
32321+ fc->sequence_mask;
32322+
32323+ if (diff < fc->wrap_diff)
32324+ fc->highest_waiting_sequence = fence->sequence;
32325+
32326+ /*
32327+ * fence->waiting_types has changed. Determine whether
32328+ * we need to initiate some kind of flush as a result of this.
32329+ */
32330+
32331+ saved_pending_flush = fc->pending_flush;
32332+ if (driver->needed_flush)
32333+ fc->pending_flush |= driver->needed_flush(fence);
32334+
32335+ if (driver->poll)
32336+ driver->poll(fence->fdev, fence->fence_class,
32337+ fence->waiting_types);
32338+
32339+ call_flush = (fc->pending_flush != 0);
32340+ write_unlock_irqrestore(&fc->lock, irq_flags);
32341+
32342+ if (call_flush && driver->flush)
32343+ driver->flush(fence->fdev, fence->fence_class);
32344+
32345+ return 0;
32346+}
32347+
32348+/*
32349+ * Make sure old fence objects are signaled before their fence sequences are
32350+ * wrapped around and reused.
32351+ */
32352+
32353+void ttm_fence_flush_old(struct ttm_fence_device *fdev,
32354+ uint32_t fence_class, uint32_t sequence)
32355+{
32356+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
32357+ struct ttm_fence_object *fence;
32358+ unsigned long irq_flags;
32359+ const struct ttm_fence_driver *driver = fdev->driver;
32360+ bool call_flush;
32361+
32362+ uint32_t diff;
32363+
32364+ write_lock_irqsave(&fc->lock, irq_flags);
32365+
32366+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
32367+ diff = (sequence - fence->sequence) & fc->sequence_mask;
32368+ if (diff <= fc->flush_diff)
32369+ break;
32370+
32371+ fence->waiting_types = fence->fence_type;
32372+ fc->waiting_types |= fence->fence_type;
32373+
32374+ if (driver->needed_flush)
32375+ fc->pending_flush |= driver->needed_flush(fence);
32376+ }
32377+
32378+ if (driver->poll)
32379+ driver->poll(fdev, fence_class, fc->waiting_types);
32380+
32381+ call_flush = (fc->pending_flush != 0);
32382+ write_unlock_irqrestore(&fc->lock, irq_flags);
32383+
32384+ if (call_flush && driver->flush)
32385+ driver->flush(fdev, fence_class);
32386+
32387+ /*
32388+ * FIXME: Should we implement a wait here for really old fences?
32389+ */
32390+
32391+}
32392+
32393+int ttm_fence_object_wait(struct ttm_fence_object *fence,
32394+ bool lazy, bool interruptible, uint32_t mask)
32395+{
32396+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32397+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32398+ int ret = 0;
32399+ unsigned long timeout;
32400+ unsigned long cur_jiffies;
32401+ unsigned long to_jiffies;
32402+
32403+ if (mask & ~fence->fence_type) {
32404+ DRM_ERROR("Wait trying to extend fence type"
32405+ " 0x%08x 0x%08x\n", mask, fence->fence_type);
32406+ BUG();
32407+ return -EINVAL;
32408+ }
32409+
32410+ if (driver->wait)
32411+ return driver->wait(fence, lazy, interruptible, mask);
32412+
32413+ ttm_fence_object_flush(fence, mask);
32414+ retry:
32415+ if (!driver->has_irq ||
32416+ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
32417+
32418+ cur_jiffies = jiffies;
32419+ to_jiffies = fence->timeout_jiffies;
32420+
32421+ timeout = (time_after(to_jiffies, cur_jiffies)) ?
32422+ to_jiffies - cur_jiffies : 1;
32423+
32424+ if (interruptible)
32425+ ret = wait_event_interruptible_timeout
32426+ (fc->fence_queue,
32427+ ttm_fence_object_signaled(fence, mask), timeout);
32428+ else
32429+ ret = wait_event_timeout
32430+ (fc->fence_queue,
32431+ ttm_fence_object_signaled(fence, mask), timeout);
32432+
32433+ if (unlikely(ret == -ERESTARTSYS))
32434+ return -ERESTART;
32435+
32436+ if (unlikely(ret == 0)) {
32437+ if (driver->lockup)
32438+ driver->lockup(fence, mask);
32439+ else
32440+ ttm_fence_lockup(fence, mask);
32441+ goto retry;
32442+ }
32443+
32444+ return 0;
32445+ }
32446+
32447+ return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
32448+}
32449+
32450+int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
32451+ uint32_t fence_class, uint32_t type)
32452+{
32453+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
32454+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32455+ unsigned long flags;
32456+ uint32_t sequence;
32457+ unsigned long timeout;
32458+ int ret;
32459+
32460+ ttm_fence_unring(fence);
32461+ ret = driver->emit(fence->fdev,
32462+ fence_class, fence_flags, &sequence, &timeout);
32463+ if (ret)
32464+ return ret;
32465+
32466+ write_lock_irqsave(&fc->lock, flags);
32467+ fence->fence_class = fence_class;
32468+ fence->fence_type = type;
32469+ fence->waiting_types = 0;
32470+ fence->info.signaled_types = 0;
32471+ fence->info.error = 0;
32472+ fence->sequence = sequence;
32473+ fence->timeout_jiffies = timeout;
32474+ if (list_empty(&fc->ring))
32475+ fc->highest_waiting_sequence = sequence - 1;
32476+ list_add_tail(&fence->ring, &fc->ring);
32477+ fc->latest_queued_sequence = sequence;
32478+ write_unlock_irqrestore(&fc->lock, flags);
32479+ return 0;
32480+}
32481+
32482+int ttm_fence_object_init(struct ttm_fence_device *fdev,
32483+ uint32_t fence_class,
32484+ uint32_t type,
32485+ uint32_t create_flags,
32486+ void (*destroy) (struct ttm_fence_object *),
32487+ struct ttm_fence_object *fence)
32488+{
32489+ int ret = 0;
32490+
32491+ kref_init(&fence->kref);
32492+ fence->fence_class = fence_class;
32493+ fence->fence_type = type;
32494+ fence->info.signaled_types = 0;
32495+ fence->waiting_types = 0;
32496+ fence->sequence = 0;
32497+ fence->info.error = 0;
32498+ fence->fdev = fdev;
32499+ fence->destroy = destroy;
32500+ INIT_LIST_HEAD(&fence->ring);
32501+ atomic_inc(&fdev->count);
32502+
32503+ if (create_flags & TTM_FENCE_FLAG_EMIT) {
32504+ ret = ttm_fence_object_emit(fence, create_flags,
32505+ fence->fence_class, type);
32506+ }
32507+
32508+ return ret;
32509+}
32510+
32511+int ttm_fence_object_create(struct ttm_fence_device *fdev,
32512+ uint32_t fence_class,
32513+ uint32_t type,
32514+ uint32_t create_flags,
32515+ struct ttm_fence_object **c_fence)
32516+{
32517+ struct ttm_fence_object *fence;
32518+ int ret;
32519+
32520+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), false, false, false);
32521+ if (unlikely(ret != 0)) {
32522+ printk(KERN_ERR "Out of memory creating fence object\n");
32523+ return ret;
32524+ }
32525+
32526+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
32527+ if (!fence) {
32528+ printk(KERN_ERR "Out of memory creating fence object\n");
32529+ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
32530+ return -ENOMEM;
32531+ }
32532+
32533+ ret = ttm_fence_object_init(fdev, fence_class, type,
32534+ create_flags, NULL, fence);
32535+ if (ret) {
32536+ ttm_fence_object_unref(&fence);
32537+ return ret;
32538+ }
32539+ *c_fence = fence;
32540+
32541+ return 0;
32542+}
32543+
32544+static void ttm_fence_object_destroy(struct kref *kref)
32545+{
32546+ struct ttm_fence_object *fence =
32547+ container_of(kref, struct ttm_fence_object, kref);
32548+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32549+ unsigned long irq_flags;
32550+
32551+ write_lock_irqsave(&fc->lock, irq_flags);
32552+ list_del_init(&fence->ring);
32553+ write_unlock_irqrestore(&fc->lock, irq_flags);
32554+
32555+ atomic_dec(&fence->fdev->count);
32556+ if (fence->destroy)
32557+ fence->destroy(fence);
32558+ else {
32559+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), false);
32560+ kfree(fence);
32561+ }
32562+}
32563+
32564+void ttm_fence_device_release(struct ttm_fence_device *fdev)
32565+{
32566+ kfree(fdev->fence_class);
32567+}
32568+
32569+int
32570+ttm_fence_device_init(int num_classes,
32571+ struct ttm_mem_global *mem_glob,
32572+ struct ttm_fence_device *fdev,
32573+ const struct ttm_fence_class_init *init,
32574+ bool replicate_init, const struct ttm_fence_driver *driver)
32575+{
32576+ struct ttm_fence_class_manager *fc;
32577+ const struct ttm_fence_class_init *fci;
32578+ int i;
32579+
32580+ fdev->mem_glob = mem_glob;
32581+ fdev->fence_class = kzalloc(num_classes *
32582+ sizeof(*fdev->fence_class), GFP_KERNEL);
32583+
32584+ if (unlikely(!fdev->fence_class))
32585+ return -ENOMEM;
32586+
32587+ fdev->num_classes = num_classes;
32588+ atomic_set(&fdev->count, 0);
32589+ fdev->driver = driver;
32590+
32591+ for (i = 0; i < fdev->num_classes; ++i) {
32592+ fc = &fdev->fence_class[i];
32593+ fci = &init[(replicate_init) ? 0 : i];
32594+
32595+ fc->wrap_diff = fci->wrap_diff;
32596+ fc->flush_diff = fci->flush_diff;
32597+ fc->sequence_mask = fci->sequence_mask;
32598+
32599+ rwlock_init(&fc->lock);
32600+ INIT_LIST_HEAD(&fc->ring);
32601+ init_waitqueue_head(&fc->fence_queue);
32602+ }
32603+
32604+ return 0;
32605+}
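/*
 * Sketch of a driver initializing a single fence class for a 16-bit
 * hardware breadcrumb. The geometry below is an assumption chosen to
 * show the intended wrap_diff/flush_diff relation, not a requirement.
 */
static int example_fence_init(struct ttm_fence_device *fdev,
			      struct ttm_mem_global *mem_glob,
			      const struct ttm_fence_driver *driver)
{
	static const struct ttm_fence_class_init fci = {
		.wrap_diff = (1 << 15),	 /* half the sequence space */
		.flush_diff = (1 << 14), /* flush well before the wrap */
		.sequence_mask = 0x0000FFFF,
	};

	/* One class, so replicate_init is moot; pass true. */
	return ttm_fence_device_init(1, mem_glob, fdev, &fci, true, driver);
}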
32606+
32607+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
32608+{
32609+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
32610+ struct ttm_fence_info tmp;
32611+ unsigned long irq_flags;
32612+
32613+ read_lock_irqsave(&fc->lock, irq_flags);
32614+ tmp = fence->info;
32615+ read_unlock_irqrestore(&fc->lock, irq_flags);
32616+
32617+ return tmp;
32618+}
32619+
32620+void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
32621+{
32622+ struct ttm_fence_object *fence = *p_fence;
32623+
32624+ *p_fence = NULL;
32625+ (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
32626+}
32627+
32628+/*
32629+ * Placement / BO sync object glue.
32630+ */
32631+
32632+bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
32633+{
32634+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
32635+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
32636+
32637+ return ttm_fence_object_signaled(fence, fence_types);
32638+}
32639+
32640+int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
32641+ bool lazy, bool interruptible)
32642+{
32643+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
32644+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
32645+
32646+ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
32647+}
32648+
32649+int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
32650+{
32651+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
32652+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
32653+
32654+ return ttm_fence_object_flush(fence, fence_types);
32655+}
32656+
32657+void ttm_fence_sync_obj_unref(void **sync_obj)
32658+{
32659+ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
32660+}
32661+
32662+void *ttm_fence_sync_obj_ref(void *sync_obj)
32663+{
32664+ return (void *)
32665+ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
32666+}
32667diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
32668--- a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h 1969-12-31 16:00:00.000000000 -0800
32669+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h 2009-04-07 13:28:38.000000000 -0700
32670@@ -0,0 +1,309 @@
32671+/**************************************************************************
32672+ *
32673+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
32674+ * All Rights Reserved.
32675+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
32676+ * All Rights Reserved.
32677+ *
32678+ * Permission is hereby granted, free of charge, to any person obtaining a
32679+ * copy of this software and associated documentation files (the
32680+ * "Software"), to deal in the Software without restriction, including
32681+ * without limitation the rights to use, copy, modify, merge, publish,
32682+ * distribute, sub license, and/or sell copies of the Software, and to
32683+ * permit persons to whom the Software is furnished to do so, subject to
32684+ * the following conditions:
32685+ *
32686+ * The above copyright notice and this permission notice (including the
32687+ * next paragraph) shall be included in all copies or substantial portions
32688+ * of the Software.
32689+ *
32690+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32691+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
32692+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
32693+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
32694+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
32695+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
32696+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
32697+ *
32698+ **************************************************************************/
32699+/*
32700+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
32701+ */
32702+#ifndef _TTM_FENCE_DRIVER_H_
32703+#define _TTM_FENCE_DRIVER_H_
32704+
32705+#include <linux/kref.h>
32706+#include <linux/spinlock.h>
32707+#include <linux/wait.h>
32708+#include "ttm_fence_api.h"
32709+#include "ttm_memory.h"
32710+
32711+/** @file ttm_fence_driver.h
32712+ *
32713+ * Definitions needed for a driver implementing the
32714+ * ttm_fence subsystem.
32715+ */
32716+
32717+/**
32718+ * struct ttm_fence_class_manager:
32719+ *
32720+ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
32721+ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
32722+ * @flush_diff: Sequence difference to trigger fence flush.
32723+ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
32724+ * seqa as old and needing a flush.
32725+ * @sequence_mask: Mask of valid bits in a fence sequence.
32726+ * @lock: Lock protecting this struct as well as fence objects
32727+ * associated with this struct.
32728+ * @ring: Circular sequence-ordered list of fence objects.
32729+ * @pending_flush: Fence types currently needing a flush.
32730+ * @waiting_types: Fence types that are currently waited for.
32731+ * @fence_queue: Queue of waiters on fences belonging to this fence class.
32732+ * @highest_waiting_sequence: Highest sequence number among the
32733+ * fences currently waited for.
32734+ * @latest_queued_sequence: Sequence number of the fence most recently queued on the ring.
32735+ */
32736+
32737+struct ttm_fence_class_manager {
32738+
32739+ /*
32740+ * Unprotected constant members.
32741+ */
32742+
32743+ uint32_t wrap_diff;
32744+ uint32_t flush_diff;
32745+ uint32_t sequence_mask;
32746+
32747+ /*
32748+ * The rwlock protects this structure as well as
32749+ * the data in all fence objects belonging to this
32750+ * class. This should be OK as most fence objects are
32751+ * only read from once they're created.
32752+ */
32753+
32754+ rwlock_t lock;
32755+ struct list_head ring;
32756+ uint32_t pending_flush;
32757+ uint32_t waiting_types;
32758+ wait_queue_head_t fence_queue;
32759+ uint32_t highest_waiting_sequence;
32760+ uint32_t latest_queued_sequence;
32761+};
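/*
 * Worked example of the wrap arithmetic, with sequence_mask == 0xFFFF
 * and wrap_diff == 0x8000 assumed: if seqb == 0xFFF0 was emitted first
 * and the counter has since wrapped to seqa == 0x0002, then
 * (seqa - seqb) & 0xFFFF == 0x0012 <= 0x8000, so seqa is the newer
 * sequence, while (seqb - seqa) & 0xFFFF == 0xFFEE > 0x8000 correctly
 * identifies seqb as the older one.
 */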
32762+
32763+/**
32764+ * struct ttm_fence_device
32765+ *
32766+ * @fence_class: Array of fence class managers.
32767+ * @num_classes: Array dimension of @fence_class.
32768+ * @count: Current number of fence objects for statistics.
32769+ * @driver: Driver struct.
32770+ *
32771+ * Provided in the driver interface so that the driver can derive
32772+ * from this struct for its driver_private, and accordingly
32773+ * access the driver_private from the fence driver callbacks.
32774+ *
32775+ * All members except "count" are initialized at creation and
32776+ * never touched after that. No protection needed.
32777+ *
32778+ * This struct is private to the fence implementation and to the fence
32779+ * driver callbacks, and may otherwise be used by drivers only to
32780+ * obtain the derived driver-private object using container_of().
32781+ */
32782+
32783+struct ttm_fence_device {
32784+ struct ttm_mem_global *mem_glob;
32785+ struct ttm_fence_class_manager *fence_class;
32786+ uint32_t num_classes;
32787+ atomic_t count;
32788+ const struct ttm_fence_driver *driver;
32789+};
32790+
32791+/**
32792+ * struct ttm_fence_class_init
32793+ *
32794+ * @wrap_diff: Fence sequence number wrap indicator. If
32795+ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
32796+ * considered to be older than sequence2.
32797+ * @flush_diff: Fence sequence number flush indicator.
32798+ * If a non-completely-signaled fence has a fence sequence number
32799+ * sequence1 and (current_emit_sequence - sequence1) > @flush_diff,
32800+ * the fence is considered too old and it will be flushed upon the
32801+ * next call of ttm_fence_flush_old(), to make sure no fences with
32802+ * stale sequence numbers remain unsignaled. @flush_diff should
32803+ * be sufficiently less than @wrap_diff.
32804+ * @sequence_mask: Mask with valid bits of the fence sequence
32805+ * number set to 1.
32806+ *
32807+ * This struct is used as input to ttm_fence_device_init.
32808+ */
32809+
32810+struct ttm_fence_class_init {
32811+ uint32_t wrap_diff;
32812+ uint32_t flush_diff;
32813+ uint32_t sequence_mask;
32814+};
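/*
 * Illustrative example (not part of this patch): a driver whose hardware
 * keeps a full 32-bit sequence counter might describe a fence class like
 * this. The values are hypothetical; they only need to satisfy
 * "@flush_diff sufficiently less than @wrap_diff" as described above.
 */
static const struct ttm_fence_class_init example_fence_class = {
	.wrap_diff = (1U << 30),
	.flush_diff = (1U << 29),
	.sequence_mask = 0xFFFFFFFFU
};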
32815+
32816+/**
32817+ * struct ttm_fence_driver
32818+ *
32819+ * @has_irq: Called by a potential waiter. Should return 1 if a
32820+ * fence object with indicated parameters is expected to signal
32821+ * automatically, and 0 if the fence implementation needs to
32822+ * repeatedly call @poll to make it signal.
32823+ * @emit: Make sure a fence with the given parameters is
32824+ * present in the indicated command stream. Return its sequence number
32825+ * in "breadcrumb".
32826+ * @poll: Check and report sequences of the given "fence_class"
32827+ * that have signaled "types".
32828+ * @flush: Make sure that the types indicated by the bitfield
32829+ * ttm_fence_class_manager::pending_flush will eventually
32830+ * signal. These bits have been put together using the
32831+ * result from the needed_flush function described below.
32832+ * @needed_flush: Given the fence_class and fence_types indicated by
32833+ * "fence", and the last received fence sequence of this
32834+ * fence class, indicate what types need a fence flush to
32835+ * signal. Return as a bitfield.
32836+ * @wait: Set to non-NULL if the driver wants to override the fence
32837+ * wait implementation. Return 0 on success, -EBUSY on failure,
32838+ * and -ERESTART if interruptible and a signal is pending.
32839+ * @signaled: Driver callback that is called whenever a
32840+ * ttm_fence_object::signaled_types has changed status.
32841+ * This function is called from atomic context,
32842+ * with the ttm_fence_class_manager::lock held in write mode.
32843+ * @lockup: Driver callback that is called whenever a wait has exceeded
32844+ * the lifetime of a fence object.
32845+ * If there is a GPU lockup,
32846+ * this function should, if possible, reset the GPU,
32847+ * call the ttm_fence_handler with an error status, and
32848+ * return. If no lockup was detected, simply extend the
32849+ * fence timeout_jiffies and return. The driver might
32850+ * want to protect the lockup check with a mutex and cache a
32851+ * non-locked-up status for a while to avoid an excessive
32852+ * amount of lockup checks from every waiting thread.
32853+ */
32854+
32855+struct ttm_fence_driver {
32856+ bool (*has_irq) (struct ttm_fence_device * fdev,
32857+ uint32_t fence_class, uint32_t flags);
32858+ int (*emit) (struct ttm_fence_device * fdev,
32859+ uint32_t fence_class,
32860+ uint32_t flags,
32861+ uint32_t * breadcrumb, unsigned long *timeout_jiffies);
32862+ void (*flush) (struct ttm_fence_device * fdev, uint32_t fence_class);
32863+ void (*poll) (struct ttm_fence_device * fdev,
32864+ uint32_t fence_class, uint32_t types);
32865+ uint32_t(*needed_flush)
32866+ (struct ttm_fence_object * fence);
32867+ int (*wait) (struct ttm_fence_object * fence, bool lazy,
32868+ bool interruptible, uint32_t mask);
32869+ void (*signaled) (struct ttm_fence_object * fence);
32870+ void (*lockup) (struct ttm_fence_object * fence, uint32_t fence_types);
32871+};
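/*
 * Illustrative sketch (not part of this patch): a minimal callback table.
 * my_hw_read_sequence() is a hypothetical register read; a real driver
 * must also provide at least emit(). poll() is assumed to be entered with
 * the class manager lock held in write mode, as required by
 * ttm_fence_handler() (declared later in this file).
 */
static bool my_fence_has_irq(struct ttm_fence_device *fdev,
			     uint32_t fence_class, uint32_t flags)
{
	return true;	/* completion raises an interrupt; no polling needed */
}

static void my_fence_poll(struct ttm_fence_device *fdev,
			  uint32_t fence_class, uint32_t types)
{
	uint32_t signaled_seq = my_hw_read_sequence(fence_class);

	ttm_fence_handler(fdev, fence_class, signaled_seq, types, 0);
}

static const struct ttm_fence_driver my_fence_driver = {
	.has_irq = my_fence_has_irq,
	.poll = my_fence_poll,
	/* .emit, .flush, .needed_flush, .wait, .signaled and .lockup
	   omitted for brevity. */
};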
32872+
32873+/**
32874+ * function ttm_fence_device_init
32875+ *
32876+ * @num_classes: Number of fence classes for this fence implementation.
32877+ * @mem_glob: Pointer to the global memory accounting info.
32878+ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
32879+ * @init: Array of initialization info for each fence class.
32880+ * @replicate_init: Use the first @init initialization info for all classes.
32881+ * @driver: Driver callbacks.
32882+ *
32883+ * Initialize a struct ttm_fence_device structure. Returns -ENOMEM if
32884+ * out of memory. Otherwise returns 0.
32885+ */
32886+extern int
32887+ttm_fence_device_init(int num_classes,
32888+ struct ttm_mem_global *mem_glob,
32889+ struct ttm_fence_device *fdev,
32890+ const struct ttm_fence_class_init *init,
32891+ bool replicate_init,
32892+ const struct ttm_fence_driver *driver);
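/*
 * Usage sketch (illustrative, not part of this patch): one fence class,
 * with the fence device embedded in a hypothetical driver-private
 * structure so that callbacks can recover it with container_of().
 * example_fence_class and my_fence_driver are the hypothetical examples
 * above.
 */
struct my_device {
	struct ttm_fence_device fdev;
	/* ... other driver state ... */
};

static int my_fence_init(struct my_device *mdev,
			 struct ttm_mem_global *mem_glob)
{
	return ttm_fence_device_init(1, mem_glob, &mdev->fdev,
				     &example_fence_class, true,
				     &my_fence_driver);
}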
32893+
32894+/**
32895+ * function ttm_fence_device_release
32896+ *
32897+ * @fdev: Pointer to the fence device.
32898+ *
32899+ * Release all resources held by a fence device. Note that before
32900+ * this function is called, the caller must have made sure all fence
32901+ * objects belonging to this fence device are completely signaled.
32902+ */
32903+
32904+extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
32905+
32906+/**
32907+ * ttm_fence_handler - the fence handler.
32908+ *
32909+ * @fdev: Pointer to the fence device.
32910+ * @fence_class: Fence class that signals.
32911+ * @sequence: Signaled sequence.
32912+ * @type: Types that signal.
32913+ * @error: Error from the engine.
32914+ *
32915+ * This function signals all fences with a sequence previous to the
32916+ * @sequence argument, and belonging to @fence_class. The signaled fence
32917+ * types are provided in @type. If error is non-zero, the error member
32918+ * of the fence with sequence = @sequence is set to @error. This value
32919+ * may be reported back to user-space, indicating, for example, an illegal
32920+ * 3D command or illegal MPEG data.
32921+ *
32922+ * This function is typically called from the driver::poll method when the
32923+ * command sequence preceding the fence marker has executed. It should be
32924+ * called with the ttm_fence_class_manager::lock held in write mode and
32925+ * may be called from interrupt context.
32926+ */
32927+
32928+extern void
32929+ttm_fence_handler(struct ttm_fence_device *fdev,
32930+ uint32_t fence_class,
32931+ uint32_t sequence, uint32_t type, uint32_t error);
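/*
 * Illustrative sketch (not part of this patch): reporting completed
 * sequences from an interrupt handler. ttm_fence_handler() may be called
 * from interrupt context, but the class manager lock must be held in
 * write mode. my_hw_read_sequence() is hypothetical, TTM_FENCE_TYPE_EXE
 * is assumed to be the execution fence type from ttm_fence_api.h, and
 * irqreturn_t comes from linux/interrupt.h.
 */
static irqreturn_t my_fence_irq(int irq, void *arg)
{
	struct ttm_fence_device *fdev = arg;
	struct ttm_fence_class_manager *fc = &fdev->fence_class[0];
	unsigned long irq_flags;

	write_lock_irqsave(&fc->lock, irq_flags);
	ttm_fence_handler(fdev, 0, my_hw_read_sequence(0),
			  TTM_FENCE_TYPE_EXE, 0);
	write_unlock_irqrestore(&fc->lock, irq_flags);
	return IRQ_HANDLED;
}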
32932+
32933+/**
32934+ * ttm_fence_driver_from_dev
32935+ *
32936+ * @fdev: The ttm fence device.
32937+ *
32938+ * Returns a pointer to the fence driver struct.
32939+ */
32940+
32941+static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(struct
32942+ ttm_fence_device
32943+ *fdev)
32944+{
32945+ return fdev->driver;
32946+}
32947+
32948+/**
32949+ * ttm_fence_driver
32950+ *
32951+ * @fence: Pointer to a ttm fence object.
32952+ *
32953+ * Returns a pointer to the fence driver struct.
32954+ */
32955+
32956+static inline const struct ttm_fence_driver *ttm_fence_driver(struct
32957+ ttm_fence_object
32958+ *fence)
32959+{
32960+ return ttm_fence_driver_from_dev(fence->fdev);
32961+}
32962+
32963+/**
32964+ * ttm_fence_fc
32965+ *
32966+ * @fence: Pointer to a ttm fence object.
32967+ *
32968+ * Returns a pointer to the struct ttm_fence_class_manager for the
32969+ * fence class of @fence.
32970+ */
32971+
32972+static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
32973+ ttm_fence_object
32974+ *fence)
32975+{
32976+ return &fence->fdev->fence_class[fence->fence_class];
32977+}
32978+
32979+#endif
32980diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c
32981--- a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c 1969-12-31 16:00:00.000000000 -0800
32982+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c 2009-04-07 13:28:38.000000000 -0700
32983@@ -0,0 +1,242 @@
32984+/**************************************************************************
32985+ *
32986+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
32987+ * All Rights Reserved.
32988+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
32989+ * All Rights Reserved.
32990+ *
32991+ * Permission is hereby granted, free of charge, to any person obtaining a
32992+ * copy of this software and associated documentation files (the
32993+ * "Software"), to deal in the Software without restriction, including
32994+ * without limitation the rights to use, copy, modify, merge, publish,
32995+ * distribute, sub license, and/or sell copies of the Software, and to
32996+ * permit persons to whom the Software is furnished to do so, subject to
32997+ * the following conditions:
32998+ *
32999+ * The above copyright notice and this permission notice (including the
33000+ * next paragraph) shall be included in all copies or substantial portions
33001+ * of the Software.
33002+ *
33003+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33004+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33005+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33006+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33007+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33008+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33009+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33010+ *
33011+ **************************************************************************/
33012+/*
33013+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
33014+ */
33015+
33016+#include <drm/drmP.h>
33017+#include "ttm/ttm_fence_user.h"
33018+#include "ttm/ttm_object.h"
33019+#include "ttm/ttm_fence_driver.h"
33020+#include "ttm/ttm_userobj_api.h"
33021+
33022+/**
33023+ * struct ttm_fence_user_object
33024+ *
33025+ * @base: The base object used for user-space visibility and refcounting.
33026+ *
33027+ * @fence: The fence object itself.
33028+ *
33029+ */
33030+
33031+struct ttm_fence_user_object {
33032+ struct ttm_base_object base;
33033+ struct ttm_fence_object fence;
33034+};
33035+
33036+static struct ttm_fence_user_object *ttm_fence_user_object_lookup(struct
33037+ ttm_object_file
33038+ *tfile,
33039+ uint32_t
33040+ handle)
33041+{
33042+ struct ttm_base_object *base;
33043+
33044+ base = ttm_base_object_lookup(tfile, handle);
33045+ if (unlikely(base == NULL)) {
33046+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
33047+ (unsigned long)handle);
33048+ return NULL;
33049+ }
33050+
33051+ if (unlikely(base->object_type != ttm_fence_type)) {
33052+ ttm_base_object_unref(&base);
33053+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
33054+ (unsigned long)handle);
33055+ return NULL;
33056+ }
33057+
33058+ return container_of(base, struct ttm_fence_user_object, base);
33059+}
33060+
33061+/*
33062+ * The fence object destructor.
33063+ */
33064+
33065+static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
33066+{
33067+ struct ttm_fence_user_object *ufence =
33068+ container_of(fence, struct ttm_fence_user_object, fence);
33069+
33070+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
33071+ kfree(ufence);
33072+}
33073+
33074+/*
33075+ * The base object destructor. We basically only unreference the
33076+ * attached fence object.
33077+ */
33078+
33079+static void ttm_fence_user_release(struct ttm_base_object **p_base)
33080+{
33081+ struct ttm_fence_user_object *ufence;
33082+ struct ttm_base_object *base = *p_base;
33083+ struct ttm_fence_object *fence;
33084+
33085+ *p_base = NULL;
33086+
33087+ if (unlikely(base == NULL))
33088+ return;
33089+
33090+ ufence = container_of(base, struct ttm_fence_user_object, base);
33091+ fence = &ufence->fence;
33092+ ttm_fence_object_unref(&fence);
33093+}
33094+
33095+int
33096+ttm_fence_user_create(struct ttm_fence_device *fdev,
33097+ struct ttm_object_file *tfile,
33098+ uint32_t fence_class,
33099+ uint32_t fence_types,
33100+ uint32_t create_flags,
33101+ struct ttm_fence_object **fence, uint32_t * user_handle)
33102+{
33103+ int ret;
33104+ struct ttm_fence_object *tmp;
33105+ struct ttm_fence_user_object *ufence;
33106+
33107+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), false, false, false);
33108+ if (unlikely(ret != 0))
33109+ return -ENOMEM;
33110+
33111+ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
33112+ if (unlikely(ufence == NULL)) {
33113+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
33114+ return -ENOMEM;
33115+ }
33116+
33117+ ret = ttm_fence_object_init(fdev,
33118+ fence_class,
33119+ fence_types, create_flags,
33120+ &ttm_fence_user_destroy, &ufence->fence);
33121+
33122+ if (unlikely(ret != 0))
33123+ goto out_err0;
33124+
33125+ /*
33126+ * One fence ref is held by the fence ptr we return.
33127+ * The other one by the base object. Need to up the
33128+ * fence refcount before we publish this object to
33129+ * user-space.
33130+ */
33131+
33132+ tmp = ttm_fence_object_ref(&ufence->fence);
33133+ ret = ttm_base_object_init(tfile, &ufence->base,
33134+ false, ttm_fence_type,
33135+ &ttm_fence_user_release, NULL);
33136+
33137+ if (unlikely(ret != 0))
33138+ goto out_err1;
33139+
33140+ *fence = &ufence->fence;
33141+ *user_handle = ufence->base.hash.key;
33142+
33143+ return 0;
33144+ out_err1:
33145+ ttm_fence_object_unref(&tmp);
33146+ tmp = &ufence->fence;
33147+ ttm_fence_object_unref(&tmp);
33148+ return ret;
33149+ out_err0:
33150+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
33151+ kfree(ufence);
33152+ return ret;
33153+}
33154+
33155+int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
33156+{
33157+ int ret;
33158+ union ttm_fence_signaled_arg *arg = data;
33159+ struct ttm_fence_object *fence;
33160+ struct ttm_fence_info info;
33161+ struct ttm_fence_user_object *ufence;
33162+ struct ttm_base_object *base;
33163+ ret = 0;
33164+
33165+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
33166+ if (unlikely(ufence == NULL))
33167+ return -EINVAL;
33168+
33169+ fence = &ufence->fence;
33170+
33171+ if (arg->req.flush) {
33172+ ret = ttm_fence_object_flush(fence, arg->req.fence_type);
33173+ if (unlikely(ret != 0))
33174+ goto out;
33175+ }
33176+
33177+ info = ttm_fence_get_info(fence);
33178+ arg->rep.signaled_types = info.signaled_types;
33179+ arg->rep.fence_error = info.error;
33180+
33181+ out:
33182+ base = &ufence->base;
33183+ ttm_base_object_unref(&base);
33184+ return ret;
33185+}
33186+
33187+int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
33188+{
33189+ int ret;
33190+ union ttm_fence_finish_arg *arg = data;
33191+ struct ttm_fence_user_object *ufence;
33192+ struct ttm_base_object *base;
33193+ struct ttm_fence_object *fence;
33194+ ret = 0;
33195+
33196+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
33197+ if (unlikely(ufence == NULL))
33198+ return -EINVAL;
33199+
33200+ fence = &ufence->fence;
33201+
33202+ ret = ttm_fence_object_wait(fence,
33203+ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
33204+ true, arg->req.fence_type);
33205+ if (likely(ret == 0)) {
33206+ struct ttm_fence_info info = ttm_fence_get_info(fence);
33207+
33208+ arg->rep.signaled_types = info.signaled_types;
33209+ arg->rep.fence_error = info.error;
33210+ }
33211+
33212+ base = &ufence->base;
33213+ ttm_base_object_unref(&base);
33214+
33215+ return ret;
33216+}
33217+
33218+int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
33219+{
33220+ struct ttm_fence_unref_arg *arg = data;
33221+ int ret = 0;
33222+
33223+ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
33224+ return ret;
33225+}
33226diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h
33227--- a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h 1969-12-31 16:00:00.000000000 -0800
33228+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h 2009-04-07 13:28:38.000000000 -0700
33229@@ -0,0 +1,147 @@
33230+/**************************************************************************
33231+ *
33232+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33233+ * All Rights Reserved.
33234+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33235+ * All Rights Reserved.
33236+ *
33237+ * Permission is hereby granted, free of charge, to any person obtaining a
33238+ * copy of this software and associated documentation files (the
33239+ * "Software"), to deal in the Software without restriction, including
33240+ * without limitation the rights to use, copy, modify, merge, publish,
33241+ * distribute, sub license, and/or sell copies of the Software, and to
33242+ * permit persons to whom the Software is furnished to do so, subject to
33243+ * the following conditions:
33244+ *
33245+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33246+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33247+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33248+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33249+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33250+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33251+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33252+ *
33253+ * The above copyright notice and this permission notice (including the
33254+ * next paragraph) shall be included in all copies or substantial portions
33255+ * of the Software.
33256+ *
33257+ **************************************************************************/
33258+/*
33259+ * Authors:
33260+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33261+ */
33262+
33263+#ifndef TTM_FENCE_USER_H
33264+#define TTM_FENCE_USER_H
33265+
33266+#if !defined(__KERNEL__) && !defined(_KERNEL)
33267+#include <stdint.h>
33268+#endif
33269+
33270+#define TTM_FENCE_MAJOR 0
33271+#define TTM_FENCE_MINOR 1
33272+#define TTM_FENCE_PL 0
33273+#define TTM_FENCE_DATE "080819"
33274+
33275+/**
33276+ * struct ttm_fence_signaled_req
33277+ *
33278+ * @handle: Handle to the fence object. Input.
33279+ *
33280+ * @fence_type: Fence types we want to flush. Input.
33281+ *
33282+ * @flush: Boolean. Flush the indicated fence_types. Input.
33283+ *
33284+ * Argument to the TTM_FENCE_SIGNALED ioctl.
33285+ */
33286+
33287+struct ttm_fence_signaled_req {
33288+ uint32_t handle;
33289+ uint32_t fence_type;
33290+ int32_t flush;
33291+ uint32_t pad64;
33292+};
33293+
33294+/**
33295+ * struct ttm_fence_rep
33296+ *
33297+ * @signaled_types: Fence type that has signaled.
33298+ *
33299+ * @fence_error: Command execution error.
33300+ * Hardware errors that are consequences of the execution
33301+ * of the command stream preceding the fence are reported
33302+ * here.
33303+ *
33304+ * Output argument to the TTM_FENCE_SIGNALED and
33305+ * TTM_FENCE_FINISH ioctls.
33306+ */
33307+
33308+struct ttm_fence_rep {
33309+ uint32_t signaled_types;
33310+ uint32_t fence_error;
33311+};
33312+
33313+union ttm_fence_signaled_arg {
33314+ struct ttm_fence_signaled_req req;
33315+ struct ttm_fence_rep rep;
33316+};
33317+
33318+/*
33319+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
33320+ *
33321+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
33322+ * wait.
33323+ *
33324+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
33325+ * but return -EBUSY if the buffer is busy.
33326+ */
33327+
33328+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
33329+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
33330+
33331+/**
33332+ * struct ttm_fence_finish_req
33333+ *
33334+ * @handle: Handle to the fence object. Input.
33335+ *
33336+ * @fence_type: Fence types we want to finish.
33337+ *
33338+ * @mode: Wait mode.
33339+ *
33340+ * Input to the TTM_FENCE_FINISH ioctl.
33341+ */
33342+
33343+struct ttm_fence_finish_req {
33344+ uint32_t handle;
33345+ uint32_t fence_type;
33346+ uint32_t mode;
33347+ uint32_t pad64;
33348+};
33349+
33350+union ttm_fence_finish_arg {
33351+ struct ttm_fence_finish_req req;
33352+ struct ttm_fence_rep rep;
33353+};
33354+
33355+/**
33356+ * struct ttm_fence_unref_arg
33357+ *
33358+ * @handle: Handle to the fence object.
33359+ *
33360+ * Argument to the TTM_FENCE_UNREF ioctl.
33361+ */
33362+
33363+struct ttm_fence_unref_arg {
33364+ uint32_t handle;
33365+ uint32_t pad64;
33366+};
33367+
33368+/*
33369+ * Ioctl offsets from extension start.
33370+ */
33371+
33372+#define TTM_FENCE_SIGNALED 0x01
33373+#define TTM_FENCE_FINISH 0x02
33374+#define TTM_FENCE_UNREF 0x03
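/*
 * Illustrative user-space sketch (not part of this patch): waiting on a
 * fence with TTM_FENCE_FINISH. Turning the offset into a complete ioctl
 * number is device-specific; my_fence_ioctl() stands in for that
 * plumbing and is hypothetical.
 */
static int example_wait_fence(int fd, uint32_t handle, uint32_t fence_type)
{
	union ttm_fence_finish_arg arg = {
		.req = {
			.handle = handle,
			.fence_type = fence_type,
			.mode = TTM_FENCE_FINISH_MODE_LAZY
		}
	};
	int ret = my_fence_ioctl(fd, TTM_FENCE_FINISH, &arg);

	if (ret == 0 && arg.rep.fence_error != 0)
		ret = -1;	/* the preceding command stream failed */
	return ret;
}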
33375+
33376+#endif
33377diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_lock.c b/drivers/gpu/drm/psb/ttm/ttm_lock.c
33378--- a/drivers/gpu/drm/psb/ttm/ttm_lock.c 1969-12-31 16:00:00.000000000 -0800
33379+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.c 2009-04-07 13:28:38.000000000 -0700
33380@@ -0,0 +1,162 @@
33381+/**************************************************************************
33382+ *
33383+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33384+ * All Rights Reserved.
33385+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33386+ * All Rights Reserved.
33387+ *
33388+ * Permission is hereby granted, free of charge, to any person obtaining a
33389+ * copy of this software and associated documentation files (the
33390+ * "Software"), to deal in the Software without restriction, including
33391+ * without limitation the rights to use, copy, modify, merge, publish,
33392+ * distribute, sub license, and/or sell copies of the Software, and to
33393+ * permit persons to whom the Software is furnished to do so, subject to
33394+ * the following conditions:
33395+ *
33396+ * The above copyright notice and this permission notice (including the
33397+ * next paragraph) shall be included in all copies or substantial portions
33398+ * of the Software.
33399+ *
33400+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33401+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33402+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33403+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33404+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33405+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33406+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33407+ *
33408+ **************************************************************************/
33409+/*
33410+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33411+ */
33412+
33413+#include "ttm/ttm_lock.h"
33414+#include <asm/atomic.h>
33415+#include <linux/errno.h>
33416+#include <linux/wait.h>
33417+#include <linux/sched.h>
33418+
33419+void ttm_lock_init(struct ttm_lock *lock)
33420+{
33421+ init_waitqueue_head(&lock->queue);
33422+ atomic_set(&lock->write_lock_pending, 0);
33423+ atomic_set(&lock->readers, 0);
33424+ lock->kill_takers = false;
33425+ lock->signal = SIGKILL;
33426+}
33427+
33428+void ttm_read_unlock(struct ttm_lock *lock)
33429+{
33430+ if (atomic_dec_and_test(&lock->readers))
33431+ wake_up_all(&lock->queue);
33432+}
33433+
33434+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
33435+{
33436+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
33437+ int ret;
33438+
33439+ if (!interruptible) {
33440+ wait_event(lock->queue,
33441+ atomic_read(&lock->write_lock_pending) == 0);
33442+ continue;
33443+ }
33444+ ret = wait_event_interruptible
33445+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
33446+ if (ret)
33447+ return -ERESTART;
33448+ }
33449+
33450+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
33451+ int ret;
33452+ if (!interruptible) {
33453+ wait_event(lock->queue,
33454+ atomic_read(&lock->readers) != -1);
33455+ continue;
33456+ }
33457+ ret = wait_event_interruptible
33458+ (lock->queue, atomic_read(&lock->readers) != -1);
33459+ if (ret)
33460+ return -ERESTART;
33461+ }
33462+
33463+ if (unlikely(lock->kill_takers)) {
33464+ send_sig(lock->signal, current, 0);
33465+ ttm_read_unlock(lock);
33466+ return -ERESTART;
33467+ }
33468+
33469+ return 0;
33470+}
33471+
33472+static int __ttm_write_unlock(struct ttm_lock *lock)
33473+{
33474+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
33475+ return -EINVAL;
33476+ wake_up_all(&lock->queue);
33477+ return 0;
33478+}
33479+
33480+static void ttm_write_lock_remove(struct ttm_base_object **p_base)
33481+{
33482+ struct ttm_base_object *base = *p_base;
33483+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
33484+ int ret;
33485+
33486+ *p_base = NULL;
33487+ ret = __ttm_write_unlock(lock);
33488+ BUG_ON(ret != 0);
33489+}
33490+
33491+int ttm_write_lock(struct ttm_lock *lock,
33492+ bool interruptible,
33493+ struct ttm_object_file *tfile)
33494+{
33495+ int ret = 0;
33496+
33497+ atomic_inc(&lock->write_lock_pending);
33498+
33499+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
33500+ if (!interruptible) {
33501+ wait_event(lock->queue,
33502+ atomic_read(&lock->readers) == 0);
33503+ continue;
33504+ }
33505+ ret = wait_event_interruptible
33506+ (lock->queue, atomic_read(&lock->readers) == 0);
33507+
33508+ if (ret) {
33509+ if (atomic_dec_and_test(&lock->write_lock_pending))
33510+ wake_up_all(&lock->queue);
33511+ return -ERESTART;
33512+ }
33513+ }
33514+
33515+ if (atomic_dec_and_test(&lock->write_lock_pending))
33516+ wake_up_all(&lock->queue);
33517+
33518+ if (unlikely(lock->kill_takers)) {
33519+ send_sig(lock->signal, current, 0);
33520+ __ttm_write_unlock(lock);
33521+ return -ERESTART;
33522+ }
33523+
33524+ /*
33525+ * Add a base-object, the destructor of which will
33526+ * make sure the lock is released if the client dies
33527+ * while holding it.
33528+ */
33529+
33530+ ret = ttm_base_object_init(tfile, &lock->base, false,
33531+ ttm_lock_type, &ttm_write_lock_remove, NULL);
33532+ if (ret)
33533+ (void)__ttm_write_unlock(lock);
33534+
33535+ return ret;
33536+}
33537+
33538+int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile)
33539+{
33540+ return ttm_ref_object_base_unref(tfile,
33541+ lock->base.hash.key, TTM_REF_USAGE);
33542+}
33543diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_lock.h b/drivers/gpu/drm/psb/ttm/ttm_lock.h
33544--- a/drivers/gpu/drm/psb/ttm/ttm_lock.h 1969-12-31 16:00:00.000000000 -0800
33545+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.h 2009-04-07 13:28:38.000000000 -0700
33546@@ -0,0 +1,181 @@
33547+/**************************************************************************
33548+ *
33549+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33550+ * All Rights Reserved.
33551+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33552+ * All Rights Reserved.
33553+ *
33554+ * Permission is hereby granted, free of charge, to any person obtaining a
33555+ * copy of this software and associated documentation files (the
33556+ * "Software"), to deal in the Software without restriction, including
33557+ * without limitation the rights to use, copy, modify, merge, publish,
33558+ * distribute, sub license, and/or sell copies of the Software, and to
33559+ * permit persons to whom the Software is furnished to do so, subject to
33560+ * the following conditions:
33561+ *
33562+ * The above copyright notice and this permission notice (including the
33563+ * next paragraph) shall be included in all copies or substantial portions
33564+ * of the Software.
33565+ *
33566+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33567+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33568+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33569+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33570+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33571+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33572+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33573+ *
33574+ **************************************************************************/
33575+/*
33576+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
33577+ */
33578+
33579+/** @file ttm_lock.h
33580+ * This file implements a simple replacement for the buffer manager use
33581+ * of the DRM heavyweight hardware lock.
33582+ * The lock is a read-write lock. Taking it in read mode is fast, and
33583+ * intended for in-kernel use only.
33584+ * Taking it in write mode is slow.
33585+ *
33586+ * The write mode is used only when there is a need to block all
33587+ * user-space processes from validating buffers.
33588+ * It's allowed to leave kernel space with the write lock held.
33589+ * If a user-space process dies while having the write-lock,
33590+ * it will be released during the file descriptor release.
33591+ *
33592+ * The read lock is typically placed at the start of an IOCTL- or
33593+ * user-space callable function that may end up allocating a memory area.
33594+ * This includes setstatus, super-ioctls and faults; the latter may move
33595+ * unmappable regions to mappable. It's a bug to leave kernel space with the
33596+ * read lock held.
33597+ *
33598+ * Both read- and write lock taking is interruptible for low signal-delivery
33599+ * latency. The locking functions will return -ERESTART if interrupted by a
33600+ * signal.
33601+ *
33602+ * Locking order: The lock should be taken BEFORE any TTM mutexes
33603+ * or spinlocks.
33604+ *
33605+ * Typical usages:
33606+ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
33607+ * stops it from being repopulated.
33608+ * b) out-of-VRAM or out-of-aperture space, in which case the process
33609+ * receiving the out-of-space notification may take the lock in write mode
33610+ * and evict all buffers before it starts validating its own buffers.
33611+ */
33612+
33613+#ifndef _TTM_LOCK_H_
33614+#define _TTM_LOCK_H_
33615+
33616+#include "ttm_object.h"
33617+#include <linux/wait.h>
33618+#include <asm/atomic.h>
33619+
33620+/**
33621+ * struct ttm_lock
33622+ *
33623+ * @base: ttm base object used solely to release the lock if the client
33624+ * holding the lock dies.
33625+ * @queue: Queue for processes waiting for lock change-of-status.
33626+ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
33627+ * write lock starvation.
33628+ * @readers: The lock status: A negative number indicates that a write lock is
33629+ * held. Positive values indicate number of concurrent readers.
33630+ */
33631+
33632+struct ttm_lock {
33633+ struct ttm_base_object base;
33634+ wait_queue_head_t queue;
33635+ atomic_t write_lock_pending;
33636+ atomic_t readers;
33637+ bool kill_takers;
33638+ int signal;
33639+};
33640+
33641+/**
33642+ * ttm_lock_init
33643+ *
33644+ * @lock: Pointer to a struct ttm_lock
33645+ * Initializes the lock.
33646+ */
33647+extern void ttm_lock_init(struct ttm_lock *lock);
33648+
33649+/**
33650+ * ttm_read_unlock
33651+ *
33652+ * @lock: Pointer to a struct ttm_lock
33653+ *
33654+ * Releases a read lock.
33655+ */
33656+
33657+extern void ttm_read_unlock(struct ttm_lock *lock);
33658+
33659+/**
33660+ * ttm_read_lock
33661+ *
33662+ * @lock: Pointer to a struct ttm_lock
33663+ * @interruptible: Interruptible sleeping while waiting for a lock.
33664+ *
33665+ * Takes the lock in read mode.
33666+ * Returns:
33667+ * -ERESTART If interrupted by a signal and interruptible is true.
33668+ */
33669+
33670+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
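/*
 * Illustrative sketch (not part of this patch): the typical read-lock
 * pattern at the start of an ioctl that may end up validating buffers.
 * The lock must be dropped again before returning to user space.
 */
static int example_validate_ioctl(struct ttm_lock *lock)
{
	int ret = ttm_read_lock(lock, true);

	if (unlikely(ret != 0))
		return ret;	/* -ERESTART: a signal is pending */

	/* ... validate buffers, possibly allocating memory ... */

	ttm_read_unlock(lock);
	return 0;
}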
33671+
33672+/**
33673+ * ttm_write_lock
33674+ *
33675+ * @lock: Pointer to a struct ttm_lock
33676+ * @interruptible: Interruptible sleeping while waiting for a lock.
33677+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
33678+ * application taking the lock.
33679+ *
33680+ * Takes the lock in write mode.
33681+ * Returns:
33682+ * -ERESTART If interrupted by a signal and interruptible is true.
33683+ * -ENOMEM: Out of memory when locking.
33684+ */
33685+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
33686+ struct ttm_object_file *tfile);
33687+
33688+/**
33689+ * ttm_write_unlock
33690+ *
33691+ * @lock: Pointer to a struct ttm_lock
33692+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
33693+ * application taking the lock.
33694+ *
33695+ * Releases a write lock.
33696+ * Returns:
33697+ * -EINVAL If the lock was not held.
33698+ */
33699+extern int ttm_write_unlock(struct ttm_lock *lock,
33700+ struct ttm_object_file *tfile);
33701+
33702+/**
33703+ * ttm_lock_set_kill
33704+ *
33705+ * @lock: Pointer to a struct ttm_lock
33706+ * @val: Boolean whether to kill processes taking the lock.
33707+ * @signal: Signal to send to the process taking the lock.
33708+ *
33709+ * The kill-when-taking-lock functionality is used to kill processes that keep
33710+ * on using the TTM functionality when its resources have been taken down, for
33711+ * example when the X server exits. A typical sequence would look like this:
33712+ * - X server takes lock in write mode.
33713+ * - ttm_lock_set_kill() is called with @val set to true.
33714+ * - As part of X server exit, TTM resources are taken down.
33715+ * - X server releases the lock on file release.
33716+ * - Another dri client wants to render, takes the lock and is killed.
33717+ *
33718+ */
33719+
33720+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, int signal)
33721+{
33722+ lock->kill_takers = val;
33723+ if (val)
33724+ lock->signal = signal;
33725+}
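/*
 * Illustrative sketch (not part of this patch) of the takedown sequence
 * described above, roughly as it would run on X server exit. "lock" and
 * "tfile" are the server's lock and object file.
 */
static int example_takedown(struct ttm_lock *lock,
			    struct ttm_object_file *tfile)
{
	int ret = ttm_write_lock(lock, true, tfile);

	if (ret != 0)
		return ret;
	ttm_lock_set_kill(lock, true, SIGTERM);
	/* ... take down TTM resources here ... */
	/* The write lock is released on file release; subsequent takers
	   of the lock are now sent SIGTERM. */
	return 0;
}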
33726+
33727+#endif
33728diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_memory.c b/drivers/gpu/drm/psb/ttm/ttm_memory.c
33729--- a/drivers/gpu/drm/psb/ttm/ttm_memory.c 1969-12-31 16:00:00.000000000 -0800
33730+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.c 2009-04-07 13:28:38.000000000 -0700
33731@@ -0,0 +1,232 @@
33732+/**************************************************************************
33733+ *
33734+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33735+ * All Rights Reserved.
33736+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33737+ * All Rights Reserved.
33738+ *
33739+ * Permission is hereby granted, free of charge, to any person obtaining a
33740+ * copy of this software and associated documentation files (the
33741+ * "Software"), to deal in the Software without restriction, including
33742+ * without limitation the rights to use, copy, modify, merge, publish,
33743+ * distribute, sub license, and/or sell copies of the Software, and to
33744+ * permit persons to whom the Software is furnished to do so, subject to
33745+ * the following conditions:
33746+ *
33747+ * The above copyright notice and this permission notice (including the
33748+ * next paragraph) shall be included in all copies or substantial portions
33749+ * of the Software.
33750+ *
33751+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33752+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33753+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33754+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33755+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33756+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33757+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33758+ *
33759+ **************************************************************************/
33760+
33761+#include "ttm/ttm_memory.h"
33762+#include <linux/spinlock.h>
33763+#include <linux/sched.h>
33764+#include <linux/wait.h>
33765+#include <linux/mm.h>
33766+
33767+#define TTM_MEMORY_ALLOC_RETRIES 4
33768+
33769+/**
33770+ * At this point we only support a single shrink callback.
33771+ * Extend this if needed, perhaps using a linked list of callbacks.
33772+ * Note that this function is reentrant:
33773+ * many threads may try to swap out at any given time.
33774+ */
33775+
33776+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
33777+ uint64_t extra)
33778+{
33779+ int ret;
33780+ struct ttm_mem_shrink *shrink;
33781+ uint64_t target;
33782+ uint64_t total_target;
33783+
33784+ spin_lock(&glob->lock);
33785+ if (glob->shrink == NULL)
33786+ goto out;
33787+
33788+ if (from_workqueue) {
33789+ target = glob->swap_limit;
33790+ total_target = glob->total_memory_swap_limit;
33791+ } else if (capable(CAP_SYS_ADMIN)) {
33792+ total_target = glob->emer_total_memory;
33793+ target = glob->emer_memory;
33794+ } else {
33795+ total_target = glob->max_total_memory;
33796+ target = glob->max_memory;
33797+ }
33798+
33799+ total_target = (extra >= total_target) ? 0: total_target - extra;
33800+ target = (extra >= target) ? 0: target - extra;
33801+
33802+ while (glob->used_memory > target ||
33803+ glob->used_total_memory > total_target) {
33804+ shrink = glob->shrink;
33805+ spin_unlock(&glob->lock);
33806+ ret = shrink->do_shrink(shrink);
33807+ spin_lock(&glob->lock);
33808+ if (unlikely(ret != 0))
33809+ goto out;
33810+ }
33811+ out:
33812+ spin_unlock(&glob->lock);
33813+}
33814+
33815+static void ttm_shrink_work(struct work_struct *work)
33816+{
33817+ struct ttm_mem_global *glob =
33818+ container_of(work, struct ttm_mem_global, work);
33819+
33820+ ttm_shrink(glob, true, 0ULL);
33821+}
33822+
33823+int ttm_mem_global_init(struct ttm_mem_global *glob)
33824+{
33825+ struct sysinfo si;
33826+ uint64_t mem;
33827+
33828+ spin_lock_init(&glob->lock);
33829+ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
33830+ INIT_WORK(&glob->work, ttm_shrink_work);
33831+ init_waitqueue_head(&glob->queue);
33832+
33833+ si_meminfo(&si);
33834+
33835+ mem = si.totalram - si.totalhigh;
33836+ mem *= si.mem_unit;
33837+
33838+ glob->max_memory = mem >> 1;
33839+ glob->emer_memory = glob->max_memory + (mem >> 2);
33840+ glob->swap_limit = glob->max_memory - (mem >> 5);
33841+ glob->used_memory = 0;
33842+ glob->used_total_memory = 0;
33843+ glob->shrink = NULL;
33844+
33845+ mem = si.totalram;
33846+ mem *= si.mem_unit;
33847+
33848+ glob->max_total_memory = mem >> 1;
33849+ glob->emer_total_memory = glob->max_total_memory + (mem >> 2);
33850+ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5);
33851+
33852+ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n",
33853+ glob->max_total_memory >> 20);
33854+ printk(KERN_INFO "TTM available object memory: %llu MiB\n",
33855+ glob->max_memory >> 20);
33856+ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n",
33857+ glob->swap_limit >> 20);
33858+
33859+ return 0;
33860+}
33861+
33862+void ttm_mem_global_release(struct ttm_mem_global *glob)
33863+{
33864+ printk(KERN_INFO "Used total memory is %llu bytes.\n",
33865+ (unsigned long long)glob->used_total_memory);
33866+ flush_workqueue(glob->swap_queue);
33867+ destroy_workqueue(glob->swap_queue);
33868+ glob->swap_queue = NULL;
33869+}
33870+
33871+static inline void ttm_check_swapping(struct ttm_mem_global *glob)
33872+{
33873+ bool needs_swapping;
33874+
33875+ spin_lock(&glob->lock);
33876+ needs_swapping = (glob->used_memory > glob->swap_limit ||
33877+ glob->used_total_memory >
33878+ glob->total_memory_swap_limit);
33879+ spin_unlock(&glob->lock);
33880+
33881+ if (unlikely(needs_swapping))
33882+ (void)queue_work(glob->swap_queue, &glob->work);
33883+
33884+}
33885+
33886+void ttm_mem_global_free(struct ttm_mem_global *glob,
33887+ uint64_t amount, bool himem)
33888+{
33889+ spin_lock(&glob->lock);
33890+ glob->used_total_memory -= amount;
33891+ if (!himem)
33892+ glob->used_memory -= amount;
33893+ wake_up_all(&glob->queue);
33894+ spin_unlock(&glob->lock);
33895+}
33896+
33897+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
33898+ uint64_t amount, bool himem, bool reserve)
33899+{
33900+ uint64_t limit;
33901+ uint64_t lomem_limit;
33902+ int ret = -ENOMEM;
33903+
33904+ spin_lock(&glob->lock);
33905+
33906+ if (capable(CAP_SYS_ADMIN)) {
33907+ limit = glob->emer_total_memory;
33908+ lomem_limit = glob->emer_memory;
33909+ } else {
33910+ limit = glob->max_total_memory;
33911+ lomem_limit = glob->max_memory;
33912+ }
33913+
33914+ if (unlikely(glob->used_total_memory + amount > limit))
33915+ goto out_unlock;
33916+ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
33917+ goto out_unlock;
33918+
33919+ if (reserve) {
33920+ glob->used_total_memory += amount;
33921+ if (!himem)
33922+ glob->used_memory += amount;
33923+ }
33924+ ret = 0;
33925+ out_unlock:
33926+ spin_unlock(&glob->lock);
33927+ ttm_check_swapping(glob);
33928+
33929+ return ret;
33930+}
33931+
33932+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
33933+ bool no_wait, bool interruptible, bool himem)
33934+{
33935+ int count = TTM_MEMORY_ALLOC_RETRIES;
33936+
33937+ while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true) != 0)) {
33938+ if (no_wait)
33939+ return -ENOMEM;
33940+ if (unlikely(count-- == 0))
33941+ return -ENOMEM;
33942+ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
33943+ }
33944+
33945+ return 0;
33946+}
33947+
33948+size_t ttm_round_pot(size_t size)
33949+{
33950+ if ((size & (size - 1)) == 0)
33951+ return size;
33952+ else if (size > PAGE_SIZE)
33953+ return PAGE_ALIGN(size);
33954+ else {
33955+ size_t tmp_size = 4;
33956+
33957+ while (tmp_size < size)
33958+ tmp_size <<= 1;
33959+
33960+ return tmp_size;
33961+ }
33963+}
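/*
 * Worked example (illustrative, not part of this patch): with 4 KiB
 * pages, ttm_round_pot(100) returns 128, ttm_round_pot(4096) returns
 * 4096, and ttm_round_pot(5000) returns PAGE_ALIGN(5000) == 8192.
 * Accounting code uses this to charge the size the allocator will
 * actually hand out rather than the requested size.
 */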
33964diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_memory.h b/drivers/gpu/drm/psb/ttm/ttm_memory.h
33965--- a/drivers/gpu/drm/psb/ttm/ttm_memory.h 1969-12-31 16:00:00.000000000 -0800
33966+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.h 2009-04-07 13:28:38.000000000 -0700
33967@@ -0,0 +1,154 @@
33968+/**************************************************************************
33969+ *
33970+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
33971+ * All Rights Reserved.
33972+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
33973+ * All Rights Reserved.
33974+ *
33975+ * Permission is hereby granted, free of charge, to any person obtaining a
33976+ * copy of this software and associated documentation files (the
33977+ * "Software"), to deal in the Software without restriction, including
33978+ * without limitation the rights to use, copy, modify, merge, publish,
33979+ * distribute, sub license, and/or sell copies of the Software, and to
33980+ * permit persons to whom the Software is furnished to do so, subject to
33981+ * the following conditions:
33982+ *
33983+ * The above copyright notice and this permission notice (including the
33984+ * next paragraph) shall be included in all copies or substantial portions
33985+ * of the Software.
33986+ *
33987+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33988+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33989+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33990+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33991+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33992+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33993+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33994+ *
33995+ **************************************************************************/
33996+
33997+#ifndef TTM_MEMORY_H
33998+#define TTM_MEMORY_H
33999+
34000+#include <linux/workqueue.h>
34001+#include <linux/spinlock.h>
34002+#include <linux/wait.h>
34003+
34004+/**
34005+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
34006+ *
34007+ * @do_shrink: The callback function.
34008+ *
34009+ * Arguments to the do_shrink functions are intended to be passed using
34010+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
34011+ * and can be accessed using container_of().
34012+ */
34013+
34014+struct ttm_mem_shrink {
34015+ int (*do_shrink) (struct ttm_mem_shrink *);
34016+};
34017+
34018+/**
34019+ * struct ttm_mem_global - Global memory accounting structure.
34020+ *
34021+ * @shrink: A single callback to shrink TTM memory usage. Extend this
34022+ * to a linked list to be able to handle multiple callbacks when needed.
34023+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
34024+ * need a separate workqueue since it will spend a lot of time waiting
34025+ * for the GPU, and this will otherwise block other workqueue tasks(?)
34026+ * At this point we use only a single-threaded workqueue.
34027+ * @work: The workqueue callback for the shrink queue.
34028+ * @queue: Wait queue for processes suspended waiting for memory.
34029+ * @lock: Lock to protect the @shrink - and the memory accounting members,
34030+ * that is, essentially the whole structure with some exceptions.
34031+ * @emer_memory: Lowmem memory limit available for root.
34032+ * @max_memory: Lowmem memory limit available for non-root.
34033+ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
34034+ * @used_memory: Currently used lowmem memory.
34035+ * @used_total_memory: Currently used total (lowmem + highmem) memory.
34036+ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
34037+ * kicks in.
34038+ * @max_total_memory: Total memory available to non-root processes.
34039+ * @emer_total_memory: Total memory available to root processes.
34040+ *
34041+ * Note that this structure is not per device. It should be global for all
34042+ * graphics devices.
34043+ */
34044+
34045+struct ttm_mem_global {
34046+ struct ttm_mem_shrink *shrink;
34047+ struct workqueue_struct *swap_queue;
34048+ struct work_struct work;
34049+ wait_queue_head_t queue;
34050+ spinlock_t lock;
34051+ uint64_t emer_memory;
34052+ uint64_t max_memory;
34053+ uint64_t swap_limit;
34054+ uint64_t used_memory;
34055+ uint64_t used_total_memory;
34056+ uint64_t total_memory_swap_limit;
34057+ uint64_t max_total_memory;
34058+ uint64_t emer_total_memory;
34059+};
34060+
34061+/**
34062+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
34063+ *
34064+ * @shrink: The object to initialize.
34065+ * @func: The callback function.
34066+ */
34067+
34068+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
34069+ int (*func) (struct ttm_mem_shrink *))
34070+{
34071+ shrink->do_shrink = func;
34072+}
34073+
34074+/**
34075+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
34076+ *
34077+ * @glob: The struct ttm_mem_global object to register with.
34078+ * @shrink: An initialized struct ttm_mem_shrink object to register.
34079+ *
34080+ * Returns:
34081+ * -EBUSY: There's already a callback registered. (May change).
34082+ */
34083+
34084+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
34085+ struct ttm_mem_shrink *shrink)
34086+{
34087+ spin_lock(&glob->lock);
34088+ if (glob->shrink != NULL) {
34089+ spin_unlock(&glob->lock);
34090+ return -EBUSY;
34091+ }
34092+ glob->shrink = shrink;
34093+ spin_unlock(&glob->lock);
34094+ return 0;
34095+}
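/*
 * Illustrative sketch (not part of this patch): a driver-side shrink
 * callback. The argument is passed by inheritance, so the callback
 * recovers its private state with container_of(). my_evict_some() and
 * struct my_device are hypothetical.
 */
struct my_shrink {
	struct ttm_mem_shrink shrink;
	struct my_device *mdev;
};

static int my_do_shrink(struct ttm_mem_shrink *shrink)
{
	struct my_shrink *ms = container_of(shrink, struct my_shrink, shrink);

	return my_evict_some(ms->mdev);	/* swap out or evict some buffers */
}

/*
 * Registration, typically once at device init:
 *	ttm_mem_init_shrink(&ms->shrink, my_do_shrink);
 *	ret = ttm_mem_register_shrink(glob, &ms->shrink);
 */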
34096+
34097+/**
34098+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
34099+ *
34100+ * @glob: The struct ttm_mem_global object to unregister from.
34101+ * @shrink: A previously registered struct ttm_mem_shrink object.
34102+ *
34103+ */
34104+
34105+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
34106+ struct ttm_mem_shrink *shrink)
34107+{
34108+ spin_lock(&glob->lock);
34109+ BUG_ON(glob->shrink != shrink);
34110+ glob->shrink = NULL;
34111+ spin_unlock(&glob->lock);
34112+}
34113+
34114+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
34115+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
34116+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
34117+ bool no_wait, bool interruptible, bool himem);
34118+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
34119+ uint64_t amount, bool himem);
34120+extern size_t ttm_round_pot(size_t size);
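/*
 * Illustrative sketch (not part of this patch): the accounting pattern
 * used throughout this series (compare ttm_fence_user_create above):
 * reserve before kmalloc() and release the reservation on every failure
 * path and in the destructor. struct my_object is hypothetical.
 */
struct my_object {
	int payload;
};

static struct my_object *my_object_alloc(struct ttm_mem_global *glob)
{
	struct my_object *obj;

	if (ttm_mem_global_alloc(glob, sizeof(*obj), false, false, false))
		return NULL;	/* over the accounting limit */

	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		ttm_mem_global_free(glob, sizeof(*obj), false);
	return obj;
}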
34121+#endif
34122diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_object.c b/drivers/gpu/drm/psb/ttm/ttm_object.c
34123--- a/drivers/gpu/drm/psb/ttm/ttm_object.c 1969-12-31 16:00:00.000000000 -0800
34124+++ b/drivers/gpu/drm/psb/ttm/ttm_object.c 2009-04-07 13:28:38.000000000 -0700
34125@@ -0,0 +1,444 @@
34126+/**************************************************************************
34127+ *
34128+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34129+ * All Rights Reserved.
34130+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34131+ * All Rights Reserved.
34132+ *
34133+ * Permission is hereby granted, free of charge, to any person obtaining a
34134+ * copy of this software and associated documentation files (the
34135+ * "Software"), to deal in the Software without restriction, including
34136+ * without limitation the rights to use, copy, modify, merge, publish,
34137+ * distribute, sub license, and/or sell copies of the Software, and to
34138+ * permit persons to whom the Software is furnished to do so, subject to
34139+ * the following conditions:
34140+ *
34141+ * The above copyright notice and this permission notice (including the
34142+ * next paragraph) shall be included in all copies or substantial portions
34143+ * of the Software.
34144+ *
34145+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34146+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34147+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34148+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34149+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34150+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34151+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34152+ *
34153+ **************************************************************************/
34154+/*
34155+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
34156+ */
34157+/** @file ttm_ref_object.c
34158+ *
34159+ * Base- and reference object implementation for the various
34160+ * ttm objects. Implements reference counting, minimal security checks
34161+ * and release on file close.
34162+ */
34163+
34164+/**
34165+ * struct ttm_object_file
34166+ *
34167+ * @tdev: Pointer to the ttm_object_device.
34168+ *
34169+ * @lock: Lock that protects the ref_list list and the
34170+ * ref_hash hash tables.
34171+ *
34172+ * @ref_list: List of ttm_ref_objects to be destroyed at
34173+ * file release.
34174+ *
34175+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
34176+ * for fast lookup of ref objects given a base object.
34177+ */
34178+
34179+#include "ttm/ttm_object.h"
34180+#include <linux/list.h>
34181+#include <linux/spinlock.h>
34182+#include <linux/slab.h>
34183+#include <asm/atomic.h>
34184+
34185+struct ttm_object_file {
34186+ struct ttm_object_device *tdev;
34187+ rwlock_t lock;
34188+ struct list_head ref_list;
34189+ struct drm_open_hash ref_hash[TTM_REF_NUM];
34190+ struct kref refcount;
34191+};
34192+
34193+/**
34194+ * struct ttm_object_device
34195+ *
34196+ * @object_lock: lock that protects the object_hash hash table.
34197+ *
34198+ * @object_hash: hash table for fast lookup of object global names.
34199+ *
34200+ * @object_count: Per device object count.
34201+ *
34202+ * This is the per-device data structure needed for ttm object management.
34203+ */
34204+
34205+struct ttm_object_device {
34206+ rwlock_t object_lock;
34207+ struct drm_open_hash object_hash;
34208+ atomic_t object_count;
34209+ struct ttm_mem_global *mem_glob;
34210+};
34211+
34212+/**
34213+ * struct ttm_ref_object
34214+ *
34215+ * @hash: Hash entry for the per-file object reference hash.
34216+ *
34217+ * @head: List entry for the per-file list of ref-objects.
34218+ *
34219+ * @kref: Ref count.
34220+ *
34221+ * @obj: Base object this ref object is referencing.
34222+ *
34223+ * @ref_type: Type of ref object.
34224+ *
34225+ * This is similar to an idr object, but it also has a hash table entry
34226+ * that allows lookup with a pointer to the referenced object as a key. In
34227+ * that way, one can easily detect whether a base object is referenced by
34228+ * a particular ttm_object_file. It also carries a ref count to avoid creating
34229+ * multiple ref objects if a ttm_object_file references the same base object more
34230+ * than once.
34231+ */
34232+
34233+struct ttm_ref_object {
34234+ struct drm_hash_item hash;
34235+ struct list_head head;
34236+ struct kref kref;
34237+ struct ttm_base_object *obj;
34238+ enum ttm_ref_type ref_type;
34239+ struct ttm_object_file *tfile;
34240+};
34241+
34242+static inline struct ttm_object_file *
34243+ttm_object_file_ref(struct ttm_object_file *tfile)
34244+{
34245+ kref_get(&tfile->refcount);
34246+ return tfile;
34247+}
34248+
34249+static void ttm_object_file_destroy(struct kref *kref)
34250+{
34251+ struct ttm_object_file *tfile =
34252+ container_of(kref, struct ttm_object_file, refcount);
34253+
34255+ kfree(tfile);
34256+}
34257+
34258+
34259+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
34260+{
34261+ struct ttm_object_file *tfile = *p_tfile;
34262+
34263+ *p_tfile = NULL;
34264+ kref_put(&tfile->refcount, ttm_object_file_destroy);
34265+}
34266+
34267+
34268+int ttm_base_object_init(struct ttm_object_file *tfile,
34269+ struct ttm_base_object *base,
34270+ bool shareable,
34271+ enum ttm_object_type object_type,
34272+ void (*refcount_release) (struct ttm_base_object **),
34273+ void (*ref_obj_release) (struct ttm_base_object *,
34274+ enum ttm_ref_type ref_type))
34275+{
34276+ struct ttm_object_device *tdev = tfile->tdev;
34277+ int ret;
34278+
34279+ base->shareable = shareable;
34280+ base->tfile = ttm_object_file_ref(tfile);
34281+ base->refcount_release = refcount_release;
34282+ base->ref_obj_release = ref_obj_release;
34283+ base->object_type = object_type;
34284+ write_lock(&tdev->object_lock);
34285+ kref_init(&base->refcount);
34286+ ret = drm_ht_just_insert_please(&tdev->object_hash,
34287+ &base->hash,
34288+ (unsigned long)base, 31, 0, 0);
34289+ write_unlock(&tdev->object_lock);
34290+ if (unlikely(ret != 0))
34291+ goto out_err0;
34292+
34293+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
34294+ if (unlikely(ret != 0))
34295+ goto out_err1;
34296+
34297+ ttm_base_object_unref(&base);
34298+
34299+ return 0;
34300+ out_err1:
34301+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
34302+ out_err0:
34303+ return ret;
34304+}
34305+
34306+static void ttm_release_base(struct kref *kref)
34307+{
34308+ struct ttm_base_object *base =
34309+ container_of(kref, struct ttm_base_object, refcount);
34310+ struct ttm_object_device *tdev = base->tfile->tdev;
34311+
34312+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
34313+ write_unlock(&tdev->object_lock);
34314+ if (base->refcount_release) {
34315+ ttm_object_file_unref(&base->tfile);
34316+ base->refcount_release(&base);
34317+ }
34318+ write_lock(&tdev->object_lock);
34319+}
34320+
34321+void ttm_base_object_unref(struct ttm_base_object **p_base)
34322+{
34323+ struct ttm_base_object *base = *p_base;
34324+ struct ttm_object_device *tdev = base->tfile->tdev;
34325+
34327+ *p_base = NULL;
34328+
34329+ /*
34330+ * Need to take the lock here to avoid racing with
34331+ * users trying to look up the object.
34332+ */
34333+
34334+ write_lock(&tdev->object_lock);
34335+ (void)kref_put(&base->refcount, &ttm_release_base);
34336+ write_unlock(&tdev->object_lock);
34337+}
34338+
34339+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
34340+ uint32_t key)
34341+{
34342+ struct ttm_object_device *tdev = tfile->tdev;
34343+ struct ttm_base_object *base;
34344+ struct drm_hash_item *hash;
34345+ int ret;
34346+
34347+ read_lock(&tdev->object_lock);
34348+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
34349+
34350+ if (likely(ret == 0)) {
34351+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
34352+ kref_get(&base->refcount);
34353+ }
34354+ read_unlock(&tdev->object_lock);
34355+
34356+ if (unlikely(ret != 0))
34357+ return NULL;
34358+
34359+ if (tfile != base->tfile && !base->shareable) {
34360+ printk(KERN_ERR "Attempted access of non-shareable object.\n");
34361+ ttm_base_object_unref(&base);
34362+ return NULL;
34363+ }
34364+
34365+ return base;
34366+}
34367+
34368+int ttm_ref_object_add(struct ttm_object_file *tfile,
34369+ struct ttm_base_object *base,
34370+ enum ttm_ref_type ref_type, bool *existed)
34371+{
34372+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
34373+ struct ttm_ref_object *ref;
34374+ struct drm_hash_item *hash;
34375+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
34376+ int ret = -EINVAL;
34377+
34378+ if (existed != NULL)
34379+ *existed = true;
34380+
34381+ while (ret == -EINVAL) {
34382+ read_lock(&tfile->lock);
34383+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
34384+
34385+ if (ret == 0) {
34386+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
34387+ kref_get(&ref->kref);
34388+ read_unlock(&tfile->lock);
34389+ break;
34390+ }
34391+
34392+ read_unlock(&tfile->lock);
34393+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false, false);
34394+ if (unlikely(ret != 0))
34395+ return ret;
34396+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
34397+ if (unlikely(ref == NULL)) {
34398+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
34399+ return -ENOMEM;
34400+ }
34401+
34402+ ref->hash.key = base->hash.key;
34403+ ref->obj = base;
34404+ ref->tfile = tfile;
34405+ ref->ref_type = ref_type;
34406+ kref_init(&ref->kref);
34407+
34408+ write_lock(&tfile->lock);
34409+ ret = drm_ht_insert_item(ht, &ref->hash);
34410+
34411+ if (likely(ret == 0)) {
34412+ list_add_tail(&ref->head, &tfile->ref_list);
34413+ kref_get(&base->refcount);
34414+ write_unlock(&tfile->lock);
34415+ if (existed != NULL)
34416+ *existed = false;
34417+ break;
34418+ }
34419+
34420+ write_unlock(&tfile->lock);
34421+ BUG_ON(ret != -EINVAL);
34422+
34423+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
34424+ kfree(ref);
34425+ }
34426+
34427+ return ret;
34428+}
34429+
34430+static void ttm_ref_object_release(struct kref *kref)
34431+{
34432+ struct ttm_ref_object *ref =
34433+ container_of(kref, struct ttm_ref_object, kref);
34434+ struct ttm_base_object *base = ref->obj;
34435+ struct ttm_object_file *tfile = ref->tfile;
34436+ struct drm_open_hash *ht;
34437+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
34438+
34439+ ht = &tfile->ref_hash[ref->ref_type];
34440+ (void)drm_ht_remove_item(ht, &ref->hash);
34441+ list_del(&ref->head);
34442+ write_unlock(&tfile->lock);
34443+
34444+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
34445+ base->ref_obj_release(base, ref->ref_type);
34446+
34447+ ttm_base_object_unref(&ref->obj);
34448+ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
34449+ kfree(ref);
34450+ write_lock(&tfile->lock);
34451+}
34452+
34453+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
34454+ unsigned long key, enum ttm_ref_type ref_type)
34455+{
34456+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
34457+ struct ttm_ref_object *ref;
34458+ struct drm_hash_item *hash;
34459+ int ret;
34460+
34461+ write_lock(&tfile->lock);
34462+ ret = drm_ht_find_item(ht, key, &hash);
34463+ if (unlikely(ret != 0)) {
34464+ write_unlock(&tfile->lock);
34465+ return -EINVAL;
34466+ }
34467+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
34468+ kref_put(&ref->kref, ttm_ref_object_release);
34469+ write_unlock(&tfile->lock);
34470+ return 0;
34471+}
34472+
34473+void ttm_object_file_release(struct ttm_object_file **p_tfile)
34474+{
34475+ struct ttm_ref_object *ref;
34476+ struct list_head *list;
34477+ unsigned int i;
34478+ struct ttm_object_file *tfile = *p_tfile;
34479+
34480+ *p_tfile = NULL;
34481+ write_lock(&tfile->lock);
34482+
34483+ /*
34484+ * Since we release the lock within the loop, we have to
34485+ * restart it from the beginning each time.
34486+ */
34487+
34488+ while (!list_empty(&tfile->ref_list)) {
34489+ list = tfile->ref_list.next;
34490+ ref = list_entry(list, struct ttm_ref_object, head);
34491+ ttm_ref_object_release(&ref->kref);
34492+ }
34493+
34494+ for (i = 0; i < TTM_REF_NUM; ++i) {
34495+ drm_ht_remove(&tfile->ref_hash[i]);
34496+ }
34497+
34498+ write_unlock(&tfile->lock);
34499+ ttm_object_file_unref(&tfile);
34500+}
34501+
34502+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
34503+ unsigned int hash_order)
34504+{
34505+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
34506+ unsigned int i;
34507+ unsigned int j = 0;
34508+ int ret;
34509+
34510+ if (unlikely(tfile == NULL))
34511+ return NULL;
34512+
34513+ rwlock_init(&tfile->lock);
34514+ tfile->tdev = tdev;
34515+ kref_init(&tfile->refcount);
34516+ INIT_LIST_HEAD(&tfile->ref_list);
34517+
34518+ for (i = 0; i < TTM_REF_NUM; ++i) {
34519+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
34520+ if (ret) {
34521+ j = i;
34522+ goto out_err;
34523+ }
34524+ }
34525+
34526+ return tfile;
34527+ out_err:
34528+ for (i = 0; i < j; ++i) {
34529+ drm_ht_remove(&tfile->ref_hash[i]);
34530+ }
34531+ kfree(tfile);
34532+
34533+ return NULL;
34534+}
34535+
34536+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
34537+ *mem_glob,
34538+ unsigned int hash_order)
34539+{
34540+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
34541+ int ret;
34542+
34543+ if (unlikely(tdev == NULL))
34544+ return NULL;
34545+
34546+ tdev->mem_glob = mem_glob;
34547+ rwlock_init(&tdev->object_lock);
34548+ atomic_set(&tdev->object_count, 0);
34549+ ret = drm_ht_create(&tdev->object_hash, hash_order);
34550+
34551+ if (likely(ret == 0))
34552+ return tdev;
34553+
34554+ kfree(tdev);
34555+ return NULL;
34556+}
34557+
34558+void ttm_object_device_release(struct ttm_object_device **p_tdev)
34559+{
34560+ struct ttm_object_device *tdev = *p_tdev;
34561+
34562+ *p_tdev = NULL;
34563+
34564+ write_lock(&tdev->object_lock);
34565+ drm_ht_remove(&tdev->object_hash);
34566+ write_unlock(&tdev->object_lock);
34567+
34568+ kfree(tdev);
34569+}
34570diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_object.h b/drivers/gpu/drm/psb/ttm/ttm_object.h
34571--- a/drivers/gpu/drm/psb/ttm/ttm_object.h 1969-12-31 16:00:00.000000000 -0800
34572+++ b/drivers/gpu/drm/psb/ttm/ttm_object.h 2009-04-07 13:28:38.000000000 -0700
34573@@ -0,0 +1,269 @@
34574+/**************************************************************************
34575+ *
34576+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34577+ * All Rights Reserved.
34578+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34579+ * All Rights Reserved.
34580+ *
34581+ * Permission is hereby granted, free of charge, to any person obtaining a
34582+ * copy of this software and associated documentation files (the
34583+ * "Software"), to deal in the Software without restriction, including
34584+ * without limitation the rights to use, copy, modify, merge, publish,
34585+ * distribute, sub license, and/or sell copies of the Software, and to
34586+ * permit persons to whom the Software is furnished to do so, subject to
34587+ * the following conditions:
34588+ *
34589+ * The above copyright notice and this permission notice (including the
34590+ * next paragraph) shall be included in all copies or substantial portions
34591+ * of the Software.
34592+ *
34593+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34594+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34595+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34596+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34597+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34598+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34599+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34600+ *
34601+ **************************************************************************/
34602+/*
34603+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
34604+ */
34605+/** @file ttm_object.h
34606+ *
34607+ * Base- and reference object implementation for the various
34608+ * ttm objects. Implements reference counting, minimal security checks
34609+ * and release on file close.
34610+ */
34611+
34612+#ifndef _TTM_OBJECT_H_
34613+#define _TTM_OBJECT_H_
34614+
34615+#include <linux/list.h>
34616+#include <drm/drm_hashtab.h>
34617+#include <linux/kref.h>
34618+#include <ttm/ttm_memory.h>
34619+
34620+/**
34621+ * enum ttm_ref_type
34622+ *
34623+ * Describes what type of reference a ref object holds.
34624+ *
34625+ * TTM_REF_USAGE is a simple refcount on a base object.
34626+ *
34627+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
34628+ * buffer object.
34629+ *
34630+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
34631+ * buffer object.
34632+ *
34633+ */
34634+
34635+enum ttm_ref_type {
34636+ TTM_REF_USAGE,
34637+ TTM_REF_SYNCCPU_READ,
34638+ TTM_REF_SYNCCPU_WRITE,
34639+ TTM_REF_NUM
34640+};
34641+
34642+/**
34643+ * enum ttm_object_type
34644+ *
34645+ * One entry per ttm object type.
34646+ * Device-specific types should use the
34647+ * ttm_driver_typeN types.
34648+ */
34649+
34650+enum ttm_object_type {
34651+ ttm_fence_type,
34652+ ttm_buffer_type,
34653+ ttm_lock_type,
34654+ ttm_driver_type0 = 256,
34655+ ttm_driver_type1
34656+};
34657+
34658+struct ttm_object_file;
34659+struct ttm_object_device;
34660+
34661+/**
34662+ * struct ttm_base_object
34663+ *
34664+ * @hash: hash entry for the per-device object hash.
34665+ * @type: derived type this object is base class for.
34666+ * @shareable: Other ttm_object_files can access this object.
34667+ *
34668+ * @tfile: Pointer to ttm_object_file of the creator.
34669+ * NULL if the object was not created by a user request
34670+ * (i.e., a kernel object).
34671+ *
34672+ * @refcount: Number of references to this object, not
34673+ * including the hash entry. A reference to a base object can
34674+ * only be held by a ref object.
34675+ *
34676+ * @refcount_release: A function to be called when there are
34677+ * no more references to this object. This function should
34678+ * destroy the object (or make sure destruction eventually happens),
34679+ * and when it is called, the object has
34680+ * already been taken out of the per-device hash. The parameter
34681+ * "base" should be set to NULL by the function.
34682+ *
34683+ * @ref_obj_release: A function to be called when a reference object
34684+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
34685+ * this function may, for example, release a lock held by a user-space
34686+ * process.
34687+ *
34688+ * This struct is intended to be used as a base struct for objects that
34689+ * are visible to user-space. It provides a global name, race-safe
34690+ * access and refcounting, minimal access control and hooks for unref actions.
34691+ */
34692+
34693+struct ttm_base_object {
34694+ struct drm_hash_item hash;
34695+ enum ttm_object_type object_type;
34696+ bool shareable;
34697+ struct ttm_object_file *tfile;
34698+ struct kref refcount;
34699+ void (*refcount_release) (struct ttm_base_object ** base);
34700+ void (*ref_obj_release) (struct ttm_base_object * base,
34701+ enum ttm_ref_type ref_type);
34702+};
34703+
34704+/**
34705+ * ttm_base_object_init
34706+ *
34707+ * @tfile: Pointer to a struct ttm_object_file.
34708+ * @base: The struct ttm_base_object to initialize.
34709+ * @shareable: This object is shareable with other applications.
34710+ * (different @tfile pointers.)
34711+ * @type: The object type.
34712+ * @refcount_release: See the struct ttm_base_object description.
34713+ * @ref_obj_release: See the struct ttm_base_object description.
34714+ *
34715+ * Initializes a struct ttm_base_object.
34716+ */
34717+
34718+extern int ttm_base_object_init(struct ttm_object_file *tfile,
34719+ struct ttm_base_object *base,
34720+ bool shareable,
34721+ enum ttm_object_type type,
34722+ void (*refcount_release) (struct ttm_base_object
34723+ **),
34724+ void (*ref_obj_release) (struct ttm_base_object
34725+ *,
34726+ enum ttm_ref_type
34727+ ref_type));
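To make the contract above concrete, here is a minimal sketch of a driver object built on this API; my_fence and its helpers are illustrative names (not part of the patch), and a kernel context with <linux/slab.h> and this header is assumed:

struct my_fence {
	struct ttm_base_object base;	/* embedded base class */
	/* ... driver-private state ... */
};

/* Called when the last reference is gone; the object has already been
 * taken out of the per-device hash. Per the contract, *p_base must be
 * set to NULL here. */
static void my_fence_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct my_fence *fence = container_of(base, struct my_fence, base);

	*p_base = NULL;
	kfree(fence);
}

static int my_fence_create(struct ttm_object_file *tfile,
			   struct my_fence *fence, bool shareable)
{
	/* On success the object is reachable through fence->base.hash.key
	 * and is kept alive by a TTM_REF_USAGE ref object owned by tfile. */
	return ttm_base_object_init(tfile, &fence->base, shareable,
				    ttm_fence_type,
				    &my_fence_refcount_release, NULL);
}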
34728+
34729+/**
34730+ * ttm_base_object_lookup
34731+ *
34732+ * @tfile: Pointer to a struct ttm_object_file.
34733+ * @key: Hash key
34734+ *
34735+ * Looks up a struct ttm_base_object with the key @key.
34736+ * Also verifies that the object is visible to the application, by
34737+ * comparing the @tfile argument and checking the object shareable flag.
34738+ */
34739+
34740+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
34741+ *tfile, uint32_t key);
34742+
34743+/**
34744+ * ttm_base_object_unref
34745+ *
34746+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
34747+ *
34748+ * Decrements the base object refcount and clears the pointer pointed to by
34749+ * p_base.
34750+ */
34751+
34752+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
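Lookup and unref are meant to be paired; the pattern below mirrors what ttm_bo_user_lookup() does later in this patch. A hedged sketch, reusing the hypothetical my_fence type from above:

static struct my_fence *my_fence_lookup(struct ttm_object_file *tfile,
					uint32_t key)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, key);

	if (unlikely(base == NULL))
		return NULL;	/* unknown key, or object not shareable */

	if (unlikely(base->object_type != ttm_fence_type)) {
		ttm_base_object_unref(&base);	/* also NULLs base */
		return NULL;
	}

	/* The caller now holds the lookup reference and must drop it
	 * with ttm_base_object_unref() when done with the object. */
	return container_of(base, struct my_fence, base);
}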
34753+
34754+/**
34755+ * ttm_ref_object_add.
34756+ *
34757+ * @tfile: A struct ttm_object_file representing the application owning the
34758+ * ref_object.
34759+ * @base: The base object to reference.
34760+ * @ref_type: The type of reference.
34761+ * @existed: Upon completion, indicates that an identical reference object
34762+ * already existed, and the refcount was upped on that object instead.
34763+ *
34764+ * Adding a ref object to a base object is basically like referencing the
34765+ * base object, but a user-space application holds the reference. When the
34766+ * file corresponding to @tfile is closed, all its reference objects are
34767+ * deleted. A reference object can have different types depending on what
34768+ * it's intended for. It can be used for refcounting to prevent object
34769+ * destruction. When user-space takes a lock, it can add a ref object to
34770+ * that lock to make sure the lock is released if the application dies.
34771+ * A ref object will hold a single reference on a base object.
34772+ */
34773+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
34774+ struct ttm_base_object *base,
34775+ enum ttm_ref_type ref_type, bool *existed);
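This add/unref pairing is what the TTM_PL_REFERENCE and TTM_PL_UNREF ioctls later in this patch are built from; a minimal sketch (the my_* names are illustrative, not part of the patch):

static int my_reference_ioctl(struct ttm_object_file *tfile, uint32_t handle)
{
	struct ttm_base_object *base;
	bool existed;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
	ttm_base_object_unref(&base);	/* the ref object holds its own ref */
	return ret;
}

/* The matching unref; also performed implicitly when the file closes. */
static int my_unreference_ioctl(struct ttm_object_file *tfile, uint32_t handle)
{
	return ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
}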
34776+/**
34777+ * ttm_ref_object_base_unref
34778+ *
34779+ * @key: Key representing the base object.
34780+ * @ref_type: Ref type of the ref object to be dereferenced.
34781+ *
34782+ * Unreference a ref object with type @ref_type
34783+ * on the base object identified by @key. If there are no duplicate
34784+ * references, the ref object will be destroyed and the base object
34785+ * will be unreferenced.
34786+ */
34787+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
34788+ unsigned long key,
34789+ enum ttm_ref_type ref_type);
34790+
34791+/**
34792+ * ttm_object_file_init - initialize a struct ttm_object file
34793+ *
34794+ * @tdev: A struct ttm_object device this file is initialized on.
34795+ * @hash_order: Order of the hash table used to hold the reference objects.
34796+ *
34797+ * This is typically called by the file_ops::open function.
34798+ */
34799+
34800+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
34801+ *tdev,
34802+ unsigned int hash_order);
34803+
34804+/**
34805+ * ttm_object_file_release - release data held by a ttm_object_file
34806+ *
34807+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
34808+ * *p_tfile will be set to NULL by this function.
34809+ *
34810+ * Releases all data associated with a ttm_object_file.
34811+ * Typically called from file_ops::release. The caller must
34812+ * ensure that there are no concurrent users of tfile.
34813+ */
34814+
34815+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
34816+
34817+/**
34818+ * ttm_object_device_init - initialize a struct ttm_object_device
34819+ *
34820+ * @hash_order: Order of hash table used to hash the base objects.
34821+ *
34822+ * This function is typically called on device initialization to prepare
34823+ * data structures needed for ttm base and ref objects.
34824+ */
34825+
34826+extern struct ttm_object_device *ttm_object_device_init
34827+ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
34828+
34829+/**
34830+ * ttm_object_device_release - release data held by a ttm_object_device
34831+ *
34832+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
34833+ * *p_tdev will be set to NULL by this function.
34834+ *
34835+ * Releases all data associated with a ttm_object_device.
34836+ * Typically called from driver::unload before the destruction of the
34837+ * device private data structure.
34838+ */
34839+
34840+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
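Putting the four lifecycle entry points together, a driver would typically wire them up as sketched below; priv, file_priv and the hash orders are assumptions made for illustration:

/* Driver load: one object device per DRM device. */
priv->tdev = ttm_object_device_init(mem_glob, 12);
if (unlikely(priv->tdev == NULL))
	return -ENOMEM;

/* file_ops::open: one object file per client. */
file_priv->tfile = ttm_object_file_init(priv->tdev, 10);
if (unlikely(file_priv->tfile == NULL))
	return -ENOMEM;

/* file_ops::release: drops all ref objects the client still holds. */
ttm_object_file_release(&file_priv->tfile);

/* Driver unload, after all files have been closed. */
ttm_object_device_release(&priv->tdev);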
34841+
34842+#endif
34843diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
34844--- a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c 1969-12-31 16:00:00.000000000 -0800
34845+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c 2009-04-07 13:28:38.000000000 -0700
34846@@ -0,0 +1,178 @@
34847+/**************************************************************************
34848+ *
34849+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
34850+ * All Rights Reserved.
34851+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
34852+ * All Rights Reserved.
34853+ *
34854+ * Permission is hereby granted, free of charge, to any person obtaining a
34855+ * copy of this software and associated documentation files (the
34856+ * "Software"), to deal in the Software without restriction, including
34857+ * without limitation the rights to use, copy, modify, merge, publish,
34858+ * distribute, sub license, and/or sell copies of the Software, and to
34859+ * permit persons to whom the Software is furnished to do so, subject to
34860+ * the following conditions:
34861+ *
34862+ * The above copyright notice and this permission notice (including the
34863+ * next paragraph) shall be included in all copies or substantial portions
34864+ * of the Software.
34865+ *
34866+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
34867+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34868+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
34869+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
34870+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
34871+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
34872+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
34873+ *
34874+ **************************************************************************/
34875+/*
34876+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
34877+ */
34878+
34879+#include "ttm/ttm_pat_compat.h"
34880+#include <linux/version.h>
34881+
34882+#include <linux/spinlock.h>
34883+#include <asm/pgtable.h>
34884+
34885+#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
34886+#include <asm/tlbflush.h>
34887+#include <asm/msr.h>
34888+#include <asm/system.h>
34889+#include <linux/notifier.h>
34890+#include <linux/cpu.h>
34891+
34892+#ifndef MSR_IA32_CR_PAT
34893+#define MSR_IA32_CR_PAT 0x0277
34894+#endif
34895+
34896+#ifndef _PAGE_PAT
34897+#define _PAGE_PAT 0x080
34898+#endif
34899+
34900+static int ttm_has_pat = 0;
34901+
34902+/*
34903+ * Used at resume-time when CPU-s are fired up.
34904+ */
34905+
34906+static void ttm_pat_ipi_handler(void *notused)
34907+{
34908+ u32 v1, v2;
34909+
34910+ rdmsr(MSR_IA32_CR_PAT, v1, v2);
34911+ v2 &= 0xFFFFFFF8;
34912+ v2 |= 0x00000001;
34913+ wbinvd();
34914+ wrmsr(MSR_IA32_CR_PAT, v1, v2);
34915+ wbinvd();
34916+ __flush_tlb_all();
34917+}
34918+
34919+static void ttm_pat_enable(void)
34920+{
34921+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
34922+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1, 1) != 0) {
34923+#else
34924+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0) {
34925+#endif
34926+ printk(KERN_ERR "Timed out setting up CPU PAT.\n");
34927+ }
34928+}
34929+
34930+void ttm_pat_resume(void)
34931+{
34932+ if (unlikely(!ttm_has_pat))
34933+ return;
34934+
34935+ ttm_pat_enable();
34936+}
34937+
34938+static int psb_cpu_callback(struct notifier_block *nfb,
34939+ unsigned long action, void *hcpu)
34940+{
34941+ if (action == CPU_ONLINE) {
34942+ ttm_pat_resume();
34943+ }
34944+
34945+ return 0;
34946+}
34947+
34948+static struct notifier_block psb_nb = {
34949+ .notifier_call = psb_cpu_callback,
34950+ .priority = 1
34951+};
34952+
34953+/*
34954+ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
34955+ */
34956+
34957+void ttm_pat_init(void)
34958+{
34959+ if (likely(ttm_has_pat))
34960+ return;
34961+
34962+ if (!boot_cpu_has(X86_FEATURE_PAT)) {
34963+ return;
34964+ }
34965+
34966+ ttm_pat_enable();
34967+
34968+ if (num_present_cpus() > 1)
34969+ register_cpu_notifier(&psb_nb);
34970+
34971+ ttm_has_pat = 1;
34972+}
34973+
34974+void ttm_pat_takedown(void)
34975+{
34976+ if (unlikely(!ttm_has_pat))
34977+ return;
34978+
34979+ if (num_present_cpus() > 1)
34980+ unregister_cpu_notifier(&psb_nb);
34981+
34982+ ttm_has_pat = 0;
34983+}
34984+
34985+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
34986+{
34987+ if (likely(ttm_has_pat)) {
34988+ pgprot_val(prot) |= _PAGE_PAT;
34989+ return prot;
34990+ } else {
34991+ return pgprot_noncached(prot);
34992+ }
34993+}
34994+
34995+#else
34996+
34997+void ttm_pat_init(void)
34998+{
34999+}
35000+
35001+void ttm_pat_takedown(void)
35002+{
35003+}
35004+
35005+void ttm_pat_resume(void)
35006+{
35007+}
35008+
35009+#ifdef CONFIG_X86
35010+#include <asm/pat.h>
35011+
35012+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
35013+{
35014+ uint32_t cache_bits = _PAGE_CACHE_WC;
35015+
35016+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits);
35017+}
35018+#else
35019+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
35020+{
35021+ BUG();
35021+ return prot; /* unreachable; avoids a missing-return warning */
35022+}
35023+#endif
35024+#endif
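As a sketch of how a driver might consume this compat layer when exposing device memory write-combined to user-space (my_mmap and my_pfn are illustrative; only the four functions declared in ttm_pat_compat.h plus the standard remap_pfn_range() helper are assumed):

/* ttm_pat_init() runs once at driver load; ttm_pat_takedown() at unload. */
static unsigned long my_pfn;	/* hypothetical page frame to map */

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_ttm_x86_wc(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, my_pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}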
35025diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
35026--- a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h 1969-12-31 16:00:00.000000000 -0800
35027+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h 2009-04-07 13:28:38.000000000 -0700
35028@@ -0,0 +1,41 @@
35029+/**************************************************************************
35030+ *
35031+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35032+ * All Rights Reserved.
35033+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35034+ * All Rights Reserved.
35035+ *
35036+ * Permission is hereby granted, free of charge, to any person obtaining a
35037+ * copy of this software and associated documentation files (the
35038+ * "Software"), to deal in the Software without restriction, including
35039+ * without limitation the rights to use, copy, modify, merge, publish,
35040+ * distribute, sub license, and/or sell copies of the Software, and to
35041+ * permit persons to whom the Software is furnished to do so, subject to
35042+ * the following conditions:
35043+ *
35044+ * The above copyright notice and this permission notice (including the
35045+ * next paragraph) shall be included in all copies or substantial portions
35046+ * of the Software.
35047+ *
35048+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35049+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35050+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35051+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35052+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35053+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35054+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35055+ *
35056+ **************************************************************************/
35057+/*
35058+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
35059+ */
35060+
35061+#ifndef _TTM_PAT_COMPAT_
35062+#define _TTM_PAT_COMPAT_
35063+#include <asm/page.h>
35064+
35065+extern void ttm_pat_init(void);
35066+extern void ttm_pat_takedown(void);
35067+extern void ttm_pat_resume(void);
35068+extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
35069+#endif
35070diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
35071--- a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h 1969-12-31 16:00:00.000000000 -0800
35072+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h 2009-04-07 13:28:38.000000000 -0700
35073@@ -0,0 +1,96 @@
35074+/**************************************************************************
35075+ *
35076+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35077+ * All Rights Reserved.
35078+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35079+ * All Rights Reserved.
35080+ *
35081+ * Permission is hereby granted, free of charge, to any person obtaining a
35082+ * copy of this software and associated documentation files (the
35083+ * "Software"), to deal in the Software without restriction, including
35084+ * without limitation the rights to use, copy, modify, merge, publish,
35085+ * distribute, sub license, and/or sell copies of the Software, and to
35086+ * permit persons to whom the Software is furnished to do so, subject to
35087+ * the following conditions:
35088+ *
35089+ * The above copyright notice and this permission notice (including the
35090+ * next paragraph) shall be included in all copies or substantial portions
35091+ * of the Software.
35092+ *
35093+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35094+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35095+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35096+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35097+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35098+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35099+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35100+ *
35101+ **************************************************************************/
35102+/*
35103+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
35104+ */
35105+
35106+#ifndef _TTM_PL_COMMON_H_
35107+#define _TTM_PL_COMMON_H_
35108+/*
35109+ * Memory regions for data placement.
35110+ */
35111+
35112+#define TTM_PL_SYSTEM 0
35113+#define TTM_PL_TT 1
35114+#define TTM_PL_VRAM 2
35115+#define TTM_PL_PRIV0 3
35116+#define TTM_PL_PRIV1 4
35117+#define TTM_PL_PRIV2 5
35118+#define TTM_PL_PRIV3 6
35119+#define TTM_PL_PRIV4 7
35120+#define TTM_PL_PRIV5 8
35121+#define TTM_PL_CI 9
35122+#define TTM_PL_SWAPPED 15
35123+
35124+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
35125+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
35126+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
35127+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
35128+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
35129+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
35130+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
35131+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
35132+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
35133+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
35134+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
35135+#define TTM_PL_MASK_MEM 0x0000FFFF
35136+
35137+/*
35138+ * Other flags that affect data placement.
35139+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
35140+ * if available.
35141+ * TTM_PL_FLAG_SHARED means that another application may
35142+ * reference the buffer.
35143+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
35144+ * be evicted to make room for other buffers.
35145+ */
35146+
35147+#define TTM_PL_FLAG_CACHED (1 << 16)
35148+#define TTM_PL_FLAG_UNCACHED (1 << 17)
35149+#define TTM_PL_FLAG_WC (1 << 18)
35150+#define TTM_PL_FLAG_SHARED (1 << 20)
35151+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
35152+
35153+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
35154+ TTM_PL_FLAG_UNCACHED | \
35155+ TTM_PL_FLAG_WC)
35156+
35157+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
35158+
35159+/*
35160+ * Access flags to be used for CPU and GPU mappings.
35161+ * The idea is that the TTM synchronization mechanism will
35162+ * allow concurrent READ access and exclusive write access.
35163+ * Currently, GPU and CPU accesses are mutually exclusive.
35164+ */
35165+
35166+#define TTM_ACCESS_READ (1 << 0)
35167+#define TTM_ACCESS_WRITE (1 << 1)
35168+
35169+#endif
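A placement request is the bitwise OR of one or more region flags with caching flags, and the masks above split it back apart; a small illustrative fragment:

/* VRAM preferred, TT acceptable, write-combined, never evicted. */
uint32_t proposed = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT |
		    TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT;

uint32_t regions = proposed & TTM_PL_MASK_MEM;		/* the VRAM and TT bits */
uint32_t caching = proposed & TTM_PL_MASK_CACHING;	/* the WC bit */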
35170diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c
35171--- a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c 1969-12-31 16:00:00.000000000 -0800
35172+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c 2009-04-07 13:28:38.000000000 -0700
35173@@ -0,0 +1,468 @@
35174+/**************************************************************************
35175+ *
35176+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35177+ * All Rights Reserved.
35178+ *
35179+ * Permission is hereby granted, free of charge, to any person obtaining a
35180+ * copy of this software and associated documentation files (the
35181+ * "Software"), to deal in the Software without restriction, including
35182+ * without limitation the rights to use, copy, modify, merge, publish,
35183+ * distribute, sub license, and/or sell copies of the Software, and to
35184+ * permit persons to whom the Software is furnished to do so, subject to
35185+ * the following conditions:
35186+ *
35187+ * The above copyright notice and this permission notice (including the
35188+ * next paragraph) shall be included in all copies or substantial portions
35189+ * of the Software.
35190+ *
35191+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35192+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35193+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35194+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35195+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35196+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35197+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35198+ *
35199+ **************************************************************************/
35200+/*
35201+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
35202+ */
35203+
35204+#include "ttm/ttm_placement_user.h"
35205+#include "ttm/ttm_bo_driver.h"
35206+#include "ttm/ttm_object.h"
35207+#include "ttm/ttm_userobj_api.h"
35208+#include "ttm/ttm_lock.h"
35209+
35210+struct ttm_bo_user_object {
35211+ struct ttm_base_object base;
35212+ struct ttm_buffer_object bo;
35213+};
35214+
35215+static size_t pl_bo_size = 0;
35216+
35217+static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
35218+{
35219+ size_t page_array_size =
35220+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
35221+
35222+ if (unlikely(pl_bo_size == 0)) {
35223+ pl_bo_size = bdev->ttm_bo_extra_size +
35224+ ttm_round_pot(sizeof(struct ttm_bo_user_object));
35225+ }
35226+
35227+ return pl_bo_size + 2 * page_array_size;
35228+}
35229+
35230+static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
35231+ *tfile, uint32_t handle)
35232+{
35233+ struct ttm_base_object *base;
35234+
35235+ base = ttm_base_object_lookup(tfile, handle);
35236+ if (unlikely(base == NULL)) {
35237+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
35238+ (unsigned long)handle);
35239+ return NULL;
35240+ }
35241+
35242+ if (unlikely(base->object_type != ttm_buffer_type)) {
35243+ ttm_base_object_unref(&base);
35244+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
35245+ (unsigned long)handle);
35246+ return NULL;
35247+ }
35248+
35249+ return container_of(base, struct ttm_bo_user_object, base);
35250+}
35251+
35252+struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
35253+ *tfile, uint32_t handle)
35254+{
35255+ struct ttm_bo_user_object *user_bo;
35256+ struct ttm_base_object *base;
35257+
35258+ user_bo = ttm_bo_user_lookup(tfile, handle);
35259+ if (unlikely(user_bo == NULL))
35260+ return NULL;
35261+
35262+ (void)ttm_bo_reference(&user_bo->bo);
35263+ base = &user_bo->base;
35264+ ttm_base_object_unref(&base);
35265+ return &user_bo->bo;
35266+}
35267+
35268+static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
35269+{
35270+ struct ttm_bo_user_object *user_bo =
35271+ container_of(bo, struct ttm_bo_user_object, bo);
35272+
35273+ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
35274+ kfree(user_bo);
35275+}
35276+
35277+static void ttm_bo_user_release(struct ttm_base_object **p_base)
35278+{
35279+ struct ttm_bo_user_object *user_bo;
35280+ struct ttm_base_object *base = *p_base;
35281+ struct ttm_buffer_object *bo;
35282+
35283+ *p_base = NULL;
35284+
35285+ if (unlikely(base == NULL))
35286+ return;
35287+
35288+ user_bo = container_of(base, struct ttm_bo_user_object, base);
35289+ bo = &user_bo->bo;
35290+ ttm_bo_unref(&bo);
35291+}
35292+
35293+static void ttm_bo_user_ref_release(struct ttm_base_object *base,
35294+ enum ttm_ref_type ref_type)
35295+{
35296+ struct ttm_bo_user_object *user_bo =
35297+ container_of(base, struct ttm_bo_user_object, base);
35298+ struct ttm_buffer_object *bo = &user_bo->bo;
35299+
35300+ switch (ref_type) {
35301+ case TTM_REF_SYNCCPU_WRITE:
35302+ ttm_bo_synccpu_write_release(bo);
35303+ break;
35304+ default:
35305+ BUG();
35306+ }
35307+}
35308+
35309+static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
35310+ struct ttm_pl_rep *rep)
35311+{
35312+ struct ttm_bo_user_object *user_bo =
35313+ container_of(bo, struct ttm_bo_user_object, bo);
35314+
35315+ rep->gpu_offset = bo->offset;
35316+ rep->bo_size = bo->num_pages << PAGE_SHIFT;
35317+ rep->map_handle = bo->addr_space_offset;
35318+ rep->placement = bo->mem.flags;
35319+ rep->handle = user_bo->base.hash.key;
35320+ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
35321+}
35322+
35323+int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
35324+ struct ttm_bo_device *bdev,
35325+ struct ttm_lock *lock, void *data)
35326+{
35327+ union ttm_pl_create_arg *arg = data;
35328+ struct ttm_pl_create_req *req = &arg->req;
35329+ struct ttm_pl_rep *rep = &arg->rep;
35330+ struct ttm_buffer_object *bo;
35331+ struct ttm_buffer_object *tmp;
35332+ struct ttm_bo_user_object *user_bo;
35333+ uint32_t flags;
35334+ int ret = 0;
35335+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
35336+ size_t acc_size =
35337+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
35338+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
35339+ if (unlikely(ret != 0))
35340+ return ret;
35341+
35342+ flags = req->placement;
35343+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
35344+ if (unlikely(user_bo == NULL)) {
35345+ ttm_mem_global_free(mem_glob, acc_size, false);
35346+ return -ENOMEM;
35347+ }
35348+
35349+ bo = &user_bo->bo;
35350+ ret = ttm_read_lock(lock, true);
35351+ if (unlikely(ret != 0)) {
35352+ ttm_mem_global_free(mem_glob, acc_size, false);
35353+ kfree(user_bo);
35354+ return ret;
35355+ }
35356+
35357+ ret = ttm_buffer_object_init(bdev, bo, req->size,
35358+ ttm_bo_type_device, flags,
35359+ req->page_alignment, 0, true,
35360+ NULL, acc_size, &ttm_bo_user_destroy);
35361+ ttm_read_unlock(lock);
35362+
35363+ /*
35364+ * Note that the ttm_buffer_object_init function
35365+ * would've called the destroy function on failure!!
35366+ */
35367+
35368+ if (unlikely(ret != 0))
35369+ goto out;
35370+
35371+ tmp = ttm_bo_reference(bo);
35372+ ret = ttm_base_object_init(tfile, &user_bo->base,
35373+ flags & TTM_PL_FLAG_SHARED,
35374+ ttm_buffer_type,
35375+ &ttm_bo_user_release,
35376+ &ttm_bo_user_ref_release);
35377+ if (unlikely(ret != 0))
35378+ goto out_err;
35379+
35380+ mutex_lock(&bo->mutex);
35381+ ttm_pl_fill_rep(bo, rep);
35382+ mutex_unlock(&bo->mutex);
35383+ ttm_bo_unref(&bo);
35384+ out:
35385+ return ret;
35386+ out_err:
35387+ ttm_bo_unref(&tmp);
35388+ ttm_bo_unref(&bo);
35389+ return ret;
35390+}
35391+
35392+int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
35393+ struct ttm_bo_device *bdev,
35394+ struct ttm_lock *lock, void *data)
35395+{
35396+ union ttm_pl_create_ub_arg *arg = data;
35397+ struct ttm_pl_create_ub_req *req = &arg->req;
35398+ struct ttm_pl_rep *rep = &arg->rep;
35399+ struct ttm_buffer_object *bo;
35400+ struct ttm_buffer_object *tmp;
35401+ struct ttm_bo_user_object *user_bo;
35402+ uint32_t flags;
35403+ int ret = 0;
35404+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
35405+ size_t acc_size =
35406+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
35407+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
35408+ if (unlikely(ret != 0))
35409+ return ret;
35410+
35411+ flags = req->placement;
35412+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
35413+ if (unlikely(user_bo == NULL)) {
35414+ ttm_mem_global_free(mem_glob, acc_size, false);
35415+ return -ENOMEM;
35416+ }
35417+ ret = ttm_read_lock(lock, true);
35418+ if (unlikely(ret != 0)) {
35419+ ttm_mem_global_free(mem_glob, acc_size, false);
35420+ kfree(user_bo);
35421+ return ret;
35422+ }
35423+ bo = &user_bo->bo;
35424+ ret = ttm_buffer_object_init(bdev, bo, req->size,
35425+ ttm_bo_type_user, flags,
35426+ req->page_alignment, req->user_address,
35427+ true, NULL, acc_size, &ttm_bo_user_destroy);
35428+
35429+ /*
35430+ * Note that the ttm_buffer_object_init function
35431+ * would've called the destroy function on failure!!
35432+ */
35433+ ttm_read_unlock(lock);
35434+ if (unlikely(ret != 0))
35435+ goto out;
35436+
35437+ tmp = ttm_bo_reference(bo);
35438+ ret = ttm_base_object_init(tfile, &user_bo->base,
35439+ flags & TTM_PL_FLAG_SHARED,
35440+ ttm_buffer_type,
35441+ &ttm_bo_user_release,
35442+ &ttm_bo_user_ref_release);
35443+ if (unlikely(ret != 0))
35444+ goto out_err;
35445+
35446+ mutex_lock(&bo->mutex);
35447+ ttm_pl_fill_rep(bo, rep);
35448+ mutex_unlock(&bo->mutex);
35449+ ttm_bo_unref(&bo);
35450+ out:
35451+ return ret;
35452+ out_err:
35453+ ttm_bo_unref(&tmp);
35454+ ttm_bo_unref(&bo);
35455+ return ret;
35456+}
35457+
35458+int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
35459+{
35460+ union ttm_pl_reference_arg *arg = data;
35461+ struct ttm_pl_rep *rep = &arg->rep;
35462+ struct ttm_bo_user_object *user_bo;
35463+ struct ttm_buffer_object *bo;
35464+ struct ttm_base_object *base;
35465+ int ret;
35466+
35467+ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
35468+ if (unlikely(user_bo == NULL)) {
35469+ printk(KERN_ERR "Could not reference buffer object.\n");
35470+ return -EINVAL;
35471+ }
35472+
35473+ bo = &user_bo->bo;
35474+ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
35475+ if (unlikely(ret != 0)) {
35476+ printk(KERN_ERR
35477+ "Could not add a reference to buffer object.\n");
35478+ goto out;
35479+ }
35480+
35481+ mutex_lock(&bo->mutex);
35482+ ttm_pl_fill_rep(bo, rep);
35483+ mutex_unlock(&bo->mutex);
35484+
35485+ out:
35486+ base = &user_bo->base;
35487+ ttm_base_object_unref(&base);
35488+ return ret;
35489+}
35490+
35491+int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
35492+{
35493+ struct ttm_pl_reference_req *arg = data;
35494+
35495+ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
35496+}
35497+
35498+int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
35499+{
35500+ struct ttm_pl_synccpu_arg *arg = data;
35501+ struct ttm_bo_user_object *user_bo;
35502+ struct ttm_buffer_object *bo;
35503+ struct ttm_base_object *base;
35504+ bool existed;
35505+ int ret;
35506+
35507+ switch (arg->op) {
35508+ case TTM_PL_SYNCCPU_OP_GRAB:
35509+ user_bo = ttm_bo_user_lookup(tfile, arg->handle);
35510+ if (unlikely(user_bo == NULL)) {
35511+ printk(KERN_ERR
35512+ "Could not find buffer object for synccpu.\n");
35513+ return -EINVAL;
35514+ }
35515+ bo = &user_bo->bo;
35516+ base = &user_bo->base;
35517+ ret = ttm_bo_synccpu_write_grab(bo,
35518+ arg->access_mode &
35519+ TTM_PL_SYNCCPU_MODE_NO_BLOCK);
35520+ if (unlikely(ret != 0)) {
35521+ ttm_base_object_unref(&base);
35522+ goto out;
35523+ }
35524+ ret = ttm_ref_object_add(tfile, &user_bo->base,
35525+ TTM_REF_SYNCCPU_WRITE, &existed);
35526+ if (existed || ret != 0)
35527+ ttm_bo_synccpu_write_release(bo);
35528+ ttm_base_object_unref(&base);
35529+ break;
35530+ case TTM_PL_SYNCCPU_OP_RELEASE:
35531+ ret = ttm_ref_object_base_unref(tfile, arg->handle,
35532+ TTM_REF_SYNCCPU_WRITE);
35533+ break;
35534+ default:
35535+ ret = -EINVAL;
35536+ break;
35537+ }
35538+ out:
35539+ return ret;
35540+}
35541+
35542+int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
35543+ struct ttm_lock *lock, void *data)
35544+{
35545+ union ttm_pl_setstatus_arg *arg = data;
35546+ struct ttm_pl_setstatus_req *req = &arg->req;
35547+ struct ttm_pl_rep *rep = &arg->rep;
35548+ struct ttm_buffer_object *bo;
35549+ struct ttm_bo_device *bdev;
35550+ int ret;
35551+
35552+ bo = ttm_buffer_object_lookup(tfile, req->handle);
35553+ if (unlikely(bo == NULL)) {
35554+ printk(KERN_ERR
35555+ "Could not find buffer object for setstatus.\n");
35556+ return -EINVAL;
35557+ }
35558+
35559+ bdev = bo->bdev;
35560+
35561+ ret = ttm_read_lock(lock, true);
35562+ if (unlikely(ret != 0))
35563+ goto out_err0;
35564+
35565+ ret = ttm_bo_reserve(bo, true, false, false, 0);
35566+ if (unlikely(ret != 0))
35567+ goto out_err1;
35568+
35569+ ret = ttm_bo_wait_cpu(bo, false);
35570+ if (unlikely(ret != 0)) {
35570+ ttm_bo_unreserve(bo); /* out_err2 would unlock bo->mutex, not yet held */
35571+ goto out_err1;
35571+ }
35572+
35573+ mutex_lock(&bo->mutex);
35574+ ret = ttm_bo_check_placement(bo, req->set_placement,
35575+ req->clr_placement);
35576+ if (unlikely(ret != 0))
35577+ goto out_err2;
35578+
35579+ bo->proposed_flags = (bo->proposed_flags | req->set_placement)
35580+ & ~req->clr_placement;
35581+ ret = ttm_buffer_object_validate(bo, true, false);
35582+ if (unlikely(ret != 0))
35583+ goto out_err2;
35584+
35585+ ttm_pl_fill_rep(bo, rep);
35586+ out_err2:
35587+ mutex_unlock(&bo->mutex);
35588+ ttm_bo_unreserve(bo);
35589+ out_err1:
35590+ ttm_read_unlock(lock);
35591+ out_err0:
35592+ ttm_bo_unref(&bo);
35593+ return ret;
35594+}
35595+
35596+int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
35597+{
35598+ struct ttm_pl_waitidle_arg *arg = data;
35599+ struct ttm_buffer_object *bo;
35600+ int ret;
35601+
35602+ bo = ttm_buffer_object_lookup(tfile, arg->handle);
35603+ if (unlikely(bo == NULL)) {
35604+ printk(KERN_ERR "Could not find buffer object for waitidle.\n");
35605+ return -EINVAL;
35606+ }
35607+
35608+ ret =
35609+ ttm_bo_block_reservation(bo, true,
35610+ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
35611+ if (unlikely(ret != 0))
35612+ goto out;
35613+ mutex_lock(&bo->mutex);
35614+ ret = ttm_bo_wait(bo,
35615+ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
35616+ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
35617+ mutex_unlock(&bo->mutex);
35618+ ttm_bo_unblock_reservation(bo);
35619+ out:
35620+ ttm_bo_unref(&bo);
35621+ return ret;
35622+}
35623+
35624+int ttm_pl_verify_access(struct ttm_buffer_object *bo,
35625+ struct ttm_object_file *tfile)
35626+{
35627+ struct ttm_bo_user_object *ubo;
35628+
35629+ /*
35630+ * Check bo subclass.
35631+ */
35632+
35633+ if (unlikely(bo->destroy != &ttm_bo_user_destroy))
35634+ return -EPERM;
35635+
35636+ ubo = container_of(bo, struct ttm_bo_user_object, bo);
35637+ if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
35638+ return 0;
35639+
35640+ return -EPERM;
35641+}
35642diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
35643--- a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h 1969-12-31 16:00:00.000000000 -0800
35644+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h 2009-04-07 13:28:38.000000000 -0700
35645@@ -0,0 +1,259 @@
35646+/**************************************************************************
35647+ *
35648+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35649+ * All Rights Reserved.
35650+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35651+ * All Rights Reserved.
35652+ *
35653+ * Permission is hereby granted, free of charge, to any person obtaining a
35654+ * copy of this software and associated documentation files (the
35655+ * "Software"), to deal in the Software without restriction, including
35656+ * without limitation the rights to use, copy, modify, merge, publish,
35657+ * distribute, sub license, and/or sell copies of the Software, and to
35658+ * permit persons to whom the Software is furnished to do so, subject to
35659+ * the following conditions:
35660+ *
35661+ * The above copyright notice and this permission notice (including the
35662+ * next paragraph) shall be included in all copies or substantial portions
35663+ * of the Software.
35664+ *
35665+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35666+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35667+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35668+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35669+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35670+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35671+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35672+ *
35673+ **************************************************************************/
35674+/*
35675+ * Authors
35676+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
35677+ */
35678+
35679+#ifndef _TTM_PLACEMENT_USER_H_
35680+#define _TTM_PLACEMENT_USER_H_
35681+
35682+#if !defined(__KERNEL__) && !defined(_KERNEL)
35683+#include <stdint.h>
35684+#else
35685+#include <linux/kernel.h>
35686+#endif
35687+
35688+#include "ttm/ttm_placement_common.h"
35689+
35690+#define TTM_PLACEMENT_MAJOR 0
35691+#define TTM_PLACEMENT_MINOR 1
35692+#define TTM_PLACEMENT_PL 0
35693+#define TTM_PLACEMENT_DATE "080819"
35694+
35695+/**
35696+ * struct ttm_pl_create_req
35697+ *
35698+ * @size: The buffer object size.
35699+ * @placement: Flags that indicate initial acceptable
35700+ * placement.
35701+ * @page_alignment: Required alignment in pages.
35702+ *
35703+ * Input to the TTM_PL_CREATE ioctl.
35704+ */
35705+
35706+struct ttm_pl_create_req {
35707+ uint64_t size;
35708+ uint32_t placement;
35709+ uint32_t page_alignment;
35710+};
35711+
35712+/**
35713+ * struct ttm_pl_create_ub_req
35714+ *
35715+ * @size: The buffer object size.
35716+ * @user_address: User-space address of the memory area that
35717+ * should be used to back the buffer object cast to 64-bit.
35718+ * @placement: Flags that indicate initial acceptable
35719+ * placement.
35720+ * @page_alignment: Required alignment in pages.
35721+ *
35722+ * Input to the TTM_PL_CREATE_UB ioctl.
35723+ */
35724+
35725+struct ttm_pl_create_ub_req {
35726+ uint64_t size;
35727+ uint64_t user_address;
35728+ uint32_t placement;
35729+ uint32_t page_alignment;
35730+};
35731+
35732+/**
35733+ * struct ttm_pl_rep
35734+ *
35735+ * @gpu_offset: The current offset into the memory region used.
35736+ * This can be used directly by the GPU if there are no
35737+ * additional GPU mapping procedures used by the driver.
35738+ *
35739+ * @bo_size: Actual buffer object size.
35740+ *
35741+ * @map_handle: Offset into the device address space.
35742+ * Used for map, seek, read, write. This will never change
35743+ * during the lifetime of an object.
35744+ *
35745+ * @placement: Flag indicating the placement status of
35746+ * the buffer object using the TTM_PL flags above.
35747+ *
35748+ * @sync_object_arg: Used for user-space synchronization and
35749+ * depends on the synchronization model used. If fences are
35750+ * used, this is the buffer_object::fence_type_mask
35751+ *
35752+ * Output from the TTM_PL_CREATE, TTM_PL_REFERENCE and
35753+ * TTM_PL_SETSTATUS ioctls.
35754+ */
35755+
35756+struct ttm_pl_rep {
35757+ uint64_t gpu_offset;
35758+ uint64_t bo_size;
35759+ uint64_t map_handle;
35760+ uint32_t placement;
35761+ uint32_t handle;
35762+ uint32_t sync_object_arg;
35763+ uint32_t pad64;
35764+};
35765+
35766+/**
35767+ * struct ttm_pl_setstatus_req
35768+ *
35769+ * @set_placement: Placement flags to set.
35770+ *
35771+ * @clr_placement: Placement flags to clear.
35772+ *
35773+ * @handle: The object handle
35774+ *
35775+ * Input to the TTM_PL_SETSTATUS ioctl.
35776+ */
35777+
35778+struct ttm_pl_setstatus_req {
35779+ uint32_t set_placement;
35780+ uint32_t clr_placement;
35781+ uint32_t handle;
35782+ uint32_t pad64;
35783+};
35784+
35785+/**
35786+ * struct ttm_pl_reference_req
35787+ *
35788+ * @handle: The object to put a reference on.
35789+ *
35790+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREF ioctls.
35791+ */
35792+
35793+struct ttm_pl_reference_req {
35794+ uint32_t handle;
35795+ uint32_t pad64;
35796+};
35797+
35798+/*
35799+ * ACCESS mode flags for SYNCCPU.
35800+ *
35801+ * TTM_PL_SYNCCPU_MODE_READ will guarantee that the GPU is not
35802+ * writing to the buffer.
35803+ *
35804+ * TTM_PL_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
35805+ * accessing the buffer.
35806+ *
35807+ * TTM_PL_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
35808+ * for GPU accesses to finish but returns -EBUSY instead.
35809+ *
35810+ * TTM_PL_SYNCCPU_MODE_TRYCACHED tries to place the buffer in
35811+ * cacheable memory while it is synchronized for CPU access.
35812+ */
35813+
35814+#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
35815+#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
35816+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
35817+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
35818+
35819+/**
35820+ * struct ttm_pl_synccpu_arg
35821+ *
35822+ * @handle: The object to synchronize.
35823+ *
35824+ * @access_mode: access mode indicated by the
35825+ * TTM_PL_SYNCCPU_MODE flags.
35826+ *
35827+ * @op: indicates whether to grab or release the
35828+ * buffer for cpu usage.
35829+ *
35830+ * Input to the TTM_PL_SYNCCPU ioctl.
35831+ */
35832+
35833+struct ttm_pl_synccpu_arg {
35834+ uint32_t handle;
35835+ uint32_t access_mode;
35836+ enum {
35837+ TTM_PL_SYNCCPU_OP_GRAB,
35838+ TTM_PL_SYNCCPU_OP_RELEASE
35839+ } op;
35840+ uint32_t pad64;
35841+};
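From user-space, the grab/release protocol would look roughly like this; the actual ioctl plumbing is driver-specific and therefore only sketched in comments:

struct ttm_pl_synccpu_arg arg = {
	.handle = handle,	/* an existing buffer object handle */
	.access_mode = TTM_PL_SYNCCPU_MODE_WRITE,
	.op = TTM_PL_SYNCCPU_OP_GRAB,
};
/* ... issue the TTM_PL_SYNCCPU ioctl here ... */

/* ... CPU writes to the mapped buffer are now safe from GPU races ... */

arg.op = TTM_PL_SYNCCPU_OP_RELEASE;
/* ... issue the TTM_PL_SYNCCPU ioctl again to drop the grab. */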
35842+
35843+/*
35844+ * Waiting mode flags for the TTM_PL_WAITIDLE ioctl.
35845+ *
35846+ * TTM_PL_WAITIDLE_MODE_LAZY: Allow for sleeps during the polling
35847+ * wait.
35848+ *
35849+ * TTM_PL_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for the GPU,
35850+ * but return -EBUSY if the buffer is busy.
35851+ */
35852+
35853+#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
35854+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
35855+
35856+/**
35857+ * struct ttm_waitidle_arg
35858+ *
35859+ * @handle: The object to synchronize.
35860+ *
35861+ * @mode: wait mode indicated by the
35862+ * TTM_PL_WAITIDLE_MODE flags.
35863+ *
35864+ * Argument to the TTM_PL_WAITIDLE ioctl.
35865+ */
35866+
35867+struct ttm_pl_waitidle_arg {
35868+ uint32_t handle;
35869+ uint32_t mode;
35870+};
35871+
35872+union ttm_pl_create_arg {
35873+ struct ttm_pl_create_req req;
35874+ struct ttm_pl_rep rep;
35875+};
35876+
35877+union ttm_pl_reference_arg {
35878+ struct ttm_pl_reference_req req;
35879+ struct ttm_pl_rep rep;
35880+};
35881+
35882+union ttm_pl_setstatus_arg {
35883+ struct ttm_pl_setstatus_req req;
35884+ struct ttm_pl_rep rep;
35885+};
35886+
35887+union ttm_pl_create_ub_arg {
35888+ struct ttm_pl_create_ub_req req;
35889+ struct ttm_pl_rep rep;
35890+};
35891+
35892+/*
35893+ * Ioctl offsets.
35894+ */
35895+
35896+#define TTM_PL_CREATE 0x00
35897+#define TTM_PL_REFERENCE 0x01
35898+#define TTM_PL_UNREF 0x02
35899+#define TTM_PL_SYNCCPU 0x03
35900+#define TTM_PL_WAITIDLE 0x04
35901+#define TTM_PL_SETSTATUS 0x05
35902+#define TTM_PL_CREATE_UB 0x06
35903+
35904+#endif
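For reference, creating a buffer from user-space with these structures might look as follows; MY_TTM_PL_OFFSET is hypothetical (each driver maps the TTM_PL_* offsets into its own DRM command range) and drmCommandWriteRead() comes from libdrm:

#include <string.h>
#include <xf86drm.h>
#include "ttm/ttm_placement_user.h"

#define MY_TTM_PL_OFFSET 0x40	/* hypothetical driver command base */

int my_bo_create(int fd, uint64_t size, uint32_t *handle)
{
	union ttm_pl_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.size = size;
	arg.req.placement = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED;
	arg.req.page_alignment = 0;

	ret = drmCommandWriteRead(fd, MY_TTM_PL_OFFSET + TTM_PL_CREATE,
				  &arg, sizeof(arg));
	if (ret)
		return ret;

	*handle = arg.rep.handle;	/* key for REFERENCE/UNREF/SYNCCPU */
	return 0;
}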
35905diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_regman.h b/drivers/gpu/drm/psb/ttm/ttm_regman.h
35906--- a/drivers/gpu/drm/psb/ttm/ttm_regman.h 1969-12-31 16:00:00.000000000 -0800
35907+++ b/drivers/gpu/drm/psb/ttm/ttm_regman.h 2009-04-07 13:28:38.000000000 -0700
35908@@ -0,0 +1,74 @@
35909+/**************************************************************************
35910+ *
35911+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35912+ * All Rights Reserved.
35913+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35914+ * All Rights Reserved.
35915+ *
35916+ * Permission is hereby granted, free of charge, to any person obtaining a
35917+ * copy of this software and associated documentation files (the
35918+ * "Software"), to deal in the Software without restriction, including
35919+ * without limitation the rights to use, copy, modify, merge, publish,
35920+ * distribute, sub license, and/or sell copies of the Software, and to
35921+ * permit persons to whom the Software is furnished to do so, subject to
35922+ * the following conditions:
35923+ *
35924+ * The above copyright notice and this permission notice (including the
35925+ * next paragraph) shall be included in all copies or substantial portions
35926+ * of the Software.
35927+ *
35928+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35929+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
35930+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35931+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
35932+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
35933+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
35934+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
35935+ *
35936+ **************************************************************************/
35937+/*
35938+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
35939+ */
35940+
35941+#ifndef _TTM_REGMAN_H_
35942+#define _TTM_REGMAN_H_
35943+
35944+#include <linux/list.h>
35945+
35946+struct ttm_fence_object;
35947+
35948+struct ttm_reg {
35949+ struct list_head head;
35950+ struct ttm_fence_object *fence;
35951+ uint32_t fence_type;
35952+ uint32_t new_fence_type;
35953+};
35954+
35955+struct ttm_reg_manager {
35956+ struct list_head free;
35957+ struct list_head lru;
35958+ struct list_head unfenced;
35959+
35960+ int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
35961+ void (*reg_destroy)(struct ttm_reg *reg);
35962+};
35963+
35964+extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
35965+ const void *data,
35966+ uint32_t fence_class,
35967+ uint32_t fence_type,
35968+ int interruptible,
35969+ int no_wait,
35970+ struct ttm_reg **reg);
35971+
35972+extern void ttm_regs_fence(struct ttm_reg_manager *regs,
35973+ struct ttm_fence_object *fence);
35974+
35975+extern void ttm_regs_free(struct ttm_reg_manager *manager);
35976+extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
35977+extern void ttm_regs_init(struct ttm_reg_manager *manager,
35978+ int (*reg_reusable)(const struct ttm_reg *,
35979+ const void *),
35980+ void (*reg_destroy)(struct ttm_reg *));
35981+
35982+#endif
35983diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_tt.c b/drivers/gpu/drm/psb/ttm/ttm_tt.c
35984--- a/drivers/gpu/drm/psb/ttm/ttm_tt.c 1969-12-31 16:00:00.000000000 -0800
35985+++ b/drivers/gpu/drm/psb/ttm/ttm_tt.c 2009-04-07 13:28:38.000000000 -0700
35986@@ -0,0 +1,655 @@
35987+/**************************************************************************
35988+ *
35989+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
35990+ * All Rights Reserved.
35991+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
35992+ * All Rights Reserved.
35993+ *
35994+ * Permission is hereby granted, free of charge, to any person obtaining a
35995+ * copy of this software and associated documentation files (the
35996+ * "Software"), to deal in the Software without restriction, including
35997+ * without limitation the rights to use, copy, modify, merge, publish,
35998+ * distribute, sub license, and/or sell copies of the Software, and to
35999+ * permit persons to whom the Software is furnished to do so, subject to
36000+ * the following conditions:
36001+ *
36002+ * The above copyright notice and this permission notice (including the
36003+ * next paragraph) shall be included in all copies or substantial portions
36004+ * of the Software.
36005+ *
36006+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36007+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36008+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
36009+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36010+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
36011+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
36012+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
36013+ *
36014+ **************************************************************************/
36015+/*
36016+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
36017+ */
36018+
36019+#include <linux/version.h>
36020+#include <linux/vmalloc.h>
36021+#include <linux/sched.h>
36022+#include <linux/highmem.h>
36023+#include <linux/pagemap.h>
36024+#include <linux/file.h>
36025+#include <linux/swap.h>
36026+#include "ttm/ttm_bo_driver.h"
36027+#include "ttm/ttm_placement_common.h"
36028+
36029+static int ttm_tt_swapin(struct ttm_tt *ttm);
36030+
36031+#if defined(CONFIG_X86)
36032+static void ttm_tt_clflush_page(struct page *page)
36033+{
36034+ uint8_t *page_virtual;
36035+ unsigned int i;
36036+
36037+ if (unlikely(page == NULL))
36038+ return;
36039+
36040+ page_virtual = kmap_atomic(page, KM_USER0);
36041+
36042+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
36043+ clflush(page_virtual + i);
36044+
36045+ kunmap_atomic(page_virtual, KM_USER0);
36046+}
36047+
36048+static void ttm_tt_cache_flush_clflush(struct page *pages[],
36049+ unsigned long num_pages)
36050+{
36051+ unsigned long i;
36052+
36053+ mb();
36054+ for (i = 0; i < num_pages; ++i)
36055+ ttm_tt_clflush_page(*pages++);
36056+ mb();
36057+}
36058+#else
36059+static void ttm_tt_ipi_handler(void *null)
36060+{
36061+ ;
36062+}
36063+#endif
36064+
36065+void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
36066+{
36067+
36068+#if defined(CONFIG_X86)
36069+ if (cpu_has_clflush) {
36070+ ttm_tt_cache_flush_clflush(pages, num_pages);
36071+ return;
36072+ }
36073+#else
36074+ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0)
36075+ printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
36076+#endif
36077+}
36078+
36079+/**
36080+ * Allocates storage for pointers to the pages that back the ttm.
36081+ *
36082+ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
36083+ */
36084+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
36085+{
36086+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
36087+ ttm->pages = NULL;
36088+
36089+ if (size <= PAGE_SIZE)
36090+ ttm->pages = kzalloc(size, GFP_KERNEL);
36091+
36092+ if (!ttm->pages) {
36093+ ttm->pages = vmalloc_user(size);
36094+ if (ttm->pages)
36095+ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
36096+ }
36097+}
36098+
36099+static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
36100+{
36101+ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
36102+ vfree(ttm->pages);
36103+ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
36104+ } else {
36105+ kfree(ttm->pages);
36106+ }
36107+ ttm->pages = NULL;
36108+}
36109+
36110+static struct page *ttm_tt_alloc_page(void)
36111+{
36112+ return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
36113+}
36114+
36115+static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
36116+{
36117+ int write;
36118+ int dirty;
36119+ struct page *page;
36120+ int i;
36121+ struct ttm_backend *be = ttm->be;
36122+
36123+ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
36124+ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
36125+ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
36126+
36127+ if (be)
36128+ be->func->clear(be);
36129+
36130+ for (i = 0; i < ttm->num_pages; ++i) {
36131+ page = ttm->pages[i];
36132+ if (page == NULL)
36133+ continue;
36134+
36135+ if (page == ttm->dummy_read_page) {
36136+ BUG_ON(write);
36137+ continue;
36138+ }
36139+
36140+ if (write && dirty && !PageReserved(page))
36141+ set_page_dirty_lock(page);
36142+
36143+ ttm->pages[i] = NULL;
36144+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
36145+ put_page(page);
36146+ }
36147+ ttm->state = tt_unpopulated;
36148+ ttm->first_himem_page = ttm->num_pages;
36149+ ttm->last_lomem_page = -1;
36150+}
36151+
36152+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
36153+{
36154+ struct page *p;
36155+ struct ttm_bo_device *bdev = ttm->bdev;
36156+ struct ttm_mem_global *mem_glob = bdev->mem_glob;
36157+ int ret;
36158+
36159+ while (NULL == (p = ttm->pages[index])) {
36160+ p = ttm_tt_alloc_page();
36161+
36162+ if (!p)
36163+ return NULL;
36164+
36165+ if (PageHighMem(p)) {
36166+ ret =
36167+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, true);
36168+ if (unlikely(ret != 0))
36169+ goto out_err;
36170+ ttm->pages[--ttm->first_himem_page] = p;
36171+ } else {
36172+ ret =
36173+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, false);
36174+ if (unlikely(ret != 0))
36175+ goto out_err;
36176+ ttm->pages[++ttm->last_lomem_page] = p;
36177+ }
36178+ }
36179+ return p;
36180+ out_err:
36181+ put_page(p);
36182+ return NULL;
36183+}
36184+
36185+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
36186+{
36187+ int ret;
36188+
36189+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
36190+ ret = ttm_tt_swapin(ttm);
36191+ if (unlikely(ret != 0))
36192+ return NULL;
36193+ }
36194+ return __ttm_tt_get_page(ttm, index);
36195+}
36196+
36197+int ttm_tt_populate(struct ttm_tt *ttm)
36198+{
36199+ struct page *page;
36200+ unsigned long i;
36201+ struct ttm_backend *be;
36202+ int ret;
36203+
36204+ if (ttm->state != tt_unpopulated)
36205+ return 0;
36206+
36207+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
36208+ ret = ttm_tt_swapin(ttm);
36209+ if (unlikely(ret != 0))
36210+ return ret;
36211+ }
36212+
36213+ be = ttm->be;
36214+
36215+ for (i = 0; i < ttm->num_pages; ++i) {
36216+ page = __ttm_tt_get_page(ttm, i);
36217+ if (!page)
36218+ return -ENOMEM;
36219+ }
36220+
36221+ be->func->populate(be, ttm->num_pages, ttm->pages,
36222+ ttm->dummy_read_page);
36223+ ttm->state = tt_unbound;
36224+ return 0;
36225+}
36226+
36227+#ifdef CONFIG_X86
36228+static inline int ttm_tt_set_page_caching(struct page *p,
36229+ enum ttm_caching_state c_state)
36230+{
36231+ if (PageHighMem(p))
36232+ return 0;
36233+
36234+ switch (c_state) {
36235+ case tt_cached:
36236+ return set_pages_wb(p, 1);
36237+ case tt_wc:
36238+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
36239+ return set_memory_wc((unsigned long) page_address(p), 1);
36240+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) */
36241+ default:
36242+ return set_pages_uc(p, 1);
36243+ }
36244+}
36245+#else /* CONFIG_X86 */
36246+static inline int ttm_tt_set_page_caching(struct page *p,
36247+ enum ttm_caching_state c_state)
36248+{
36249+ return 0;
36250+}
36251+#endif /* CONFIG_X86 */
36252+
36253+/*
36254+ * Change caching policy for the linear kernel map
36255+ * for a range of pages in a ttm.
36256+ */
36257+
36258+static int ttm_tt_set_caching(struct ttm_tt *ttm,
36259+ enum ttm_caching_state c_state)
36260+{
36261+ int i, j;
36262+ struct page *cur_page;
36263+ int ret;
36264+
36265+ if (ttm->caching_state == c_state)
36266+ return 0;
36267+
36268+ if (c_state != tt_cached) {
36269+ ret = ttm_tt_populate(ttm);
36270+ if (unlikely(ret != 0))
36271+ return ret;
36272+ }
36273+
36274+ if (ttm->caching_state == tt_cached)
36275+ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
36276+
36277+ for (i = 0; i < ttm->num_pages; ++i) {
36278+ cur_page = ttm->pages[i];
36279+ if (likely(cur_page != NULL)) {
36280+ ret = ttm_tt_set_page_caching(cur_page, c_state);
36281+ if (unlikely(ret != 0))
36282+ goto out_err;
36283+ }
36284+ }
36285+
36286+ ttm->caching_state = c_state;
36287+
36288+ return 0;
36289+
36290+ out_err:
36291+ for (j = 0; j < i; ++j) {
36292+ cur_page = ttm->pages[j];
36293+ if (likely(cur_page != NULL)) {
36294+ (void)ttm_tt_set_page_caching(cur_page,
36295+ ttm->caching_state);
36296+ }
36297+ }
36298+
36299+ return ret;
36300+}
36301+
36302+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
36303+{
36304+ enum ttm_caching_state state;
36305+
36306+ if (placement & TTM_PL_FLAG_WC)
36307+ state = tt_wc;
36308+ else if (placement & TTM_PL_FLAG_UNCACHED)
36309+ state = tt_uncached;
36310+ else
36311+ state = tt_cached;
36312+
36313+ return ttm_tt_set_caching(ttm, state);
36314+}
36315+
36316+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
36317+{
36318+ int i;
36319+ struct page *cur_page;
36320+ struct ttm_backend *be = ttm->be;
36321+
36322+ if (be)
36323+ be->func->clear(be);
36324+ (void)ttm_tt_set_caching(ttm, tt_cached);
36325+ for (i = 0; i < ttm->num_pages; ++i) {
36326+ cur_page = ttm->pages[i];
36327+ ttm->pages[i] = NULL;
36328+ if (cur_page) {
36329+ if (page_count(cur_page) != 1)
36330+ printk(KERN_ERR
36331+ "Erroneous page count. Leaking pages.\n");
36332+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
36333+ PageHighMem(cur_page));
36334+ __free_page(cur_page);
36335+ }
36336+ }
36337+ ttm->state = tt_unpopulated;
36338+ ttm->first_himem_page = ttm->num_pages;
36339+ ttm->last_lomem_page = -1;
36340+}
36341+
36342+void ttm_tt_destroy(struct ttm_tt *ttm)
36343+{
36344+ struct ttm_backend *be;
36345+
36346+ if (unlikely(ttm == NULL))
36347+ return;
36348+
36349+ be = ttm->be;
36350+ if (likely(be != NULL)) {
36351+ be->func->destroy(be);
36352+ ttm->be = NULL;
36353+ }
36354+
36355+ if (likely(ttm->pages != NULL)) {
36356+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
36357+ ttm_tt_free_user_pages(ttm);
36358+ else
36359+ ttm_tt_free_alloced_pages(ttm);
36360+
36361+ ttm_tt_free_page_directory(ttm);
36362+ }
36363+
36364+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
36365+ ttm->swap_storage)
36366+ fput(ttm->swap_storage);
36367+
36368+ kfree(ttm);
36369+}
36370+
36371+int ttm_tt_set_user(struct ttm_tt *ttm,
36372+ struct task_struct *tsk,
36373+ unsigned long start, unsigned long num_pages)
36374+{
36375+ struct mm_struct *mm = tsk->mm;
36376+ int ret;
36377+ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
36378+ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
36379+
36380+ BUG_ON(num_pages != ttm->num_pages);
36381+ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
36382+
36383+ /**
36384+ * Account user pages as lowmem pages for now.
36385+ */
36386+
36387+ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, false, false, false);
36388+ if (unlikely(ret != 0))
36389+ return ret;
36390+
36391+ down_read(&mm->mmap_sem);
36392+ ret = get_user_pages(tsk, mm, start, num_pages,
36393+ write, 0, ttm->pages, NULL);
36394+ up_read(&mm->mmap_sem);
36395+
36396+ if (ret != num_pages && write) {
36397+ ttm_tt_free_user_pages(ttm);
36398+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
36399+ return -ENOMEM;
36400+ }
36401+
36402+ ttm->tsk = tsk;
36403+ ttm->start = start;
36404+ ttm->state = tt_unbound;
36405+
36406+ return 0;
36407+}
36408+
36409+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
36410+ uint32_t page_flags, struct page *dummy_read_page)
36411+{
36412+ struct ttm_bo_driver *bo_driver = bdev->driver;
36413+ struct ttm_tt *ttm;
36414+
36415+ if (!bo_driver)
36416+ return NULL;
36417+
36418+ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
36419+ if (!ttm)
36420+ return NULL;
36421+
36422+ ttm->bdev = bdev;
36423+
36424+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
36425+ ttm->first_himem_page = ttm->num_pages;
36426+ ttm->last_lomem_page = -1;
36427+ ttm->caching_state = tt_cached;
36428+ ttm->page_flags = page_flags;
36429+
36430+ ttm->dummy_read_page = dummy_read_page;
36431+
36432+ ttm_tt_alloc_page_directory(ttm);
36433+ if (!ttm->pages) {
36434+ ttm_tt_destroy(ttm);
36435+ printk(KERN_ERR "Failed allocating page table\n");
36436+ return NULL;
36437+ }
36438+ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
36439+ if (!ttm->be) {
36440+ ttm_tt_destroy(ttm);
36441+ printk(KERN_ERR "Failed creating ttm backend entry\n");
36442+ return NULL;
36443+ }
36444+ ttm->state = tt_unpopulated;
36445+ return ttm;
36446+}
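A hedged sketch of the creation path as a driver might drive it (hypothetical wrapper; only ttm_tt_create() and ttm_tt_destroy() come from this file):

    /* Sketch: create a ttm sized for a buffer object's pages. */
    struct ttm_tt *example_create_tt(struct ttm_bo_device *bdev,
                                     unsigned long num_pages,
                                     struct page *dummy_read_page)
    {
            struct ttm_tt *ttm;

            ttm = ttm_tt_create(bdev, num_pages << PAGE_SHIFT,
                                0 /* page_flags */, dummy_read_page);
            /* On any later teardown path, ttm_tt_destroy(ttm) releases
             * the backend, the pages and the page directory. */
            return ttm;
    }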
36447+
36448+/**
36449+ * ttm_tt_unbind:
36450+ *
36451+ * @ttm: the object to unbind from the graphics device
36452+ *
36453+ * Unbind an object from the aperture. This removes the mappings
36454+ * from the graphics device and flushes caches if necessary.
36455+ */
36456+void ttm_tt_unbind(struct ttm_tt *ttm)
36457+{
36458+ int ret;
36459+ struct ttm_backend *be = ttm->be;
36460+
36461+ if (ttm->state == tt_bound) {
36462+ ret = be->func->unbind(be);
36463+ BUG_ON(ret);
36464+ }
36465+ ttm->state = tt_unbound;
36466+}
36467+
36468+/**
36469+ * ttm_tt_bind:
36470+ *
36471+ * @ttm: the ttm object to bind to the graphics device
36472+ *
36473+ * @bo_mem: the aperture memory region which will hold the object
36474+ *
36475+ * Bind a ttm object to the aperture. This ensures that the necessary
36476+ * pages are allocated, flushes CPU caches as needed and marks the
36477+ * ttm as TTM_PAGE_FLAG_USER_DIRTY to indicate that it may have been
36478+ * modified by the GPU.
36479+ */
36480+
36481+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
36482+{
36483+ int ret = 0;
36484+ struct ttm_backend *be;
36485+
36486+ if (!ttm)
36487+ return -EINVAL;
36488+
36489+ if (ttm->state == tt_bound)
36490+ return 0;
36491+
36492+ be = ttm->be;
36493+
36494+ ret = ttm_tt_populate(ttm);
36495+ if (ret)
36496+ return ret;
36497+
36498+ ret = be->func->bind(be, bo_mem);
36499+ if (ret) {
36500+ printk(KERN_ERR "Couldn't bind backend.\n");
36501+ return ret;
36502+ }
36503+
36504+ ttm->state = tt_bound;
36505+
36506+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
36507+ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
36508+ return 0;
36509+}
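The tt state machine runs tt_unpopulated -> tt_unbound (populate) -> tt_bound (bind). A driver-side move handler might look roughly like this (a sketch; it assumes ttm_mem_reg carries a placement mask, as it does elsewhere in this patch set):

    static int example_move_to_tt(struct ttm_tt *ttm,
                                  struct ttm_mem_reg *new_mem)
    {
            int ret;

            /* Match the kernel-map caching to the requested placement. */
            ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
            if (ret)
                    return ret;

            /* ttm_tt_bind() populates the ttm first if necessary. */
            return ttm_tt_bind(ttm, new_mem);
    }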
36510+
36511+static int ttm_tt_swapin(struct ttm_tt *ttm)
36512+{
36513+ struct address_space *swap_space;
36514+ struct file *swap_storage;
36515+ struct page *from_page;
36516+ struct page *to_page;
36517+ void *from_virtual;
36518+ void *to_virtual;
36519+ int i;
36520+ int ret;
36521+
36522+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
36523+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
36524+ ttm->num_pages);
36525+ if (unlikely(ret != 0))
36526+ return ret;
36527+
36528+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
36529+ return 0;
36530+ }
36531+
36532+ swap_storage = ttm->swap_storage;
36533+ BUG_ON(swap_storage == NULL);
36534+
36535+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
36536+
36537+ for (i = 0; i < ttm->num_pages; ++i) {
36538+ from_page = read_mapping_page(swap_space, i, NULL);
36539+ if (IS_ERR(from_page))
36540+ goto out_err;
36541+ to_page = __ttm_tt_get_page(ttm, i);
36542+ if (unlikely(to_page == NULL))
36543+ goto out_err;
36544+
36545+ preempt_disable();
36546+ from_virtual = kmap_atomic(from_page, KM_USER0);
36547+ to_virtual = kmap_atomic(to_page, KM_USER1);
36548+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
36549+ kunmap_atomic(to_virtual, KM_USER1);
36550+ kunmap_atomic(from_virtual, KM_USER0);
36551+ preempt_enable();
36552+ page_cache_release(from_page);
36553+ }
36554+
36555+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
36556+ fput(swap_storage);
36557+ ttm->swap_storage = NULL;
36558+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
36559+
36560+ return 0;
36561+ out_err:
36562+ ttm_tt_free_alloced_pages(ttm);
36563+ return -ENOMEM;
36564+}
36565+
36566+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
36567+{
36568+ struct address_space *swap_space;
36569+ struct file *swap_storage;
36570+ struct page *from_page;
36571+ struct page *to_page;
36572+ void *from_virtual;
36573+ void *to_virtual;
36574+ int i;
36575+
36576+ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
36577+ BUG_ON(ttm->caching_state != tt_cached);
36578+
36579+ /*
36580+ * For user buffers, just unpin the pages, as there should be
36581+ * vma references.
36582+ */
36583+
36584+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
36585+ ttm_tt_free_user_pages(ttm);
36586+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
36587+ ttm->swap_storage = NULL;
36588+ return 0;
36589+ }
36590+
36591+ if (!persistant_swap_storage) {
36592+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
36593+ swap_storage = shmem_file_setup("ttm swap",
36594+ ttm->num_pages << PAGE_SHIFT,
36595+ 0);
36596+ if (unlikely(IS_ERR(swap_storage))) {
36597+ printk(KERN_ERR "Failed allocating swap storage.\n");
36598+ return -ENOMEM;
36599+ }
36600+#else
36601+ return -ENOMEM;
36602+#endif
36603+ } else
36604+ swap_storage = persistant_swap_storage;
36605+
36606+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
36607+
36608+ for (i = 0; i < ttm->num_pages; ++i) {
36609+ from_page = ttm->pages[i];
36610+ if (unlikely(from_page == NULL))
36611+ continue;
36612+ to_page = read_mapping_page(swap_space, i, NULL);
36613+ if (unlikely(IS_ERR(to_page)))
36614+ goto out_err;
36615+
36616+ preempt_disable();
36617+ from_virtual = kmap_atomic(from_page, KM_USER0);
36618+ to_virtual = kmap_atomic(to_page, KM_USER1);
36619+ memcpy(to_virtual, from_virtual, PAGE_SIZE);
36620+ kunmap_atomic(to_virtual, KM_USER1);
36621+ kunmap_atomic(from_virtual, KM_USER0);
36622+ preempt_enable();
36623+ set_page_dirty(to_page);
36624+ mark_page_accessed(to_page);
36626+ page_cache_release(to_page);
36627+ }
36628+
36629+ ttm_tt_free_alloced_pages(ttm);
36630+ ttm->swap_storage = swap_storage;
36631+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
36632+ if (persistant_swap_storage)
36633+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
36634+
36635+ return 0;
36636+ out_err:
36637+ if (!persistant_swap_storage)
36638+ fput(swap_storage);
36639+
36640+ return -ENOMEM;
36641+}
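The swap round trip, as a hypothetical caller would see it (a sketch; the preconditions are the BUG_ON()s above: unbound or unpopulated state and cached pages):

    /* Sketch: push an idle, cached ttm out to shmem and pull it back. */
    static int example_swap_cycle(struct ttm_tt *ttm)
    {
            int ret;

            ret = ttm_tt_swapout(ttm, NULL); /* anonymous shmem backing */
            if (ret)
                    return ret;

            /* Any subsequent page access swaps back in; explicitly: */
            return ttm_tt_populate(ttm);
    }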
36642diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
36643--- a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h 1969-12-31 16:00:00.000000000 -0800
36644+++ b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h 2009-04-07 13:28:38.000000000 -0700
36645@@ -0,0 +1,79 @@
36646+/**************************************************************************
36647+ *
36648+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
36649+ * All Rights Reserved.
36650+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
36651+ * All Rights Reserved.
36652+ *
36653+ * Permission is hereby granted, free of charge, to any person obtaining a
36654+ * copy of this software and associated documentation files (the
36655+ * "Software"), to deal in the Software without restriction, including
36656+ * without limitation the rights to use, copy, modify, merge, publish,
36657+ * distribute, sub license, and/or sell copies of the Software, and to
36658+ * permit persons to whom the Software is furnished to do so, subject to
36659+ * the following conditions:
36660+ *
36661+ * The above copyright notice and this permission notice (including the
36662+ * next paragraph) shall be included in all copies or substantial portions
36663+ * of the Software.
36664+ *
36665+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36666+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36667+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
36668+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36669+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
36670+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
36671+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
36672+ *
36673+ **************************************************************************/
36674+/*
36675+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
36676+ */
36677+
36678+#ifndef _TTM_USEROBJ_API_H_
36679+#define _TTM_USEROBJ_API_H_
36680+
36681+#include "ttm/ttm_placement_user.h"
36682+#include "ttm/ttm_fence_user.h"
36683+#include "ttm/ttm_object.h"
36684+#include "ttm/ttm_fence_api.h"
36685+#include "ttm/ttm_bo_api.h"
36686+
36687+struct ttm_lock;
36688+
36689+/*
36690+ * User ioctls.
36691+ */
36692+
36693+extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
36694+ struct ttm_bo_device *bdev,
36695+ struct ttm_lock *lock, void *data);
36696+extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
36697+ struct ttm_bo_device *bdev,
36698+ struct ttm_lock *lock, void *data);
36699+extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
36700+extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
36701+extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
36702+extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
36703+ struct ttm_lock *lock, void *data);
36704+extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
36705+extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
36706+extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
36707+extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
36708+
36709+extern int
36710+ttm_fence_user_create(struct ttm_fence_device *fdev,
36711+ struct ttm_object_file *tfile,
36712+ uint32_t fence_class,
36713+ uint32_t fence_types,
36714+ uint32_t create_flags,
36715+ struct ttm_fence_object **fence, uint32_t * user_handle);
36716+
36717+extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
36718+ *tfile,
36719+ uint32_t handle);
36720+
36721+extern int
36722+ttm_pl_verify_access(struct ttm_buffer_object *bo,
36723+ struct ttm_object_file *tfile);
36724+#endif
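These entry points are meant to be wired into a driver's ioctl table; a hypothetical wrapper might look like this (a sketch; example_tfile() is an assumed helper mapping a drm_file to its ttm_object_file, not part of this header):

    static int example_pl_unref(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
    {
            struct ttm_object_file *tfile = example_tfile(file_priv);

            return ttm_pl_unref_ioctl(tfile, data);
    }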
36725diff -uNr a/include/drm/drm_compat.h b/include/drm/drm_compat.h
36726--- a/include/drm/drm_compat.h 1969-12-31 16:00:00.000000000 -0800
36727+++ b/include/drm/drm_compat.h 2009-04-07 13:28:38.000000000 -0700
36728@@ -0,0 +1,238 @@
36729+/**
36730+ * \file drm_compat.h
36731+ * Backward compatibility definitions for Direct Rendering Manager
36732+ *
36733+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
36734+ * \author Gareth Hughes <gareth@valinux.com>
36735+ */
36736+
36737+/*
36738+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
36739+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
36740+ * All rights reserved.
36741+ *
36742+ * Permission is hereby granted, free of charge, to any person obtaining a
36743+ * copy of this software and associated documentation files (the "Software"),
36744+ * to deal in the Software without restriction, including without limitation
36745+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
36746+ * and/or sell copies of the Software, and to permit persons to whom the
36747+ * Software is furnished to do so, subject to the following conditions:
36748+ *
36749+ * The above copyright notice and this permission notice (including the next
36750+ * paragraph) shall be included in all copies or substantial portions of the
36751+ * Software.
36752+ *
36753+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36754+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36755+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
36756+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
36757+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
36758+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
36759+ * OTHER DEALINGS IN THE SOFTWARE.
36760+ */
36761+
36762+#ifndef _DRM_COMPAT_H_
36763+#define _DRM_COMPAT_H_
36764+
36765+#ifndef minor
36766+#define minor(x) MINOR((x))
36767+#endif
36768+
36769+#ifndef MODULE_LICENSE
36770+#define MODULE_LICENSE(x)
36771+#endif
36772+
36773+#ifndef preempt_disable
36774+#define preempt_disable()
36775+#define preempt_enable()
36776+#endif
36777+
36778+#ifndef pte_offset_map
36779+#define pte_offset_map pte_offset
36780+#define pte_unmap(pte)
36781+#endif
36782+
36783+#ifndef module_param
36784+#define module_param(name, type, perm)
36785+#endif
36786+
36787+/* older kernels had different irq args */
36788+
36789+#ifndef list_for_each_safe
36790+#define list_for_each_safe(pos, n, head) \
36791+ for (pos = (head)->next, n = pos->next; pos != (head); \
36792+ pos = n, n = pos->next)
36793+#endif
36794+
36795+#ifndef list_for_each_entry
36796+#define list_for_each_entry(pos, head, member) \
36797+ for (pos = list_entry((head)->next, typeof(*pos), member), \
36798+ prefetch(pos->member.next); \
36799+ &pos->member != (head); \
36800+ pos = list_entry(pos->member.next, typeof(*pos), member), \
36801+ prefetch(pos->member.next))
36802+#endif
36803+
36804+#ifndef list_for_each_entry_safe
36805+#define list_for_each_entry_safe(pos, n, head, member) \
36806+ for (pos = list_entry((head)->next, typeof(*pos), member), \
36807+ n = list_entry(pos->member.next, typeof(*pos), member); \
36808+ &pos->member != (head); \
36809+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
36810+#endif
36811+
36812+#ifndef __user
36813+#define __user
36814+#endif
36815+
36816+#if !defined(__put_page)
36817+#define __put_page(p) atomic_dec(&(p)->count)
36818+#endif
36819+
36820+#if !defined(__GFP_COMP)
36821+#define __GFP_COMP 0
36822+#endif
36823+
36824+#if !defined(IRQF_SHARED)
36825+#define IRQF_SHARED SA_SHIRQ
36826+#endif
36827+
36828+
36829+
36830+#ifndef DEFINE_SPINLOCK
36831+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
36832+#endif
36833+
36834+/* old architectures */
36835+#ifdef __AMD64__
36836+#define __x86_64__
36837+#endif
36838+
36839+/* sysfs __ATTR macro */
36840+#ifndef __ATTR
36841+#define __ATTR(_name,_mode,_show,_store) { \
36842+ .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
36843+ .show = _show, \
36844+ .store = _store, \
36845+}
36846+#endif
36847+
36848+
36849+#ifndef list_for_each_entry_safe_reverse
36850+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
36851+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
36852+ n = list_entry(pos->member.prev, typeof(*pos), member); \
36853+ &pos->member != (head); \
36854+ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
36855+#endif
36856+
36857+#include <linux/mm.h>
36858+#include <asm/page.h>
36859+
36860+
36861+#define DRM_FULL_MM_COMPAT
36862+
36863+
36864+/*
36865+ * Flush relevant caches and clear a VMA structure so that page references
36866+ * will cause a page fault. Don't flush TLBs.
36867+ */
36868+
36869+extern void drm_clear_vma(struct vm_area_struct *vma,
36870+ unsigned long addr, unsigned long end);
36871+
36872+/*
36873+ * Return the PTE protection map entries for the VMA flags given by
36874+ * flags. This is a functional interface to the kernel's protection map.
36875+ */
36876+
36877+extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
36878+
36879+#ifndef GFP_DMA32
36880+#define GFP_DMA32 GFP_KERNEL
36881+#endif
36882+#ifndef __GFP_DMA32
36883+#define __GFP_DMA32 GFP_KERNEL
36884+#endif
36885+
36886+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
36887+
36888+/*
36889+ * These are too slow in earlier kernels.
36890+ */
36891+
36892+extern int drm_unmap_page_from_agp(struct page *page);
36893+extern int drm_map_page_into_agp(struct page *page);
36894+
36895+#define map_page_into_agp drm_map_page_into_agp
36896+#define unmap_page_from_agp drm_unmap_page_from_agp
36897+#endif
36898+
36899+
36900+
36901+
36902+
36903+/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
36904+#ifdef DRM_IDR_COMPAT_FN
36905+int idr_for_each(struct idr *idp,
36906+ int (*fn)(int id, void *p, void *data), void *data);
36907+void idr_remove_all(struct idr *idp);
36908+#endif
36909+
36910+
36911+
36912+
36913+
36914+
36915+#ifndef PM_EVENT_PRETHAW
36916+#define PM_EVENT_PRETHAW 3
36917+#endif
36918+
36919+
36920+#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM) && \
36921+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)))
36922+#define DRM_KMAP_ATOMIC_PROT_PFN
36923+extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
36924+ pgprot_t protection);
36925+#endif
36926+
36927+#if !defined(flush_agp_mappings)
36928+#define flush_agp_mappings() do {} while(0)
36929+#endif
36930+
36931+#ifndef DMA_BIT_MASK
36932+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
36933+#endif
36934+
36935+#ifndef VM_CAN_NONLINEAR
36936+#define DRM_VM_NOPAGE 1
36937+#endif
36938+
36939+#ifdef DRM_VM_NOPAGE
36940+
36941+extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
36942+ unsigned long address, int *type);
36943+
36944+extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
36945+ unsigned long address, int *type);
36946+
36947+extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
36948+ unsigned long address, int *type);
36949+
36950+extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
36951+ unsigned long address, int *type);
36952+#endif
36953+
36954+#define drm_on_each_cpu(handler, data, wait) \
36955+ on_each_cpu(handler, data, wait)
36956+
36957+
36958+#ifndef OS_HAS_GEM
36959+#define OS_HAS_GEM 1
36960+#endif
36961+
36962+#ifndef current_euid
36963+#define current_euid() (current->euid)
36964+#endif
36965+
36966+#endif
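As one illustration of how these shims get used, a vm_operations table can select between the legacy nopage handlers declared above and a modern fault handler (a sketch; example_vm_fault is an assumed newer-kernel handler, not part of this header):

    static struct vm_operations_struct example_vm_ops = {
    #ifdef DRM_VM_NOPAGE
            .nopage = drm_vm_nopage,    /* pre-fault() kernels */
    #else
            .fault  = example_vm_fault, /* assumed modern handler */
    #endif
    };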
36967diff -uNr a/include/drm/drm_internal.h b/include/drm/drm_internal.h
36968--- a/include/drm/drm_internal.h 1969-12-31 16:00:00.000000000 -0800
36969+++ b/include/drm/drm_internal.h 2009-04-07 13:28:38.000000000 -0700
36970@@ -0,0 +1,40 @@
36971+/*
36972+ * Copyright 2007 Red Hat, Inc
36973+ * All rights reserved.
36974+ *
36975+ * Permission is hereby granted, free of charge, to any person obtaining a
36976+ * copy of this software and associated documentation files (the "Software"),
36977+ * to deal in the Software without restriction, including without limitation
36978+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
36979+ * and/or sell copies of the Software, and to permit persons to whom the
36980+ * Software is furnished to do so, subject to the following conditions:
36981+ *
36982+ * The above copyright notice and this permission notice (including the next
36983+ * paragraph) shall be included in all copies or substantial portions of the
36984+ * Software.
36985+ *
36986+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36987+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36988+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
36989+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
36990+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
36991+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
36992+ * OTHER DEALINGS IN THE SOFTWARE.
36993+ */
36994+
36995+/* This header file holds function prototypes and data types that are
36996+ * internal to the drm (not exported to user space) but shared across
36997+ * drivers and platforms */
36998+
36999+#ifndef __DRM_INTERNAL_H__
37000+#define __DRM_INTERNAL_H__
37001+
37002+/**
37003+ * Drawable information.
37004+ */
37005+struct drm_drawable_info {
37006+ unsigned int num_rects;
37007+ struct drm_clip_rect *rects;
37008+};
37009+
37010+#endif
37011diff -uNr a/include/drm/ttm/ttm_fence_user.h b/include/drm/ttm/ttm_fence_user.h
37012--- a/include/drm/ttm/ttm_fence_user.h 1969-12-31 16:00:00.000000000 -0800
37013+++ b/include/drm/ttm/ttm_fence_user.h 2009-04-07 13:28:38.000000000 -0700
37014@@ -0,0 +1,147 @@
37015+/**************************************************************************
37016+ *
37017+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37018+ * All Rights Reserved.
37019+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
37020+ * All Rights Reserved.
37021+ *
37022+ * Permission is hereby granted, free of charge, to any person obtaining a
37023+ * copy of this software and associated documentation files (the
37024+ * "Software"), to deal in the Software without restriction, including
37025+ * without limitation the rights to use, copy, modify, merge, publish,
37026+ * distribute, sub license, and/or sell copies of the Software, and to
37027+ * permit persons to whom the Software is furnished to do so, subject to
37028+ * the following conditions:
37029+ *
37030+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37031+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37032+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37033+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37034+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37035+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37036+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37037+ *
37038+ * The above copyright notice and this permission notice (including the
37039+ * next paragraph) shall be included in all copies or substantial portions
37040+ * of the Software.
37041+ *
37042+ **************************************************************************/
37043+/*
37044+ * Authors
37045+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37046+ */
37047+
37048+#ifndef TTM_FENCE_USER_H
37049+#define TTM_FENCE_USER_H
37050+
37051+#if !defined(__KERNEL__) && !defined(_KERNEL)
37052+#include <stdint.h>
37053+#endif
37054+
37055+#define TTM_FENCE_MAJOR 0
37056+#define TTM_FENCE_MINOR 1
37057+#define TTM_FENCE_PL 0
37058+#define TTM_FENCE_DATE "080819"
37059+
37060+/**
37061+ * struct ttm_fence_signaled_req
37062+ *
37063+ * @handle: Handle to the fence object. Input.
37064+ *
37065+ * @fence_type: Fence types we want to flush. Input.
37066+ *
37067+ * @flush: Boolean. Flush the indicated fence_types. Input.
37068+ *
37069+ * Argument to the TTM_FENCE_SIGNALED ioctl.
37070+ */
37071+
37072+struct ttm_fence_signaled_req {
37073+ uint32_t handle;
37074+ uint32_t fence_type;
37075+ int32_t flush;
37076+ uint32_t pad64;
37077+};
37078+
37079+/**
37080+ * struct ttm_fence_rep
37081+ *
37082+ * @signaled_types: Fence types that have signaled.
37083+ *
37084+ * @fence_error: Command execution error.
37085+ * Hardware errors that are consequences of the execution
37086+ * of the command stream preceding the fence are reported
37087+ * here.
37088+ *
37089+ * Output argument to the TTM_FENCE_SIGNALED and
37090+ * TTM_FENCE_FINISH ioctls.
37091+ */
37092+
37093+struct ttm_fence_rep {
37094+ uint32_t signaled_types;
37095+ uint32_t fence_error;
37096+};
37097+
37098+union ttm_fence_signaled_arg {
37099+ struct ttm_fence_signaled_req req;
37100+ struct ttm_fence_rep rep;
37101+};
37102+
37103+/*
37104+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
37105+ *
37106+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
37107+ * wait.
37108+ *
37109+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
37110+ * but return -EBUSY if the buffer is busy.
37111+ */
37112+
37113+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
37114+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
37115+
37116+/**
37117+ * struct ttm_fence_finish_req
37118+ *
37119+ * @handle: Handle to the fence object. Input.
37120+ *
37121+ * @fence_type: Fence types we want to finish.
37122+ *
37123+ * @mode: Wait mode.
37124+ *
37125+ * Input to the TTM_FENCE_FINISH ioctl.
37126+ */
37127+
37128+struct ttm_fence_finish_req {
37129+ uint32_t handle;
37130+ uint32_t fence_type;
37131+ uint32_t mode;
37132+ uint32_t pad64;
37133+};
37134+
37135+union ttm_fence_finish_arg {
37136+ struct ttm_fence_finish_req req;
37137+ struct ttm_fence_rep rep;
37138+};
37139+
37140+/**
37141+ * struct ttm_fence_unref_arg
37142+ *
37143+ * @handle: Handle to the fence object.
37144+ *
37145+ * Argument to the TTM_FENCE_UNREF ioctl.
37146+ */
37147+
37148+struct ttm_fence_unref_arg {
37149+ uint32_t handle;
37150+ uint32_t pad64;
37151+};
37152+
37153+/*
37154+ * Ioctl offsets from extension start.
37155+ */
37156+
37157+#define TTM_FENCE_SIGNALED 0x01
37158+#define TTM_FENCE_FINISH 0x02
37159+#define TTM_FENCE_UNREF 0x03
37160+
37161+#endif
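From user space the req/rep unions above are filled in and passed through the driver's ioctl range; a libdrm-style sketch (driver_offset is an assumption — the header only defines the relative offsets):

    /* Sketch (user space): wait for a fence with lazy polling. */
    int example_fence_finish(int fd, unsigned driver_offset,
                             uint32_t fence_handle, uint32_t fence_type)
    {
            union ttm_fence_finish_arg arg = {{0}};

            arg.req.handle = fence_handle;
            arg.req.fence_type = fence_type;
            arg.req.mode = TTM_FENCE_FINISH_MODE_LAZY;

            if (drmCommandWriteRead(fd, driver_offset + TTM_FENCE_FINISH,
                                    &arg, sizeof(arg)))
                    return -1;

            /* rep overlays req on return: signaled types plus any error. */
            return arg.rep.fence_error ? -1 : (int)arg.rep.signaled_types;
    }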
37162diff -uNr a/include/drm/ttm/ttm_placement_common.h b/include/drm/ttm/ttm_placement_common.h
37163--- a/include/drm/ttm/ttm_placement_common.h 1969-12-31 16:00:00.000000000 -0800
37164+++ b/include/drm/ttm/ttm_placement_common.h 2009-04-07 13:28:38.000000000 -0700
37165@@ -0,0 +1,96 @@
37166+/**************************************************************************
37167+ *
37168+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37169+ * All Rights Reserved.
37170+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
37171+ * All Rights Reserved.
37172+ *
37173+ * Permission is hereby granted, free of charge, to any person obtaining a
37174+ * copy of this software and associated documentation files (the
37175+ * "Software"), to deal in the Software without restriction, including
37176+ * without limitation the rights to use, copy, modify, merge, publish,
37177+ * distribute, sub license, and/or sell copies of the Software, and to
37178+ * permit persons to whom the Software is furnished to do so, subject to
37179+ * the following conditions:
37180+ *
37181+ * The above copyright notice and this permission notice (including the
37182+ * next paragraph) shall be included in all copies or substantial portions
37183+ * of the Software.
37184+ *
37185+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37186+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37187+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37188+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37189+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37190+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37191+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37192+ *
37193+ **************************************************************************/
37194+/*
37195+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
37196+ */
37197+
37198+#ifndef _TTM_PL_COMMON_H_
37199+#define _TTM_PL_COMMON_H_
37200+/*
37201+ * Memory regions for data placement.
37202+ */
37203+
37204+#define TTM_PL_SYSTEM 0
37205+#define TTM_PL_TT 1
37206+#define TTM_PL_VRAM 2
37207+#define TTM_PL_PRIV0 3
37208+#define TTM_PL_PRIV1 4
37209+#define TTM_PL_PRIV2 5
37210+#define TTM_PL_PRIV3 6
37211+#define TTM_PL_PRIV4 7
37212+#define TTM_PL_PRIV5 8
37213+#define TTM_PL_CI 9
37214+#define TTM_PL_SWAPPED 15
37215+
37216+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
37217+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
37218+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
37219+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
37220+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
37221+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
37222+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
37223+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
37224+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
37225+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
37226+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
37227+#define TTM_PL_MASK_MEM 0x0000FFFF
37228+
37229+/*
37230+ * Other flags that affect data placement.
37231+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
37232+ * if available.
37233+ * TTM_PL_FLAG_SHARED means that another application may
37234+ * reference the buffer.
37235+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
37236+ * be evicted to make room for other buffers.
37237+ */
37238+
37239+#define TTM_PL_FLAG_CACHED (1 << 16)
37240+#define TTM_PL_FLAG_UNCACHED (1 << 17)
37241+#define TTM_PL_FLAG_WC (1 << 18)
37242+#define TTM_PL_FLAG_SHARED (1 << 20)
37243+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
37244+
37245+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
37246+ TTM_PL_FLAG_UNCACHED | \
37247+ TTM_PL_FLAG_WC)
37248+
37249+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
37250+
37251+/*
37252+ * Access flags to be used for CPU and GPU mappings.
37253+ * The idea is that the TTM synchronization mechanism will
37254+ * allow concurrent READ access and exclusive write access.
37255+ * Currently GPU and CPU accesses are exclusive.
37256+ */
37257+
37258+#define TTM_ACCESS_READ (1 << 0)
37259+#define TTM_ACCESS_WRITE (1 << 1)
37260+
37261+#endif
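By way of example, typical placement masks compose one memory-type flag with caching flags (illustrative values only, not taken from the patch):

    /* Pinned, write-combined scanout buffer in VRAM: */
    uint32_t scanout_placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC |
                                 TTM_PL_FLAG_NO_EVICT;

    /* Cheap CPU-accessible buffer in cached system memory: */
    uint32_t sysmem_placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;

    /* TTM_PL_MASK_CACHING extracts just the caching part: */
    uint32_t caching = scanout_placement & TTM_PL_MASK_CACHING; /* TTM_PL_FLAG_WC */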
37262diff -uNr a/include/drm/ttm/ttm_placement_user.h b/include/drm/ttm/ttm_placement_user.h
37263--- a/include/drm/ttm/ttm_placement_user.h 1969-12-31 16:00:00.000000000 -0800
37264+++ b/include/drm/ttm/ttm_placement_user.h 2009-04-07 13:28:38.000000000 -0700
37265@@ -0,0 +1,259 @@
37266+/**************************************************************************
37267+ *
37268+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
37269+ * All Rights Reserved.
37270+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
37271+ * All Rights Reserved.
37272+ *
37273+ * Permission is hereby granted, free of charge, to any person obtaining a
37274+ * copy of this software and associated documentation files (the
37275+ * "Software"), to deal in the Software without restriction, including
37276+ * without limitation the rights to use, copy, modify, merge, publish,
37277+ * distribute, sub license, and/or sell copies of the Software, and to
37278+ * permit persons to whom the Software is furnished to do so, subject to
37279+ * the following conditions:
37280+ *
37281+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37282+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37283+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
37284+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
37285+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37286+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
37287+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
37288+ *
37289+ * The above copyright notice and this permission notice (including the
37290+ * next paragraph) shall be included in all copies or substantial portions
37291+ * of the Software.
37292+ *
37293+ **************************************************************************/
37294+/*
37295+ * Authors
37296+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37297+ */
37298+
37299+#ifndef _TTM_PLACEMENT_USER_H_
37300+#define _TTM_PLACEMENT_USER_H_
37301+
37302+#if !defined(__KERNEL__) && !defined(_KERNEL)
37303+#include <stdint.h>
37304+#else
37305+#include <linux/kernel.h>
37306+#endif
37307+
37308+#include "ttm/ttm_placement_common.h"
37309+
37310+#define TTM_PLACEMENT_MAJOR 0
37311+#define TTM_PLACEMENT_MINOR 1
37312+#define TTM_PLACEMENT_PL 0
37313+#define TTM_PLACEMENT_DATE "080819"
37314+
37315+/**
37316+ * struct ttm_pl_create_req
37317+ *
37318+ * @size: The buffer object size.
37319+ * @placement: Flags that indicate initial acceptable
37320+ * placement.
37321+ * @page_alignment: Required alignment in pages.
37322+ *
37323+ * Input to the TTM_PL_CREATE ioctl.
37324+ */
37325+
37326+struct ttm_pl_create_req {
37327+ uint64_t size;
37328+ uint32_t placement;
37329+ uint32_t page_alignment;
37330+};
37331+
37332+/**
37333+ * struct ttm_pl_create_ub_req
37334+ *
37335+ * @size: The buffer object size.
37336+ * @user_address: User-space address of the memory area that
37337+ * should be used to back the buffer object, cast to 64 bits.
37338+ * @placement: Flags that indicate initial acceptable
37339+ * placement.
37340+ * @page_alignment: Required alignment in pages.
37341+ *
37342+ * Input to the TTM_PL_CREATE_UB ioctl.
37343+ */
37344+
37345+struct ttm_pl_create_ub_req {
37346+ uint64_t size;
37347+ uint64_t user_address;
37348+ uint32_t placement;
37349+ uint32_t page_alignment;
37350+};
37351+
37352+/**
37353+ * struct ttm_pl_rep
37354+ *
37355+ * @gpu_offset: The current offset into the memory region used.
37356+ * This can be used directly by the GPU if there are no
37357+ * additional GPU mapping procedures used by the driver.
37358+ *
37359+ * @bo_size: Actual buffer object size.
37360+ *
37361+ * @map_handle: Offset into the device address space.
37362+ * Used for map, seek, read, write. This will never change
37363+ * during the lifetime of an object.
37364+ *
37365+ * @placement: Flag indicating the placement status of
37366+ * the buffer object using the TTM_PL flags above.
37367+ *
37368+ * @sync_object_arg: Used for user-space synchronization and
37369+ * depends on the synchronization model used. If fences are
37370+ * used, this is the buffer_object::fence_type_mask
37371+ *
37372+ * Output from the TTM_PL_CREATE, TTM_PL_REFERENCE, and
37373+ * TTM_PL_SETSTATUS ioctls.
37374+ */
37375+
37376+struct ttm_pl_rep {
37377+ uint64_t gpu_offset;
37378+ uint64_t bo_size;
37379+ uint64_t map_handle;
37380+ uint32_t placement;
37381+ uint32_t handle;
37382+ uint32_t sync_object_arg;
37383+ uint32_t pad64;
37384+};
37385+
37386+/**
37387+ * struct ttm_pl_setstatus_req
37388+ *
37389+ * @set_placement: Placement flags to set.
37390+ *
37391+ * @clr_placement: Placement flags to clear.
37392+ *
37393+ * @handle: The object handle
37394+ *
37395+ * Input to the TTM_PL_SETSTATUS ioctl.
37396+ */
37397+
37398+struct ttm_pl_setstatus_req {
37399+ uint32_t set_placement;
37400+ uint32_t clr_placement;
37401+ uint32_t handle;
37402+ uint32_t pad64;
37403+};
37404+
37405+/**
37406+ * struct ttm_pl_reference_req
37407+ *
37408+ * @handle: The object to put a reference on.
37409+ *
37410+ * Input to the TTM_PL_REFERENCE and TTM_PL_UNREF ioctls.
37411+ */
37412+
37413+struct ttm_pl_reference_req {
37414+ uint32_t handle;
37415+ uint32_t pad64;
37416+};
37417+
37418+/*
37419+ * ACCESS mode flags for SYNCCPU.
37420+ *
37421+ * TTM_PL_SYNCCPU_MODE_READ will guarantee that the GPU is not
37422+ * writing to the buffer.
37423+ *
37424+ * TTM_PL_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
37425+ * accessing the buffer.
37426+ *
37427+ * TTM_PL_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
37428+ * for GPU accesses to finish but returns -EBUSY instead.
37429+ *
37430+ * TTM_PL_SYNCCPU_MODE_TRYCACHED tries to place the buffer in
37431+ * cacheable memory while it is synchronized for CPU access.
37432+ */
37433+
37434+#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
37435+#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
37436+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
37437+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
37438+
37439+/**
37440+ * struct ttm_pl_synccpu_arg
37441+ *
37442+ * @handle: The object to synchronize.
37443+ *
37444+ * @access_mode: access mode indicated by the
37445+ * TTM_PL_SYNCCPU_MODE flags.
37446+ *
37447+ * @op: indicates whether to grab or release the
37448+ * buffer for cpu usage.
37449+ *
37450+ * Input to the TTM_PL_SYNCCPU ioctl.
37451+ */
37452+
37453+struct ttm_pl_synccpu_arg {
37454+ uint32_t handle;
37455+ uint32_t access_mode;
37456+ enum {
37457+ TTM_PL_SYNCCPU_OP_GRAB,
37458+ TTM_PL_SYNCCPU_OP_RELEASE
37459+ } op;
37460+ uint32_t pad64;
37461+};
37462+
37463+/*
37464+ * Waiting mode flags for the TTM_PL_WAITIDLE ioctl.
37465+ *
37466+ * TTM_PL_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
37467+ * wait.
37468+ *
37469+ * TTM_PL_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for the GPU,
37470+ * but return -EBUSY if the buffer is busy.
37471+ */
37472+
37473+#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
37474+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
37475+
37476+/**
37477+ * struct ttm_pl_waitidle_arg
37478+ *
37479+ * @handle: The object to synchronize.
37480+ *
37481+ * @mode: wait mode indicated by the
37482+ * TTM_PL_WAITIDLE_MODE flags.
37483+ *
37484+ * Argument to the TTM_PL_WAITIDLE ioctl.
37485+ */
37486+
37487+struct ttm_pl_waitidle_arg {
37488+ uint32_t handle;
37489+ uint32_t mode;
37490+};
37491+
37492+union ttm_pl_create_arg {
37493+ struct ttm_pl_create_req req;
37494+ struct ttm_pl_rep rep;
37495+};
37496+
37497+union ttm_pl_reference_arg {
37498+ struct ttm_pl_reference_req req;
37499+ struct ttm_pl_rep rep;
37500+};
37501+
37502+union ttm_pl_setstatus_arg {
37503+ struct ttm_pl_setstatus_req req;
37504+ struct ttm_pl_rep rep;
37505+};
37506+
37507+union ttm_pl_create_ub_arg {
37508+ struct ttm_pl_create_ub_req req;
37509+ struct ttm_pl_rep rep;
37510+};
37511+
37512+/*
37513+ * Ioctl offsets.
37514+ */
37515+
37516+#define TTM_PL_CREATE 0x00
37517+#define TTM_PL_REFERENCE 0x01
37518+#define TTM_PL_UNREF 0x02
37519+#define TTM_PL_SYNCCPU 0x03
37520+#define TTM_PL_WAITIDLE 0x04
37521+#define TTM_PL_SETSTATUS 0x05
37522+#define TTM_PL_CREATE_UB 0x06
37523+
37524+#endif
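A user-space sketch of the synccpu grab/release bracket around CPU access (the ioctl plumbing mirrors the fence example earlier; driver_offset is again an assumption):

    /* Sketch (user space): bracket CPU reads of a buffer. */
    int example_cpu_read(int fd, unsigned driver_offset, uint32_t bo_handle)
    {
            struct ttm_pl_synccpu_arg arg = {0};

            arg.handle = bo_handle;
            arg.access_mode = TTM_PL_SYNCCPU_MODE_READ;
            arg.op = TTM_PL_SYNCCPU_OP_GRAB;
            if (drmCommandWrite(fd, driver_offset + TTM_PL_SYNCCPU,
                                &arg, sizeof(arg)))
                    return -1;

            /* ... read from the CPU mapping of the buffer here ... */

            arg.op = TTM_PL_SYNCCPU_OP_RELEASE;
            return drmCommandWrite(fd, driver_offset + TTM_PL_SYNCCPU,
                                   &arg, sizeof(arg));
    }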
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0001-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch
index 8f34a0f3f4..8f34a0f3f4 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0001-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0007-acer-error-msg.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch
index 7bf897ab57..7bf897ab57 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0007-acer-error-msg.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0004-superreadahead-patch.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch
index e4e2001104..e4e2001104 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0004-superreadahead-patch.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch
new file mode 100644
index 0000000000..c36e5ba4ad
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch
@@ -0,0 +1,6095 @@
1Patch provided by Mocean to enable the timberdale subsystem of the Russelville board.
2
3Signed-off-by: Joel Clark <joel.clark@intel.com>
4Acked-by: Arjan van de Ven <arjan@infradead.org>
5Signed-off-by: Todd Brandt <todd.e.brandt@intel.com>
6
7
8diff -uNr linux-2.6.29-clean/drivers/gpio/Kconfig linux-2.6.29/drivers/gpio/Kconfig
9--- linux-2.6.29-clean/drivers/gpio/Kconfig 2009-04-01 09:20:23.000000000 -0700
10+++ linux-2.6.29/drivers/gpio/Kconfig 2009-04-06 13:51:47.000000000 -0700
11@@ -161,6 +161,12 @@
12
13 If unsure, say N.
14
15+config GPIO_TIMBERDALE
16+ tristate "Support for timberdale GPIO"
17+ depends on MFD_TIMBERDALE && GPIOLIB
18+ ---help---
19+ Add support for GPIO usage of some pins of the timberdale FPGA.
20+
21 comment "SPI GPIO expanders:"
22
23 config GPIO_MAX7301
24diff -uNr linux-2.6.29-clean/drivers/gpio/Makefile linux-2.6.29/drivers/gpio/Makefile
25--- linux-2.6.29-clean/drivers/gpio/Makefile 2009-04-01 09:20:23.000000000 -0700
26+++ linux-2.6.29/drivers/gpio/Makefile 2009-04-06 13:51:47.000000000 -0700
27@@ -12,3 +12,4 @@
28 obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
29 obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
30 obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
31+obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
32diff -uNr linux-2.6.29-clean/drivers/gpio/timbgpio.c linux-2.6.29/drivers/gpio/timbgpio.c
33--- linux-2.6.29-clean/drivers/gpio/timbgpio.c 1969-12-31 16:00:00.000000000 -0800
34+++ linux-2.6.29/drivers/gpio/timbgpio.c 2009-04-06 13:51:47.000000000 -0700
35@@ -0,0 +1,275 @@
36+/*
37+ * timbgpio.c timberdale FPGA GPIO driver
38+ * Copyright (c) 2009 Intel Corporation
39+ *
40+ * This program is free software; you can redistribute it and/or modify
41+ * it under the terms of the GNU General Public License version 2 as
42+ * published by the Free Software Foundation.
43+ *
44+ * This program is distributed in the hope that it will be useful,
45+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
46+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
47+ * GNU General Public License for more details.
48+ *
49+ * You should have received a copy of the GNU General Public License
50+ * along with this program; if not, write to the Free Software
51+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
52+ */
53+
54+/* Supports:
55+ * Timberdale FPGA GPIO
56+ */
57+
58+#include <linux/module.h>
59+#include <linux/gpio.h>
60+#include <linux/pci.h>
61+#include <linux/platform_device.h>
62+#include <linux/interrupt.h>
63+
64+#include "timbgpio.h"
65+
66+static u32 timbgpio_configure(struct gpio_chip *gpio, unsigned nr,
67+ unsigned off, unsigned val)
68+{
69+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
70+
71+ u32 config, oldconfig, wconfig;
72+
73+ mutex_lock(&tgpio->lock);
74+ config = ioread32(tgpio->membase + off);
75+ oldconfig = config;
76+
77+ if (val)
78+ config |= (1 << nr);
79+ else
80+ config &= ~(1 << nr);
81+
82+ iowrite32(config, tgpio->membase + off);
83+ wconfig = ioread32(tgpio->membase + off);
84+ mutex_unlock(&tgpio->lock);
85+
86+ return oldconfig;
87+}
88+
89+static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
90+{
91+ timbgpio_configure(gpio, nr, TGPIODIR, 1);
92+ return 0;
93+}
94+
95+static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
96+{
97+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
98+ u32 value;
99+
100+ value = ioread32(tgpio->membase + TGPIOVAL);
101+ return (value & (1 << nr)) ? 1 : 0;
102+}
103+
104+static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
105+ unsigned nr, int val)
106+{
107+ timbgpio_configure(gpio, nr, TGPIODIR, 0);
108+ return 0;
109+}
110+
111+
112+
113+static void timbgpio_gpio_set(struct gpio_chip *gpio,
114+ unsigned nr, int val)
115+{
116+ timbgpio_configure(gpio, nr, TGPIOVAL, val);
117+}
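With the chip registered at base 0, these pins are reachable through the standard gpiolib consumer API (a sketch; the pin number and label are hypothetical, and the caller needs <linux/gpio.h>):

    /* Sketch: drive pin 3 of the timberdale FPGA as an output. */
    if (gpio_request(3, "example-led") == 0) {
            gpio_direction_output(3, 1);
            gpio_set_value(3, 0);
            gpio_free(3);
    }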
118+
119+/*
120+ * Function to control flank- or level-triggered GPIO pin
121+ * @nr - pin
122+ * @val - 1: flank, 0: level
123+ *
124+ */
125+static void timbgpio_gpio_flnk_lvl_ctrl(struct gpio_chip *gpio,
126+ unsigned nr, int val)
127+{
128+ timbgpio_configure(gpio, nr, TGPIOFLK, val);
129+}
130+EXPORT_SYMBOL(timbgpio_gpio_flnk_lvl_ctrl);
131+
132+/*
133+ * Enable or disable interrupt
134+ *
135+ */
136+static void timbgpio_gpio_int_ctrl(struct gpio_chip *gpio,
137+ unsigned nr, int val)
138+{
139+ timbgpio_configure(gpio, nr, TGPIOINT, val);
140+}
141+EXPORT_SYMBOL(timbgpio_gpio_int_ctrl);
142+
143+/*
144+ * @val - 1: Asserted high or on positive flank, 0: Asserted low or on negative flank
145+ *
146+ */
147+static void timbgpio_gpio_lvl_ctrl(struct gpio_chip *gpio,
148+ unsigned nr, int val)
149+{
150+ timbgpio_configure(gpio, nr, TGPIOLVL, val);
151+}
152+EXPORT_SYMBOL(timbgpio_gpio_lvl_ctrl);
153+
154+static void timbgpio_gpio_int_clr(struct gpio_chip *gpio,
155+ unsigned nr, int val)
156+{
157+ timbgpio_configure(gpio, nr, TGPIOINT_CLR, val);
158+}
159+EXPORT_SYMBOL(timbgpio_gpio_int_clr);
160+
161+
162+static irqreturn_t timbgpio_handleinterrupt(int irq, void *devid)
163+{
164+ struct timbgpio *tgpio = (struct timbgpio *)devid;
165+
166+ iowrite32(0xffffffff, tgpio->membase + TGPIOINT_CLR);
167+
168+ return IRQ_HANDLED;
169+}
170+
171+static int timbgpio_probe(struct platform_device *dev)
172+{
173+ int err, irq;
174+ struct gpio_chip *gc;
175+ struct timbgpio *tgpio;
176+ struct resource *iomem, *rscr;
177+
178+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
179+ if (!iomem) {
180+ err = -EINVAL;
181+ goto err_mem;
182+ }
183+
184+ tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
185+ if (!tgpio) {
186+ err = -ENOMEM;
187+ goto err_mem;
188+ }
189+
190+ mutex_init(&tgpio->lock);
191+
192+ rscr = &tgpio->rscr;
193+ rscr->name = "timb-gpio";
194+ rscr->start = iomem->start;
195+ rscr->end = iomem->end;
196+ rscr->flags = IORESOURCE_MEM;
197+
198+ err = request_resource(iomem, rscr);
199+ if (err)
200+ goto err_request;
201+
202+ tgpio->membase = ioremap(rscr->start, resource_size(rscr));
203+ if (!tgpio->membase) {
204+ err = -ENOMEM;
205+ goto err_ioremap;
206+ }
207+
208+ gc = &tgpio->gpio;
209+
210+ gc->label = "timbgpio";
211+ gc->owner = THIS_MODULE;
212+ gc->direction_input = timbgpio_gpio_direction_input;
213+ gc->get = timbgpio_gpio_get;
214+ gc->direction_output = timbgpio_gpio_direction_output;
215+ gc->set = timbgpio_gpio_set;
216+ gc->dbg_show = NULL;
217+ gc->base = 0;
218+ gc->ngpio = TIMB_NR_GPIOS;
219+ gc->can_sleep = 0;
220+
221+ err = gpiochip_add(gc);
222+ if (err)
223+ goto err_chipadd;
224+
225+ platform_set_drvdata(dev, tgpio);
226+
227+ /* register interrupt */
228+ irq = platform_get_irq(dev, 0);
229+	if (irq < 0) {
230+		err = irq;
+		goto err_get_irq;
+	}
231+
232+ /* clear pending interrupts */
233+ iowrite32(0xffffffff, tgpio->membase + TGPIOINT_CLR);
234+ iowrite32(0x0, tgpio->membase + TGPIOINT);
235+
236+ /* request IRQ */
237+ err = request_irq(irq, timbgpio_handleinterrupt, IRQF_SHARED,
238+ "timb-gpio", tgpio);
239+ if (err) {
240+ printk(KERN_ERR "timbgpio: Failed to request IRQ\n");
241+ goto err_get_irq;
242+ }
243+
244+ return err;
245+
246+err_get_irq:
247+	if (gpiochip_remove(&tgpio->gpio))
248+		printk(KERN_ERR "timbgpio: failed to remove gpio_chip\n");
250+err_chipadd:
251+ iounmap(tgpio->membase);
252+err_ioremap:
253+ release_resource(&tgpio->rscr);
254+err_request:
255+ kfree(tgpio);
256+err_mem:
257+ printk(KERN_ERR "timberdale: Failed to register GPIOs: %d\n", err);
258+
259+ return err;
260+}
261+
262+static int timbgpio_remove(struct platform_device *dev)
263+{
264+ int err;
265+ struct timbgpio *tgpio = platform_get_drvdata(dev);
266+
267+ /* disable interrupts */
268+ iowrite32(0x0, tgpio->membase + TGPIOINT);
269+
270+ free_irq(platform_get_irq(dev, 0), tgpio);
271+ err = gpiochip_remove(&tgpio->gpio);
272+ if (err)
273+ printk(KERN_ERR "timbgpio: failed to remove gpio_chip\n");
274+
275+ iounmap(tgpio->membase);
276+ release_resource(&tgpio->rscr);
277+ kfree(tgpio);
278+
279+ return 0;
280+}
281+
282+static struct platform_driver timbgpio_platform_driver = {
283+ .driver = {
284+ .name = "timb-gpio",
285+ .owner = THIS_MODULE,
286+ },
287+ .probe = timbgpio_probe,
288+ .remove = timbgpio_remove,
289+};
290+
291+/*--------------------------------------------------------------------------*/
292+
293+static int __init timbgpio_init(void)
294+{
295+ return platform_driver_register(&timbgpio_platform_driver);
296+}
297+
298+static void __exit timbgpio_exit(void)
299+{
300+ platform_driver_unregister(&timbgpio_platform_driver);
301+}
302+
303+module_init(timbgpio_init);
304+module_exit(timbgpio_exit);
305+
306+MODULE_DESCRIPTION("Timberdale GPIO driver");
307+MODULE_LICENSE("GPL v2");
308+MODULE_AUTHOR("Mocean Laboratories");
309+MODULE_ALIAS("platform:timb-gpio");
310+
311diff -uNr linux-2.6.29-clean/drivers/gpio/timbgpio.h linux-2.6.29/drivers/gpio/timbgpio.h
312--- linux-2.6.29-clean/drivers/gpio/timbgpio.h 1969-12-31 16:00:00.000000000 -0800
313+++ linux-2.6.29/drivers/gpio/timbgpio.h 2009-04-06 13:51:47.000000000 -0700
314@@ -0,0 +1,48 @@
315+/*
316+ * timbgpio.h timberdale FPGA GPIO driver defines
317+ * Copyright (c) 2009 Intel Corporation
318+ *
319+ * This program is free software; you can redistribute it and/or modify
320+ * it under the terms of the GNU General Public License version 2 as
321+ * published by the Free Software Foundation.
322+ *
323+ * This program is distributed in the hope that it will be useful,
324+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
325+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
326+ * GNU General Public License for more details.
327+ *
328+ * You should have received a copy of the GNU General Public License
329+ * along with this program; if not, write to the Free Software
330+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
331+ */
332+
333+/* Supports:
334+ * Timberdale FPGA GPIO
335+ */
336+
337+#ifndef _TIMBGPIO_H_
338+#define _TIMBGPIO_H_
339+
340+#include <linux/mutex.h>
341+#include <linux/gpio.h>
342+
343+#define TIMB_NR_GPIOS 16
344+
345+#define TGPIOVAL 0
346+#define TGPIODIR 0x04
347+#define TGPIOINT 0x08
348+#define TGPIOINT_STATUS 0x0c
349+#define TGPIOINT_PENDING 0x10
350+#define TGPIOINT_CLR 0x14
351+#define TGPIOFLK 0x18
352+#define TGPIOLVL 0x1c
353+
354+struct timbgpio {
355+ void __iomem *membase;
356+ struct resource rscr;
357+ struct mutex lock; /* mutual exclusion */
358+ struct pci_dev *pdev;
359+ struct gpio_chip gpio;
360+};
361+
362+#endif
363diff -uNr linux-2.6.29-clean/drivers/i2c/busses/i2c-ocores.c linux-2.6.29/drivers/i2c/busses/i2c-ocores.c
364--- linux-2.6.29-clean/drivers/i2c/busses/i2c-ocores.c 2009-04-01 09:20:24.000000000 -0700
365+++ linux-2.6.29/drivers/i2c/busses/i2c-ocores.c 2009-04-06 13:51:47.000000000 -0700
366@@ -216,6 +216,7 @@
367 struct ocores_i2c_platform_data *pdata;
368 struct resource *res, *res2;
369 int ret;
370+ u8 i;
371
372 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
373 if (!res)
374@@ -271,6 +272,10 @@
375 goto add_adapter_failed;
376 }
377
378+	/* register the known devices on the bus */
379+ for (i = 0; i < pdata->num_devices; i++)
380+ i2c_new_device(&i2c->adap, pdata->devices + i);
381+
382 return 0;
383
384 add_adapter_failed:
385diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/Kconfig linux-2.6.29/drivers/input/touchscreen/Kconfig
386--- linux-2.6.29-clean/drivers/input/touchscreen/Kconfig 2009-04-01 09:20:23.000000000 -0700
387+++ linux-2.6.29/drivers/input/touchscreen/Kconfig 2009-04-06 13:51:47.000000000 -0700
388@@ -397,6 +397,17 @@
389 To compile this driver as a module, choose M here: the
390 module will be called touchit213.
391
392+config TOUCHSCREEN_TSC2003
393+ tristate "TSC2003 based touchscreens"
394+ depends on I2C
395+ help
396+ Say Y here if you have a TSC2003 based touchscreen.
397+
398+ If unsure, say N.
399+
400+ To compile this driver as a module, choose M here: the
401+ module will be called tsc2003.
402+
403 config TOUCHSCREEN_TSC2007
404 tristate "TSC2007 based touchscreens"
405 depends on I2C
406diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/Makefile linux-2.6.29/drivers/input/touchscreen/Makefile
407--- linux-2.6.29-clean/drivers/input/touchscreen/Makefile 2009-04-01 09:20:23.000000000 -0700
408+++ linux-2.6.29/drivers/input/touchscreen/Makefile 2009-04-06 13:51:47.000000000 -0700
409@@ -25,6 +25,7 @@
410 obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
411 obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
412 obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
413+obj-$(CONFIG_TOUCHSCREEN_TSC2003) += tsc2003.o
414 obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
415 obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
416 obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
417diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/tsc2003.c linux-2.6.29/drivers/input/touchscreen/tsc2003.c
418--- linux-2.6.29-clean/drivers/input/touchscreen/tsc2003.c 1969-12-31 16:00:00.000000000 -0800
419+++ linux-2.6.29/drivers/input/touchscreen/tsc2003.c 2009-04-06 13:51:47.000000000 -0700
420@@ -0,0 +1,387 @@
421+/*
422+ * tsc2003.c Driver for TI TSC2003 touch screen controller
423+ * Copyright (c) 2009 Intel Corporation
424+ *
425+ * This program is free software; you can redistribute it and/or modify
426+ * it under the terms of the GNU General Public License version 2 as
427+ * published by the Free Software Foundation.
428+ *
429+ * This program is distributed in the hope that it will be useful,
430+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
431+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
432+ * GNU General Public License for more details.
433+ *
434+ * You should have received a copy of the GNU General Public License
435+ * along with this program; if not, write to the Free Software
436+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
437+ */
438+
439+/* Supports:
440+ * TI TSC2003
441+ *
442+ * Inspired by tsc2007, Copyright (c) 2008 MtekVision Co., Ltd.
443+ */
444+#include <linux/module.h>
445+#include <linux/input.h>
446+#include <linux/interrupt.h>
447+#include <linux/i2c.h>
448+#include <linux/i2c/tsc2007.h>
449+#include <linux/kthread.h>
450+#include <linux/semaphore.h>
451+
452+#define TSC2003_DRIVER_NAME "tsc2003"
453+
454+#define TS_POLL_PERIOD 20 /* ms delay between samples */
455+
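+/* Command byte layout, inferred from the shifts below (an assumption,
+ * not verified against the datasheet): bits 7-4 select the converter
+ * function, bits 3-2 the power-down/IRQ mode, bit 1 the resolution
+ * (12-bit vs 8-bit).
+ */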
456+#define TSC2003_MEASURE_TEMP0 (0x0 << 4)
457+#define TSC2003_MEASURE_AUX (0x2 << 4)
458+#define TSC2003_MEASURE_TEMP1 (0x4 << 4)
459+#define TSC2003_ACTIVATE_XN (0x8 << 4)
460+#define TSC2003_ACTIVATE_YN (0x9 << 4)
461+#define TSC2003_ACTIVATE_YP_XN (0xa << 4)
462+#define TSC2003_SETUP (0xb << 4)
463+#define TSC2003_MEASURE_X (0xc << 4)
464+#define TSC2003_MEASURE_Y (0xd << 4)
465+#define TSC2003_MEASURE_Z1 (0xe << 4)
466+#define TSC2003_MEASURE_Z2 (0xf << 4)
467+
468+#define TSC2003_POWER_OFF_IRQ_EN (0x0 << 2)
469+#define TSC2003_ADC_ON_IRQ_DIS0 (0x1 << 2)
470+#define TSC2003_ADC_OFF_IRQ_EN (0x2 << 2)
471+#define TSC2003_ADC_ON_IRQ_DIS1 (0x3 << 2)
472+
473+#define TSC2003_12BIT (0x0 << 1)
474+#define TSC2003_8BIT (0x1 << 1)
475+
476+#define MAX_12BIT ((1 << 12) - 1)
477+
478+#define ADC_ON_12BIT (TSC2003_12BIT | TSC2003_ADC_ON_IRQ_DIS0)
479+
480+#define READ_Y (ADC_ON_12BIT | TSC2003_MEASURE_Y)
481+#define READ_Z1 (ADC_ON_12BIT | TSC2003_MEASURE_Z1)
482+#define READ_Z2 (ADC_ON_12BIT | TSC2003_MEASURE_Z2)
483+#define READ_X (ADC_ON_12BIT | TSC2003_MEASURE_X)
484+#define PWRDOWN (TSC2003_12BIT | TSC2003_POWER_OFF_IRQ_EN)
485+
486+struct ts_event {
487+ int x;
488+ int y;
489+ int z1, z2;
490+};
491+
492+struct tsc2003 {
493+ struct input_dev *input;
494+ char phys[32];
495+ struct task_struct *task;
496+ struct ts_event tc;
497+ struct completion penirq_completion;
498+
499+ struct i2c_client *client;
500+
501+ u16 model;
502+ u16 x_plate_ohms;
503+
504+ unsigned pendown;
505+};
506+
507+static inline int tsc2003_xfer(struct tsc2003 *tsc, u8 cmd)
508+{
509+ s32 data;
510+ u16 val;
511+
512+ data = i2c_smbus_read_word_data(tsc->client, cmd);
513+ if (data < 0) {
514+ dev_err(&tsc->client->dev, "i2c io error: %d\n", data);
515+ return data;
516+ }
517+
518+ /* The protocol and raw data format from i2c interface:
519+ * S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
520+ * Where DataLow has [D11-D4], DataHigh has [D3-D0 << 4 | Dummy 4bit].
521+ */
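+	/* Worked example (hypothetical sample): for an ADC result of 0xabc,
+	 * the chip sends DataLow = 0xab, then DataHigh = 0xc0, so
+	 * i2c_smbus_read_word_data() returns 0xc0ab;
+	 * swab16(0xc0ab) = 0xabc0, and 0xabc0 >> 4 = 0x0abc.
+	 */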
522+ val = swab16(data) >> 4;
523+
524+ dev_dbg(&tsc->client->dev, "data: 0x%x, val: 0x%x\n", data, val);
525+
526+ return val;
527+}
528+
529+static void tsc2003_send_event(void *tsc)
530+{
531+ struct tsc2003 *ts = tsc;
532+ struct input_dev *input = ts->input;
533+ u32 rt = 0;
534+ u16 x, y, z1, z2;
535+
536+ x = ts->tc.x;
537+ y = ts->tc.y;
538+ z1 = ts->tc.z1;
539+ z2 = ts->tc.z2;
540+
541+ /* range filtering */
542+ if (x == MAX_12BIT)
543+ x = 0;
544+
545+ if (likely(x && z1)) {
546+ /* compute touch pressure resistance using equation #1 */
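+		/* i.e. Rt = x_plate_ohms * (x / 4096) * (z2 - z1) / z1;
+		 * the final (rt + 2047) >> 12 is the divide by 4096,
+		 * rounded to the nearest integer.
+		 */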
547+ rt = z2;
548+ rt -= z1;
549+ rt *= x;
550+ rt *= ts->x_plate_ohms;
551+ rt /= z1;
552+ rt = (rt + 2047) >> 12;
553+ }
554+
555+	/* The sample was found inconsistent by debouncing, or the pressure
556+	 * is beyond the maximum. Don't report it to user space; repeat the
557+	 * measurement at least once more.
558+ */
559+ if (rt > MAX_12BIT)
560+ return;
561+
562+ /* NOTE: We can't rely on the pressure to determine the pen down
563+	 * state, even though this controller has a pressure sensor. The pressure
564+ * value can fluctuate for quite a while after lifting the pen and
565+ * in some cases may not even settle at the expected value.
566+ *
567+ * The only safe way to check for the pen up condition is in the
568+ * timer by reading the pen signal state (it's a GPIO _and_ IRQ).
569+ */
570+ if (rt) {
571+ if (!ts->pendown) {
572+ dev_dbg(&ts->client->dev, "DOWN\n");
573+
574+ input_report_key(input, BTN_TOUCH, 1);
575+ ts->pendown = 1;
576+ }
577+
578+ input_report_abs(input, ABS_X, x);
579+ input_report_abs(input, ABS_Y, y);
580+ input_report_abs(input, ABS_PRESSURE, rt);
581+
582+ input_sync(input);
583+
584+ dev_dbg(&ts->client->dev, "point(%4d,%4d), pressure (%4u)\n",
585+ x, y, rt);
586+ } else if (ts->pendown) {
587+ /* pen up */
588+ dev_dbg(&ts->client->dev, "UP\n");
589+ input_report_key(input, BTN_TOUCH, 0);
590+ input_report_abs(input, ABS_PRESSURE, 0);
591+ input_sync(input);
592+
593+ ts->pendown = 0;
594+ }
595+}
596+
597+static int tsc2003_power_off_irq_en(struct tsc2003 *tsc)
598+{
599+ /* power down */
600+ return tsc2003_xfer(tsc, PWRDOWN);
601+}
602+
603+static int tsc2003_read_values(struct tsc2003 *tsc)
604+{
605+ /* y- still on; turn on only y+ (and ADC) */
606+ tsc->tc.y = tsc2003_xfer(tsc, READ_Y);
607+ if (tsc->tc.y < 0)
608+ return tsc->tc.y;
609+
610+ /* turn y- off, x+ on, then leave in lowpower */
611+ tsc->tc.x = tsc2003_xfer(tsc, READ_X);
612+ if (tsc->tc.x < 0)
613+ return tsc->tc.x;
614+
615+ /* turn y+ off, x- on; we'll use formula #1 */
616+ tsc->tc.z1 = tsc2003_xfer(tsc, READ_Z1);
617+ if (tsc->tc.z1 < 0)
618+ return tsc->tc.z1;
619+
620+ tsc->tc.z2 = tsc2003_xfer(tsc, READ_Z2);
621+ if (tsc->tc.z2 < 0)
622+ return tsc->tc.z2;
623+
624+ return 0;
625+}
626+
627+
628+static irqreturn_t tsc2003_irq(int irq, void *handle)
629+{
630+ struct tsc2003 *ts = handle;
631+
632+ /* do not call the synced version -> deadlock */
633+ disable_irq_nosync(irq);
634+ /* signal the thread to continue */
635+ complete(&ts->penirq_completion);
636+
637+ return IRQ_HANDLED;
638+}
639+
640+static int tsc2003_thread(void *d)
641+{
642+ struct tsc2003 *ts = (struct tsc2003 *)d;
643+ int ret;
644+
645+ allow_signal(SIGKILL);
646+
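+	/* Cycle: power the ADC down with PENIRQ enabled, wait for the IRQ
+	 * handler to signal the completion, then poll and report samples
+	 * every TS_POLL_PERIOD ms until the pen goes up, and finally
+	 * re-enable the interrupt. SIGKILL (sent from remove()) ends the
+	 * thread.
+	 */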
647+ while (!signal_pending(current)) {
648+ /* power down and wait for interrupt */
649+ do {
650+ /* loop because the I2C bus might be busy */
651+ ret = msleep_interruptible(TS_POLL_PERIOD);
652+ if (!ret)
653+ ret = tsc2003_power_off_irq_en(ts);
654+ } while (ret == -EAGAIN && !signal_pending(current));
655+
656+ if (signal_pending(current))
657+ break;
658+
659+ ret = wait_for_completion_interruptible(&ts->penirq_completion);
660+ if (!ret) {
661+ int first = 1;
662+ /* got IRQ, start poll, until pen is up */
663+ while (!ret && !signal_pending(current)
664+ && (first || ts->pendown)) {
665+ ret = tsc2003_read_values(ts);
666+ if (!ret)
667+ tsc2003_send_event(ts);
668+ ret = msleep_interruptible(TS_POLL_PERIOD);
669+ first = 0;
670+ }
671+
672+			/* re-enable the interrupt */
673+ if (!signal_pending(current))
674+ enable_irq(ts->client->irq);
675+ }
676+ }
677+
678+ return 0;
679+}
680+
681+static int tsc2003_probe(struct i2c_client *client,
682+ const struct i2c_device_id *id)
683+{
684+ struct tsc2003 *ts;
685+ struct tsc2007_platform_data *pdata = client->dev.platform_data;
686+ struct input_dev *input_dev;
687+ int err;
688+
689+ if (!pdata) {
690+ dev_err(&client->dev, "platform data is required!\n");
691+ return -EINVAL;
692+ }
693+
694+ if (!i2c_check_functionality(client->adapter,
695+ I2C_FUNC_SMBUS_READ_WORD_DATA))
696+ return -EIO;
697+
698+ ts = kzalloc(sizeof(struct tsc2003), GFP_KERNEL);
699+ input_dev = input_allocate_device();
700+ if (!ts || !input_dev) {
701+ err = -ENOMEM;
702+ goto err_free_mem;
703+ }
704+
705+ ts->client = client;
706+ i2c_set_clientdata(client, ts);
707+
708+ ts->input = input_dev;
709+
710+ ts->model = pdata->model;
711+ ts->x_plate_ohms = pdata->x_plate_ohms;
712+
713+ snprintf(ts->phys, sizeof(ts->phys),
714+ "%s/input0", dev_name(&client->dev));
715+
716+	input_dev->name = TSC2003_DRIVER_NAME " Touchscreen";
717+ input_dev->phys = ts->phys;
718+ input_dev->id.bustype = BUS_I2C;
719+
720+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
721+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
722+
723+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
724+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
725+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
726+
727+ init_completion(&ts->penirq_completion);
728+
729+ ts->task = kthread_run(tsc2003_thread, ts, TSC2003_DRIVER_NAME);
730+ if (IS_ERR(ts->task)) {
731+ err = PTR_ERR(ts->task);
732+ goto err_free_mem;
733+ }
734+
735+ err = request_irq(client->irq, tsc2003_irq, 0,
736+ client->dev.driver->name, ts);
737+ if (err < 0) {
738+ dev_err(&client->dev, "irq %d busy?\n", client->irq);
739+ goto err_free_thread;
740+ }
741+
742+ err = input_register_device(input_dev);
743+ if (err)
744+ goto err_free_irq;
745+
746+ dev_info(&client->dev, "registered with irq (%d)\n", client->irq);
747+
748+ return 0;
749+
750+ err_free_irq:
751+ free_irq(client->irq, ts);
752+ err_free_thread:
753+ kthread_stop(ts->task);
754+ err_free_mem:
755+ input_free_device(input_dev);
756+ kfree(ts);
757+ return err;
758+}
759+
760+static int tsc2003_remove(struct i2c_client *client)
761+{
762+ struct tsc2003 *ts = i2c_get_clientdata(client);
763+
764+ free_irq(client->irq, ts);
765+ send_sig(SIGKILL, ts->task, 1);
766+ kthread_stop(ts->task);
767+ input_unregister_device(ts->input);
768+ kfree(ts);
769+
770+ return 0;
771+}
772+
773+static struct i2c_device_id tsc2003_idtable[] = {
774+ { TSC2003_DRIVER_NAME, 0 },
775+ { }
776+};
777+
778+MODULE_DEVICE_TABLE(i2c, tsc2003_idtable);
779+
780+static struct i2c_driver tsc2003_driver = {
781+ .driver = {
782+ .owner = THIS_MODULE,
783+ .name = TSC2003_DRIVER_NAME,
784+ .bus = &i2c_bus_type,
785+ },
786+ .id_table = tsc2003_idtable,
787+ .probe = tsc2003_probe,
788+ .remove = tsc2003_remove,
789+};
790+
791+static int __init tsc2003_init(void)
792+{
793+ return i2c_add_driver(&tsc2003_driver);
794+}
795+
796+static void __exit tsc2003_exit(void)
797+{
798+ i2c_del_driver(&tsc2003_driver);
799+}
800+
801+module_init(tsc2003_init);
802+module_exit(tsc2003_exit);
803+
804+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
805+MODULE_DESCRIPTION("TSC2003 TouchScreen Driver");
806+MODULE_LICENSE("GPL v2");
807+
808diff -uNr linux-2.6.29-clean/drivers/media/video/adv7180.c linux-2.6.29/drivers/media/video/adv7180.c
809--- linux-2.6.29-clean/drivers/media/video/adv7180.c 1969-12-31 16:00:00.000000000 -0800
810+++ linux-2.6.29/drivers/media/video/adv7180.c 2009-04-06 13:51:47.000000000 -0700
811@@ -0,0 +1,361 @@
812+/*
813+ * adv7180.c Analog Devices ADV7180 video decoder driver
814+ * Copyright (c) 2009 Intel Corporation
815+ *
816+ * This program is free software; you can redistribute it and/or modify
817+ * it under the terms of the GNU General Public License version 2 as
818+ * published by the Free Software Foundation.
819+ *
820+ * This program is distributed in the hope that it will be useful,
821+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
822+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
823+ * GNU General Public License for more details.
824+ *
825+ * You should have received a copy of the GNU General Public License
826+ * along with this program; if not, write to the Free Software
827+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
828+ */
829+
830+#include <linux/module.h>
831+#include <linux/init.h>
832+#include <linux/interrupt.h>
833+#include <linux/delay.h>
834+#include <linux/errno.h>
835+#include <linux/fs.h>
836+#include <linux/kernel.h>
837+#include <linux/major.h>
838+#include <linux/slab.h>
839+#include <linux/mm.h>
840+#include <linux/signal.h>
841+#include <linux/types.h>
842+#include <linux/io.h>
843+#include <asm/pgtable.h>
844+#include <asm/page.h>
845+#include <linux/uaccess.h>
846+
847+#include <linux/i2c-ocores.h>
848+#include <linux/platform_device.h>
849+#include <linux/i2c.h>
850+#include <linux/i2c-id.h>
851+#include <linux/videodev.h>
852+#include <linux/video_decoder.h>
853+#include <media/v4l2-ioctl.h>
854+#include <media/adv7180.h>
855+
856+
857+MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver");
858+MODULE_AUTHOR("Mocean Laboratories");
859+MODULE_LICENSE("GPL v2");
860+
861+static inline int adv7180_write(struct i2c_client *client,
862+ u8 reg, u8 value)
863+{
864+ struct adv7180 *decoder = i2c_get_clientdata(client);
865+
866+ decoder->reg[reg] = value;
867+ return i2c_smbus_write_byte_data(client, reg, value);
868+}
869+
870+static inline int adv7180_read(struct i2c_client *client, u8 reg)
871+{
872+ return i2c_smbus_read_byte_data(client, reg);
873+}
874+
875+static int adv7180_write_block(struct i2c_client *client,
876+ const u8 *data, unsigned int len)
877+{
878+ int ret = -1;
879+ u8 reg;
880+
881+ /* the adv7180 has an autoincrement function, use it if
882+ * the adapter understands raw I2C */
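+	/* Consecutive (reg, value) pairs in @data are coalesced into a
+	 * single i2c_master_send() of at most 32 bytes: one register
+	 * address followed by the values the chip auto-increments over.
+	 */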
883+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
884+ /* do raw I2C, not smbus compatible */
885+ struct adv7180 *decoder = i2c_get_clientdata(client);
886+ u8 block_data[32];
887+ int block_len;
888+
889+ while (len >= 2) {
890+ block_len = 0;
891+ reg = data[0];
892+ block_data[block_len++] = reg;
893+ do {
894+ block_data[block_len++] =
895+ decoder->reg[reg++] = data[1];
896+ len -= 2;
897+ data += 2;
898+ } while (len >= 2 && data[0] == reg &&
899+ block_len < 32);
900+
901+ ret = i2c_master_send(client, block_data, block_len);
902+ if (ret < 0)
903+ break;
904+ }
905+ } else {
906+ /* do some slow I2C emulation kind of thing */
907+ while (len >= 2) {
908+ reg = *data++;
909+ ret = adv7180_write(client, reg, *data++);
910+ if (ret < 0)
911+ break;
912+
913+ len -= 2;
914+ }
915+ }
916+
917+ return ret;
918+}
919+#ifdef CONFIG_MFD_TIMBERDALE
920+static irqreturn_t adv7180_irq(int irq, void *dev_id)
921+{
922+ struct adv7180 *decoder = (struct adv7180 *) dev_id;
923+
924+ /* Activate access to sub-regs */
925+ adv7180_write(decoder->client, ADV7180_ADI_CTRL, ADI_ENABLE);
926+
927+ /* TODO: implement a real interrupt handler
928+ * for now just
929+ * clear all four regs
930+ */
931+ adv7180_write_block(decoder->client, reset_icr, sizeof(reset_icr));
932+
933+ return IRQ_HANDLED;
934+}
935+#endif
936+static int adv7180_command(struct i2c_client *client,
937+ unsigned int cmd, void *arg)
938+{
939+ struct adv7180 *decoder = i2c_get_clientdata(client);
940+ int *iarg = (int *)arg;
941+ int status;
942+
943+ switch (cmd) {
944+
945+ case DECODER_INIT:
946+ adv7180_write(client, 0x0f, 0x80); /* Reset */
947+ break;
948+
949+ case DECODER_GET_CAPABILITIES:
950+ {
951+ struct video_decoder_capability *cap = arg;
952+ cap->flags = VIDEO_DECODER_PAL |
953+ VIDEO_DECODER_NTSC |
954+ VIDEO_DECODER_SECAM |
955+ VIDEO_DECODER_AUTO;
956+ cap->inputs = 3;
957+ cap->outputs = 1;
958+ }
959+ break;
960+
961+ case DECODER_GET_STATUS:
962+ {
963+ *iarg = 0;
964+ status = adv7180_read(client, ADV7180_SR);
965+		if (status & ADV7180_STATUS_PAL)
966+			*iarg |= DECODER_STATUS_PAL;
967+
968+		if (status & ADV7180_STATUS_NTSC)
969+			*iarg |= DECODER_STATUS_NTSC;
970+
971+		if (status & ADV7180_STATUS_SECAM)
972+			*iarg |= DECODER_STATUS_SECAM;
973+ }
974+ break;
975+
976+ case DECODER_SET_NORM:
977+ {
978+ int v = *(int *) arg;
979+ if (decoder->norm != v) {
980+ decoder->norm = v;
981+ switch (v) {
982+ case VIDEO_MODE_NTSC:
983+ adv7180_write(client, ADV7180_IN_CTRL, 0x40);
984+ break;
985+ case VIDEO_MODE_PAL:
986+ adv7180_write(client, ADV7180_IN_CTRL, 0x70);
987+ break;
988+ case VIDEO_MODE_SECAM:
989+ adv7180_write(client, ADV7180_IN_CTRL, 0x90);
990+ break;
991+ case VIDEO_MODE_AUTO:
992+ adv7180_write(client, ADV7180_IN_CTRL, 0x00);
993+ break;
994+ default:
995+ return -EPERM;
996+ }
997+ }
998+ }
999+ break;
1000+
1001+ case DECODER_SET_INPUT:
1002+ {
1003+ int v = *(int *) arg;
1004+ if (decoder->input != v) {
1005+ decoder->input = v;
1006+
1007+ switch (v) {
1008+ case CVBS:
1009+ adv7180_write_block(client, init_cvbs_64,
1010+ sizeof(init_cvbs_64));
1011+ break;
1012+ case SVIDEO:
1013+ adv7180_write_block(client, init_svideo_64,
1014+ sizeof(init_svideo_64));
1015+ break;
1016+ case YPbPr:
1017+ adv7180_write_block(client, init_ypbpr_64,
1018+ sizeof(init_ypbpr_64));
1019+ break;
1020+ default:
1021+ return -EINVAL;
1022+ }
1023+ }
1024+ }
1025+ break;
1026+
1027+ case DECODER_SET_OUTPUT:
1028+ {
1029+ }
1030+ break;
1031+
1032+ case DECODER_ENABLE_OUTPUT:
1033+ {
1034+ }
1035+ break;
1036+
1037+ case DECODER_SET_PICTURE:
1038+ {
1039+ }
1040+ break;
1041+
1042+ case DECODER_DUMP:
1043+ {
1044+ adv7180_write(client, 1, 0x88);
1045+ }
1046+ break;
1047+
1048+ default:
1049+ return -EINVAL;
1050+ }
1051+ return 0;
1052+}
1053+
1054+/* ----------------------------------------------------------------------- */
1055+
1056+/*
1057+ * Generic i2c probe
1058+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
1059+ */
1060+static unsigned short normal_i2c[] = {
1061+ 0x40 >> 1, 0x41 >> 1,
1062+ I2C_ADV7180 >> 1, 0x43 >> 1,
1063+ I2C_CLIENT_END
1064+};
1065+
1066+I2C_CLIENT_INSMOD;
1067+
1068+static int adv7180_detect(struct i2c_client *client, int kind,
1069+ struct i2c_board_info *info)
1070+{
1071+ struct i2c_adapter *adapter = client->adapter;
1072+
1073+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE
1074+ | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
1075+ return -ENODEV;
1076+
1077+ /* Is chip alive ? */
1078+ if (adv7180_read(client, 0x11) != 0x1b)
1079+ return -ENODEV;
1080+
1081+ strlcpy(info->type, DRIVER_NAME, I2C_NAME_SIZE);
1082+
1083+ return 0;
1084+}
1085+
1086+static int adv7180_probe(struct i2c_client *client,
1087+ const struct i2c_device_id *id)
1088+{
1089+ int err = 0;
1090+ struct adv7180 *decoder;
1091+
1092+ printk(KERN_INFO DRIVER_NAME" chip found @ 0x%x (%s)\n",
1093+ client->addr << 1, client->adapter->name);
1094+
1095+ decoder = kzalloc(sizeof(struct adv7180), GFP_KERNEL);
1096+ if (decoder == NULL)
1097+ return -ENOMEM;
1098+
1099+ decoder->norm = VIDEO_MODE_PAL | VIDEO_MODE_NTSC |
1100+ VIDEO_MODE_SECAM |
1101+ VIDEO_MODE_AUTO;
1102+ decoder->input = CVBS;
1103+ decoder->enable = 1;
1104+ decoder->client = client;
1105+ i2c_set_clientdata(client, decoder);
1106+#ifdef CONFIG_MFD_TIMBERDALE
1107+ err = request_irq(client->irq, adv7180_irq, 0,
1108+ client->dev.driver->name, decoder);
1109+ if (err < 0) {
1110+ dev_err(&client->dev, "irq %d busy?\n", client->irq);
1111+ goto err_free_dec;
1112+ }
1113+ dev_info(&client->dev, "registered with irq (%d)\n", client->irq);
1114+#endif
1115+ adv7180_command(client, DECODER_INIT, NULL); /* Reset */
1116+
1117+ return 0;
1118+#ifdef CONFIG_MFD_TIMBERDALE
1119+err_free_dec:
1120+ kfree(decoder);
1121+
1122+ return err;
1123+#endif
1124+}
1125+
1126+static int adv7180_remove(struct i2c_client *client)
1127+{
1128+ struct adv7180 *decoder = i2c_get_clientdata(client);
1129+#ifdef CONFIG_MFD_TIMBERDALE
1130+ free_irq(client->irq, decoder);
1131+#endif
1132+ kfree(decoder);
1133+ return 0;
1134+}
1135+
1136+/* ----------------------------------------------------------------------- */
1137+static const struct i2c_device_id adv7180_id[] = {
1138+ { DRIVER_NAME, 0 },
1139+ { }
1140+};
1141+MODULE_DEVICE_TABLE(i2c, adv7180_id);
1142+
1143+static struct i2c_driver i2c_driver_adv7180 = {
1144+ .driver = {
1145+ .owner = THIS_MODULE,
1146+ .name = DRIVER_NAME,
1147+ .bus = &i2c_bus_type,
1148+ },
1149+
1150+ .id_table = adv7180_id,
1151+ .probe = adv7180_probe,
1152+ .remove = adv7180_remove,
1153+
1154+ .class = 0xffffffff,
1155+ .detect = adv7180_detect,
1156+ .address_data = &addr_data,
1157+
1158+ .command = adv7180_command,
1159+};
1160+
1161+static int __init adv7180_init(void)
1162+{
1163+ return i2c_add_driver(&i2c_driver_adv7180);
1164+}
1165+
1166+static void __exit adv7180_exit(void)
1167+{
1168+ i2c_del_driver(&i2c_driver_adv7180);
1169+}
1170+
1171+module_init(adv7180_init);
1172+module_exit(adv7180_exit);
1173diff -uNr linux-2.6.29-clean/drivers/media/video/Kconfig linux-2.6.29/drivers/media/video/Kconfig
1174--- linux-2.6.29-clean/drivers/media/video/Kconfig 2009-04-01 09:20:24.000000000 -0700
1175+++ linux-2.6.29/drivers/media/video/Kconfig 2009-04-06 13:51:47.000000000 -0700
1176@@ -251,6 +251,15 @@
1177
1178 comment "Video decoders"
1179
1180+config VIDEO_ADV7180
1181+ tristate "Analog Devices ADV7180 decoder"
1182+ depends on VIDEO_V4L1 && I2C
1183+ ---help---
1184+ Support for the Analog Devices ADV7180 video decoder.
1185+
1186+ To compile this driver as a module, choose M here: the
1187+ module will be called adv7180.
1188+
1189 config VIDEO_BT819
1190 tristate "BT819A VideoStream decoder"
1191 depends on VIDEO_V4L1 && I2C
1192@@ -800,6 +809,12 @@
1193 ---help---
1194 This is a v4l2 driver for the TI OMAP2 camera capture interface
1195
1196+config VIDEO_TIMBERDALE
1197+ tristate "Support for timberdale Video In/LogiWIN"
1198+ depends on VIDEO_V4L2 && MFD_TIMBERDALE_DMA
1199+ ---help---
1200+	  Add support for the Video In peripheral of the timberdale FPGA.
1201+
1202 #
1203 # USB Multimedia device configuration
1204 #
1205diff -uNr linux-2.6.29-clean/drivers/media/video/Makefile linux-2.6.29/drivers/media/video/Makefile
1206--- linux-2.6.29-clean/drivers/media/video/Makefile 2009-04-01 09:20:24.000000000 -0700
1207+++ linux-2.6.29/drivers/media/video/Makefile 2009-04-06 13:51:47.000000000 -0700
1208@@ -52,6 +52,7 @@
1209 obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
1210 obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
1211 obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
1212+obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
1213 obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
1214 obj-$(CONFIG_VIDEO_BT819) += bt819.o
1215 obj-$(CONFIG_VIDEO_BT856) += bt856.o
1216@@ -148,6 +149,8 @@
1217
1218 obj-$(CONFIG_VIDEO_AU0828) += au0828/
1219
1220+obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
1221+
1222 obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
1223
1224 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
1225diff -uNr linux-2.6.29-clean/drivers/media/video/timblogiw.c linux-2.6.29/drivers/media/video/timblogiw.c
1226--- linux-2.6.29-clean/drivers/media/video/timblogiw.c 1969-12-31 16:00:00.000000000 -0800
1227+++ linux-2.6.29/drivers/media/video/timblogiw.c 2009-04-06 13:51:47.000000000 -0700
1228@@ -0,0 +1,930 @@
1229+/*
1230+ * timblogiw.c timberdale FPGA LogiWin Video In driver
1231+ * Copyright (c) 2009 Intel Corporation
1232+ *
1233+ * This program is free software; you can redistribute it and/or modify
1234+ * it under the terms of the GNU General Public License version 2 as
1235+ * published by the Free Software Foundation.
1236+ *
1237+ * This program is distributed in the hope that it will be useful,
1238+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
1239+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1240+ * GNU General Public License for more details.
1241+ *
1242+ * You should have received a copy of the GNU General Public License
1243+ * along with this program; if not, write to the Free Software
1244+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
1245+ */
1246+
1247+/* Supports:
1248+ * Timberdale FPGA LogiWin Video In
1249+ */
1250+
1251+#include <linux/list.h>
1252+#include <linux/version.h>
1253+#include <linux/module.h>
1254+#include <linux/pci.h>
1255+#include <linux/dma-mapping.h>
1256+#include <media/v4l2-common.h>
1257+#include <media/v4l2-ioctl.h>
1258+#include <linux/platform_device.h>
1259+#include <linux/interrupt.h>
1260+#include "timblogiw.h"
1261+#include <linux/mfd/timbdma.h>
1262+
1263+
1264+#define TIMBLOGIW_CTRL 0x40
1265+
1266+#define TIMBLOGIW_H_SCALE 0x20
1267+#define TIMBLOGIW_V_SCALE 0x28
1268+
1269+	  This is the core driver for the timberdale FPGA. This is a
1270+	  multifunctional device which may provide numerous interfaces.
1271+
1272+#define TIMBLOGIW_W_CROP 0x00
1273+#define TIMBLOGIW_H_CROP 0x08
1274+
1275+#define TIMBLOGIW_VERSION_CODE 0x02
1276+	  Add support for the DMA block inside the timberdale FPGA, making
1277+	  it possible to do DMA transfers directly to some of the blocks
+	  inside the FPGA.
1278+#define TIMBLOGIW_DROP 0x20
1279+
1280+#define TIMBLOGIW_BUF 0x04
1281+#define TIMBLOGIW_TBI 0x2c
1282+#define TIMBLOGIW_BPL 0x30
1283+	  Add support for the I2S bus handled by the timberdale FPGA.
1284+	  I2S RX and TX instances are then available for other devices to use.
1285+
1286+const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
1287+ {
1288+ .v4l2_id = V4L2_STD_PAL,
1289+ .name = "PAL",
1290+ .swidth = 720,
1291+ .sheight = 576
1292+ },
1293+ {
1294+ .v4l2_id = V4L2_STD_NTSC_M,
1295+ .name = "NTSC",
1296+ .swidth = 720,
1297+ .sheight = 480
1298+ }
1299+};
1300+
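+/* Tasklet: copies the completed DMA buffer into the first queued frame,
+ * timestamps it, moves it to the outqueue and wakes up any reader
+ * sleeping in DQBUF.
+ */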
1301+static void timblogiw_handleframe(unsigned long arg)
1302+{
1303+ struct timblogiw_frame *f;
1304+ struct timblogiw *lw = (struct timblogiw *)arg;
1305+
1306+ spin_lock_bh(&lw->queue_lock);
1307+ if (!list_empty(&lw->inqueue)) {
1308+ /* put the entry in the outqueue */
1309+ f = list_entry(lw->inqueue.next, struct timblogiw_frame, frame);
1310+
1311+ /* copy data from the DMA buffer */
1312+ memcpy(f->bufmem, lw->dma.filled->buf, f->buf.length);
1313+ /* buffer consumed */
1314+ lw->dma.filled = NULL;
1315+
1316+ do_gettimeofday(&f->buf.timestamp);
1317+ f->buf.sequence = ++lw->frame_count;
1318+ f->buf.field = V4L2_FIELD_NONE;
1319+ f->state = F_DONE;
1320+ f->buf.bytesused = lw->frame_size;
1321+ list_move_tail(&f->frame, &lw->outqueue);
1322+ /* wake up any waiter */
1323+ wake_up(&lw->wait_frame);
1324+ }
1325+ spin_unlock_bh(&lw->queue_lock);
1326+}
1327+
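+/* DMA complete callback. The two transfer buffers are used ping-pong
+ * style: the finished buffer is parked in dma.filled for the tasklet,
+ * while the next transfer is started into the other buffer.
+ */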
1328+static int timblogiw_isr(u32 flag, void *pdev)
1329+{
1330+ struct timblogiw *lw = (struct timblogiw *)pdev;
1331+
1332+ if (!lw->dma.filled) {
1333+ /* no stored transfer so far, store this, and flip to next */
1334+ lw->dma.filled = lw->dma.transfer + lw->dma.curr;
1335+ lw->dma.curr = !lw->dma.curr;
1336+ }
1337+
1338+ if (lw->stream == STREAM_ON)
1339+ timb_start_dma(DMA_IRQ_VIDEO_RX,
1340+ lw->dma.transfer[lw->dma.curr].handle, lw->frame_size,
1341+ lw->bytesperline);
1342+
1343+ if (flag & DMA_IRQ_VIDEO_DROP)
1344+ dbg("%s: frame dropped\n", __func__);
1345+ if (flag & DMA_IRQ_VIDEO_RX) {
1346+ dbg("%s: frame RX\n", __func__);
1347+ tasklet_schedule(&lw->tasklet);
1348+ }
1349+ return 0;
1350+}
1351+
1352+static void timblogiw_empty_framequeues(struct timblogiw *lw)
1353+{
1354+ u32 i;
1355+
1356+ dbg("%s\n", __func__);
1357+
1358+ INIT_LIST_HEAD(&lw->inqueue);
1359+ INIT_LIST_HEAD(&lw->outqueue);
1360+
1361+ for (i = 0; i < lw->num_frames; i++) {
1362+ lw->frame[i].state = F_UNUSED;
1363+ lw->frame[i].buf.bytesused = 0;
1364+ }
1365+}
1366+
1367+u32 timblogiw_request_buffers(struct timblogiw *lw, u32 count)
1368+{
1369+	/* needs to be page aligned because the
1370+	 * buffers can be mapped individually */
1371+ const size_t imagesize = PAGE_ALIGN(lw->frame_size);
1372+ void *buff = NULL;
1373+ u32 i;
1374+
1375+ dbg("%s - request of %i buffers of size %zi\n",
1376+ __func__, count, lw->frame_size);
1377+
1378+ lw->dma.transfer[0].buf = pci_alloc_consistent(lw->dev, imagesize,
1379+ &lw->dma.transfer[0].handle);
1380+ lw->dma.transfer[1].buf = pci_alloc_consistent(lw->dev, imagesize,
1381+ &lw->dma.transfer[1].handle);
1382+ if ((lw->dma.transfer[0].buf == NULL) ||
1383+ (lw->dma.transfer[1].buf == NULL)) {
1384+ printk(KERN_ALERT "alloc failed\n");
1385+ if (lw->dma.transfer[0].buf != NULL)
1386+ pci_free_consistent(lw->dev, imagesize,
1387+ lw->dma.transfer[0].buf,
1388+ lw->dma.transfer[0].handle);
1389+ if (lw->dma.transfer[1].buf != NULL)
1390+ pci_free_consistent(lw->dev, imagesize,
1391+ lw->dma.transfer[1].buf,
1392+ lw->dma.transfer[1].handle);
1393+ return 0;
1394+ }
1395+
1396+ if (count > TIMBLOGIW_NUM_FRAMES)
1397+ count = TIMBLOGIW_NUM_FRAMES;
1398+
1399+ lw->num_frames = count;
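+	/* try to allocate all frame buffers in one contiguous vmalloc area;
+	 * on failure, retry with one frame less until it fits */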
1400+ while (lw->num_frames > 0) {
1401+ buff = vmalloc_32(lw->num_frames * imagesize);
1402+ if (buff) {
1403+ memset(buff, 0, lw->num_frames * imagesize);
1404+ break;
1405+ }
1406+ lw->num_frames--;
1407+ }
1408+
1409+ for (i = 0; i < lw->num_frames; i++) {
1410+ lw->frame[i].bufmem = buff + i * imagesize;
1411+ lw->frame[i].buf.index = i;
1412+ lw->frame[i].buf.m.offset = i * imagesize;
1413+ lw->frame[i].buf.length = lw->frame_size;
1414+ lw->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1415+ lw->frame[i].buf.sequence = 0;
1416+ lw->frame[i].buf.field = V4L2_FIELD_NONE;
1417+ lw->frame[i].buf.memory = V4L2_MEMORY_MMAP;
1418+ lw->frame[i].buf.flags = 0;
1419+ }
1420+
1421+ lw->dma.curr = 0;
1422+ lw->dma.filled = NULL;
1423+ return lw->num_frames;
1424+}
1425+
1426+void timblogiw_release_buffers(struct timblogiw *lw)
1427+{
1428+ dbg("%s\n", __func__);
1429+
1430+ if (lw->frame[0].bufmem != NULL) {
1431+ vfree(lw->frame[0].bufmem);
1432+ lw->frame[0].bufmem = NULL;
1433+ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
1434+ pci_free_consistent(lw->dev, lw->frame_size,
1435+ lw->dma.transfer[0].buf, lw->dma.transfer[0].handle);
1436+ pci_free_consistent(lw->dev, lw->frame_size,
1437+ lw->dma.transfer[1].buf, lw->dma.transfer[1].handle);
1438+ }
1439+}
1440+
1441+/* IOCTL functions */
1442+
1443+static int timblogiw_g_fmt(struct timblogiw *lw, struct v4l2_format *format)
1444+{
1445+ dbg("%s -\n", __func__);
1446+
1447+ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1448+ return -EINVAL;
1449+
1450+ format->fmt.pix.width = lw->width;
1451+ format->fmt.pix.height = lw->height;
1452+ format->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
1453+ format->fmt.pix.bytesperline = lw->bytesperline;
1454+ format->fmt.pix.sizeimage = lw->frame_size;
1455+ format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
1456+ format->fmt.pix.field = V4L2_FIELD_NONE;
1457+ return 0;
1458+}
1459+
1460+static int timblogiw_s_fmt(struct timblogiw *lw, struct v4l2_format *format)
1461+{
1462+ struct v4l2_pix_format *pix = &format->fmt.pix;
1463+ dbg("%s - type: %d\n", __func__, format->type);
1464+
1465+ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1466+ return -EINVAL;
1467+
1468+	if ((lw->height != pix->height) || (lw->width != pix->width))
1469+ return -EINVAL;
1470+
1471+ if (format->fmt.pix.field != V4L2_FIELD_NONE)
1472+ return -EINVAL;
1473+
1474+ dbg("%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
1475+ "bytes per line %d, size image: %d, colorspace: %d\n",
1476+ __func__,
1477+ pix->width, pix->height, pix->pixelformat, pix->field,
1478+ pix->bytesperline, pix->sizeimage, pix->colorspace);
1479+
1480+ return 0;
1481+}
1482+
1483+static int timblogiw_querycap(struct timblogiw *lw,
1484+ struct v4l2_capability *cap)
1485+{
1486+ memset(cap, 0, sizeof(*cap));
1487+ strncpy(cap->card, "Timberdale Video", sizeof(cap->card)-1);
1488+	strncpy(cap->driver, "Timblogiw", sizeof(cap->driver)-1);
1489+ cap->version = TIMBLOGIW_VERSION_CODE;
1490+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
1491+ V4L2_CAP_STREAMING;
1492+
1493+ return 0;
1494+}
1495+
1496+static int timblogiw_enum_fmt(struct timblogiw *lw, struct v4l2_fmtdesc *fmt)
1497+{
1498+ dbg("%s - VIDIOC_ENUM_FMT\n", __func__);
1499+
1500+ if (fmt->index != 0)
1501+ return -EINVAL;
1502+ memset(fmt, 0, sizeof(*fmt));
1503+ fmt->index = 0;
1504+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1505+ strncpy(fmt->description, "4:2:2, packed, YUYV",
1506+ sizeof(fmt->description)-1);
1507+ fmt->pixelformat = V4L2_PIX_FMT_YUYV;
1508+ memset(fmt->reserved, 0, sizeof(fmt->reserved));
1509+
1510+ return 0;
1511+}
1512+
1513+static int timblogiw_reqbufs(struct timblogiw *lw,
1514+ struct v4l2_requestbuffers *rb)
1515+{
1516+ if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1517+ rb->memory != V4L2_MEMORY_MMAP)
1518+ return -EINVAL;
1519+
1520+ timblogiw_empty_framequeues(lw);
1521+
1522+ timblogiw_release_buffers(lw);
1523+ if (rb->count)
1524+ rb->count = timblogiw_request_buffers(lw, rb->count);
1525+
1526+ dbg("%s - VIDIOC_REQBUFS: io method is mmap. num bufs %i\n",
1527+ __func__, rb->count);
1528+
1529+ return 0;
1530+}
1531+
1532+static int timblogiw_querybuf(struct timblogiw *lw, struct v4l2_buffer *b)
1533+{
1534+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1535+ b->index >= lw->num_frames)
1536+ return -EINVAL;
1537+
1538+ memcpy(b, &lw->frame[b->index].buf, sizeof(*b));
1539+
1540+ if (lw->frame[b->index].vma_use_count)
1541+ b->flags |= V4L2_BUF_FLAG_MAPPED;
1542+
1543+ if (lw->frame[b->index].state == F_DONE)
1544+ b->flags |= V4L2_BUF_FLAG_DONE;
1545+ else if (lw->frame[b->index].state != F_UNUSED)
1546+ b->flags |= V4L2_BUF_FLAG_QUEUED;
1547+
1548+ return 0;
1549+}
1550+
1551+static int timblogiw_qbuf(struct timblogiw *lw, struct v4l2_buffer *b)
1552+{
1553+ unsigned long lock_flags;
1554+
1555+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1556+ b->index >= lw->num_frames)
1557+ return -EINVAL;
1558+
1559+ if (lw->frame[b->index].state != F_UNUSED)
1560+ return -EAGAIN;
1561+
1562+ if (b->memory != V4L2_MEMORY_MMAP)
1563+ return -EINVAL;
1564+
1565+ lw->frame[b->index].state = F_QUEUED;
1566+
1567+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
1568+ list_add_tail(&lw->frame[b->index].frame, &lw->inqueue);
1569+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
1570+
1571+ return 0;
1572+}
1573+
1574+static int timblogiw_dqbuf(struct timblogiw *lw, struct file *file,
1575+ struct v4l2_buffer *b)
1576+{
1577+ struct timblogiw_frame *f;
1578+ unsigned long lock_flags;
1579+ int ret = 0;
1580+
1581+ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
1582+ dbg("%s - VIDIOC_DQBUF, illegal buf type!\n",
1583+ __func__);
1584+ return -EINVAL;
1585+ }
1586+
1587+ if (list_empty(&lw->outqueue)) {
1588+ if (file->f_flags & O_NONBLOCK)
1589+ return -EAGAIN;
1590+
1591+ ret = wait_event_interruptible(lw->wait_frame,
1592+ !list_empty(&lw->outqueue));
1593+ if (ret)
1594+ return ret;
1595+ }
1596+
1597+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
1598+ f = list_entry(lw->outqueue.next,
1599+ struct timblogiw_frame, frame);
1600+ list_del(lw->outqueue.next);
1601+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
1602+
1603+ f->state = F_UNUSED;
1604+ memcpy(b, &f->buf, sizeof(*b));
1605+
1606+ if (f->vma_use_count)
1607+ b->flags |= V4L2_BUF_FLAG_MAPPED;
1608+
1609+ return 0;
1610+}
1611+
1612+static int timblogiw_enumstd(struct timblogiw *lw, struct v4l2_standard *std)
1613+{
1614+ if (std->index != 0)
1615+ return -EINVAL;
1616+
1617+ memset(std, 0, sizeof(*std));
1618+ std->index = 0;
1619+
1620+ std->id = V4L2_STD_PAL;
1621+ strncpy(std->name, "PAL", sizeof(std->name)-1);
1622+
1623+ return 0;
1624+}
1625+
1626+static int timblogiw_g_std(struct timblogiw *lw, v4l2_std_id *std)
1627+{
1628+ *std = V4L2_STD_PAL;
1629+ return 0;
1630+}
1631+
1632+static int timblogiw_s_std(struct timblogiw *lw, v4l2_std_id *std)
1633+{
1634+ if (!(*std & V4L2_STD_PAL))
1635+ return -EINVAL;
1636+ return 0;
1637+}
1638+
1639+static int timblogiw_enuminput(struct timblogiw *lw, struct v4l2_input *inp)
1640+{
1641+ if (inp->index != 0)
1642+ return -EINVAL;
1643+
1644+ memset(inp, 0, sizeof(*inp));
1645+ inp->index = 0;
1646+
1647+ strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
1648+ inp->type = V4L2_INPUT_TYPE_CAMERA;
1649+ inp->std = V4L2_STD_ALL;
1650+
1651+ return 0;
1652+}
1653+
1654+static int timblogiw_g_input(struct timblogiw *lw, int *input)
1655+{
1656+ *input = 0;
1657+
1658+ return 0;
1659+}
1660+
1661+static int timblogiw_s_input(struct timblogiw *lw, int *input)
1662+{
1663+ if (*input != 0)
1664+ return -EINVAL;
1665+ return 0;
1666+}
1667+
1668+static int timblogiw_streamon(struct timblogiw *lw, int *type)
1669+{
1670+ struct timblogiw_frame *f;
1671+
1672+ if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
1673+ dbg("%s - No capture device\n", __func__);
1674+ return -EINVAL;
1675+ }
1676+
1677+ if (list_empty(&lw->inqueue)) {
1678+ dbg("%s - inqueue is empty\n", __func__);
1679+ return -EINVAL;
1680+ }
1681+
1682+ if (lw->stream == STREAM_ON)
1683+ return 0;
1684+
1685+ lw->stream = STREAM_ON;
1686+
1687+ f = list_entry(lw->inqueue.next,
1688+ struct timblogiw_frame, frame);
1689+
1690+ dbg("%s - f size: %d, bpr: %d, dma addr: %x\n", __func__,
1691+ lw->frame_size, lw->bytesperline,
1692+ (unsigned int)lw->dma.transfer[lw->dma.curr].handle);
1693+ timb_start_dma(DMA_IRQ_VIDEO_RX,
1694+ lw->dma.transfer[lw->dma.curr].handle,
1695+ lw->frame_size, lw->bytesperline);
1696+
1697+ return 0;
1698+}
1699+
1700+static int timblogiw_streamoff(struct timblogiw *lw, int *type)
1701+{
1702+ if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1703+ return -EINVAL;
1704+
1705+ if (lw->stream == STREAM_ON) {
1706+ unsigned long lock_flags;
1707+ spin_lock_irqsave(&lw->queue_lock, lock_flags);
1708+ timb_stop_dma(DMA_IRQ_VIDEO_RX);
1709+ lw->stream = STREAM_OFF;
1710+ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
1711+ }
1712+ timblogiw_empty_framequeues(lw);
1713+
1714+ return 0;
1715+}
1716+
1717+static int timblogiw_querystd(struct timblogiw *lw, v4l2_std_id *std)
1718+{
1719+ /* TODO: Ask encoder */
1720+ *std = V4L2_STD_PAL;
1721+ return 0;
1722+}
1723+
1724+static int timblogiw_enum_framsizes(struct timblogiw *lw,
1725+ struct v4l2_frmsizeenum *fsize)
1726+{
1727+ if ((fsize->index != 0) ||
1728+ (fsize->pixel_format != V4L2_PIX_FMT_YUYV))
1729+ return -EINVAL;
1730+
1731+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1732+ fsize->discrete.width = lw->width;
1733+ fsize->discrete.height = lw->height;
1734+
1735+ return 0;
1736+}
1737+
1738+static int timblogiw_g_parm(struct timblogiw *lw, struct v4l2_streamparm *sp)
1739+{
1740+ if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1741+ return -EINVAL;
1742+
1743+ sp->parm.capture.extendedmode = 0;
1744+ sp->parm.capture.readbuffers = lw->num_frames;
1745+ return 0;
1746+}
1747+
1748+/*******************************
1749+ * Device Operations functions *
1750+ *******************************/
1751+
1752+static int timblogiw_open(struct file *file)
1753+{
1754+ struct video_device *vdev = video_devdata(file);
1755+ struct timblogiw *lw = video_get_drvdata(vdev);
1756+
1757+ dbg("%s -\n", __func__);
1758+
1759+ mutex_init(&lw->fileop_lock);
1760+ spin_lock_init(&lw->queue_lock);
1761+ init_waitqueue_head(&lw->wait_frame);
1762+
1763+ mutex_lock(&lw->lock);
1764+
1765+ lw->width = 720; /* TODO: Should depend on tv norm */
1766+ lw->height = 576;
1767+ lw->frame_size = lw->width * lw->height * 2;
1768+ lw->bytesperline = lw->width * 2;
1769+
1770+ file->private_data = lw;
1771+ lw->stream = STREAM_OFF;
1772+ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
1773+
1774+ timblogiw_empty_framequeues(lw);
1775+
1776+ timb_set_dma_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP,
1777+ timblogiw_isr, (void *)lw);
1778+
1779+ mutex_unlock(&lw->lock);
1780+
1781+ return 0;
1782+}
1783+
1784+static int timblogiw_close(struct file *file)
1785+{
1786+ struct timblogiw *lw = file->private_data;
1787+
1788+ dbg("%s - entry\n", __func__);
1789+
1790+ mutex_lock(&lw->lock);
1791+
1792+ timb_stop_dma(DMA_IRQ_VIDEO_RX);
1793+ timb_set_dma_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP, NULL,
1794+ NULL);
1795+ timblogiw_release_buffers(lw);
1796+
1797+ mutex_unlock(&lw->lock);
1798+ return 0;
1799+}
1800+
1801+static ssize_t timblogiw_read(struct file *file, char __user *data,
1802+ size_t count, loff_t *ppos)
1803+{
1804+ dbg("%s - read request\n", __func__);
1805+ return -EINVAL;
1806+}
1807+
1808+static void timblogiw_vm_open(struct vm_area_struct *vma)
1809+{
1810+ struct timblogiw_frame *f = vma->vm_private_data;
1811+ f->vma_use_count++;
1812+}
1813+
1814+static void timblogiw_vm_close(struct vm_area_struct *vma)
1815+{
1816+ struct timblogiw_frame *f = vma->vm_private_data;
1817+ f->vma_use_count--;
1818+}
1819+
1820+static struct vm_operations_struct timblogiw_vm_ops = {
1821+ .open = timblogiw_vm_open,
1822+ .close = timblogiw_vm_close,
1823+};
1824+
1825+static int timblogiw_mmap(struct file *filp, struct vm_area_struct *vma)
1826+{
1827+ unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start;
1828+ void *pos;
1829+ u32 i;
1830+ int ret = -EINVAL;
1831+
1832+ struct timblogiw *lw = filp->private_data;
1833+ dbg("%s\n", __func__);
1834+
1835+ if (mutex_lock_interruptible(&lw->fileop_lock))
1836+ return -ERESTARTSYS;
1837+
1838+ if (!(vma->vm_flags & VM_WRITE) ||
1839+ size != PAGE_ALIGN(lw->frame[0].buf.length))
1840+ goto error_unlock;
1841+
1842+ for (i = 0; i < lw->num_frames; i++)
1843+ if ((lw->frame[i].buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
1844+ break;
1845+
1846+ if (i == lw->num_frames) {
1847+ dbg("%s - user supplied mapping address is out of range\n",
1848+ __func__);
1849+ goto error_unlock;
1850+ }
1851+
1852+ vma->vm_flags |= VM_IO;
1853+ vma->vm_flags |= VM_RESERVED; /* Do not swap out this VMA */
1854+
1855+ pos = lw->frame[i].bufmem;
1856+ while (size > 0) { /* size is page-aligned */
1857+ if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
1858+ dbg("%s - vm_insert_page failed\n", __func__);
1859+ ret = -EAGAIN;
1860+ goto error_unlock;
1861+ }
1862+ start += PAGE_SIZE;
1863+ pos += PAGE_SIZE;
1864+ size -= PAGE_SIZE;
1865+ }
1866+
1867+ vma->vm_ops = &timblogiw_vm_ops;
1868+ vma->vm_private_data = &lw->frame[i];
1869+ timblogiw_vm_open(vma);
1870+ ret = 0;
1871+
1872+error_unlock:
1873+ mutex_unlock(&lw->fileop_lock);
1874+ return ret;
1875+}
1876+
1877+static long
1878+timblogiw_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1879+{
1880+ struct timblogiw *lw = file->private_data;
1881+
1882+ switch (cmd) {
1883+
1884+ case VIDIOC_QUERYCAP:
1885+ {
1886+ dbg("%s - VIDIOC_QUERYCAP\n", __func__);
1887+ return timblogiw_querycap(lw, (struct v4l2_capability *)arg);
1888+ }
1889+
1890+ case VIDIOC_ENUM_FMT:
1891+ {
1892+ dbg("%s - VIDIOC_ENUM_FMT\n", __func__);
1893+ return timblogiw_enum_fmt(lw, (struct v4l2_fmtdesc *)arg);
1894+ }
1895+
1896+ case VIDIOC_G_FMT:
1897+ {
1898+ dbg("%s - VIDIOC_G_FMT\n", __func__);
1899+ return timblogiw_g_fmt(lw, (struct v4l2_format *) arg);
1900+ }
1901+
1902+ case VIDIOC_TRY_FMT:
1903+ case VIDIOC_S_FMT:
1904+ {
1905+ dbg("%s - VIDIOC_S_FMT\n", __func__);
1906+ return timblogiw_s_fmt(lw, (struct v4l2_format *)arg);
1907+ }
1908+
1909+ case VIDIOC_REQBUFS:
1910+ {
1911+ dbg("%s - VIDIOC_REQBUFS\n", __func__);
1912+ return timblogiw_reqbufs(lw, (struct v4l2_requestbuffers *)arg);
1913+ }
1914+
1915+ case VIDIOC_QUERYBUF:
1916+ {
1917+ dbg("%s - VIDIOC_QUERYBUF\n", __func__);
1918+ return timblogiw_querybuf(lw, (struct v4l2_buffer *)arg);
1919+ }
1920+
1921+ case VIDIOC_QBUF:
1922+ {
1923+ return timblogiw_qbuf(lw, (struct v4l2_buffer *)arg);
1924+ }
1925+
1926+ case VIDIOC_DQBUF:
1927+ {
1928+ return timblogiw_dqbuf(lw, file, (struct v4l2_buffer *)arg);
1929+ }
1930+
1931+ case VIDIOC_ENUMSTD:
1932+ {
1933+ dbg("%s - VIDIOC_ENUMSTD\n", __func__);
1934+ return timblogiw_enumstd(lw, (struct v4l2_standard *)arg);
1935+ }
1936+
1937+ case VIDIOC_G_STD:
1938+ {
1939+ dbg("%s - VIDIOC_G_STD\n", __func__);
1940+ return timblogiw_g_std(lw, (v4l2_std_id *)arg);
1941+ }
1942+
1943+ case VIDIOC_S_STD:
1944+ {
1945+ dbg("%s - VIDIOC_S_STD\n", __func__);
1946+ return timblogiw_s_std(lw, (v4l2_std_id *)arg);
1947+ }
1948+
1949+ case VIDIOC_ENUMINPUT:
1950+ {
1951+ dbg("%s - VIDIOC_ENUMINPUT\n", __func__);
1952+ return timblogiw_enuminput(lw, (struct v4l2_input *)arg);
1953+ }
1954+
1955+ case VIDIOC_G_INPUT:
1956+ {
1957+ dbg("%s - VIDIOC_G_INPUT\n", __func__);
1958+ return timblogiw_g_input(lw, (int *)arg);
1959+ }
1960+
1961+ case VIDIOC_S_INPUT:
1962+ {
1963+ dbg("%s - VIDIOC_S_INPUT\n", __func__);
1964+ return timblogiw_s_input(lw, (int *)arg);
1965+ }
1966+
1967+ case VIDIOC_STREAMON:
1968+ {
1969+ dbg("%s - VIDIOC_STREAMON\n", __func__);
1970+ return timblogiw_streamon(lw, (int *)arg);
1971+ }
1972+
1973+ case VIDIOC_STREAMOFF:
1974+ {
1975+ dbg("%s - VIDIOC_STREAMOFF\n", __func__);
1976+ return timblogiw_streamoff(lw, (int *)arg);
1977+ }
1978+
1979+ case VIDIOC_QUERYSTD:
1980+ {
1981+ dbg("%s - VIDIOC_QUERYSTD\n", __func__);
1982+ return timblogiw_querystd(lw, (v4l2_std_id *)arg);
1983+ }
1984+
1985+ case VIDIOC_ENUM_FRAMESIZES:
1986+ {
1987+ dbg("%s - VIDIOC_ENUM_FRAMESIZES\n", __func__);
1988+ return timblogiw_enum_framsizes(lw,
1989+ (struct v4l2_frmsizeenum *)arg);
1990+ }
1991+
1992+ case VIDIOC_G_PARM:
1993+ {
1994+ dbg("%s - VIDIOC_G_PARM\n", __func__);
1995+ return timblogiw_g_parm(lw, (struct v4l2_streamparm *)arg);
1996+ }
1997+
1998+ default:
1999+ {
2000+ dbg("%s Unknown command, dir: %x, type: %x, nr: %x, size: %x\n",
2001+ __func__,
2002+ _IOC_DIR(cmd),
2003+ _IOC_TYPE(cmd),
2004+ _IOC_NR(cmd),
2005+ _IOC_SIZE(cmd));
2006+ break;
2007+ }
2008+ }
2009+
2010+ return -EINVAL;
2011+}
2012+
2013+void timblogiw_vdev_release(struct video_device *vdev)
2014+{
2015+ kfree(vdev);
2016+}
2017+
2018+static const struct v4l2_file_operations timblogiw_fops = {
2019+ .owner = THIS_MODULE,
2020+ .open = timblogiw_open,
2021+ .release = timblogiw_close,
2022+ .ioctl = timblogiw_ioctl,
2023+ .mmap = timblogiw_mmap,
2024+ .read = timblogiw_read,
2025+};
2026+
2027+static const struct video_device timblogiw_template = {
2028+ .name = TIMBLOGIWIN_NAME,
2029+ .fops = &timblogiw_fops,
2030+ .release = &timblogiw_vdev_release,
2031+ .minor = -1
2032+};
2033+
2034+static int timblogiw_probe(struct platform_device *dev)
2035+{
2036+ int err;
2037+ struct timblogiw *lw;
2038+ struct resource *iomem;
2039+
2040+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
2041+ if (!iomem) {
2042+ err = -EINVAL;
2043+ goto err_mem;
2044+ }
2045+
2046+ lw = kzalloc(sizeof(*lw), GFP_KERNEL);
2047+ if (!lw) {
2048+		err = -ENOMEM;
2049+ goto err_mem;
2050+ }
2051+
2052+ /* find the PCI device from the parent... */
2053+ if (!dev->dev.parent) {
2054+		printk(KERN_ERR "timblogiw: no parent device found\n");
2055+ err = -ENODEV;
2056+		goto err_video_req;
2057+ }
2058+
2059+ lw->dev = container_of(dev->dev.parent, struct pci_dev, dev);
2060+
2061+ mutex_init(&lw->lock);
2062+
2063+ lw->video_dev = video_device_alloc();
2064+ if (!lw->video_dev) {
2065+ err = -ENOMEM;
2066+ goto err_video_req;
2067+ }
2068+ *lw->video_dev = timblogiw_template;
2069+
2070+ err = video_register_device(lw->video_dev, VFL_TYPE_GRABBER, 0);
2071+ if (err) {
2072+ video_device_release(lw->video_dev);
2073+ printk(KERN_ALERT "Error reg video\n");
2074+ goto err_video_req;
2075+ }
2076+
2077+ tasklet_init(&lw->tasklet, timblogiw_handleframe, (unsigned long)lw);
2078+
2079+ if (!request_mem_region(iomem->start, resource_size(iomem),
2080+ "timb-video")) {
2081+ err = -EBUSY;
2082+ goto err_request;
2083+ }
2084+
2085+ lw->membase = ioremap(iomem->start, resource_size(iomem));
2086+ if (!lw->membase) {
2087+ err = -ENOMEM;
2088+ goto err_ioremap;
2089+ }
2090+
2091+ platform_set_drvdata(dev, lw);
2092+ video_set_drvdata(lw->video_dev, lw);
2093+
2094+ return 0;
2095+
2096+err_ioremap:
2097+ release_mem_region(iomem->start, resource_size(iomem));
2098+err_request:
2099+ if (-1 != lw->video_dev->minor)
2100+ video_unregister_device(lw->video_dev);
2101+ else
2102+ video_device_release(lw->video_dev);
2103+err_video_req:
2104+ kfree(lw);
2105+err_mem:
2106+ printk(KERN_ERR
2107+ "timberdale: Failed to register Timberdale Video In: %d\n",
2108+ err);
2109+
2110+ return err;
2111+}
2112+
2113+static int timblogiw_remove(struct platform_device *dev)
2114+{
2115+ struct timblogiw *lw = platform_get_drvdata(dev);
2116+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
2117+
2118+ if (-1 != lw->video_dev->minor)
2119+ video_unregister_device(lw->video_dev);
2120+ else
2121+ video_device_release(lw->video_dev);
2122+
2123+ tasklet_kill(&lw->tasklet);
2124+ iounmap(lw->membase);
2125+ release_mem_region(iomem->start, resource_size(iomem));
2126+ kfree(lw);
2127+
2128+ return 0;
2129+}
2130+
2131+static struct platform_driver timblogiw_platform_driver = {
2132+ .driver = {
2133+ .name = "timb-video",
2134+ .owner = THIS_MODULE,
2135+ },
2136+ .probe = timblogiw_probe,
2137+ .remove = timblogiw_remove,
2138+};
2139+
2140+/*--------------------------------------------------------------------------*/
2141+
2142+static int __init timblogiw_init(void)
2143+{
2144+ return platform_driver_register(&timblogiw_platform_driver);
2145+}
2146+
2147+static void __exit timblogiw_exit(void)
2148+{
2149+ platform_driver_unregister(&timblogiw_platform_driver);
2150+}
2151+
2152+module_init(timblogiw_init);
2153+module_exit(timblogiw_exit);
2154+
2155+MODULE_DESCRIPTION("Timberdale Video In driver");
2156+MODULE_LICENSE("GPL v2");
2157+MODULE_ALIAS("platform:timb-video");
2158+
2159diff -uNr linux-2.6.29-clean/drivers/media/video/timblogiw.h linux-2.6.29/drivers/media/video/timblogiw.h
2160--- linux-2.6.29-clean/drivers/media/video/timblogiw.h 1969-12-31 16:00:00.000000000 -0800
2161+++ linux-2.6.29/drivers/media/video/timblogiw.h 2009-04-06 13:51:47.000000000 -0700
2162@@ -0,0 +1,95 @@
2163+/*
2164+ * timblogiw.h timberdale FPGA LogiWin Video In driver defines
2165+ * Copyright (c) 2009 Intel Corporation
2166+ *
2167+ * This program is free software; you can redistribute it and/or modify
2168+ * it under the terms of the GNU General Public License version 2 as
2169+ * published by the Free Software Foundation.
2170+ *
2171+ * This program is distributed in the hope that it will be useful,
2172+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2173+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2174+ * GNU General Public License for more details.
2175+ *
2176+ * You should have received a copy of the GNU General Public License
2177+ * along with this program; if not, write to the Free Software
2178+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2179+ */
2180+
2181+/* Supports:
2182+ * Timberdale FPGA LogiWin Video In
2183+ */
2184+
2185+#ifndef _TIMBLOGIW_H
2186+#define _TIMBLOGIW_H
2187+
2188+#include <linux/interrupt.h>
2189+
2190+#define TIMBLOGIWIN_NAME "Timberdale Video-In"
2191+
2192+#define TIMBLOGIW_NUM_FRAMES 10
2193+
2194+
2195+enum timblogiw_stream_state {
2196+ STREAM_OFF,
2197+ STREAM_ON,
2198+};
2199+
2200+enum timblogiw_frame_state {
2201+ F_UNUSED = 0,
2202+ F_QUEUED,
2203+ F_GRABBING,
2204+ F_DONE,
2205+ F_ERROR,
2206+};
2207+
2208+struct timblogiw_frame {
2209+ void *bufmem;
2210+ struct v4l2_buffer buf;
2211+ enum timblogiw_frame_state state;
2212+ struct list_head frame;
2213+ unsigned long vma_use_count;
2214+};
2215+
2216+struct timblogiw_tvnorm {
2217+ int v4l2_id;
2218+ char *name;
2219+ u16 swidth;
2220+ u16 sheight;
2221+};
2222+
2223+
2224+struct timbdma_transfer {
2225+ dma_addr_t handle;
2226+ void *buf;
2227+};
2228+
2229+struct timbdma_control {
2230+ struct timbdma_transfer transfer[2];
2231+ struct timbdma_transfer *filled;
2232+ int curr;
2233+};
2234+
2235+struct timblogiw {
2236+ struct i2c_client *decoder;
2237+ struct timblogiw_frame frame[TIMBLOGIW_NUM_FRAMES];
2238+ int num_frames;
2239+ unsigned int frame_count;
2240+ struct list_head inqueue, outqueue;
2241+ spinlock_t queue_lock; /* protects inqueue/outqueue */
2242+ enum timblogiw_stream_state stream;
2243+ struct video_device *video_dev;
2244+ struct mutex lock, fileop_lock;
2245+ wait_queue_head_t wait_frame;
2246+ int width;
2247+ int height;
2248+ u32 frame_size;
2249+ int bytesperline;
2250+ struct pci_dev *dev;
2251+ struct timbdma_control dma;
2252+ void __iomem *membase;
2253+ struct tasklet_struct tasklet;
2254+};
2255+
2256+#endif /* _TIMBLOGIW_H */
2257+
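The inqueue/outqueue pair and the frame-state enum above are the usual V4L capture-queue shape: queued frames sit on inqueue as F_QUEUED, the grabbing side moves them through F_GRABBING and parks finished ones on outqueue as F_DONE or F_ERROR, with queue_lock guarding both lists. A sketch of the dequeue side under that lock (this helper is illustrative, not code from the patch, and assumes the header compiles in its driver context):

#include <linux/list.h>
#include <linux/spinlock.h>
#include "timblogiw.h"

/* pop the oldest completed frame, or NULL if none is ready */
static struct timblogiw_frame *timblogiw_dequeue(struct timblogiw *lw)
{
	struct timblogiw_frame *f = NULL;
	unsigned long flags;

	spin_lock_irqsave(&lw->queue_lock, flags);
	if (!list_empty(&lw->outqueue)) {
		f = list_entry(lw->outqueue.next,
			       struct timblogiw_frame, frame);
		list_del(&f->frame);
	}
	spin_unlock_irqrestore(&lw->queue_lock, flags);

	return f;	/* f->state is F_DONE or F_ERROR */
}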
2258diff -uNr linux-2.6.29-clean/drivers/mfd/Kconfig linux-2.6.29/drivers/mfd/Kconfig
2259--- linux-2.6.29-clean/drivers/mfd/Kconfig 2009-04-01 09:20:24.000000000 -0700
2260+++ linux-2.6.29/drivers/mfd/Kconfig 2009-04-06 13:51:47.000000000 -0700
2261@@ -240,6 +240,27 @@
2262 Say yes here if you want to include support GPIO for pins on
2263 the PCF50633 chip.
2264
2265+config MFD_TIMBERDALE
2266+ bool "Support for Timberdale"
2267+ select MFD_CORE
2268+ ---help---
2269+ This is the core driver for the timberdale FPGA. This device is a
2270+ multifunctional device which may provide numerous interfaces.
2271+
2272+config MFD_TIMBERDALE_DMA
2273+ tristate "Support for timberdale DMA"
2274+ depends on MFD_TIMBERDALE
2275+ ---help---
2276+ Add support for the DMA block inside the timberdale FPGA. This makes it
2277+ possible to do DMA transfers directly to some of the blocks inside the FPGA.
2278+
2279+config MFD_TIMBERDALE_I2S
2280+ tristate "Support for timberdale I2S bus"
2281+ depends on MFD_TIMBERDALE
2282+ ---help---
2283+ Add support for the I2S bus handled by the timberdale FPGA.
2284+ I2S RX and TX instances are then available for other devices to use.
2285+
2286 endmenu
2287
2288 menu "Multimedia Capabilities Port drivers"
2289diff -uNr linux-2.6.29-clean/drivers/mfd/Makefile linux-2.6.29/drivers/mfd/Makefile
2290--- linux-2.6.29-clean/drivers/mfd/Makefile 2009-04-01 09:20:24.000000000 -0700
2291+++ linux-2.6.29/drivers/mfd/Makefile 2009-04-06 13:51:47.000000000 -0700
2292@@ -40,4 +40,8 @@
2293
2294 obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
2295 obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
2296-obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
2297\ No newline at end of file
2298+obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
2299+
2300+obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
2301+obj-$(CONFIG_MFD_TIMBERDALE_DMA) += timbdma.o
2302+obj-$(CONFIG_MFD_TIMBERDALE_I2S) += timbi2s.o
2303diff -uNr linux-2.6.29-clean/drivers/mfd/timbdma.c linux-2.6.29/drivers/mfd/timbdma.c
2304--- linux-2.6.29-clean/drivers/mfd/timbdma.c 1969-12-31 16:00:00.000000000 -0800
2305+++ linux-2.6.29/drivers/mfd/timbdma.c 2009-04-06 13:51:47.000000000 -0700
2306@@ -0,0 +1,301 @@
2307+/*
2308+ * timbdma.c timberdale FPGA DMA driver
2309+ * Copyright (c) 2009 Intel Corporation
2310+ *
2311+ * This program is free software; you can redistribute it and/or modify
2312+ * it under the terms of the GNU General Public License version 2 as
2313+ * published by the Free Software Foundation.
2314+ *
2315+ * This program is distributed in the hope that it will be useful,
2316+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2317+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2318+ * GNU General Public License for more details.
2319+ *
2320+ * You should have received a copy of the GNU General Public License
2321+ * along with this program; if not, write to the Free Software
2322+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2323+ */
2324+
2325+/* Supports:
2326+ * Timberdale FPGA DMA engine
2327+ */
2328+
2329+#include <linux/version.h>
2330+#include <linux/module.h>
2331+#include <linux/pci.h>
2332+#include <linux/interrupt.h>
2333+#include <linux/platform_device.h>
2334+
2335+#include <linux/mfd/timbdma.h>
2336+
2337+static struct timbdma_dev *self_g;
2338+
2339+static irqreturn_t timbdma_handleinterrupt(int irq, void *devid)
2340+{
2341+ struct timbdma_dev *dev = (struct timbdma_dev *)devid;
2342+ int ipr;
2343+ int i;
2344+
2345+ ipr = ioread32(dev->membase + timbdma_ctrlmap_TIMBPEND);
2346+
2347+ /* ack */
2348+ iowrite32(ipr, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
2349+
2350+ /* call the callbacks */
2351+ for (i = 0; i < DMA_IRQS; i++) {
2352+ int mask = 1 << i;
2353+ if ((ipr & mask) && dev->callbacks[i])
2354+ dev->callbacks[i](mask, dev->callback_data[i]);
2355+ }
2356+
2357+ if (ipr)
2358+ return IRQ_HANDLED;
2359+ else
2360+ return IRQ_NONE;
2361+}
2362+
2363+
2364+void timb_start_dma(u32 flag, unsigned long buf, int len, int bytes_per_row)
2365+{
2366+ int i;
2367+ unsigned long irqflags;
2368+ struct timbdma_dev *dev = self_g;
2369+
2370+ spin_lock_irqsave(&dev->lock, irqflags);
2371+
2372+ /* now enable the DMA transfer */
2373+ for (i = 0; i < DMA_IRQS; i++)
2374+ if (flag & (1 << i)) {
2375+ u32 offset = i / 2 * 0x40;
2376+
2377+ if (!(i % 2)) {
2378+ /* RX */
2379+ /* bytes per row */
2380+ iowrite32(bytes_per_row, dev->membase + offset +
2381+ timbdma_dmacfg_BPERROW);
2382+ /* address high */
2383+ iowrite32(0, dev->membase + offset +
2384+ timbdma_dmacfg_RXSTARTH);
2385+ /* address low */
2386+ iowrite32(buf, dev->membase + offset +
2387+ timbdma_dmacfg_RXSTARTL);
2388+ /* Length */
2389+ iowrite32(len, dev->membase + offset +
2390+ timbdma_dmacfg_RXLENGTH);
2391+ /* Clear rx sw read pointer */
2392+ iowrite32(0, dev->membase + offset +
2393+ timbdma_dmacfg_RXSWRP);
2394+ /* enable the transfer */
2395+ iowrite32(1, dev->membase + offset +
2396+ timbdma_dmacfg_RXENABLE);
2397+ } else {
2398+ /* TX */
2399+ /* address high */
2400+ iowrite32(0, dev->membase + offset +
2401+ timbdma_dmacfg_TXSTARTH);
2402+ /* address low */
2403+ iowrite32(buf, dev->membase + offset +
2404+ timbdma_dmacfg_TXSTARTL);
2405+ /* Length */
2406+ iowrite32(len, dev->membase + offset +
2407+ timbdma_dmacfg_TXLENGTH);
2408+ /* Set tx sw write pointer */
2409+ iowrite32(len, dev->membase + offset +
2410+ timbdma_dmacfg_TXSWWP);
2411+ }
2412+
2413+ /* only allow one bit in the flag field */
2414+ break;
2415+ }
2416+ spin_unlock_irqrestore(&dev->lock, irqflags);
2417+}
2418+EXPORT_SYMBOL(timb_start_dma);
2419+
2420+void *timb_stop_dma(u32 flags)
2421+{
2422+ int i;
2423+ unsigned long irqflags;
2424+ struct timbdma_dev *dev = self_g;
2425+ void *result = NULL;
2426+
2427+ spin_lock_irqsave(&dev->lock, irqflags);
2428+
2429+ /* now disable the DMA transfers */
2430+ for (i = 0; i < DMA_IRQS; i++)
2431+ if (flags & (1 << i)) {
2432+ /*
2433+ RX enable registers are located at:
2434+ 0x14
2435+ 0x54
2436+ 0x94
2437+
2438+ TX SW pointer registers are located at:
2439+ 0x24
2440+ 0x64
2441+ */
2442+ u32 offset = i / 2 * 0x40;
2443+ u32 result_offset = offset;
2444+ if (!(i % 2)) {
2445+ /* even -> RX enable */
2446+ offset += timbdma_dmacfg_RXENABLE;
2447+ result_offset += timbdma_dmacfg_RXFPGAWP;
2448+ } else {
2449+ /* odd -> TX SW pointer reg */
2450+ offset += timbdma_dmacfg_TXSWWP;
2451+ result_offset += timbdma_dmacfg_TXFPGARP;
2452+ }
2453+
2454+ iowrite32(0, dev->membase + offset);
2455+ /* check how far the FPGA has written/read */
2456+ result = (void *)ioread32(dev->membase + result_offset);
2457+ }
2458+
2459+ /* ack any pending IRQs */
2460+ iowrite32(flags, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
2461+
2462+ spin_unlock_irqrestore(&dev->lock, irqflags);
2463+
2464+ return result;
2465+}
2466+EXPORT_SYMBOL(timb_stop_dma);
2467+
2468+void timb_set_dma_interruptcb(u32 flags, timbdma_interruptcb icb, void *data)
2469+{
2470+ int i;
2471+ unsigned long irqflags;
2472+ struct timbdma_dev *dev = self_g;
2473+ u32 ier;
2474+
2475+ spin_lock_irqsave(&dev->lock, irqflags);
2476+
2477+ for (i = 0; i < DMA_IRQS; i++)
2478+ if (flags & (1 << i)) {
2479+ dev->callbacks[i] = icb;
2480+ dev->callback_data[i] = data;
2481+ }
2482+
2483+ /* Ack any pending IRQ */
2484+ iowrite32(flags, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
2485+
2486+ /* if a null callback is given -> clear interrupt, else -> enable */
2487+ ier = ioread32(dev->membase + timbdma_ctrlmap_TIMBENABLE);
2488+ if (icb != NULL)
2489+ ier |= flags;
2490+ else
2491+ ier &= ~flags;
2492+ iowrite32(ier, dev->membase + timbdma_ctrlmap_TIMBENABLE);
2493+
2494+ spin_unlock_irqrestore(&dev->lock, irqflags);
2495+}
2496+EXPORT_SYMBOL(timb_set_dma_interruptcb);
2497+
2498+static int timbdma_probe(struct platform_device *dev)
2499+{
2500+ int err, irq;
2501+ struct timbdma_dev *self;
2502+ struct resource *iomem;
2503+
2504+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
2505+ if (!iomem) {
2506+ err = -EINVAL;
2507+ goto err_mem;
2508+ }
2509+
2510+ self = kzalloc(sizeof(*self), GFP_KERNEL);
2511+ if (!self) {
2512+ err = -ENOMEM;
2513+ goto err_mem;
2514+ }
2515+
2516+ spin_lock_init(&self->lock);
2517+
2518+ if (!request_mem_region(iomem->start,
2519+ resource_size(iomem), "timb-dma")) {
2520+ err = -EBUSY;
2521+ goto err_request;
2522+ }
2523+
2524+ self->membase = ioremap(iomem->start, resource_size(iomem));
2525+ if (!self->membase) {
2526+ printk(KERN_ERR "timbdma: Failed to remap I/O memory\n");
2527+ err = -ENOMEM;
2528+ goto err_ioremap;
2529+ }
2530+
2531+ /* register interrupt */
2532+ irq = platform_get_irq(dev, 0);
2533+ if (irq < 0) {
2534+ err = irq;
2535+ goto err_get_irq;
2536+ }
2537+
2538+ /* request IRQ */
2539+ err = request_irq(irq, timbdma_handleinterrupt, IRQF_SHARED,
2540+ "timb-dma", self);
2541+ if (err) {
2542+ printk(KERN_ERR "timbdma: Failed to request IRQ\n");
2543+ goto err_get_irq;
2544+ }
2545+
2546+ platform_set_drvdata(dev, self);
2547+
2548+ /* assign the global pointer */
2549+ self_g = self;
2550+
2551+ return 0;
2552+
2553+err_get_irq:
2554+ iounmap(self->membase);
2555+err_ioremap:
2556+ release_mem_region(iomem->start, resource_size(iomem));
2557+err_request:
2558+ kfree(self);
2559+err_mem:
2560+ printk(KERN_ERR "timberdale: Failed to register Timberdale DMA: %d\n",
2561+ err);
2562+
2563+ return err;
2564+}
2565+
2566+static int timbdma_remove(struct platform_device *dev)
2567+{
2568+ struct timbdma_dev *self = platform_get_drvdata(dev);
2569+ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
2570+
2571+ free_irq(platform_get_irq(dev, 0), self);
2572+ iounmap(self->membase);
2573+ release_mem_region(iomem->start, resource_size(iomem));
2574+ kfree(self);
2575+ self_g = NULL;
2576+ return 0;
2577+}
2578+
2579+static struct platform_driver timbdma_platform_driver = {
2580+ .driver = {
2581+ .name = "timb-dma",
2582+ .owner = THIS_MODULE,
2583+ },
2584+ .probe = timbdma_probe,
2585+ .remove = timbdma_remove,
2586+};
2587+
2588+/*--------------------------------------------------------------------------*/
2589+
2590+static int __init timbdma_init(void)
2591+{
2592+ self_g = NULL;
2593+ return platform_driver_register(&timbdma_platform_driver);
2594+}
2595+
2596+static void __exit timbdma_exit(void)
2597+{
2598+ platform_driver_unregister(&timbdma_platform_driver);
2599+}
2600+
2601+module_init(timbdma_init);
2602+module_exit(timbdma_exit);
2603+
2604+MODULE_DESCRIPTION("Timberdale DMA driver");
2605+MODULE_LICENSE("GPL v2");
2606+MODULE_ALIAS("platform:timb-dma");
2607+
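The three exported functions above, timb_set_dma_interruptcb(), timb_start_dma() and timb_stop_dma(), are the whole client-facing API of this engine: register a per-channel callback, kick a transfer on one flag bit (even bits are RX channels, odd bits TX, per the offset math in timb_start_dma()), and read back how far the FPGA got when stopping. A sketch of an RX consumer follows; the callback shape is inferred from how timbdma_handleinterrupt() invokes dev->callbacks[i], and MY_RX_FLAG plus all my_* names are illustrative, since linux/mfd/timbdma.h is not part of this hunk:

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/mfd/timbdma.h>

#define MY_RX_FLAG	(1 << 0)	/* hypothetical: first RX channel */

static DECLARE_COMPLETION(my_dma_done);

/* signature inferred from dev->callbacks[i](mask, dev->callback_data[i]) */
static void my_dma_cb(u32 flag, void *data)
{
	complete(data);
}

/* capture one buffer and return the FPGA write pointer */
static void *my_capture(dma_addr_t buf, int len, int bytes_per_row)
{
	INIT_COMPLETION(my_dma_done);
	timb_set_dma_interruptcb(MY_RX_FLAG, my_dma_cb, &my_dma_done);
	timb_start_dma(MY_RX_FLAG, (unsigned long)buf, len, bytes_per_row);
	wait_for_completion(&my_dma_done);
	timb_set_dma_interruptcb(MY_RX_FLAG, NULL, NULL);
	return timb_stop_dma(MY_RX_FLAG);
}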
2608diff -uNr linux-2.6.29-clean/drivers/mfd/timberdale.c linux-2.6.29/drivers/mfd/timberdale.c
2609--- linux-2.6.29-clean/drivers/mfd/timberdale.c 1969-12-31 16:00:00.000000000 -0800
2610+++ linux-2.6.29/drivers/mfd/timberdale.c 2009-04-06 13:51:47.000000000 -0700
2611@@ -0,0 +1,599 @@
2612+/*
2613+ * timberdale.c timberdale FPGA mfd shim driver
2614+ * Copyright (c) 2009 Intel Corporation
2615+ *
2616+ * This program is free software; you can redistribute it and/or modify
2617+ * it under the terms of the GNU General Public License version 2 as
2618+ * published by the Free Software Foundation.
2619+ *
2620+ * This program is distributed in the hope that it will be useful,
2621+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2622+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2623+ * GNU General Public License for more details.
2624+ *
2625+ * You should have received a copy of the GNU General Public License
2626+ * along with this program; if not, write to the Free Software
2627+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
2628+ */
2629+
2630+/* Supports:
2631+ * Timberdale FPGA
2632+ */
2633+
2634+#include <linux/kernel.h>
2635+#include <linux/module.h>
2636+#include <linux/pci.h>
2637+#include <linux/msi.h>
2638+#include <linux/init.h>
2639+#include <linux/interrupt.h>
2640+#include <linux/platform_device.h>
2641+#include <linux/mfd/core.h>
2642+#include <linux/irq.h>
2643+
2644+#include <linux/i2c.h>
2645+#include <linux/i2c-ocores.h>
2646+#include <linux/i2c/tsc2007.h>
2647+#include <linux/spi/xilinx_spi.h>
2648+#include "timberdale.h"
2649+
2650+struct timberdale_device {
2651+ resource_size_t intc_mapbase;
2652+ resource_size_t ctl_mapbase;
2653+ unsigned char __iomem *intc_membase;
2654+ unsigned char __iomem *ctl_membase;
2655+ int irq_base;
2656+ u32 irq_ack_mask;
2657+ /* locking out interrupts while modifying registers */
2658+ spinlock_t lock;
2659+};
2660+
2661+/*--------------------------------------------------------------------------*/
2662+
2663+static struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
2664+ .model = 2003,
2665+ .x_plate_ohms = 100
2666+};
2667+
2668+static struct i2c_board_info timberdale_i2c_board_info[] = {
2669+ {
2670+ I2C_BOARD_INFO("tsc2003", 0x48),
2671+ .platform_data = &timberdale_tsc2007_platform_data,
2672+ .irq = IRQ_TIMBERDALE_TSC_INT
2673+ },
2674+ {
2675+ I2C_BOARD_INFO("adv7180", 0x42 >> 1),
2676+ .irq = IRQ_TIMBERDALE_ADV7180
2677+ }
2678+};
2679+
2680+static __devinitdata struct ocores_i2c_platform_data
2681+timberdale_i2c_platform_data = {
2682+ .regstep = 4,
2683+ .clock_khz = 62500,
2684+ .devices = timberdale_i2c_board_info,
2685+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
2686+};
2687+
2688+static const __devinitconst struct resource timberdale_i2c_resources[] = {
2689+ {
2690+ .start = I2COFFSET,
2691+ .end = I2CEND,
2692+ .flags = IORESOURCE_MEM,
2693+ },
2694+ {
2695+ .start = IRQ_TIMBERDALE_I2C,
2696+ .end = IRQ_TIMBERDALE_I2C,
2697+ .flags = IORESOURCE_IRQ,
2698+ },
2699+};
2700+
2701+static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
2702+ .bus_num = -1,
2703+ /* according to the spec we can have up to 32 slaves; however,
2704+ * as of the current (2009-03-06) revision of
2705+ * Timberdale we can only handle 3
2706+ */
2707+ .num_chipselect = 3,
2708+ .speed_hz = 1953125, /* hardcoded value in IP, for now */
2709+ .cr_offset = 0x60,
2710+ .sr_offset = 0x64,
2711+ .txd_offset = 0x68,
2712+ .rxd_offset = 0x6c,
2713+ .ssr_offset = 0x70
2714+};
2715+
2716+static const __devinitconst struct resource timberdale_spi_resources[] = {
2717+ {
2718+ .start = SPIOFFSET,
2719+ .end = SPIEND,
2720+ .flags = IORESOURCE_MEM,
2721+ },
2722+ {
2723+ .start = IRQ_TIMBERDALE_SPI,
2724+ .end = IRQ_TIMBERDALE_SPI,
2725+ .flags = IORESOURCE_IRQ,
2726+ },
2727+};
2728+
2729+static const __devinitconst struct resource timberdale_eth_resources[] = {
2730+ {
2731+ .start = ETHOFFSET,
2732+ .end = ETHEND,
2733+ .flags = IORESOURCE_MEM,
2734+ },
2735+ {
2736+ .start = IRQ_TIMBERDALE_ETHSW_IF,
2737+ .end = IRQ_TIMBERDALE_ETHSW_IF,
2738+ .flags = IORESOURCE_IRQ,
2739+ },
2740+};
2741+
2742+static const __devinitconst struct resource timberdale_gpio_resources[] = {
2743+ {
2744+ .start = GPIOOFFSET,
2745+ .end = GPIOEND,
2746+ .flags = IORESOURCE_MEM,
2747+ },
2748+ {
2749+ .start = IRQ_TIMBERDALE_GPIO,
2750+ .end = IRQ_TIMBERDALE_GPIO,
2751+ .flags = IORESOURCE_IRQ,
2752+ },
2753+};
2754+
2755+
2756+static const __devinitconst struct resource timberdale_most_resources[] = {
2757+ {
2758+ .start = MOSTOFFSET,
2759+ .end = MOSTEND,
2760+ .flags = IORESOURCE_MEM,
2761+ },
2762+ {
2763+ .start = IRQ_TIMBERDALE_MLB,
2764+ .end = IRQ_TIMBERDALE_MLB,
2765+ .flags = IORESOURCE_IRQ,
2766+ },
2767+};
2768+
2769+static const __devinitconst struct resource timberdale_uart_resources[] = {
2770+ {
2771+ .start = UARTOFFSET,
2772+ .end = UARTEND,
2773+ .flags = IORESOURCE_MEM,
2774+ },
2775+ {
2776+ .start = IRQ_TIMBERDALE_UART,
2777+ .end = IRQ_TIMBERDALE_UART,
2778+ .flags = IORESOURCE_IRQ,
2779+ },
2780+};
2781+
2782+static const __devinitconst struct resource timberdale_i2s_resources[] = {
2783+ {
2784+ .start = I2SOFFSET,
2785+ .end = I2SEND,
2786+ .flags = IORESOURCE_MEM,
2787+ },
2788+ {
2789+ .start = IRQ_TIMBERDALE_I2S,
2790+ .end = IRQ_TIMBERDALE_I2S,
2791+ .flags = IORESOURCE_IRQ,
2792+ },
2793+};
2794+
2795+static const __devinitconst struct resource timberdale_video_resources[] = {
2796+ {
2797+ .start = LOGIWOFFSET,
2798+ .end = LOGIWEND,
2799+ .flags = IORESOURCE_MEM,
2800+ },
2801+ /*
2802+ * note that the "frame buffer" is located in the DMA area
2803+ * starting at 0x1200000
2804+ */
2805+};
2806+
2807+static const __devinitconst struct resource timberdale_dma_resources[] = {
2808+ {
2809+ .start = DMAOFFSET,
2810+ .end = DMAEND,
2811+ .flags = IORESOURCE_MEM,
2812+ },
2813+ {
2814+ .start = IRQ_TIMBERDALE_DMA,
2815+ .end = IRQ_TIMBERDALE_DMA,
2816+ .flags = IORESOURCE_IRQ,
2817+ },
2818+};
2819+
2820+static __devinitdata struct mfd_cell timberdale_cells_bar0[] = {
2821+ {
2822+ .name = "timb-uart",
2823+ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
2824+ .resources = timberdale_uart_resources,
2825+ },
2826+ {
2827+ .name = "ocores-i2c",
2828+ .num_resources = ARRAY_SIZE(timberdale_i2c_resources),
2829+ .resources = timberdale_i2c_resources,
2830+ .platform_data = &timberdale_i2c_platform_data,
2831+ .data_size = sizeof(timberdale_i2c_platform_data),
2832+ },
2833+ {
2834+ .name = "timb-gpio",
2835+ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
2836+ .resources = timberdale_gpio_resources,
2837+ },
2838+ {
2839+ .name = "timb-i2s",
2840+ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
2841+ .resources = timberdale_i2s_resources,
2842+ },
2843+ {
2844+ .name = "timb-most",
2845+ .num_resources = ARRAY_SIZE(timberdale_most_resources),
2846+ .resources = timberdale_most_resources,
2847+ },
2848+ {
2849+ .name = "timb-video",
2850+ .num_resources = ARRAY_SIZE(timberdale_video_resources),
2851+ .resources = timberdale_video_resources,
2852+ },
2853+ {
2854+ .name = "xilinx_spi",
2855+ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
2856+ .resources = timberdale_spi_resources,
2857+ .platform_data = &timberdale_xspi_platform_data,
2858+ .data_size = sizeof(timberdale_xspi_platform_data),
2859+ },
2860+ {
2861+ .name = "ks884x",
2862+ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
2863+ .resources = timberdale_eth_resources,
2864+ },
2865+ {
2866+ .name = "timb-dma",
2867+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
2868+ .resources = timberdale_dma_resources,
2869+ },
2870+};
2871+
2872+static const __devinitconst struct resource timberdale_sdhc_resources_bar1[] = {
2873+ {
2874+ .start = SDHC0OFFSET,
2875+ .end = SDHC0END,
2876+ .flags = IORESOURCE_MEM,
2877+ },
2878+ {
2879+ .start = IRQ_TIMBERDALE_SDHC,
2880+ .end = IRQ_TIMBERDALE_SDHC,
2881+ .flags = IORESOURCE_IRQ,
2882+ },
2883+};
2884+
2885+static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
2886+ {
2887+ .name = "sdhci",
2888+ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources_bar1),
2889+ .resources = timberdale_sdhc_resources_bar1,
2890+ },
2891+};
2892+
2893+/*--------------------------------------------------------------------------*/
2894+
2895+
2896+/* Handle the timberdale interrupt mux */
2897+static void timberdale_irq(unsigned int irq, struct irq_desc *desc)
2898+{
2899+ struct timberdale_device *priv = get_irq_data(irq);
2900+ unsigned int i, ipr;
2901+
2902+ desc->chip->ack(irq);
2903+
2904+ while ((ipr = ioread32(priv->intc_membase + IPR))) {
2905+ priv->irq_ack_mask = 0;
2906+ for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
2907+ if (ipr & (1 << i))
2908+ generic_handle_irq(priv->irq_base + i);
2909+ if (priv->irq_ack_mask)
2910+ iowrite32(priv->irq_ack_mask, priv->intc_membase + IAR);
2911+ }
2912+}
2913+
2914+static void timberdale_irq_mask(unsigned int irq)
2915+{
2916+ struct timberdale_device *priv = get_irq_chip_data(irq);
2917+ unsigned long flags;
2918+
2919+ spin_lock_irqsave(&priv->lock, flags);
2920+ iowrite32(1 << (irq - priv->irq_base), priv->intc_membase + CIE);
2921+ spin_unlock_irqrestore(&priv->lock, flags);
2922+}
2923+
2924+static void timberdale_irq_unmask(unsigned int irq)
2925+{
2926+ struct timberdale_device *priv = get_irq_chip_data(irq);
2927+ unsigned long flags;
2928+
2929+ spin_lock_irqsave(&priv->lock, flags);
2930+ iowrite32(1 << (irq - priv->irq_base), priv->intc_membase + SIE);
2931+ spin_unlock_irqrestore(&priv->lock, flags);
2932+}
2933+
2934+static void timberdale_irq_ack(unsigned int irq)
2935+{
2936+ struct timberdale_device *priv = get_irq_chip_data(irq);
2937+ unsigned long flags;
2938+ u32 ack_mask = 1 << (irq - priv->irq_base);
2939+
2940+ spin_lock_irqsave(&priv->lock, flags);
2941+ /* if edge triggered, ack directly. Otherwise ack at the end of the
2942+ * irq handler
2943+ */
2944+ if (ack_mask & IRQ_TIMBERDALE_EDGE_MASK)
2945+ iowrite32(ack_mask, priv->intc_membase + IAR);
2946+ else
2947+ priv->irq_ack_mask |= ack_mask;
2948+ spin_unlock_irqrestore(&priv->lock, flags);
2949+}
2950+
2951+static struct irq_chip timberdale_chip = {
2952+ .name = "timberdale",
2953+ .ack = timberdale_irq_ack,
2954+ .mask = timberdale_irq_mask,
2955+ .unmask = timberdale_irq_unmask,
2956+ .disable = timberdale_irq_mask,
2957+ .enable = timberdale_irq_unmask,
2958+};
2959+
2960+/*--------------------------------------------------------------------------*/
2961+
2962+/* Install the IRQ handler */
2963+static void timberdale_attach_irq(struct pci_dev *dev)
2964+{
2965+ struct timberdale_device *priv = pci_get_drvdata(dev);
2966+ unsigned int irq, irq_base;
2967+
2968+ irq_base = priv->irq_base;
2969+ for (irq = irq_base; irq < irq_base + TIMBERDALE_NR_IRQS; irq++) {
2970+ set_irq_chip_and_handler_name(irq, &timberdale_chip,
2971+ handle_edge_irq, "mux");
2972+
2973+ set_irq_chip_data(irq, priv);
2974+
2975+#ifdef CONFIG_ARM
2976+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
2977+#endif
2978+ }
2979+
2980+ set_irq_data(dev->irq, priv);
2981+ set_irq_chained_handler(dev->irq, timberdale_irq);
2982+}
2983+
2984+static void timberdale_detach_irq(struct pci_dev *dev)
2985+{
2986+ struct timberdale_device *priv = pci_get_drvdata(dev);
2987+ unsigned int irq, irq_base;
2988+
2989+ irq_base = priv->irq_base;
2990+
2991+ set_irq_chained_handler(dev->irq, NULL);
2992+ set_irq_data(dev->irq, NULL);
2993+
2994+ for (irq = irq_base; irq < irq_base + TIMBERDALE_NR_IRQS; irq++) {
2995+#ifdef CONFIG_ARM
2996+ set_irq_flags(irq, 0);
2997+#endif
2998+ set_irq_chip(irq, NULL);
2999+ set_irq_chip_data(irq, NULL);
3000+ }
3001+}
3002+
3003+static int __devinit timb_probe(struct pci_dev *dev,
3004+ const struct pci_device_id *id)
3005+{
3006+ struct timberdale_device *priv;
3007+ int err, i;
3008+ u16 ver;
3009+ resource_size_t mapbase;
3010+
3011+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
3012+ if (!priv)
3013+ return -ENOMEM;
3014+
3015+ spin_lock_init(&priv->lock);
3016+ pci_set_drvdata(dev, priv);
3017+
3018+ err = pci_enable_device(dev);
3019+ if (err)
3020+ goto err_enable;
3021+
3022+ mapbase = pci_resource_start(dev, 0);
3023+ if (!mapbase) {
3024+ printk(KERN_ERR "timberdale: No resource\n");
3025+ goto err_start;
3026+ }
3027+
3028+ /* create a resource for the Interrupt controller registers */
3029+ priv->intc_mapbase = mapbase + INTCOFFSET;
3030+ if (!request_mem_region(priv->intc_mapbase, INTCSIZE, "timb-intc")) {
3031+ printk(KERN_ERR "timberdale: Failed to request intc mem\n");
3032+ goto err_request;
3033+ }
3034+
3035+ /* create a resource for the PCI master register */
3036+ priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
3037+ if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-intc")) {
3038+ printk(KERN_ERR "timberdale: Failed to request ctl mem\n");
3039+ goto err_request_ctl;
3040+ }
3041+
3042+ priv->intc_membase = ioremap(priv->intc_mapbase, INTCSIZE);
3043+ if (!priv->intc_membase) {
3044+ printk(KERN_ALERT "timberdale: Map error, intc\n");
3045+ goto err_ioremap;
3046+ }
3047+
3048+ priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
3049+ if (!priv->ctl_membase) {
3050+ printk(KERN_ALERT "timberdale: Map error, ctl\n");
3051+ goto err_ioremap_ctl;
3052+ }
3053+
3054+ err = pci_enable_msi(dev);
3055+ if (err) {
3056+ printk(KERN_WARNING "timberdale: MSI init failed: %d\n", err);
3057+ goto err_msi;
3058+ }
3059+
3060+ /* Reset all FPGA PLB peripherals */
3061+ iowrite32(0x1, priv->ctl_membase + MAYSVILLERST);
3062+
3063+ /* at this stage the FPGA does not generate a
3064+ * unique interrupt per function; to emulate real interrupts
3065+ * we assign each function a fake interrupt which we issue in the
3066+ * interrupt handler. For now just hard-code a base number
3067+ */
3068+ priv->irq_base = NR_IRQS - TIMBERDALE_NR_IRQS - 1;
3069+ if (priv->irq_base < dev->irq)
3070+ /* oops, the device itself got the IRQ in the end... */
3071+ priv->irq_base = 400;
3072+
3073+ timberdale_attach_irq(dev);
3074+
3075+ /* update IRQ offsets in I2C board info */
3076+ for (i = 0; i < ARRAY_SIZE(timberdale_i2c_board_info); i++)
3077+ timberdale_i2c_board_info[i].irq += priv->irq_base;
3078+
3079+ /* don't leave platform_data empty on any device */
3080+ for (i = 0; i < ARRAY_SIZE(timberdale_cells_bar0); i++)
3081+ if (timberdale_cells_bar0[i].platform_data == NULL) {
3082+ timberdale_cells_bar0[i].platform_data =
3083+ timberdale_cells_bar0 + i;
3084+ timberdale_cells_bar0[i].data_size =
3085+ sizeof(timberdale_cells_bar0[i]);
3086+ }
3087+
3088+ err = mfd_add_devices(&dev->dev, -1,
3089+ timberdale_cells_bar0, ARRAY_SIZE(timberdale_cells_bar0),
3090+ &dev->resource[0], priv->irq_base);
3091+ if (err)
3092+ printk(KERN_WARNING
3093+ "timberdale: mfd_add_devices failed: %d\n", err);
3094+ else {
3095+ err = mfd_add_devices(&dev->dev, -1,
3096+ timberdale_cells_bar1,
3097+ ARRAY_SIZE(timberdale_cells_bar1),
3098+ &dev->resource[1], priv->irq_base);
3099+
3100+ if (err)
3101+ printk(KERN_WARNING
3102+ "timberdale: timb_add_sdhci failed: %d\n", err);
3103+ }
3104+
3105+ if (err)
3106+ goto err_mfd;
3107+
3108+ ver = ioread16(priv->ctl_membase + TIMB_REV);
3109+
3110+ printk(KERN_INFO "Found Maysville Card. Rev: %d\n", ver);
3111+
3112+ /* Enable interrupts and wire the hardware interrupts */
3113+ iowrite32(0x3, priv->intc_membase + MER);
3114+
3115+ return 0;
3116+err_mfd:
3117+ timberdale_detach_irq(dev);
3118+ pci_disable_msi(dev);
3119+err_msi:
3120+ iounmap(priv->ctl_membase);
3121+err_ioremap_ctl:
3122+ iounmap(priv->intc_membase);
3123+err_ioremap:
3124+ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
3125+err_request_ctl:
3126+ release_mem_region(priv->intc_mapbase, INTCSIZE);
3127+err_request:
3128+ pci_set_drvdata(dev, NULL);
3129+err_start:
3130+ pci_disable_device(dev);
3131+err_enable:
3132+ kfree(priv);
3133+ pci_set_drvdata(dev, NULL);
3134+ return -ENODEV;
3135+}
3136+
3137+static void __devexit timb_remove(struct pci_dev *dev)
3138+{
3139+ /* release everything that was allocated and set up in
3140+ * timb_probe(), in reverse order
3141+ */
3142+ struct timberdale_device *priv;
3143+
3144+ priv = pci_get_drvdata(dev);
3145+
3146+ mfd_remove_devices(&dev->dev);
3147+
3148+ timberdale_detach_irq(dev);
3149+
3150+ iowrite32(0xffffffff, priv->intc_membase + IAR);
3151+ iowrite32(0, priv->intc_membase + MER);
3152+ iowrite32(0, priv->intc_membase + IER);
3153+
3154+ iounmap(priv->ctl_membase);
3155+ iounmap(priv->intc_membase);
3156+ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
3157+ release_mem_region(priv->intc_mapbase, INTCSIZE);
3158+
3159+ pci_disable_msi(dev);
3160+ pci_disable_device(dev);
3161+ pci_set_drvdata(dev, NULL);
3162+ kfree(priv);
3163+}
3164+
3165+static struct pci_device_id timberdale_pci_tbl[] = {
3166+ { PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
3167+ { 0 }
3168+};
3169+MODULE_DEVICE_TABLE(pci, timberdale_pci_tbl);
3170+
3171+static struct pci_driver timberdale_pci_driver = {
3172+ .name = "timberdale",
3173+ .id_table = timberdale_pci_tbl,
3174+ .probe = timb_probe,
3175+ .remove = timb_remove,
3176+};
3177+
3178+static int __init timberdale_init(void)
3179+{
3180+ int err;
3181+
3182+ err = pci_register_driver(&timberdale_pci_driver);
3183+ if (err < 0) {
3184+ printk(KERN_ERR
3185+ "Failed to register PCI driver for %s device.\n",
3186+ timberdale_pci_driver.name);
3187+ return -ENODEV;
3188+ }
3189+
3190+ printk(KERN_INFO "Driver for %s has been successfully registered.\n",
3191+ timberdale_pci_driver.name);
3192+
3193+ return 0;
3194+}
3195+
3196+static void __exit timberdale_exit(void)
3197+{
3198+ pci_unregister_driver(&timberdale_pci_driver);
3199+
3200+ printk(KERN_INFO "Driver for %s has been successfully unregistered.\n",
3201+ timberdale_pci_driver.name);
3202+}
3203+
3204+MODULE_LICENSE("GPL v2");
3205+MODULE_VERSION(DRV_VERSION);
3206+MODULE_AUTHOR("Richard Rojfors");
3207+
3208+module_init(timberdale_init);
3209+module_exit(timberdale_exit);
3210+
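The bar0 loop in timb_probe() points each cell's platform_data at its own struct mfd_cell, the stock 2.6.29 MFD pattern: mfd_add_devices() copies data_size bytes of platform_data into each child platform device, and rebases the cell's MEM resources against the BAR start and its IRQ resources against irq_base. A sub-driver can then recover everything with the ordinary platform helpers; a sketch with hypothetical names:

#include <linux/platform_device.h>
#include <linux/mfd/core.h>

static int my_sub_probe(struct platform_device *pdev)
{
	/* mfd_add_devices() copied the mfd_cell into our platform_data */
	struct mfd_cell *cell = pdev->dev.platform_data;
	/* already rebased against the PCI BAR by the MFD core */
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!cell || !mem)
		return -EINVAL;

	dev_info(&pdev->dev, "cell %s at %#llx\n", cell->name,
		 (unsigned long long)mem->start);
	return 0;
}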
3211diff -uNr linux-2.6.29-clean/drivers/mfd/timberdale.h linux-2.6.29/drivers/mfd/timberdale.h
3212--- linux-2.6.29-clean/drivers/mfd/timberdale.h 1969-12-31 16:00:00.000000000 -0800
3213+++ linux-2.6.29/drivers/mfd/timberdale.h 2009-04-06 13:51:47.000000000 -0700
3214@@ -0,0 +1,114 @@
3215+/*
3216+ * timberdale.h timberdale FPGA mfd shim driver defines
3217+ * Copyright (c) 2009 Intel Corporation
3218+ *
3219+ * This program is free software; you can redistribute it and/or modify
3220+ * it under the terms of the GNU General Public License version 2 as
3221+ * published by the Free Software Foundation.
3222+ *
3223+ * This program is distributed in the hope that it will be useful,
3224+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3225+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3226+ * GNU General Public License for more details.
3227+ *
3228+ * You should have received a copy of the GNU General Public License
3229+ * along with this program; if not, write to the Free Software
3230+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3231+ */
3232+
3233+/* Supports:
3234+ * Timberdale FPGA
3235+ */
3236+
3237+#ifndef MFD_TIMBERDALE_H
3238+#define MFD_TIMBERDALE_H
3239+
3240+/* Registers of the interrupt controller */
3241+#define ISR 0x00
3242+#define IPR 0x04
3243+#define IER 0x08
3244+#define IAR 0x0c
3245+#define SIE 0x10
3246+#define CIE 0x14
3247+#define MER 0x1c
3248+
3249+/* Registers of the control area */
3250+#define TIMB_REV 0x00
3251+#define MAYSVILLERST 0x40
3252+
3253+
3254+#define I2COFFSET 0x0
3255+#define I2CEND 0x1f
3256+
3257+#define SPIOFFSET 0x80
3258+#define SPIEND 0xff
3259+
3260+#define ETHOFFSET 0x300
3261+#define ETHEND 0x30f
3262+
3263+#define GPIOOFFSET 0x400
3264+#define GPIOEND 0x7ff
3265+
3266+#define CHIPCTLOFFSET 0x800
3267+#define CHIPCTLEND 0x8ff
3268+#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET + 1)
3269+
3270+#define INTCOFFSET 0xc00
3271+#define INTCEND 0xfff
3272+#define INTCSIZE (INTCEND - INTCOFFSET + 1)
3273+
3274+#define MOSTOFFSET 0x1000
3275+#define MOSTEND 0x13ff
3276+
3277+#define UARTOFFSET 0x1400
3278+#define UARTEND 0x17ff
3279+
3280+#define I2SOFFSET 0x1C00
3281+#define I2SEND 0x1fff
3282+
3283+#define LOGIWOFFSET 0x30000
3284+#define LOGIWEND 0x37fff
3285+
3286+#define DMAOFFSET 0x01000000
3287+#define DMAEND 0x013fffff
3288+
3289+/* SDHC0 is placed in PCI bar 1 */
3290+#define SDHC0OFFSET 0x00
3291+#define SDHC0END 0xff
3292+
3293+/* SDHC1 is placed in PCI bar 2 */
3294+#define SDHC1OFFSET 0x00
3295+#define SDHC1END 0xff
3296+
3297+#define PCI_VENDOR_ID_TIMB 0x10ee
3298+#define PCI_DEVICE_ID_TIMB 0xa123
3299+#define DRV_VERSION "0.1"
3300+
3301+
3302+#define IRQ_TIMBERDALE_INIC 0
3303+#define IRQ_TIMBERDALE_MLB 1
3304+#define IRQ_TIMBERDALE_GPIO 2
3305+#define IRQ_TIMBERDALE_I2C 3
3306+#define IRQ_TIMBERDALE_UART 4
3307+#define IRQ_TIMBERDALE_DMA 5
3308+#define IRQ_TIMBERDALE_I2S 6
3309+#define IRQ_TIMBERDALE_TSC_INT 7
3310+#define IRQ_TIMBERDALE_SDHC 8
3311+#define IRQ_TIMBERDALE_ADV7180 9
3312+#define IRQ_TIMBERDALE_ETHSW_IF 10
3313+#define IRQ_TIMBERDALE_SPI 11
3314+
3315+#define TIMBERDALE_NR_IRQS 12
3316+
3317+/* Some of the interrupts are level triggered, some are edge triggered */
3318+#define IRQ_TIMBERDALE_EDGE_MASK ((1 << IRQ_TIMBERDALE_ADV7180) | \
3319+ (1 << IRQ_TIMBERDALE_TSC_INT) | (1 << IRQ_TIMBERDALE_DMA) | \
3320+ (1 << IRQ_TIMBERDALE_MLB) | (1 << IRQ_TIMBERDALE_INIC))
3321+
3322+#define IRQ_TIMBERDALE_LEVEL_MASK ((1 << IRQ_TIMBERDALE_SPI) | \
3323+ (1 << IRQ_TIMBERDALE_ETHSW_IF) | (1 << IRQ_TIMBERDALE_SDHC) | \
3324+ (1 << IRQ_TIMBERDALE_I2S) | (1 << IRQ_TIMBERDALE_UART) | \
3325+ (1 << IRQ_TIMBERDALE_I2C) | (1 << IRQ_TIMBERDALE_GPIO))
3326+
3327+#endif
3328+
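The IRQ_TIMBERDALE_* values above are bit positions in the interrupt mux, not Linux IRQ numbers; timberdale.c rebases them by irq_base when the cells are added, and timberdale_irq() redispatches each pending IPR bit through generic_handle_irq(). A cell driver therefore just asks for its first IRQ resource; a sketch under those assumptions (the my_cell_* names are illustrative):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t my_cell_isr(int irq, void *dev_id)
{
	/* runs when timberdale_irq() dispatches our IPR bit */
	return IRQ_HANDLED;
}

static int my_cell_setup_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* already rebased */

	if (irq < 0)
		return irq;
	return request_irq(irq, my_cell_isr, 0, "my-cell", pdev);
}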
3329diff -uNr linux-2.6.29-clean/drivers/mfd/timbi2s.c linux-2.6.29/drivers/mfd/timbi2s.c
3330--- linux-2.6.29-clean/drivers/mfd/timbi2s.c 1969-12-31 16:00:00.000000000 -0800
3331+++ linux-2.6.29/drivers/mfd/timbi2s.c 2009-04-06 13:51:47.000000000 -0700
3332@@ -0,0 +1,597 @@
3333+/*
3334+ * timbi2s.c timberdale FPGA I2S driver
3335+ * Copyright (c) 2009 Intel Corporation
3336+ *
3337+ * This program is free software; you can redistribute it and/or modify
3338+ * it under the terms of the GNU General Public License version 2 as
3339+ * published by the Free Software Foundation.
3340+ *
3341+ * This program is distributed in the hope that it will be useful,
3342+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3343+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3344+ * GNU General Public License for more details.
3345+ *
3346+ * You should have received a copy of the GNU General Public License
3347+ * along with this program; if not, write to the Free Software
3348+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3349+ */
3350+
3351+/* Supports:
3352+ * Timberdale FPGA I2S
3353+ *
3354+ * As of 2009-03-23 I2S instances
3355+ * are not configured as masters
3356+ *
3357+ * TODO: implement switching between master and slave
3358+ */
3359+
3360+#include <linux/io.h>
3361+#include <linux/fs.h>
3362+#include <linux/module.h>
3363+#include <linux/circ_buf.h>
3364+#include <linux/spinlock.h>
3365+#include <linux/workqueue.h>
3366+#include <linux/interrupt.h>
3367+#include <linux/platform_device.h>
3368+
3369+#include <linux/mfd/timbi2s.h>
3370+
3371+#define DRIVER_NAME "timb-i2s"
3372+
3373+#define I2S_CLOCK_SPEED 62500000 /* 62.5 MHz */
3374+
3375+#define FIFO_FILL_SIZE 127
3376+#define I2S_BUFFER_SIZE PAGE_SIZE
3377+
3378+#define ALMOST_FULL 170
3379+#define ALMOST_EMPTY 85
3380+
3381+/* As of 2009-03-16, the IP can instantiate max. 4 RX and 4 TX */
3382+#define MAX_TX_NR 4
3383+#define MAX_RX_NR 4
3384+/* and only 4 of them are actually up and running:
3385+ * 1 TX and 3 RX
3386+ */
3387+#define IP_I2S_NR 4
3388+#define REGSTEP 0x04
3389+
3390+#define VERSION 0x00
3391+#define I2S_UIR 0x04 /* Unit Interrupt Register */
3392+
3393+/* Registers for all possible I2S IP instances
3394+ * are the same as for first one (from 0x08 to 0x20)
3395+ */
3396+#define I2S_PRESCALE 0x08 /* Holds prescale value, if clock master */
3397+#define I2S_ICR 0x0c /* Interrupt Clear Register */
3398+# define ICR_F 0x01 /* Full */
3399+# define ICR_AF 0x02 /* Almost full */
3400+# define ICR_AE 0x04 /* Almost empty */
3401+# define ICR_RX_D 0x08 /* Data present, RX only */
3402+# define ICR_TX_E 0x08 /* Empty, TX only */
3403+
3404+#define I2S_IPR 0x10 /* Interrupt Pending Register */
3405+#define I2S_ISR 0x14 /* Interrupt Status Register */
3406+
3407+#define I2S_IER 0x18 /* Interrupt Enable Register */
3408+# define IER_FF 0x01 /* RX/TX FIFO Full */
3409+# define IER_FAF 0x02 /* RX/TX FIFO Almost Full */
3410+# define IER_FAE 0x04 /* RX/TX FIFO Almost Empty */
3411+# define IER_RX_DATA 0x08 /* RX. Data Present */
3412+# define IER_TX_FE 0x08 /* TX. FIFO Empty */
3413+
3414+#define I2S_CTRL 0x1c /* Control Register */
3415+# define CTRL_TX_ENABLE 0x01 /* Enable TX */
3416+# define CTRL_RX_ENABLE 0x02 /* Enable RX */
3417+# define CTRL_NONE 0x04 /* Not used */
3418+# define CTRL_FIFO_CLR 0x08 /* FIFO Clear */
3419+# define CTRL_SWR 0x10 /* Soft reset */
3420+# define CTRL_CLKMASTER 0x1000 /* IP I2S instance is master */
3421+# define CTRL_IS_TX 0x40000000 /* IP I2S is an TX-instance */
3422+# define CTRL_IS_RX 0x20000000 /* IP I2S is an RX-instance */
3423+
3424+#define I2S_FIFO 0x20 /* read/write FIFO */
3425+
3426+#define INC_HEAD(buf, size) \
3427+ (buf->head = (buf->head + 1) & (size-1))
3428+
3429+#define INC_TAIL(buf, size) \
3430+ (buf->tail = (buf->tail + 1) & (size-1))
3431+
3432+
3433+/* circular buffer */
3434+static struct circ_buf *timbi2s_buf_alloc(void);
3435+static void timbi2s_buf_free(struct circ_buf *cb);
3436+static void timbi2s_buf_clear(struct circ_buf *cb);
3437+
3438+static int timbi2s_fifo_read(struct circ_buf *cb, ssize_t count, long add);
3439+static int timbi2s_fifo_write(struct circ_buf *cb, ssize_t count, long add);
3440+
3441+int timbi2s_ioctrl(struct timbi2s_dev *);
3442+
3443+static struct timbi2s_bus *bus_p;
3444+
3445+static int timbi2s_is_tx(struct timbi2s_dev *i2sdev)
3446+{
3447+ return (ioread32(i2sdev->membase + i2sdev->ctrl_offset)
3448+ & CTRL_IS_TX) ? 1 : 0;
3449+}
3450+
3451+static int timbi2s_is_rx(struct timbi2s_dev *i2sdev)
3452+{
3453+ return (ioread32(i2sdev->membase + i2sdev->ctrl_offset)
3454+ & CTRL_IS_RX) ? 1 : 0;
3455+}
3456+
3457+/* Return unused TX-instance */
3458+struct timbi2s_dev *timbi2s_get_tx(void)
3459+{
3460+ struct timbi2s_dev *tdev, *tmp;
3461+
3462+ if (bus_p == NULL)
3463+ return NULL;
3464+
3465+ list_for_each_entry_safe(tdev, tmp, &bus_p->control->list, item) {
3466+ if (!tdev->in_use && timbi2s_is_tx(tdev)) {
3467+ tdev->in_use = 1;
3468+ return tdev;
3469+ }
3470+
3471+ }
3472+ return NULL;
3473+}
3474+EXPORT_SYMBOL_GPL(timbi2s_get_tx);
3475+
3476+/* Return unused RX-instance */
3477+struct timbi2s_dev *timbi2s_get_rx(void)
3478+{
3479+ struct timbi2s_dev *tdev, *tmp;
3480+
3481+ if (bus_p == NULL)
3482+ return NULL;
3483+
3484+ list_for_each_entry_safe(tdev, tmp, &bus_p->control->list, item) {
3485+ if (!tdev->in_use && timbi2s_is_rx(tdev)) {
3486+ tdev->in_use = 1;
3487+ return tdev;
3488+ }
3489+
3490+ }
3491+ return NULL;
3492+}
3493+EXPORT_SYMBOL_GPL(timbi2s_get_rx);
3494+
3495+/* Flag TX/RX as unused and reset it */
3496+void timbi2s_put(struct timbi2s_dev *tdev)
3497+{
3498+ if (tdev->in_use) {
3499+ tdev->in_use = 0;
3500+ timbi2s_ioctrl(tdev);
3501+ }
3502+}
3503+EXPORT_SYMBOL_GPL(timbi2s_put);
3504+
3505+/*
3506+ * Write data to the FIFO
3507+ */
3508+static void timbi2s_tx_handler(struct timbi2s_dev *i2sdev)
3509+{
3510+ u32 pend;
3511+
3512+ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
3513+
3514+ if (pend & IER_FAE) {
3515+ timbi2s_fifo_write(i2sdev->buffer,
3516+ ALMOST_FULL - ALMOST_EMPTY,
3517+ (unsigned long)i2sdev->membase +
3518+ i2sdev->fifo);
3519+ /* clear interrupt */
3520+ iowrite32(ICR_AE, i2sdev->membase + i2sdev->icr_offset);
3521+ }
3522+}
3523+
3524+/*
3525+ * Read data from the FIFO
3526+ */
3527+static void timbi2s_rx_handler(struct timbi2s_dev *i2sdev)
3528+{
3529+ u32 pend;
3530+ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
3531+
3532+ if (pend & IER_FAE) {
3533+ timbi2s_fifo_read(i2sdev->buffer,
3534+ ALMOST_EMPTY,
3535+ (unsigned long)i2sdev->membase +
3536+ i2sdev->fifo);
3537+
3538+ /* clear interrupt */
3539+ iowrite32(ICR_AE | ICR_AF,
3540+ i2sdev->membase + i2sdev->icr_offset);
3541+ }
3542+}
3543+
3544+static void timbi2s_int_handler(struct work_struct *workp)
3545+{
3546+ u32 pend, stat, i2stype;
3547+ unsigned long flags;
3548+ struct timbi2s_dev *i2sdev = container_of(workp,
3549+ struct timbi2s_dev,
3550+ work);
3551+
3552+ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
3553+ stat = ioread32(i2sdev->membase + i2sdev->isr_offset);
3554+ i2stype = ioread32(i2sdev->membase + i2sdev->ctrl_offset);
3555+
3556+ spin_lock_irqsave(&i2sdev->lock, flags);
3557+
3558+ if (i2stype & CTRL_IS_RX) {
3559+ /* Enable Almost Empty Almost Full interrupt */
3560+ iowrite32(IER_FAE | IER_FAF,
3561+ i2sdev->membase + i2sdev->ier_offset);
3562+ /* Enable RX */
3563+ iowrite32(CTRL_RX_ENABLE,
3564+ i2sdev->membase + i2sdev->ctrl_offset);
3565+ timbi2s_rx_handler(i2sdev);
3566+ } else if (i2stype & CTRL_IS_TX) {
3567+ /* Enable Almost Empty interrupt */
3568+ iowrite32(IER_FAE, i2sdev->membase + i2sdev->ier_offset);
3569+ /* Enable TX */
3570+ iowrite32(CTRL_TX_ENABLE,
3571+ i2sdev->membase + i2sdev->ctrl_offset);
3572+ timbi2s_tx_handler(i2sdev);
3573+ }
3574+
3575+ spin_unlock_irqrestore(&i2sdev->lock, flags);
3576+}
3577+
3578+int timbi2s_ioctrl(struct timbi2s_dev *i2sdev)
3579+{
3580+ u32 i2stype;
3581+
3582+ /* Reset */
3583+ iowrite8(CTRL_SWR, i2sdev->membase + i2sdev->ctrl_offset);
3584+ /* Clear IER */
3585+ iowrite32(0x00000000, i2sdev->membase + i2sdev->ier_offset);
3586+ /* Clear ICR */
3587+ iowrite32(0xffffffff, i2sdev->membase + i2sdev->icr_offset);
3588+
3589+ i2stype = ioread32(i2sdev->membase + i2sdev->ctrl_offset);
3590+
3591+ if (i2stype & CTRL_IS_TX)
3592+ printk(KERN_INFO DRIVER_NAME": found active I2S Transmitter\n");
3593+ else if (i2stype & CTRL_IS_RX)
3594+ printk(KERN_INFO DRIVER_NAME": found active I2S Receiver\n");
3595+
3596+ return 1;
3597+}
3598+EXPORT_SYMBOL_GPL(timbi2s_ioctrl);
3599+
3600+static struct circ_buf *timbi2s_buf_alloc(void)
3601+{
3602+ struct circ_buf *cb;
3603+
3604+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
3605+ if (cb == NULL)
3606+ return NULL;
3607+
3608+ cb->buf = kzalloc(I2S_BUFFER_SIZE, GFP_KERNEL);
3609+ if (cb->buf == NULL) {
3610+ kfree(cb);
3611+ return NULL;
3612+ }
3613+
3614+ timbi2s_buf_clear(cb);
3615+
3616+ return cb;
3617+}
3618+
3619+static void timbi2s_buf_free(struct circ_buf *cb)
3620+{
3621+ kfree(cb->buf);
3622+ kfree(cb);
3623+}
3624+
3625+static void timbi2s_buf_clear(struct circ_buf *cb)
3626+{
3627+ cb->head = 0;
3628+ cb->tail = cb->head;
3629+}
3630+
3631+/*
3632+ * Read data from the FIFO and write it to the given circular buffer
3633+ */
3634+static int timbi2s_fifo_read(struct circ_buf *cb, ssize_t count, long add)
3635+{
3636+ int c, ret = 0;
3637+
3638+ unsigned char *hi = (unsigned char *)ioread32((void *)(add >> 16));
3639+ unsigned char *lo = (unsigned char *)ioread32((void *)(add & 0xFFFF));
3640+
3641+ c = CIRC_SPACE_TO_END(cb->head, cb->tail, I2S_BUFFER_SIZE);
3642+ if (count < c)
3643+ c = count;
3644+
3645+ if (c <= 0)
3646+ return 1;
3647+
3648+ while (c > 0) {
3649+ memcpy(cb->buf + cb->head, hi, 2);
3650+ INC_HEAD(cb, I2S_BUFFER_SIZE);
3651+
3652+ memcpy(cb->buf + cb->head, lo, 2);
3653+ INC_HEAD(cb, I2S_BUFFER_SIZE);
3654+ c -= 4;
3655+ }
3656+ return ret;
3657+}
3658+
3659+/*
3660+ * Get data from the circular buffer and write it to the given FIFO address
3661+ */
3662+static int timbi2s_fifo_write(struct circ_buf *cb, ssize_t count, long add)
3663+{
3664+ int c, ret = 0;
3665+
3666+ c = CIRC_CNT_TO_END(cb->head, cb->tail, I2S_BUFFER_SIZE);
3667+ if (count < c)
3668+ c = count;
3669+
3670+ if (c <= 0)
3671+ return 1;
3672+
3673+ while (c > 0) {
3674+ iowrite32(*(s16 *)(cb->buf + cb->tail), (void *)(add >> 16));
3675+ INC_TAIL(cb, I2S_BUFFER_SIZE);
3676+
3677+ iowrite32(*(s16 *)(cb->buf + cb->tail), (void *)(add & 0xFFFF));
3678+ INC_TAIL(cb, I2S_BUFFER_SIZE);
3679+ c -= 4;
3680+ }
3681+
3682+ return ret;
3683+}
3684+
3685+static void timbi2s_control_destroy(struct timbi2s_bus_control *control)
3686+{
3687+ kfree(control);
3689+}
3690+
3691+static void timbi2s_control_add_dev(struct timbi2s_dev *i2sdev)
3692+{
3693+ list_add(&i2sdev->item, &i2sdev->bus->control->list);
3694+}
3695+
3696+static void timbi2s_control_del_dev(struct timbi2s_dev *i2sdev)
3697+{
3698+ list_del(&i2sdev->item);
3699+ if (list_empty(&i2sdev->bus->control->list))
3700+ timbi2s_control_destroy(i2sdev->bus->control);
3701+}
3702+
3703+static irqreturn_t timbi2s_irq(int irq, void *dev_id)
3704+{
3705+ u8 pend;
3706+ u32 iunit;
3707+ int i;
3708+
3709+ struct timbi2s_bus *tbus = dev_id;
3710+ queue_work(tbus->workqueue, &tbus->work);
3711+
3712+ iunit = ioread32(tbus->membase + I2S_UIR);
3713+ /* Find out which I2S instance is interrupting */
3714+ for (i = 0; i < 32; i++) {
3715+ if ((1 << i) & iunit) {
3716+ pend = ioread8(tbus->membase +
3717+ (I2S_IPR + (i * REGSTEP * 7)));
3718+ iowrite8(pend, tbus->membase +
3719+ (I2S_ICR + (i * REGSTEP * 7)));
3720+ }
3721+ }
3722+
3723+ return IRQ_HANDLED;
3724+}
3725+
3726+static int __devinit timbi2s_probe(struct platform_device *dev)
3727+{
3728+ int err = 0;
3729+ struct timbi2s_dev *tdev, *tmp;
3730+ struct timbi2s_bus *tbus;
3731+ struct resource *iomem;
3732+ int i;
3733+
3734+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
3735+ if (!iomem) {
3736+ err = -EINVAL;
3737+ goto err_mem;
3738+ }
3739+
3740+ tbus = kzalloc(sizeof(*tbus), GFP_KERNEL);
3741+ if (!tbus) {
3742+ err = -ENOMEM;
3743+ goto err_mem;
3744+ }
3745+
3746+ /* Init bus_control */
3747+ tbus->control = kzalloc(sizeof(struct timbi2s_bus_control), GFP_KERNEL);
3748+ if (!tbus->control) {
3749+ printk(KERN_ERR DRIVER_NAME
3750+ ": Failed to allocate timbi2s_bus_control.\n");
3751+ err = -ENOMEM;
3752+ goto err_free;
3753+ }
3754+ INIT_LIST_HEAD(&tbus->control->list);
3755+
3756+ /* Init workqueue */
3757+ tbus->workqueue = create_singlethread_workqueue("timbi2s");
3758+ if (tbus->workqueue == NULL) {
3759+ printk(KERN_ERR DRIVER_NAME
3760+ ": unable to create workqueue\n");
3761+ err = -ENOMEM;
3762+ goto err_control;
3763+ }
3764+ INIT_WORK(&tbus->work, timbi2s_int_handler);
3765+
3766+ if (!request_mem_region(iomem->start,
3767+ resource_size(iomem), DRIVER_NAME)) {
3768+ printk(KERN_ERR DRIVER_NAME
3769+ ": Mem region is already in use\n");
3770+ err = -ENXIO;
3771+ goto err_control;
3772+ }
3773+
3774+ tbus->membase = ioremap(iomem->start, resource_size(iomem));
3775+ if (tbus->membase == NULL) {
3776+ err = -ENOMEM;
3777+ goto err_request;
3778+ }
3779+
3780+ bus_p = tbus;
3781+
3782+
3783+
3784+ /* For now we have only 4 I2S instances in IP : 3 RX and 1 TX */
3785+ /* Note: TX'es are always on top */
3786+ /* TODO: auto-check how many are alive and bring them into control */
3787+ for (i = 0; i < IP_I2S_NR; i++) {
3788+ tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
3789+ if (!tdev) {
3790+ err = -ENOMEM;
3791+ goto clean_list;
3792+ }
3793+
3794+ /* Allocate circ_buf */
3795+ tdev->buffer = timbi2s_buf_alloc();
3796+ if (tdev->buffer == NULL) {
3797+ printk(KERN_ERR "timbi2s: unable to allocate buffer\n");
3798+ err = -ENOMEM;
3799+ goto clean_list;
3800+ }
3801+ INIT_LIST_HEAD(&tdev->item);
3802+ spin_lock_init(&tdev->lock);
3803+
3804+ /* set up offsets for each instance of I2S */
3805+ tdev->bus = tbus; /* ptr to our bus */
3806+ tdev->membase = tbus->membase;
3807+ tdev->in_use = 0;
3808+ tdev->pscale_offset = I2S_PRESCALE + (i * REGSTEP * 7);
3809+ tdev->icr_offset = I2S_ICR + (i * REGSTEP * 7);
3810+ tdev->isr_offset = I2S_ISR + (i * REGSTEP * 7);
3811+ tdev->ipr_offset = I2S_IPR + (i * REGSTEP * 7);
3812+ tdev->ier_offset = I2S_IER + (i * REGSTEP * 7);
3813+ tdev->ctrl_offset = I2S_CTRL + (i * REGSTEP * 7);
3814+ tdev->fifo = I2S_FIFO + (i * REGSTEP * 7);
3815+
3816+ /* Try to check and reset hardware */
3817+ if (timbi2s_ioctrl(tdev))
3818+ timbi2s_control_add_dev(tdev);
3819+
3820+ tdev = NULL;
3821+ }
3822+
3823+ tbus->irq = platform_get_irq(dev, 0);
3824+ if (tbus->irq < 0) {
3825+ err = -EINVAL;
3826+ goto clean_list;
3827+ }
3828+
3829+ err = request_irq(tbus->irq, timbi2s_irq, 0, DRIVER_NAME, tbus);
3830+ if (err != 0)
3831+ goto clean_list;
3832+
3833+ platform_set_drvdata(dev, tbus);
3834+
3835+ dev_info(&dev->dev, "Driver for Timberdale I2S (ver: %d)"
3836+ " has been successfully registered.\n",
3837+ ioread32(tbus->membase + VERSION));
3838+ return 0;
3839+
3840+clean_list:
3841+ list_for_each_entry_safe(tdev, tmp, &tbus->control->list, item) {
3842+ if (tdev->workqueue != NULL) {
3843+ flush_workqueue(tdev->workqueue);
3844+ destroy_workqueue(tdev->workqueue);
3845+ }
3846+
3847+ if (tdev->buffer != NULL)
3848+ timbi2s_buf_free(tdev->buffer);
3849+
3850+ timbi2s_control_del_dev(tdev);
3851+ kfree(tdev);
3852+ }
3854+ iounmap(tbus->membase);
3855+err_request:
3856+ release_mem_region(iomem->start, resource_size(iomem));
3857+err_control:
3858+ if (tbus->control != NULL)
3859+ timbi2s_control_destroy(tbus->control);
3860+err_free:
3861+ kfree(tbus);
3862+err_mem:
3863+ printk(KERN_ERR
3864+ DRIVER_NAME": Failed to register Timberdale I2S: %d\n", err);
3865+
3866+ return err;
3867+}
3868+
3869+static int __devexit timbi2s_remove(struct platform_device *dev)
3870+{
3871+ struct timbi2s_bus *tbus;
3872+ struct timbi2s_dev *tdev, *tmp;
3873+ struct resource *r;
3874+
3875+ tbus = platform_get_drvdata(dev);
3876+ free_irq(tbus->irq, tbus);
3877+
3878+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
3879+
3880+ list_for_each_entry_safe(tdev, tmp, &tbus->control->list, item) {
3881+ if (tdev->workqueue != NULL) {
3882+ flush_workqueue(tdev->workqueue);
3883+ destroy_workqueue(tdev->workqueue);
3884+ }
3885+
3886+ if (tdev->buffer != NULL)
3887+ timbi2s_buf_free(tdev->buffer);
3888+
3889+ kfree(tdev);
3890+ }
3891+
3892+ iounmap(tbus->membase);
3893+ if (r)
3894+ release_mem_region(r->start, resource_size(r));
3895+
3896+ dev_info(&dev->dev, "Driver for Timberdale I2S has been"
3897+ " successfully unregistered.\n");
3898+
3899+ platform_set_drvdata(dev, NULL);
3900+ return 0;
3901+}
3902+
3903+static struct platform_driver timbi2s_platform_driver = {
3904+ .driver = {
3905+ .name = DRIVER_NAME,
3906+ .owner = THIS_MODULE,
3907+ },
3908+ .probe = timbi2s_probe,
3909+ .remove = __devexit_p(timbi2s_remove),
3910+};
3911+
3912+/*--------------------------------------------------------------------------*/
3913+
3914+static int __init timbi2s_init(void)
3915+{
3916+ return platform_driver_register(&timbi2s_platform_driver);
3917+}
3918+
3919+static void __exit timbi2s_exit(void)
3920+{
3921+ platform_driver_unregister(&timbi2s_platform_driver);
3922+}
3923+
3924+module_init(timbi2s_init);
3925+module_exit(timbi2s_exit);
3926+
3927+MODULE_AUTHOR("Mocean Laboratories");
3928+MODULE_DESCRIPTION("Timberdale I2S bus driver");
3929+MODULE_LICENSE("GPL v2");
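timbi2s_get_tx() and timbi2s_get_rx() above hand out an unused instance from the bus list and flag it in_use; timbi2s_put() flags it unused again and soft-resets it via timbi2s_ioctrl(). A sketch of a consumer, assuming linux/mfd/timbi2s.h (not part of this hunk) declares struct timbi2s_dev and these entry points; my_audio_attach is an illustrative name:

#include <linux/errno.h>
#include <linux/mfd/timbi2s.h>

static int my_audio_attach(void)
{
	struct timbi2s_dev *tx = timbi2s_get_tx();

	if (!tx)
		return -EBUSY;	/* every TX instance is in use */

	/*
	 * stage samples in tx->buffer here; the work handler drains
	 * the circ_buf into the FIFO on the almost-empty interrupt
	 */

	timbi2s_put(tx);	/* mark unused and reset the instance */
	return 0;
}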
3930diff -uNr linux-2.6.29-clean/drivers/mmc/host/Kconfig linux-2.6.29/drivers/mmc/host/Kconfig
3931--- linux-2.6.29-clean/drivers/mmc/host/Kconfig 2009-04-01 09:20:24.000000000 -0700
3932+++ linux-2.6.29/drivers/mmc/host/Kconfig 2009-04-06 13:51:47.000000000 -0700
3933@@ -65,6 +65,16 @@
3934
3935 If unsure, say Y.
3936
3937+config MMC_SDHCI_PLTFM
3938+ tristate "SDHCI support on platform devices"
3939+ depends on MMC_SDHCI
3940+ help
3941+ This selects the Secure Digital Host Controller Interface on platform devices.
3942+
3943+ If you have a controller with this interface, say Y or M here.
3944+
3945+ If unsure, say N.
3946+
3947 config MMC_OMAP
3948 tristate "TI OMAP Multimedia Card Interface support"
3949 depends on ARCH_OMAP
3950diff -uNr linux-2.6.29-clean/drivers/mmc/host/Makefile linux-2.6.29/drivers/mmc/host/Makefile
3951--- linux-2.6.29-clean/drivers/mmc/host/Makefile 2009-04-01 09:20:24.000000000 -0700
3952+++ linux-2.6.29/drivers/mmc/host/Makefile 2009-04-06 13:51:47.000000000 -0700
3953@@ -13,6 +13,7 @@
3954 obj-$(CONFIG_MMC_SDHCI) += sdhci.o
3955 obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
3956 obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
3957+obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
3958 obj-$(CONFIG_MMC_WBSD) += wbsd.o
3959 obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
3960 obj-$(CONFIG_MMC_OMAP) += omap.o
3961diff -uNr linux-2.6.29-clean/drivers/mmc/host/sdhci-pltfm.c linux-2.6.29/drivers/mmc/host/sdhci-pltfm.c
3962--- linux-2.6.29-clean/drivers/mmc/host/sdhci-pltfm.c 1969-12-31 16:00:00.000000000 -0800
3963+++ linux-2.6.29/drivers/mmc/host/sdhci-pltfm.c 2009-04-06 13:51:47.000000000 -0700
3964@@ -0,0 +1,262 @@
3965+/*
3966+ * sdhci-pltfm.c Support for SDHCI platform devices
3967+ * Copyright (c) 2009 Intel Corporation
3968+ *
3969+ * This program is free software; you can redistribute it and/or modify
3970+ * it under the terms of the GNU General Public License version 2 as
3971+ * published by the Free Software Foundation.
3972+ *
3973+ * This program is distributed in the hope that it will be useful,
3974+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3975+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3976+ * GNU General Public License for more details.
3977+ *
3978+ * You should have received a copy of the GNU General Public License
3979+ * along with this program; if not, write to the Free Software
3980+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
3981+ */
3982+
3983+/* Supports:
3984+ * SDHCI platform devices
3985+ *
3986+ * Inspired by sdhci-pci.c, by Pierre Ossman
3987+ */
3988+
3989+#include <linux/delay.h>
3990+#include <linux/highmem.h>
3991+#include <linux/platform_device.h>
3992+
3993+#include <linux/mmc/host.h>
3994+
3995+#include <linux/io.h>
3996+
3997+#include "sdhci.h"
3998+
3999+
4000+#define MAX_SLOTS 8
4001+
4002+struct sdhci_pltfm_chip;
4003+
4004+struct sdhci_pltfm_slot {
4005+ struct sdhci_pltfm_chip *chip;
4006+ struct sdhci_host *host;
4007+
4008+ int pltfm_resource;
4009+};
4010+
4011+struct sdhci_pltfm_chip {
4012+ struct platform_device *pdev;
4013+
4014+ unsigned int quirks;
4015+
4016+ int num_slots; /* Slots on controller */
4017+ struct sdhci_pltfm_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
4018+};
4019+
4020+
4021+/*****************************************************************************\
4022+ * *
4023+ * SDHCI core callbacks *
4024+ * *
4025+\*****************************************************************************/
4026+
4027+static struct sdhci_ops sdhci_pltfm_ops = {
4028+};
4029+
4030+/*****************************************************************************\
4031+ * *
4032+ * Device probing/removal *
4033+ * *
4034+\*****************************************************************************/
4035+
4036+
4037+static struct sdhci_pltfm_slot * __devinit sdhci_pltfm_probe_slot(
4038+ struct platform_device *pdev, struct sdhci_pltfm_chip *chip,
4039+ int resource)
4040+{
4041+ struct sdhci_pltfm_slot *slot;
4042+ struct sdhci_host *host;
4043+ struct resource *iomem;
4044+ int ret;
4045+
4046+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, resource);
4047+ if (!iomem)
4048+ return ERR_PTR(-ENODEV);
4049+
4050+ if (resource_size(iomem) != 0x100) {
4051+ dev_err(&pdev->dev, "Invalid iomem size. You may "
4052+ "experience problems.\n");
4053+ }
4054+
4055+ if (!pdev->dev.parent) {
4056+ dev_err(&pdev->dev, "The parent device be a PCI device\n");
4057+ return ERR_PTR(-ENODEV);
4058+ }
4059+
4060+ host = sdhci_alloc_host(pdev->dev.parent,
4061+ sizeof(struct sdhci_pltfm_slot));
4062+ if (IS_ERR(host))
4063+ return ERR_CAST(host);
4064+
4065+ slot = sdhci_priv(host);
4066+
4067+ slot->chip = chip;
4068+ slot->host = host;
4069+ slot->pltfm_resource = resource;
4070+
4071+ host->hw_name = "PLTFM";
4072+ host->ops = &sdhci_pltfm_ops;
4073+ host->quirks = chip->quirks;
4074+
4075+ host->irq = platform_get_irq(pdev, 0);
4076+
4077+ if (!request_mem_region(iomem->start, resource_size(iomem),
4078+ mmc_hostname(host->mmc))) {
4079+ dev_err(&pdev->dev, "cannot request region\n");
4080+ ret = -EBUSY;
4081+ goto free;
4082+ }
4083+
4084+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
4085+	if (!host->ioaddr) {
4086+		dev_err(&pdev->dev, "failed to remap registers\n");
+		ret = -ENOMEM;
4087+		goto release;
4088+ }
4089+
4090+ ret = sdhci_add_host(host);
4091+ if (ret)
4092+ goto unmap;
4093+
4094+ return slot;
4095+
4096+unmap:
4097+ iounmap(host->ioaddr);
4098+release:
4099+ release_mem_region(iomem->start, resource_size(iomem));
4100+free:
4101+ sdhci_free_host(host);
4102+
4103+ return ERR_PTR(ret);
4104+}
4105+
4106+static void sdhci_pltfm_remove_slot(struct sdhci_pltfm_slot *slot)
4107+{
4108+ int dead;
4109+ u32 scratch;
4110+ struct resource *iomem;
4111+
4112+ dead = 0;
4113+ scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
4114+ if (scratch == (u32)-1)
4115+ dead = 1;
4116+
4117+ sdhci_remove_host(slot->host, dead);
4118+
4119+ iounmap(slot->host->ioaddr);
4120+
4121+ iomem = platform_get_resource(slot->chip->pdev, IORESOURCE_MEM,
4122+ slot->pltfm_resource);
4123+ release_mem_region(iomem->start, resource_size(iomem));
4124+
4125+ sdhci_free_host(slot->host);
4126+}
4127+
4128+static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
4129+{
4130+ struct sdhci_pltfm_chip *chip;
4131+ struct sdhci_pltfm_slot *slot;
4132+ u8 slots;
4133+ int ret, i;
4134+
4135+ BUG_ON(pdev == NULL);
4136+
4137+ for (slots = 0; slots <= MAX_SLOTS; slots++)
4138+ if (!platform_get_resource(pdev, IORESOURCE_MEM, slots))
4139+ break;
4140+
4141+ BUG_ON(slots > MAX_SLOTS || slots == 0);
4142+
4143+ chip = kzalloc(sizeof(struct sdhci_pltfm_chip), GFP_KERNEL);
4144+ if (!chip) {
4145+ ret = -ENOMEM;
4146+ goto err;
4147+ }
4148+
4149+ chip->pdev = pdev;
4150+ chip->num_slots = slots;
4151+ platform_set_drvdata(pdev, chip);
4152+
4153+ for (i = 0; i < slots; i++) {
4154+ slot = sdhci_pltfm_probe_slot(pdev, chip, i);
4155+ if (IS_ERR(slot)) {
4156+ for (i--; i >= 0; i--)
4157+ sdhci_pltfm_remove_slot(chip->slots[i]);
4158+ ret = PTR_ERR(slot);
4159+ goto free;
4160+ }
4161+
4162+ chip->slots[i] = slot;
4163+ }
4164+
4165+ return 0;
4166+
4167+free:
4168+ platform_set_drvdata(pdev, NULL);
4169+ kfree(chip);
4170+
4171+err:
4172+	printk(KERN_ERR "Probing of sdhci-pltfm failed: %d\n", ret);
4173+ return ret;
4174+}
4175+
4176+static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
4177+{
4178+ int i;
4179+ struct sdhci_pltfm_chip *chip;
4180+
4181+ chip = platform_get_drvdata(pdev);
4182+
4183+ if (chip) {
4184+ for (i = 0; i < chip->num_slots; i++)
4185+ sdhci_pltfm_remove_slot(chip->slots[i]);
4186+
4187+ platform_set_drvdata(pdev, NULL);
4188+ kfree(chip);
4189+ }
4190+
4191+ return 0;
4192+}
4193+
4194+static struct platform_driver sdhci_pltfm_driver = {
4195+ .driver = {
4196+ .name = "sdhci",
4197+ .owner = THIS_MODULE,
4198+ },
4199+ .probe = sdhci_pltfm_probe,
4200+ .remove = __devexit_p(sdhci_pltfm_remove),
4201+};
4202+
4203+/*****************************************************************************\
4204+ * *
4205+ * Driver init/exit *
4206+ * *
4207+\*****************************************************************************/
4208+
4209+static int __init sdhci_drv_init(void)
4210+{
4211+ return platform_driver_register(&sdhci_pltfm_driver);
4212+}
4213+
4214+static void __exit sdhci_drv_exit(void)
4215+{
4216+ platform_driver_unregister(&sdhci_pltfm_driver);
4217+}
4218+
4219+module_init(sdhci_drv_init);
4220+module_exit(sdhci_drv_exit);
4221+
4222+MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
4223+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
4224+MODULE_LICENSE("GPL v2");
4225+MODULE_ALIAS("platform:sdhci");
4226+
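For reference, sdhci-pltfm binds by platform-device name, expects one 0x100-byte register window per slot plus the IRQ, and needs a parent device to allocate the host against (sdhci_alloc_host() is called with pdev->dev.parent). A minimal board-side sketch follows; the base address, IRQ number and example_* names are hypothetical, only the "sdhci" name comes from the driver above.

	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/ioport.h>

	/* Hypothetical addresses: one 0x100-byte window per SDHCI slot. */
	static struct resource example_sdhci_resources[] = {
		{
			.start = 0x90000000,
			.end   = 0x90000000 + 0x100 - 1,
			.flags = IORESOURCE_MEM,
		},
		{
			.start = 23,	/* hypothetical IRQ line */
			.end   = 23,
			.flags = IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_sdhci_device = {
		.name		= "sdhci",	/* matches sdhci_pltfm_driver */
		.id		= -1,
		.num_resources	= ARRAY_SIZE(example_sdhci_resources),
		.resource	= example_sdhci_resources,
	};

	/* Register from the owning driver (e.g. a PCI MFD) so dev.parent is set:
	 *	example_sdhci_device.dev.parent = &pci_dev->dev;
	 *	platform_device_register(&example_sdhci_device);
	 */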
4227diff -uNr linux-2.6.29-clean/drivers/serial/Kconfig linux-2.6.29/drivers/serial/Kconfig
4228--- linux-2.6.29-clean/drivers/serial/Kconfig 2009-04-01 09:20:24.000000000 -0700
4229+++ linux-2.6.29/drivers/serial/Kconfig 2009-04-06 13:51:47.000000000 -0700
4230@@ -1412,4 +1412,11 @@
4231 default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
4232 default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
4233
4234+config SERIAL_TIMBERDALE
4235+ tristate "Support for timberdale UART"
4236+ depends on MFD_TIMBERDALE
4237+ select SERIAL_CORE
4238+ ---help---
4239+	  Add support for the UART controller found on the timberdale FPGA.
4240+
4241 endmenu
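With the Kconfig entry above, the usual configuration workflow applies; for example, in .config (assuming the MFD_TIMBERDALE symbol it depends on, added elsewhere in this series, is enabled):

	CONFIG_MFD_TIMBERDALE=y
	CONFIG_SERIAL_TIMBERDALE=m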
4242diff -uNr linux-2.6.29-clean/drivers/serial/Makefile linux-2.6.29/drivers/serial/Makefile
4243--- linux-2.6.29-clean/drivers/serial/Makefile 2009-04-01 09:20:24.000000000 -0700
4244+++ linux-2.6.29/drivers/serial/Makefile 2009-04-06 13:51:47.000000000 -0700
4245@@ -76,3 +76,4 @@
4246 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
4247 obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
4248 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
4249+obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
4250diff -uNr linux-2.6.29-clean/drivers/serial/timbuart.c linux-2.6.29/drivers/serial/timbuart.c
4251--- linux-2.6.29-clean/drivers/serial/timbuart.c 1969-12-31 16:00:00.000000000 -0800
4252+++ linux-2.6.29/drivers/serial/timbuart.c 2009-04-06 13:51:47.000000000 -0700
4253@@ -0,0 +1,519 @@
4254+/*
4255+ * timbuart.c timberdale FPGA UART driver
4256+ * Copyright (c) 2009 Intel Corporation
4257+ *
4258+ * This program is free software; you can redistribute it and/or modify
4259+ * it under the terms of the GNU General Public License version 2 as
4260+ * published by the Free Software Foundation.
4261+ *
4262+ * This program is distributed in the hope that it will be useful,
4263+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
4264+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4265+ * GNU General Public License for more details.
4266+ *
4267+ * You should have received a copy of the GNU General Public License
4268+ * along with this program; if not, write to the Free Software
4269+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
4270+ */
4271+
4272+/* Supports:
4273+ * Timberdale FPGA UART
4274+ */
4275+
4276+#include <linux/pci.h>
4277+#include <linux/interrupt.h>
4278+#include <linux/serial_core.h>
4279+#include <linux/kernel.h>
4280+#include <linux/platform_device.h>
4281+#include <linux/ioport.h>
4282+
4283+#include "timbuart.h"
4284+
4285+struct timbuart_port {
4286+ struct uart_port port;
4287+ struct tasklet_struct tasklet;
4288+ int usedma;
4289+ u8 last_ier;
4290+ struct platform_device *dev;
4291+};
4292+
4293+static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
4294+ 921600, 1843200, 3250000};
4295+
4296+static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
4297+
4298+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
4299+
4300+static void timbuart_stop_rx(struct uart_port *port)
4301+{
4302+ /* spin lock held by upper layer, disable all RX interrupts */
4303+ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
4304+ iowrite8(ier, port->membase + TIMBUART_IER);
4305+}
4306+
4307+static void timbuart_stop_tx(struct uart_port *port)
4308+{
4309+ /* spinlock held by upper layer, disable TX interrupt */
4310+ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
4311+ iowrite8(ier, port->membase + TIMBUART_IER);
4312+}
4313+
4314+static void timbuart_start_tx(struct uart_port *port)
4315+{
4316+ struct timbuart_port *uart =
4317+ container_of(port, struct timbuart_port, port);
4318+
4319+ /* do not transfer anything here -> fire off the tasklet */
4320+ tasklet_schedule(&uart->tasklet);
4321+}
4322+
4323+static void timbuart_flush_buffer(struct uart_port *port)
4324+{
4325+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
4326+
4327+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
4328+ iowrite8(TXBF, port->membase + TIMBUART_ISR);
4329+}
4330+
4331+static void timbuart_rx_chars(struct uart_port *port)
4332+{
4333+ struct tty_struct *tty = port->info->port.tty;
4334+
4335+ while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
4336+ u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
4337+ /* ack */
4338+ iowrite8(RXDP, port->membase + TIMBUART_ISR);
4339+ port->icount.rx++;
4340+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
4341+ }
4342+
4343+ spin_unlock(&port->lock);
4344+ tty_flip_buffer_push(port->info->port.tty);
4345+ spin_lock(&port->lock);
4346+
4347+ dev_dbg(port->dev, "%s - total read %d bytes\n",
4348+ __func__, port->icount.rx);
4349+}
4350+
4351+static void timbuart_tx_chars(struct uart_port *port)
4352+{
4353+ struct circ_buf *xmit = &port->info->xmit;
4354+
4355+ while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
4356+ !uart_circ_empty(xmit)) {
4357+ iowrite8(xmit->buf[xmit->tail],
4358+ port->membase + TIMBUART_TXFIFO);
4359+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
4360+ port->icount.tx++;
4361+ }
4362+
4363+ dev_dbg(port->dev,
4364+ "%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
4365+ __func__,
4366+ port->icount.tx,
4367+ ioread8(port->membase + TIMBUART_CTRL),
4368+ port->mctrl & TIOCM_RTS,
4369+ ioread8(port->membase + TIMBUART_BAUDRATE));
4370+}
4371+
4372+static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
4373+{
4374+ struct timbuart_port *uart =
4375+ container_of(port, struct timbuart_port, port);
4376+ struct circ_buf *xmit = &port->info->xmit;
4377+
4378+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
4379+ return;
4380+
4381+ if (port->x_char)
4382+ return;
4383+
4384+ if (isr & TXFLAGS) {
4385+ timbuart_tx_chars(port);
4386+ /* clear all TX interrupts */
4387+ iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
4388+
4389+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
4390+ uart_write_wakeup(port);
4391+ } else
4392+ /* Re-enable any tx interrupt */
4393+ *ier |= uart->last_ier & TXFLAGS;
4394+
4395+	/* Enable interrupts if there are chars in the transmit buffer,
4396+	 * or if we delivered some bytes and want the almost-empty interrupt.
4397+	 * We wake up the upper layer later, when that interrupt arrives,
4398+	 * to give the data some time to go out...
4399+	 */
4400+ if (!uart_circ_empty(xmit))
4401+ *ier |= TXBAE;
4402+
4403+ dev_dbg(port->dev, "%s - leaving\n", __func__);
4404+}
4405+
4406+void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
4407+{
4408+ if (isr & RXFLAGS) {
4409+ /* Some RX status is set */
4410+ if (isr & RXBF) {
4411+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
4412+ TIMBUART_CTRL_FLSHRX;
4413+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
4414+ port->icount.overrun++;
4415+ } else if (isr & (RXDP))
4416+ timbuart_rx_chars(port);
4417+
4418+ /* ack all RX interrupts */
4419+ iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
4420+ }
4421+
4422+ /* always have the RX interrupts enabled */
4423+ *ier |= RXBAF | RXBF | RXTT;
4424+
4425+ dev_dbg(port->dev, "%s - leaving\n", __func__);
4426+}
4427+
4428+void timbuart_tasklet(unsigned long arg)
4429+{
4430+ struct timbuart_port *uart = (struct timbuart_port *)arg;
4431+ u8 isr, ier = 0;
4432+
4433+ spin_lock(&uart->port.lock);
4434+
4435+ isr = ioread8(uart->port.membase + TIMBUART_ISR);
4436+ dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
4437+
4438+ if (!uart->usedma)
4439+ timbuart_handle_tx_port(&uart->port, isr, &ier);
4440+
4441+ timbuart_mctrl_check(&uart->port, isr, &ier);
4442+
4443+ if (!uart->usedma)
4444+ timbuart_handle_rx_port(&uart->port, isr, &ier);
4445+
4446+ iowrite8(ier, uart->port.membase + TIMBUART_IER);
4447+
4448+ spin_unlock(&uart->port.lock);
4449+ dev_dbg(uart->port.dev, "%s leaving\n", __func__);
4450+}
4451+
4452+static unsigned int timbuart_tx_empty(struct uart_port *port)
4453+{
4454+ u8 isr = ioread8(port->membase + TIMBUART_ISR);
4455+
4456+ return (isr & TXBAE) ? TIOCSER_TEMT : 0;
4457+}
4458+
4459+static unsigned int timbuart_get_mctrl(struct uart_port *port)
4460+{
4461+ u8 cts = ioread8(port->membase + TIMBUART_CTRL);
4462+ dev_dbg(port->dev, "%s - cts %x\n", __func__, cts);
4463+
4464+ if (cts & TIMBUART_CTRL_CTS)
4465+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
4466+ else
4467+ return TIOCM_DSR | TIOCM_CAR;
4468+}
4469+
4470+static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
4471+{
4472+ dev_dbg(port->dev, "%s - %x\n", __func__, mctrl);
4473+
4474+ if (mctrl & TIOCM_RTS)
4475+ iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
4476+	else
4477+		iowrite8(0, port->membase + TIMBUART_CTRL);
4478+}
4479+
4480+static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
4481+{
4482+ unsigned int cts;
4483+
4484+ if (isr & CTS_DELTA) {
4485+ /* ack */
4486+ iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
4487+ cts = timbuart_get_mctrl(port);
4488+ uart_handle_cts_change(port, cts & TIOCM_CTS);
4489+ wake_up_interruptible(&port->info->delta_msr_wait);
4490+ }
4491+
4492+ *ier |= CTS_DELTA;
4493+}
4494+
4495+static void timbuart_enable_ms(struct uart_port *port)
4496+{
4497+ /* N/A */
4498+}
4499+
4500+static void timbuart_break_ctl(struct uart_port *port, int ctl)
4501+{
4502+ /* N/A */
4503+}
4504+
4505+static int timbuart_startup(struct uart_port *port)
4506+{
4507+ struct timbuart_port *uart =
4508+ container_of(port, struct timbuart_port, port);
4509+
4510+ dev_dbg(port->dev, "%s\n", __func__);
4511+
4512+ iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
4513+ iowrite8(0xff, port->membase + TIMBUART_ISR);
4514+ /* Enable all but TX interrupts */
4515+ iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
4516+ port->membase + TIMBUART_IER);
4517+
4518+ return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
4519+ "timb-uart", uart);
4520+}
4521+
4522+static void timbuart_shutdown(struct uart_port *port)
4523+{
4524+ struct timbuart_port *uart =
4525+ container_of(port, struct timbuart_port, port);
4526+ dev_dbg(port->dev, "%s\n", __func__);
4527+ free_irq(port->irq, uart);
4528+ iowrite8(0, port->membase + TIMBUART_IER);
4529+}
4530+
4531+static int get_bindex(int baud)
4532+{
4533+ int i;
4534+
4535+ for (i = 0; i < ARRAY_SIZE(baudrates); i++)
4536+ if (baud == baudrates[i])
4537+ return i;
4538+
4539+ return -1;
4540+}
4541+
4542+static void timbuart_set_termios(struct uart_port *port,
4543+ struct ktermios *termios,
4544+ struct ktermios *old)
4545+{
4546+ unsigned int baud;
4547+ short bindex;
4548+ unsigned long flags;
4549+
4550+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
4551+ bindex = get_bindex(baud);
4552+ dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
4553+
4554+ if (bindex < 0) {
4555+ printk(KERN_ALERT "timbuart: Unsupported baud rate\n");
4556+ } else {
4557+ spin_lock_irqsave(&port->lock, flags);
4558+ iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
4559+ uart_update_timeout(port, termios->c_cflag, baud);
4560+ spin_unlock_irqrestore(&port->lock, flags);
4561+ }
4562+}
4563+
4564+static const char *timbuart_type(struct uart_port *port)
4565+{
4566+ return port->type == PORT_UNKNOWN ? "timbuart" : NULL;
4567+}
4568+
4569+/* We do not request/release mappings of the registers here,
4570+ * it is currently done in the probe function.
4571+ */
4572+static void timbuart_release_port(struct uart_port *port)
4573+{
4574+ struct platform_device *pdev = to_platform_device(port->dev);
4575+ int size =
4576+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
4577+
4578+ if (port->flags & UPF_IOREMAP) {
4579+ iounmap(port->membase);
4580+ port->membase = NULL;
4581+ }
4582+
4583+ release_mem_region(port->mapbase, size);
4584+}
4585+
4586+static int timbuart_request_port(struct uart_port *port)
4587+{
4588+ struct platform_device *pdev = to_platform_device(port->dev);
4589+ int size =
4590+ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
4591+
4592+ if (!request_mem_region(port->mapbase, size, "timb-uart"))
4593+ return -EBUSY;
4594+
4595+ if (port->flags & UPF_IOREMAP) {
4596+ port->membase = ioremap(port->mapbase, size);
4597+ if (port->membase == NULL) {
4598+ release_mem_region(port->mapbase, size);
4599+ return -ENOMEM;
4600+ }
4601+ }
4602+
4603+ return 0;
4604+}
4605+
4606+static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
4607+{
4608+ struct timbuart_port *uart = (struct timbuart_port *)devid;
4609+
4610+ uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
4611+
4612+ /* disable interrupts, let the tasklet enable them again if needed */
4613+ iowrite8(0, uart->port.membase + TIMBUART_IER);
4614+
4615+ /* fire off bottom half */
4616+ tasklet_schedule(&uart->tasklet);
4617+
4618+ return IRQ_HANDLED;
4619+}
4620+
4621+/*
4622+ * Configure/autoconfigure the port.
4623+ */
4624+static void timbuart_config_port(struct uart_port *port, int flags)
4625+{
4626+ if (flags & UART_CONFIG_TYPE) {
4627+ port->type = PORT_TIMBUART;
4628+ timbuart_request_port(port);
4629+ }
4630+}
4631+
4632+static int timbuart_verify_port(struct uart_port *port,
4633+ struct serial_struct *ser)
4634+{
4635+ /* we don't want the core code to modify any port params */
4636+ return -EINVAL;
4637+}
4638+
4639+static struct uart_ops timbuart_ops = {
4640+ .tx_empty = timbuart_tx_empty,
4641+ .set_mctrl = timbuart_set_mctrl,
4642+ .get_mctrl = timbuart_get_mctrl,
4643+ .stop_tx = timbuart_stop_tx,
4644+ .start_tx = timbuart_start_tx,
4645+ .flush_buffer = timbuart_flush_buffer,
4646+ .stop_rx = timbuart_stop_rx,
4647+ .enable_ms = timbuart_enable_ms,
4648+ .break_ctl = timbuart_break_ctl,
4649+ .startup = timbuart_startup,
4650+ .shutdown = timbuart_shutdown,
4651+ .set_termios = timbuart_set_termios,
4652+ .type = timbuart_type,
4653+ .release_port = timbuart_release_port,
4654+ .request_port = timbuart_request_port,
4655+ .config_port = timbuart_config_port,
4656+ .verify_port = timbuart_verify_port
4657+};
4658+
4659+static struct uart_driver timbuart_driver = {
4660+ .owner = THIS_MODULE,
4661+ .driver_name = "timberdale_uart",
4662+ .dev_name = "ttyTU",
4663+ .major = TIMBUART_MAJOR,
4664+ .minor = TIMBUART_MINOR,
4665+ .nr = 1
4666+};
4667+
4668+static int timbuart_probe(struct platform_device *dev)
4669+{
4670+ int err;
4671+ struct timbuart_port *uart;
4672+ struct resource *iomem;
4673+
4674+ dev_dbg(&dev->dev, "%s\n", __func__);
4675+
4676+ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
4677+ if (!uart) {
4678+		err = -ENOMEM;
4679+ goto err_mem;
4680+ }
4681+
4682+ uart->usedma = 0;
4683+
4684+ uart->port.uartclk = 3250000 * 16;
4685+ uart->port.fifosize = TIMBUART_FIFO_SIZE;
4686+ uart->port.regshift = 2;
4687+ uart->port.iotype = UPIO_MEM;
4688+ uart->port.ops = &timbuart_ops;
4689+ uart->port.irq = 0;
4690+ uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
4691+ uart->port.line = 0;
4692+ uart->port.dev = &dev->dev;
4693+
4694+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
4695+ if (!iomem) {
4696+ err = -ENOMEM;
4697+ goto err_register;
4698+ }
4699+ uart->port.mapbase = iomem->start;
4700+ uart->port.membase = NULL;
4701+
4702+	err = platform_get_irq(dev, 0);
4703+	if (err < 0) {
4704+		err = -EINVAL;
4705+		goto err_register;
4706+	}
+	uart->port.irq = err;
4707+
4708+ tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
4709+
4710+ err = uart_register_driver(&timbuart_driver);
4711+ if (err)
4712+ goto err_register;
4713+
4714+ err = uart_add_one_port(&timbuart_driver, &uart->port);
4715+ if (err)
4716+ goto err_add_port;
4717+
4718+ platform_set_drvdata(dev, uart);
4719+
4720+ return 0;
4721+
4722+err_add_port:
4723+ uart_unregister_driver(&timbuart_driver);
4724+err_register:
4725+ kfree(uart);
4726+err_mem:
4727+ printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n",
4728+ err);
4729+
4730+ return err;
4731+}
4732+
4733+static int timbuart_remove(struct platform_device *dev)
4734+{
4735+ struct timbuart_port *uart = platform_get_drvdata(dev);
4736+
4737+ tasklet_kill(&uart->tasklet);
4738+ uart_remove_one_port(&timbuart_driver, &uart->port);
4739+ uart_unregister_driver(&timbuart_driver);
4740+ kfree(uart);
4741+
4742+ return 0;
4743+}
4744+
4745+static struct platform_driver timbuart_platform_driver = {
4746+ .driver = {
4747+ .name = "timb-uart",
4748+ .owner = THIS_MODULE,
4749+ },
4750+ .probe = timbuart_probe,
4751+ .remove = timbuart_remove,
4752+};
4753+
4754+/*--------------------------------------------------------------------------*/
4755+
4756+static int __init timbuart_init(void)
4757+{
4758+ return platform_driver_register(&timbuart_platform_driver);
4759+}
4760+
4761+static void __exit timbuart_exit(void)
4762+{
4763+ platform_driver_unregister(&timbuart_platform_driver);
4764+}
4765+
4766+module_init(timbuart_init);
4767+module_exit(timbuart_exit);
4768+
4769+MODULE_DESCRIPTION("Timberdale UART driver");
4770+MODULE_LICENSE("GPL v2");
4771+MODULE_ALIAS("platform:timb-uart");
4772+
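The UART registers itself as "timb-uart" and exposes a single port as ttyTU0 (major 204, minor 192, per timbuart.h below). A board-side registration sketch under the same assumptions as before (hypothetical base address, IRQ and example_* names):

	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/ioport.h>

	static struct resource example_timbuart_resources[] = {
		{
			.start = 0x90000100,		/* hypothetical */
			.end   = 0x90000100 + 0xff,
			.flags = IORESOURCE_MEM,
		},
		{
			.start = 24,			/* hypothetical IRQ */
			.end   = 24,
			.flags = IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_timbuart_device = {
		.name		= "timb-uart",	/* matches timbuart_platform_driver */
		.id		= -1,
		.num_resources	= ARRAY_SIZE(example_timbuart_resources),
		.resource	= example_timbuart_resources,
	};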
4773diff -uNr linux-2.6.29-clean/drivers/serial/timbuart.h linux-2.6.29/drivers/serial/timbuart.h
4774--- linux-2.6.29-clean/drivers/serial/timbuart.h 1969-12-31 16:00:00.000000000 -0800
4775+++ linux-2.6.29/drivers/serial/timbuart.h 2009-04-06 13:51:47.000000000 -0700
4776@@ -0,0 +1,57 @@
4777+/*
4778+ * timbuart.h timberdale FPGA UART driver
4779+ * Copyright (c) 2009 Intel Corporation
4780+ *
4781+ * This program is free software; you can redistribute it and/or modify
4782+ * it under the terms of the GNU General Public License version 2 as
4783+ * published by the Free Software Foundation.
4784+ *
4785+ * This program is distributed in the hope that it will be useful,
4786+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
4787+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4788+ * GNU General Public License for more details.
4789+ *
4790+ * You should have received a copy of the GNU General Public License
4791+ * along with this program; if not, write to the Free Software
4792+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
4793+ */
4794+
4795+/* Supports:
4796+ * Timberdale FPGA UART
4797+ */
4798+
4799+#ifndef _TIMBUART_H
4800+#define _TIMBUART_H
4801+
4802+#define TIMBUART_FIFO_SIZE 2048
4803+
4804+#define TIMBUART_RXFIFO 0x08
4805+#define TIMBUART_TXFIFO 0x0c
4806+#define TIMBUART_IER 0x10
4807+#define TIMBUART_IPR 0x14
4808+#define TIMBUART_ISR 0x18
4809+#define TIMBUART_CTRL 0x1c
4810+#define TIMBUART_BAUDRATE 0x20
4811+
4812+#define TIMBUART_CTRL_RTS 0x01
4813+#define TIMBUART_CTRL_CTS 0x02
4814+#define TIMBUART_CTRL_FLSHTX 0x40
4815+#define TIMBUART_CTRL_FLSHRX 0x80
4816+
4817+#define TXBF 0x01
4818+#define TXBAE 0x02
4819+#define CTS_DELTA 0x04
4820+#define RXDP 0x08
4821+#define RXBAF 0x10
4822+#define RXBF 0x20
4823+#define RXTT 0x40
4824+#define RXBNAE 0x80
4825+
4826+#define RXFLAGS (RXDP | RXBAF | RXBF | RXTT | RXBNAE)
4827+#define TXFLAGS (TXBF | TXBAE)
4828+
4829+#define TIMBUART_MAJOR 204
4830+#define TIMBUART_MINOR 192
4831+
4832+#endif /* _TIMBUART_H */
4833+
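As an illustration of the register map above, a polled receive helper for bring-up tests might look as follows; this is a sketch only (the real driver is interrupt driven), and `regs` is an assumed ioremap()'d base address:

	#include <linux/io.h>
	#include "timbuart.h"

	/* Mirrors timbuart_rx_chars(): one byte per call, or -1 if empty. */
	static int example_poll_char(void __iomem *regs)
	{
		u8 ch;

		if (!(ioread8(regs + TIMBUART_ISR) & RXDP))
			return -1;			/* no data present */
		ch = ioread8(regs + TIMBUART_RXFIFO);	/* read the byte... */
		iowrite8(RXDP, regs + TIMBUART_ISR);	/* ...then ack RXDP */
		return ch;
	}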
4834diff -uNr linux-2.6.29-clean/drivers/spi/Kconfig linux-2.6.29/drivers/spi/Kconfig
4835--- linux-2.6.29-clean/drivers/spi/Kconfig 2009-04-01 09:20:25.000000000 -0700
4836+++ linux-2.6.29/drivers/spi/Kconfig 2009-04-06 13:51:47.000000000 -0700
4837@@ -211,8 +211,8 @@
4838 SPI driver for Toshiba TXx9 MIPS SoCs
4839
4840 config SPI_XILINX
4841- tristate "Xilinx SPI controller"
4842- depends on XILINX_VIRTEX && EXPERIMENTAL
4843+ tristate "Xilinx SPI controller common module"
4844+ depends on EXPERIMENTAL
4845 select SPI_BITBANG
4846 help
4847 This exposes the SPI controller IP from the Xilinx EDK.
4848@@ -220,6 +220,25 @@
4849 See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
4850 Product Specification document (DS464) for hardware details.
4851
4852+config SPI_XILINX_OF
4853+ tristate "Xilinx SPI controller OF device"
4854+ depends on SPI_XILINX && XILINX_VIRTEX
4855+ help
4856+ This exposes the SPI controller IP from the Xilinx EDK.
4857+
4858+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
4859+ Product Specification document (DS464) for hardware details.
4860+
4861+config SPI_XILINX_PLTFM
4862+ tristate "Xilinx SPI controller platform device"
4863+ depends on SPI_XILINX
4864+ help
4865+ This exposes the SPI controller IP from the Xilinx EDK.
4866+
4867+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
4868+ Product Specification document (DS464) for hardware details.
4869+
4870+
4871 #
4872 # Add new SPI master controllers in alphabetical order above this line
4873 #
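The net effect of this Kconfig change is to split the driver into a common core (SPI_XILINX) plus two bus bindings. A configuration enabling the core and the platform-device binding would look like this (the OF binding additionally requires XILINX_VIRTEX):

	CONFIG_SPI_XILINX=m
	CONFIG_SPI_XILINX_PLTFM=m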
4874diff -uNr linux-2.6.29-clean/drivers/spi/Makefile linux-2.6.29/drivers/spi/Makefile
4875--- linux-2.6.29-clean/drivers/spi/Makefile 2009-04-01 09:20:25.000000000 -0700
4876+++ linux-2.6.29/drivers/spi/Makefile 2009-04-06 13:51:47.000000000 -0700
4877@@ -29,6 +29,8 @@
4878 obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
4879 obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
4880 obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
4881+obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
4882+obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o
4883 obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
4884 # ... add above this line ...
4885
4886diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi.c linux-2.6.29/drivers/spi/xilinx_spi.c
4887--- linux-2.6.29-clean/drivers/spi/xilinx_spi.c 2009-04-01 09:20:25.000000000 -0700
4888+++ linux-2.6.29/drivers/spi/xilinx_spi.c 2009-04-06 13:51:47.000000000 -0700
4889@@ -14,22 +14,28 @@
4890 #include <linux/module.h>
4891 #include <linux/init.h>
4892 #include <linux/interrupt.h>
4893-#include <linux/platform_device.h>
4894-
4895-#include <linux/of_platform.h>
4896-#include <linux/of_device.h>
4897-#include <linux/of_spi.h>
4898
4899 #include <linux/spi/spi.h>
4900 #include <linux/spi/spi_bitbang.h>
4901 #include <linux/io.h>
4902
4903-#define XILINX_SPI_NAME "xilinx_spi"
4904+#include "xilinx_spi.h"
4905+
4906+#ifndef CONFIG_PPC
4907+#define in_8(addr) ioread8(addr)
4908+#define in_be16(addr) ioread16(addr)
4909+#define in_be32(addr) ioread32(addr)
4910+
4911+#define out_8(addr, b) iowrite8(b, addr)
4912+#define out_be16(addr, w) iowrite16(w, addr)
4913+#define out_be32(addr, l) iowrite32(l, addr)
4914+#endif
4915+
4916
4917 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
4918 * Product Specification", DS464
4919 */
4920-#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
4921+#define XSPI_CR_OFFSET_DEF 0x62 /* 16-bit Control Register */
4922
4923 #define XSPI_CR_ENABLE 0x02
4924 #define XSPI_CR_MASTER_MODE 0x04
4925@@ -41,7 +47,7 @@
4926 #define XSPI_CR_MANUAL_SSELECT 0x80
4927 #define XSPI_CR_TRANS_INHIBIT 0x100
4928
4929-#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
4930+#define XSPI_SR_OFFSET_DEF 0x67 /* 8-bit Status Register */
4931
4932 #define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
4933 #define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
4934@@ -49,10 +55,10 @@
4935 #define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
4936 #define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
4937
4938-#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
4939-#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
4940+#define XSPI_TXD_OFFSET_DEF 0x6b /* 8-bit Data Transmit Register */
4941+#define XSPI_RXD_OFFSET_DEF 0x6f /* 8-bit Data Receive Register */
4942
4943-#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
4944+#define XSPI_SSR_OFFSET_DEF 0x70 /* 32-bit Slave Select Register */
4945
4946 /* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
4947 * IPIF registers are 32 bit
4948@@ -74,24 +80,10 @@
4949 #define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
4950 #define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
4951
4952-struct xilinx_spi {
4953- /* bitbang has to be first */
4954- struct spi_bitbang bitbang;
4955- struct completion done;
4956-
4957- void __iomem *regs; /* virt. address of the control registers */
4958-
4959- u32 irq;
4960-
4961- u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
4962-
4963- u8 *rx_ptr; /* pointer in the Tx buffer */
4964- const u8 *tx_ptr; /* pointer in the Rx buffer */
4965- int remaining_bytes; /* the number of bytes left to transfer */
4966-};
4967
4968-static void xspi_init_hw(void __iomem *regs_base)
4969+void xspi_init_hw(struct xilinx_spi *xspi)
4970 {
4971+ void __iomem *regs_base = xspi->regs;
4972 /* Reset the SPI device */
4973 out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
4974 XIPIF_V123B_RESET_MASK);
4975@@ -101,30 +93,31 @@
4976 out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
4977 XIPIF_V123B_GINTR_ENABLE);
4978 /* Deselect the slave on the SPI bus */
4979- out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
4980+ out_be32(regs_base + xspi->ssr_offset, 0xffff);
4981 /* Disable the transmitter, enable Manual Slave Select Assertion,
4982 * put SPI controller into master mode, and enable it */
4983- out_be16(regs_base + XSPI_CR_OFFSET,
4984+ out_be16(regs_base + xspi->cr_offset,
4985 XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
4986 | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
4987 }
4988+EXPORT_SYMBOL(xspi_init_hw);
4989
4990-static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
4991+void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
4992 {
4993 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
4994
4995 if (is_on == BITBANG_CS_INACTIVE) {
4996 /* Deselect the slave on the SPI bus */
4997- out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
4998+ out_be32(xspi->regs + xspi->ssr_offset, 0xffff);
4999 } else if (is_on == BITBANG_CS_ACTIVE) {
5000 /* Set the SPI clock phase and polarity */
5001- u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
5002+ u16 cr = in_be16(xspi->regs + xspi->cr_offset)
5003 & ~XSPI_CR_MODE_MASK;
5004 if (spi->mode & SPI_CPHA)
5005 cr |= XSPI_CR_CPHA;
5006 if (spi->mode & SPI_CPOL)
5007 cr |= XSPI_CR_CPOL;
5008- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
5009+ out_be16(xspi->regs + xspi->cr_offset, cr);
5010
5011 /* We do not check spi->max_speed_hz here as the SPI clock
5012 * frequency is not software programmable (the IP block design
5013@@ -132,10 +125,11 @@
5014 */
5015
5016 /* Activate the chip select */
5017- out_be32(xspi->regs + XSPI_SSR_OFFSET,
5018+ out_be32(xspi->regs + xspi->ssr_offset,
5019 ~(0x0001 << spi->chip_select));
5020 }
5021 }
5022+EXPORT_SYMBOL(xilinx_spi_chipselect);
5023
5024 /* spi_bitbang requires custom setup_transfer() to be defined if there is a
5025 * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
5026@@ -143,8 +137,7 @@
5027 * Check for 8 bits per word. Chip select delay calculations could be
5028 * added here as soon as bitbang_work() can be made aware of the delay value.
5029 */
5030-static int xilinx_spi_setup_transfer(struct spi_device *spi,
5031- struct spi_transfer *t)
5032+int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
5033 {
5034 u8 bits_per_word;
5035
5036@@ -157,11 +150,12 @@
5037
5038 return 0;
5039 }
5040+EXPORT_SYMBOL(xilinx_spi_setup_transfer);
5041
5042 /* the spi->mode bits understood by this driver: */
5043 #define MODEBITS (SPI_CPOL | SPI_CPHA)
5044
5045-static int xilinx_spi_setup(struct spi_device *spi)
5046+int xilinx_spi_setup(struct spi_device *spi)
5047 {
5048 struct spi_bitbang *bitbang;
5049 struct xilinx_spi *xspi;
5050@@ -188,25 +182,25 @@
5051
5052 return 0;
5053 }
5054+EXPORT_SYMBOL(xilinx_spi_setup);
5055
5056 static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
5057 {
5058 u8 sr;
5059
5060 /* Fill the Tx FIFO with as many bytes as possible */
5061- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
5062+ sr = in_8(xspi->regs + xspi->sr_offset);
5063 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
5064- if (xspi->tx_ptr) {
5065- out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
5066- } else {
5067- out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
5068- }
5069+ if (xspi->tx_ptr)
5070+ out_8(xspi->regs + xspi->txd_offset, *xspi->tx_ptr++);
5071+ else
5072+ out_8(xspi->regs + xspi->txd_offset, 0);
5073 xspi->remaining_bytes--;
5074- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
5075+ sr = in_8(xspi->regs + xspi->sr_offset);
5076 }
5077 }
5078
5079-static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
5080+int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
5081 {
5082 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
5083 u32 ipif_ier;
5084@@ -229,8 +223,8 @@
5085 ipif_ier | XSPI_INTR_TX_EMPTY);
5086
5087 /* Start the transfer by not inhibiting the transmitter any longer */
5088- cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
5089- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
5090+ cr = in_be16(xspi->regs + xspi->cr_offset) & ~XSPI_CR_TRANS_INHIBIT;
5091+ out_be16(xspi->regs + xspi->cr_offset, cr);
5092
5093 wait_for_completion(&xspi->done);
5094
5095@@ -239,14 +233,14 @@
5096
5097 return t->len - xspi->remaining_bytes;
5098 }
5099-
5100+EXPORT_SYMBOL(xilinx_spi_txrx_bufs);
5101
5102 /* This driver supports single master mode only. Hence Tx FIFO Empty
5103 * is the only interrupt we care about.
5104 * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
5105 * Fault are not to happen.
5106 */
5107-static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
5108+irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
5109 {
5110 struct xilinx_spi *xspi = dev_id;
5111 u32 ipif_isr;
5112@@ -264,20 +258,19 @@
5113 * transmitter while the Isr refills the transmit register/FIFO,
5114 * or make sure it is stopped if we're done.
5115 */
5116- cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
5117- out_be16(xspi->regs + XSPI_CR_OFFSET,
5118+ cr = in_be16(xspi->regs + xspi->cr_offset);
5119+ out_be16(xspi->regs + xspi->cr_offset,
5120 cr | XSPI_CR_TRANS_INHIBIT);
5121
5122 /* Read out all the data from the Rx FIFO */
5123- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
5124+ sr = in_8(xspi->regs + xspi->sr_offset);
5125 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
5126 u8 data;
5127
5128- data = in_8(xspi->regs + XSPI_RXD_OFFSET);
5129- if (xspi->rx_ptr) {
5130+ data = in_8(xspi->regs + xspi->rxd_offset);
5131+ if (xspi->rx_ptr)
5132 *xspi->rx_ptr++ = data;
5133- }
5134- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
5135+ sr = in_8(xspi->regs + xspi->sr_offset);
5136 }
5137
5138 /* See if there is more data to send */
5139@@ -286,7 +279,7 @@
5140 /* Start the transfer by not inhibiting the
5141 * transmitter any longer
5142 */
5143- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
5144+ out_be16(xspi->regs + xspi->cr_offset, cr);
5145 } else {
5146 /* No more data to send.
5147 * Indicate the transfer is completed.
5148@@ -297,167 +290,18 @@
5149
5150 return IRQ_HANDLED;
5151 }
5152+EXPORT_SYMBOL(xilinx_spi_irq);
5153
5154-static int __init xilinx_spi_of_probe(struct of_device *ofdev,
5155- const struct of_device_id *match)
5156-{
5157- struct spi_master *master;
5158- struct xilinx_spi *xspi;
5159- struct resource r_irq_struct;
5160- struct resource r_mem_struct;
5161-
5162- struct resource *r_irq = &r_irq_struct;
5163- struct resource *r_mem = &r_mem_struct;
5164- int rc = 0;
5165- const u32 *prop;
5166- int len;
5167-
5168- /* Get resources(memory, IRQ) associated with the device */
5169- master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
5170-
5171- if (master == NULL) {
5172- return -ENOMEM;
5173- }
5174-
5175- dev_set_drvdata(&ofdev->dev, master);
5176-
5177- rc = of_address_to_resource(ofdev->node, 0, r_mem);
5178- if (rc) {
5179- dev_warn(&ofdev->dev, "invalid address\n");
5180- goto put_master;
5181- }
5182-
5183- rc = of_irq_to_resource(ofdev->node, 0, r_irq);
5184- if (rc == NO_IRQ) {
5185- dev_warn(&ofdev->dev, "no IRQ found\n");
5186- goto put_master;
5187- }
5188-
5189- xspi = spi_master_get_devdata(master);
5190- xspi->bitbang.master = spi_master_get(master);
5191- xspi->bitbang.chipselect = xilinx_spi_chipselect;
5192- xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
5193- xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
5194- xspi->bitbang.master->setup = xilinx_spi_setup;
5195- init_completion(&xspi->done);
5196-
5197- xspi->irq = r_irq->start;
5198-
5199- if (!request_mem_region(r_mem->start,
5200- r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
5201- rc = -ENXIO;
5202- dev_warn(&ofdev->dev, "memory request failure\n");
5203- goto put_master;
5204- }
5205-
5206- xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
5207- if (xspi->regs == NULL) {
5208- rc = -ENOMEM;
5209- dev_warn(&ofdev->dev, "ioremap failure\n");
5210- goto put_master;
5211- }
5212- xspi->irq = r_irq->start;
5213-
5214- /* dynamic bus assignment */
5215- master->bus_num = -1;
5216-
5217- /* number of slave select bits is required */
5218- prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
5219- if (!prop || len < sizeof(*prop)) {
5220- dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
5221- goto put_master;
5222- }
5223- master->num_chipselect = *prop;
5224-
5225- /* SPI controller initializations */
5226- xspi_init_hw(xspi->regs);
5227-
5228- /* Register for SPI Interrupt */
5229- rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
5230- if (rc != 0) {
5231- dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
5232- goto unmap_io;
5233- }
5234-
5235- rc = spi_bitbang_start(&xspi->bitbang);
5236- if (rc != 0) {
5237- dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
5238- goto free_irq;
5239- }
5240-
5241- dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
5242- (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
5243-
5244- /* Add any subnodes on the SPI bus */
5245- of_register_spi_devices(master, ofdev->node);
5246-
5247- return rc;
5248-
5249-free_irq:
5250- free_irq(xspi->irq, xspi);
5251-unmap_io:
5252- iounmap(xspi->regs);
5253-put_master:
5254- spi_master_put(master);
5255- return rc;
5256-}
5257-
5258-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
5259+void xilinx_spi_set_default_reg_offsets(struct xilinx_spi *xspi)
5260 {
5261- struct xilinx_spi *xspi;
5262- struct spi_master *master;
5263-
5264- master = platform_get_drvdata(ofdev);
5265- xspi = spi_master_get_devdata(master);
5266-
5267- spi_bitbang_stop(&xspi->bitbang);
5268- free_irq(xspi->irq, xspi);
5269- iounmap(xspi->regs);
5270- dev_set_drvdata(&ofdev->dev, 0);
5271- spi_master_put(xspi->bitbang.master);
5272-
5273- return 0;
5274-}
5275-
5276-/* work with hotplug and coldplug */
5277-MODULE_ALIAS("platform:" XILINX_SPI_NAME);
5278-
5279-static int __exit xilinx_spi_of_remove(struct of_device *op)
5280-{
5281- return xilinx_spi_remove(op);
5282+ xspi->cr_offset = XSPI_CR_OFFSET_DEF;
5283+ xspi->sr_offset = XSPI_SR_OFFSET_DEF;
5284+ xspi->txd_offset = XSPI_TXD_OFFSET_DEF;
5285+ xspi->rxd_offset = XSPI_RXD_OFFSET_DEF;
5286+ xspi->ssr_offset = XSPI_SSR_OFFSET_DEF;
5287 }
5288+EXPORT_SYMBOL(xilinx_spi_set_default_reg_offsets);
5289
5290-static struct of_device_id xilinx_spi_of_match[] = {
5291- { .compatible = "xlnx,xps-spi-2.00.a", },
5292- { .compatible = "xlnx,xps-spi-2.00.b", },
5293- {}
5294-};
5295-
5296-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
5297-
5298-static struct of_platform_driver xilinx_spi_of_driver = {
5299- .owner = THIS_MODULE,
5300- .name = "xilinx-xps-spi",
5301- .match_table = xilinx_spi_of_match,
5302- .probe = xilinx_spi_of_probe,
5303- .remove = __exit_p(xilinx_spi_of_remove),
5304- .driver = {
5305- .name = "xilinx-xps-spi",
5306- .owner = THIS_MODULE,
5307- },
5308-};
5309-
5310-static int __init xilinx_spi_init(void)
5311-{
5312- return of_register_platform_driver(&xilinx_spi_of_driver);
5313-}
5314-module_init(xilinx_spi_init);
5315-
5316-static void __exit xilinx_spi_exit(void)
5317-{
5318- of_unregister_platform_driver(&xilinx_spi_of_driver);
5319-}
5320-module_exit(xilinx_spi_exit);
5321 MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
5322 MODULE_DESCRIPTION("Xilinx SPI driver");
5323 MODULE_LICENSE("GPL");
5324diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi.h linux-2.6.29/drivers/spi/xilinx_spi.h
5325--- linux-2.6.29-clean/drivers/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
5326+++ linux-2.6.29/drivers/spi/xilinx_spi.h 2009-04-06 13:51:47.000000000 -0700
5327@@ -0,0 +1,52 @@
5328+/*
5329+ * xilinx_spi.h
5330+ *
5331+ * Xilinx SPI controller driver (master mode only)
5332+ *
5333+ * Author: MontaVista Software, Inc.
5334+ * source@mvista.com
5335+ *
5336+ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
5337+ * terms of the GNU General Public License version 2. This program is licensed
5338+ * "as is" without any warranty of any kind, whether express or implied.
5339+ */
5340+
5341+#ifndef _XILINX_SPI_H_
5342+#define _XILINX_SPI_H_ 1
5343+
5344+#include <linux/spi/spi.h>
5345+#include <linux/spi/spi_bitbang.h>
5346+
5347+#define XILINX_SPI_NAME "xilinx_spi"
5348+
5349+
5350+struct xilinx_spi {
5351+ /* bitbang has to be first */
5352+ struct spi_bitbang bitbang;
5353+ struct completion done;
5354+
5355+ void __iomem *regs; /* virt. address of the control registers */
5356+
5357+ u32 irq;
5358+
5359+ u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
5360+
5361+	u8 *rx_ptr;		/* pointer in the Rx buffer */
5362+	const u8 *tx_ptr;	/* pointer in the Tx buffer */
5363+ int remaining_bytes; /* the number of bytes left to transfer */
5364+ /* offset to the XSPI regs, these might vary... */
5365+ u8 cr_offset;
5366+ u8 sr_offset;
5367+ u8 txd_offset;
5368+ u8 rxd_offset;
5369+ u8 ssr_offset;
5370+};
5371+
5372+void xspi_init_hw(struct xilinx_spi *xspi);
5373+void xilinx_spi_set_default_reg_offsets(struct xilinx_spi *xspi);
5374+void xilinx_spi_chipselect(struct spi_device *spi, int is_on);
5375+int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t);
5376+int xilinx_spi_setup(struct spi_device *spi);
5377+int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t);
5378+irqreturn_t xilinx_spi_irq(int irq, void *dev_id);
5379+#endif
5380diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi_of.c linux-2.6.29/drivers/spi/xilinx_spi_of.c
5381--- linux-2.6.29-clean/drivers/spi/xilinx_spi_of.c 1969-12-31 16:00:00.000000000 -0800
5382+++ linux-2.6.29/drivers/spi/xilinx_spi_of.c 2009-04-06 13:51:47.000000000 -0700
5383@@ -0,0 +1,193 @@
5384+/*
5385+ * xilinx_spi_of.c
5386+ *
5387+ * Xilinx SPI controller driver (master mode only)
5388+ *
5389+ * Author: MontaVista Software, Inc.
5390+ * source@mvista.com
5391+ *
5392+ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
5393+ * terms of the GNU General Public License version 2. This program is licensed
5394+ * "as is" without any warranty of any kind, whether express or implied.
5395+ */
5396+
5397+#include <linux/module.h>
5398+#include <linux/init.h>
5399+#include <linux/interrupt.h>
5400+#include <linux/io.h>
5401+#include <linux/platform_device.h>
5402+
5403+#include <linux/of_platform.h>
5404+#include <linux/of_device.h>
5405+#include <linux/of_spi.h>
5406+
5407+#include <linux/spi/spi.h>
5408+#include <linux/spi/spi_bitbang.h>
5409+
5410+#include "xilinx_spi.h"
5411+
5412+
5413+static int __init xilinx_spi_of_probe(struct of_device *ofdev,
5414+ const struct of_device_id *match)
5415+{
5416+ struct spi_master *master;
5417+ struct xilinx_spi *xspi;
5418+ struct resource r_irq_struct;
5419+ struct resource r_mem_struct;
5420+
5421+ struct resource *r_irq = &r_irq_struct;
5422+ struct resource *r_mem = &r_mem_struct;
5423+ int rc = 0;
5424+ const u32 *prop;
5425+ int len;
5426+
5427+ /* Get resources(memory, IRQ) associated with the device */
5428+ master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
5429+
5430+ if (master == NULL)
5431+ return -ENOMEM;
5432+
5433+ dev_set_drvdata(&ofdev->dev, master);
5434+
5435+ rc = of_address_to_resource(ofdev->node, 0, r_mem);
5436+ if (rc) {
5437+ dev_warn(&ofdev->dev, "invalid address\n");
5438+ goto put_master;
5439+ }
5440+
5441+ rc = of_irq_to_resource(ofdev->node, 0, r_irq);
5442+ if (rc == NO_IRQ) {
5443+ dev_warn(&ofdev->dev, "no IRQ found\n");
5444+ goto put_master;
5445+ }
5446+
5447+ xspi = spi_master_get_devdata(master);
5448+ xspi->bitbang.master = spi_master_get(master);
5449+ xspi->bitbang.chipselect = xilinx_spi_chipselect;
5450+ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
5451+ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
5452+ xspi->bitbang.master->setup = xilinx_spi_setup;
5453+ init_completion(&xspi->done);
5454+
5455+ xspi->irq = r_irq->start;
5456+
5457+ if (!request_mem_region(r_mem->start,
5458+ r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
5459+ rc = -ENXIO;
5460+ dev_warn(&ofdev->dev, "memory request failure\n");
5461+ goto put_master;
5462+ }
5463+
5464+ xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
5465+ if (xspi->regs == NULL) {
5466+ rc = -ENOMEM;
5467+ dev_warn(&ofdev->dev, "ioremap failure\n");
5468+ goto put_master;
5469+ }
5470+ xspi->irq = r_irq->start;
5471+
5472+ /* dynamic bus assignment */
5473+ master->bus_num = -1;
5474+
5475+ /* number of slave select bits is required */
5476+ prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
5477+ if (!prop || len < sizeof(*prop)) {
5478+ dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
5479+ goto put_master;
5480+ }
5481+ master->num_chipselect = *prop;
5482+
5483+ xilinx_spi_set_default_reg_offsets(xspi);
5484+
5485+ /* SPI controller initializations */
5486+	xspi_init_hw(xspi);
5487+
5488+ /* Register for SPI Interrupt */
5489+ rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
5490+ if (rc != 0) {
5491+ dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
5492+ goto unmap_io;
5493+ }
5494+
5495+ rc = spi_bitbang_start(&xspi->bitbang);
5496+ if (rc != 0) {
5497+ dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
5498+ goto free_irq;
5499+ }
5500+
5501+ dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
5502+ (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
5503+
5504+ /* Add any subnodes on the SPI bus */
5505+ of_register_spi_devices(master, ofdev->node);
5506+
5507+ return rc;
5508+
5509+free_irq:
5510+ free_irq(xspi->irq, xspi);
5511+unmap_io:
5512+ iounmap(xspi->regs);
5513+put_master:
5514+ spi_master_put(master);
5515+ return rc;
5516+}
5517+
5518+static int __devexit xilinx_spi_remove(struct of_device *ofdev)
5519+{
5520+ struct xilinx_spi *xspi;
5521+ struct spi_master *master;
5522+
5523+ master = platform_get_drvdata(ofdev);
5524+ xspi = spi_master_get_devdata(master);
5525+
5526+ spi_bitbang_stop(&xspi->bitbang);
5527+ free_irq(xspi->irq, xspi);
5528+ iounmap(xspi->regs);
5529+	dev_set_drvdata(&ofdev->dev, NULL);
5530+ spi_master_put(xspi->bitbang.master);
5531+
5532+ return 0;
5533+}
5534+
5535+/* work with hotplug and coldplug */
5536+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
5537+
5538+static int __exit xilinx_spi_of_remove(struct of_device *op)
5539+{
5540+ return xilinx_spi_remove(op);
5541+}
5542+
5543+static struct of_device_id xilinx_spi_of_match[] = {
5544+ { .compatible = "xlnx,xps-spi-2.00.a", },
5545+ { .compatible = "xlnx,xps-spi-2.00.b", },
5546+ {}
5547+};
5548+
5549+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
5550+
5551+static struct of_platform_driver xilinx_spi_of_driver = {
5552+ .owner = THIS_MODULE,
5553+ .name = "xilinx-xps-spi",
5554+ .match_table = xilinx_spi_of_match,
5555+ .probe = xilinx_spi_of_probe,
5556+ .remove = __exit_p(xilinx_spi_of_remove),
5557+ .driver = {
5558+ .name = "xilinx-xps-spi",
5559+ .owner = THIS_MODULE,
5560+ },
5561+};
5562+
5563+static int __init xilinx_spi_init(void)
5564+{
5565+ return of_register_platform_driver(&xilinx_spi_of_driver);
5566+}
5567+module_init(xilinx_spi_init);
5568+
5569+static void __exit xilinx_spi_exit(void)
5570+{
5571+ of_unregister_platform_driver(&xilinx_spi_of_driver);
5572+}
5573+module_exit(xilinx_spi_exit);
5574+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
5575+MODULE_DESCRIPTION("Xilinx SPI driver");
5576+MODULE_LICENSE("GPL");
5577diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi_pltfm.c linux-2.6.29/drivers/spi/xilinx_spi_pltfm.c
5578--- linux-2.6.29-clean/drivers/spi/xilinx_spi_pltfm.c 1969-12-31 16:00:00.000000000 -0800
5579+++ linux-2.6.29/drivers/spi/xilinx_spi_pltfm.c 2009-04-06 13:51:47.000000000 -0700
5580@@ -0,0 +1,184 @@
5581+/*
5582+ * xilinx_spi_pltfm.c Support for Xilinx SPI platform devices
5583+ * Copyright (c) 2009 Intel Corporation
5584+ *
5585+ * This program is free software; you can redistribute it and/or modify
5586+ * it under the terms of the GNU General Public License version 2 as
5587+ * published by the Free Software Foundation.
5588+ *
5589+ * This program is distributed in the hope that it will be useful,
5590+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5591+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5592+ * GNU General Public License for more details.
5593+ *
5594+ * You should have received a copy of the GNU General Public License
5595+ * along with this program; if not, write to the Free Software
5596+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5597+ */
5598+
5599+/* Supports:
5600+ * Xilinx SPI devices as platform devices
5601+ *
5602+ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
5603+ */
5604+
5605+#include <linux/module.h>
5606+#include <linux/init.h>
5607+#include <linux/interrupt.h>
5608+#include <linux/io.h>
5609+#include <linux/platform_device.h>
5610+
5611+#include <linux/spi/spi.h>
5612+#include <linux/spi/spi_bitbang.h>
5613+#include <linux/spi/xilinx_spi.h>
5614+
5615+#include "xilinx_spi.h"
5616+
5617+static int __devinit xilinx_spi_probe(struct platform_device *dev)
5618+{
5619+ int ret = 0;
5620+ struct spi_master *master;
5621+ struct xilinx_spi *xspi;
5622+ struct xspi_platform_data *pdata;
5623+ struct resource *r;
5624+
5625+ master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
5626+
5627+ if (master == NULL)
5628+ return -ENOMEM;
5629+
5630+
5631+ platform_set_drvdata(dev, master);
5632+ pdata = dev->dev.platform_data;
5633+ if (pdata == NULL) {
5634+ ret = -ENODEV;
5635+ goto put_master;
5636+ }
5637+
5638+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
5639+ if (r == NULL) {
5640+ ret = -ENODEV;
5641+ goto put_master;
5642+ }
5643+
5644+ xspi = spi_master_get_devdata(master);
5645+ xspi->bitbang.master = spi_master_get(master);
5646+ xspi->bitbang.chipselect = xilinx_spi_chipselect;
5647+ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
5648+ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
5649+ xspi->bitbang.master->setup = xilinx_spi_setup;
5650+ init_completion(&xspi->done);
5651+
5652+ if (!request_mem_region(r->start, resource_size(r), XILINX_SPI_NAME)) {
5653+ ret = -ENXIO;
5654+ goto put_master;
5655+ }
5656+
5657+ xspi->regs = ioremap(r->start, resource_size(r));
5658+ if (xspi->regs == NULL) {
5659+ ret = -ENOMEM;
5660+ goto map_failed;
5661+ }
5662+
5663+ ret = platform_get_irq(dev, 0);
5664+ if (ret < 0) {
5665+ ret = -ENXIO;
5666+ goto unmap_io;
5667+ }
5668+ xspi->irq = ret;
5669+
5670+ master->bus_num = pdata->bus_num;
5671+ master->num_chipselect = pdata->num_chipselect;
5672+ xspi->speed_hz = pdata->speed_hz;
5673+ xilinx_spi_set_default_reg_offsets(xspi);
5674+ if (pdata->cr_offset)
5675+ xspi->cr_offset = pdata->cr_offset;
5676+ if (pdata->sr_offset)
5677+ xspi->sr_offset = pdata->sr_offset;
5678+ if (pdata->txd_offset)
5679+ xspi->txd_offset = pdata->txd_offset;
5680+ if (pdata->rxd_offset)
5681+ xspi->rxd_offset = pdata->rxd_offset;
5682+ if (pdata->ssr_offset)
5683+ xspi->ssr_offset = pdata->ssr_offset;
5684+
5685+ /* SPI controller initializations */
5686+ xspi_init_hw(xspi);
5687+
5688+ /* Register for SPI Interrupt */
5689+ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
5690+ if (ret != 0)
5691+ goto unmap_io;
5692+
5693+ ret = spi_bitbang_start(&xspi->bitbang);
5694+ if (ret != 0) {
5695+ dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
5696+ goto free_irq;
5697+ }
5698+
5699+ dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
5700+ (u32)r->start, (u32)xspi->regs, xspi->irq);
5701+ return ret;
5702+
5703+free_irq:
5704+ free_irq(xspi->irq, xspi);
5705+unmap_io:
5706+ iounmap(xspi->regs);
5707+map_failed:
5708+ release_mem_region(r->start, resource_size(r));
5709+put_master:
5710+ spi_master_put(master);
5711+ return ret;
5712+}
5713+
5714+static int __devexit xilinx_spi_remove(struct platform_device *dev)
5715+{
5716+ struct xilinx_spi *xspi;
5717+ struct spi_master *master;
5718+ struct resource *r;
5719+
5720+ master = platform_get_drvdata(dev);
5721+ xspi = spi_master_get_devdata(master);
5722+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
5723+
5724+ spi_bitbang_stop(&xspi->bitbang);
5725+ free_irq(xspi->irq, xspi);
5726+ iounmap(xspi->regs);
5727+
5728+ if (r)
5729+ release_mem_region(r->start, resource_size(r));
5730+
5731+	platform_set_drvdata(dev, NULL);
5732+ spi_master_put(xspi->bitbang.master);
5733+
5734+ return 0;
5735+}
5736+
5737+/* work with hotplug and coldplug */
5738+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
5739+
5740+static struct platform_driver xilinx_spi_driver = {
5741+ .probe = xilinx_spi_probe,
5742+ .remove = __devexit_p(xilinx_spi_remove),
5743+ .driver = {
5744+ .name = XILINX_SPI_NAME,
5745+ .owner = THIS_MODULE,
5746+ },
5747+};
5748+
5749+static int __init xilinx_spi_init(void)
5750+{
5751+ return platform_driver_register(&xilinx_spi_driver);
5752+}
5753+module_init(xilinx_spi_init);
5754+
5755+static void __exit xilinx_spi_exit(void)
5756+{
5757+ platform_driver_unregister(&xilinx_spi_driver);
5758+}
5759+module_exit(xilinx_spi_exit);
5760+
5761+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
5762+MODULE_DESCRIPTION("Xilinx SPI platform driver");
5763+MODULE_LICENSE("GPL v2");
5764+
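The platform binding takes its bus number, chip-select count and any non-default register offsets from struct xspi_platform_data (declared in include/linux/spi/xilinx_spi.h at the end of this patch). A board-side sketch; the addresses, IRQ and example_* names are hypothetical, and the offsets are left zero so the driver falls back to the DS464 defaults:

	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/spi/xilinx_spi.h>

	static struct xspi_platform_data example_xspi_pdata = {
		.bus_num        = 0,
		.num_chipselect = 1,
		.speed_hz       = 8000000,	/* hypothetical; SCK is fixed by the IP design */
	};

	static struct resource example_xspi_resources[] = {
		{
			.start = 0x90000400,	/* hypothetical */
			.end   = 0x90000400 + 0x7f,
			.flags = IORESOURCE_MEM,
		},
		{
			.start = 25,		/* hypothetical IRQ */
			.end   = 25,
			.flags = IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_xspi_device = {
		.name			= "xilinx_spi",	/* XILINX_SPI_NAME */
		.id			= 0,
		.dev.platform_data	= &example_xspi_pdata,
		.num_resources		= ARRAY_SIZE(example_xspi_resources),
		.resource		= example_xspi_resources,
	};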
5765diff -uNr linux-2.6.29-clean/include/linux/i2c-ocores.h linux-2.6.29/include/linux/i2c-ocores.h
5766--- linux-2.6.29-clean/include/linux/i2c-ocores.h 2009-04-01 09:20:20.000000000 -0700
5767+++ linux-2.6.29/include/linux/i2c-ocores.h 2009-04-06 13:51:47.000000000 -0700
5768@@ -14,6 +14,8 @@
5769 struct ocores_i2c_platform_data {
5770 u32 regstep; /* distance between registers */
5771 u32 clock_khz; /* input clock in kHz */
5772+ u8 num_devices; /* number of devices in the devices list */
5773+ struct i2c_board_info const *devices; /* devices connected to the bus */
5774 };
5775
5776 #endif /* _LINUX_I2C_OCORES_H */
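The two new fields let board code describe the I2C clients sitting behind the OpenCores adapter, presumably so the bus driver can register them once the adapter is up. A sketch of the platform data; the client chip and address are hypothetical:

	#include <linux/kernel.h>
	#include <linux/i2c.h>
	#include <linux/i2c-ocores.h>

	/* Hypothetical device hanging off the OpenCores bus. */
	static struct i2c_board_info const example_ocores_devices[] = {
		{ I2C_BOARD_INFO("tsc2003", 0x48) },
	};

	static struct ocores_i2c_platform_data example_ocores_pdata = {
		.regstep     = 4,		/* hypothetical register spacing */
		.clock_khz   = 62500,		/* hypothetical input clock */
		.num_devices = ARRAY_SIZE(example_ocores_devices),
		.devices     = example_ocores_devices,
	};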
diff -uNr linux-2.6.29-clean/include/linux/mfd/timbdma.h linux-2.6.29/include/linux/mfd/timbdma.h
--- linux-2.6.29-clean/include/linux/mfd/timbdma.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.29/include/linux/mfd/timbdma.h 2009-04-06 13:51:47.000000000 -0700
@@ -0,0 +1,80 @@
+/*
+ * timbdma.h timberdale FPGA DMA driver defines
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA DMA engine
+ */
+
+#ifndef _TIMBDMA_H
+#define _TIMBDMA_H
+
+#include <linux/spinlock.h>
+
+
+#define DMA_IRQ_UART_RX 0x01
+#define DMA_IRQ_UART_TX 0x02
+#define DMA_IRQ_MLB_RX 0x04
+#define DMA_IRQ_MLB_TX 0x08
+#define DMA_IRQ_VIDEO_RX 0x10
+#define DMA_IRQ_VIDEO_DROP 0x20
+#define DMA_IRQS 6
+
+
+typedef int (*timbdma_interruptcb)(u32 flag, void *data);
+
+enum timbdma_ctrlmap {
+	timbdma_ctrlmap_DMACFGBTUART = 0x000000,
+	timbdma_ctrlmap_DMACFGMLBSY = 0x000040,
+	timbdma_ctrlmap_DMACFGVIDEO = 0x000080,
+	timbdma_ctrlmap_TIMBSTATUS = 0x080000,
+	timbdma_ctrlmap_TIMBPEND = 0x080004,
+	timbdma_ctrlmap_TIMBENABLE = 0x080008,
+	timbdma_ctrlmap_VIDEOBUFFER = 0x200000
+};
+
+enum timbdma_dmacfg {
+	timbdma_dmacfg_RXSTARTH = 0x00,
+	timbdma_dmacfg_RXSTARTL = 0x04,
+	timbdma_dmacfg_RXLENGTH = 0x08,
+	timbdma_dmacfg_RXFPGAWP = 0x0C,
+	timbdma_dmacfg_RXSWRP = 0x10,
+	timbdma_dmacfg_RXENABLE = 0x14,
+	timbdma_dmacfg_TXSTARTH = 0x18,
+	timbdma_dmacfg_TXSTARTL = 0x1C,
+	timbdma_dmacfg_TXLENGTH = 0x20,
+	timbdma_dmacfg_TXSWWP = 0x24,
+	timbdma_dmacfg_TXFPGARP = 0x28,
+	timbdma_dmacfg_TXBEFINT = 0x2C,
+	timbdma_dmacfg_BPERROW = 0x30
+};
+
+struct timbdma_dev {
+	void __iomem *membase;
+	timbdma_interruptcb callbacks[DMA_IRQS];
+	void *callback_data[DMA_IRQS];
+	spinlock_t lock; /* mutual exclusion */
+};
+
+void timb_start_dma(u32 flag, unsigned long buf, int len, int bytes_per_row);
+
+void *timb_stop_dma(u32 flags);
+
+void timb_set_dma_interruptcb(u32 flags, timbdma_interruptcb icb, void *data);
+
+#endif /* _TIMBDMA_H */
+
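
A hedged usage sketch of the callback API above. The buffer handling and flag semantics are assumptions read off the names (the header does not spell them out), and the function names here are ours:

#include <linux/mfd/timbdma.h>

/* Completion callback: 'flag' is the DMA_IRQ_* source that fired. */
static int timb_uart_rx_complete(u32 flag, void *data)
{
	/* consume the bytes the FPGA wrote; cf. the RXFPGAWP/RXSWRP pointers */
	return 0;
}

static void timb_uart_rx_example(void *priv, unsigned long buf, int len)
{
	timb_set_dma_interruptcb(DMA_IRQ_UART_RX, timb_uart_rx_complete, priv);
	/* bytes_per_row (0 here) presumably only matters for the video channel */
	timb_start_dma(DMA_IRQ_UART_RX, buf, len, 0);
	/* ... later, e.g. on close: */
	timb_stop_dma(DMA_IRQ_UART_RX);
}
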
diff -uNr linux-2.6.29-clean/include/linux/mfd/timbi2s.h linux-2.6.29/include/linux/mfd/timbi2s.h
--- linux-2.6.29-clean/include/linux/mfd/timbi2s.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.29/include/linux/mfd/timbi2s.h 2009-04-06 13:51:47.000000000 -0700
@@ -0,0 +1,66 @@
+/*
+ * timbi2s.h timberdale FPGA I2S driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA I2S
+ */
+
+struct timbi2s_bus_control {
+	struct list_head list;
+};
+
+struct timbi2s_bus {
+	void __iomem *membase;
+	u32 irq;
+	struct timbi2s_bus_control *control;
+	struct workqueue_struct *workqueue;
+	struct work_struct work;
+};
+
+struct timbi2s_dev {
+	void __iomem *membase;
+	u32 irq;
+	struct timbi2s_bus *bus;
+	struct workqueue_struct *workqueue;
+	struct work_struct work;
+	u32 ioctrl;
+	u32 devid;
+	u8 timbi2s_rx;
+	u8 timbi2s_tx;
+	struct circ_buf *buffer;
+	/* Register access */
+	spinlock_t lock;
+
+	int in_use;
+	u8 pscale_offset;	/* Prescale */
+	u8 icr_offset;	/* Clear register */
+	u8 isr_offset;	/* Status */
+	u8 ipr_offset;	/* Pending register */
+	u8 ier_offset;	/* Interrupt Enable register */
+	u8 ctrl_offset;
+	u8 fifo;
+
+	struct list_head item;
+};
+
+static struct timbi2s_dev *timbi2s_get_tx(void);
+static struct timbi2s_dev *timbi2s_get_rx(void);
+static void timbi2s_put(struct timbi2s_dev *tdev);
+
+static int timbi2s_ioctrl(struct timbi2s_dev *i2sdev);
+
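
A quirk worth noting: the accessors above are declared static in a header, so they are visible only to the single translation unit that includes it. A sketch of the apparent get/put ownership model (the pairing with in_use is our assumption; the header does not document it):

#include <linux/errno.h>

static int timbi2s_claim_tx_example(void)
{
	struct timbi2s_dev *tx = timbi2s_get_tx();	/* NULL assumed if unavailable */

	if (!tx)
		return -EBUSY;
	timbi2s_ioctrl(tx);	/* presumably refreshes ioctrl/devid from hardware */
	/* ... queue samples via tx->buffer ... */
	timbi2s_put(tx);	/* release so the next user can claim it */
	return 0;
}
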
diff -uNr linux-2.6.29-clean/include/linux/serial_core.h linux-2.6.29/include/linux/serial_core.h
--- linux-2.6.29-clean/include/linux/serial_core.h 2009-04-01 09:20:20.000000000 -0700
+++ linux-2.6.29/include/linux/serial_core.h 2009-04-06 13:51:47.000000000 -0700
@@ -164,6 +164,9 @@
 /* NWPSERIAL */
 #define PORT_NWPSERIAL 85

+/* Timberdale UART */
+#define PORT_TIMBUART 86
+
 #ifdef __KERNEL__

 #include <linux/compiler.h>
diff -uNr linux-2.6.29-clean/include/linux/spi/xilinx_spi.h linux-2.6.29/include/linux/spi/xilinx_spi.h
--- linux-2.6.29-clean/include/linux/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.29/include/linux/spi/xilinx_spi.h 2009-04-06 13:51:47.000000000 -0700
@@ -0,0 +1,17 @@
+#ifndef __LINUX_SPI_XILINX_SPI_H
+#define __LINUX_SPI_XILINX_SPI_H
+
+/* SPI Controller IP */
+struct xspi_platform_data {
+	s16 bus_num;
+	u16 num_chipselect;
+	u32 speed_hz;
+	u8 cr_offset;
+	u8 sr_offset;
+	u8 txd_offset;
+	u8 rxd_offset;
+	u8 ssr_offset;
+};
+
+#endif /* __LINUX_SPI_XILINX_SPI_H */
+
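
A board-file sketch wiring this platform data to a platform device. Base values, the register offsets, and the "xilinx_spi" driver name are hypothetical here; the per-instance offsets exist so one header can describe differently wired instances of the core:

#include <linux/platform_device.h>
#include <linux/spi/xilinx_spi.h>

static struct xspi_platform_data xspi_data = {
	.bus_num	= 0,
	.num_chipselect	= 1,
	.speed_hz	= 12500000,	/* hypothetical core clock */
	.cr_offset	= 0x60,		/* hypothetical register layout */
	.sr_offset	= 0x64,
	.txd_offset	= 0x68,
	.rxd_offset	= 0x6c,
	.ssr_offset	= 0x70,
};

static struct platform_device xspi_device = {
	.name	= "xilinx_spi",		/* assumed to match the platform driver */
	.id	= 0,
	.dev	= {
		.platform_data = &xspi_data,
	},
};
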
diff -uNr linux-2.6.29-clean/include/media/adv7180.h linux-2.6.29/include/media/adv7180.h
--- linux-2.6.29-clean/include/media/adv7180.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.29/include/media/adv7180.h 2009-04-06 13:51:47.000000000 -0700
@@ -0,0 +1,127 @@
+/*
+ * adv7180.h Analog Devices ADV7180 video decoder driver defines
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define DRIVER_NAME "adv7180"
+
+#define I2C_ADV7180 0x42
+#define ADV7180_NR_REG 0xfc
+
+#define ADV7180_IN_CTRL 0x00	/* Input CR */
+#define ADV7180_OUT_CTRL 0x03	/* Output CR */
+#define ADV7180_EXT_OUT_CTRL 0x04	/* Extended Output CR */
+
+#define ADV7180_ADI_CTRL 0x0e	/* ADI CR */
+# define ADI_ENABLE 0x20	/* Enable access to sub-regs */
+
+#define ADV7180_SR_1 0x10	/* Status Register 1 */
+#define ADV7180_SR_2 0x12
+#define ADV7180_SR_3 0x13
+
+/* Interrupt and VDP sub-registers */
+#define ADV7180_ISR_1 0x42	/* Interrupt Status Register 1 */
+#define ADV7180_ICR_1 0x43	/* Interrupt Clear Register 1 */
+
+#define ADV7180_ISR_2 0x46
+#define ADV7180_ICR_2 0x47
+
+#define ADV7180_ISR_3 0x4a
+#define ADV7180_ICR_3 0x4b
+
+#define ADV7180_ISR_4 0x4e
+#define ADV7180_ICR_4 0x4f
+/* */
+
+#define ADV7180_SR 0x10
+#define ADV7180_STATUS_NTSM 0x00	/* NTSM M/J */
+#define ADV7180_STATUS_NTSC 0x10	/* NTSC 4.43 */
+#define ADV7180_STATUS_PAL_M 0x20	/* PAL M */
+#define ADV7180_STATUS_PAL_60 0x30	/* PAL 60 */
+#define ADV7180_STATUS_PAL 0x40	/* PAL B/G/H/I/D */
+#define ADV7180_STATUS_SECAM 0x50	/* SECAM */
+#define ADV7180_STATUS_PAL_N 0x60	/* PAL Combination N */
+#define ADV7180_STATUS_SECAM_525 0x70	/* SECAM 525 */
+
+enum input_mode {
+	CVBS,	/* Composite */
+	SVIDEO,	/* S-video */
+	YPbPr,	/* Component */
+};
+
+struct adv7180 {
+	unsigned char reg[ADV7180_NR_REG];
+	int norm;
+	enum input_mode input;
+	int enable;
+	struct i2c_client *client;
+};
+
+static const unsigned char reset_icr[] = {
+	ADV7180_ICR_1, 0x00,
+	ADV7180_ICR_2, 0x00,
+	ADV7180_ICR_3, 0x00,
+	ADV7180_ICR_4, 0x00,
+};
+
+/* ADV7180 LQFP-64. ADV7180.pdf, page 104 */
+static const unsigned char init_cvbs_64[] = {
+	0x00, 0x01,	/* INSEL = CVBS in on Ain2 */
+	0x04, 0x57,	/* Enable SFL */
+	0x17, 0x41,	/* Select SH1 */
+
+	0x31, 0x02,	/* Clear NEWAV_MODE, SAV/EAV to
+			 * suit ADV video encoders
+			 */
+	0x3d, 0xa2,	/* MWE enable manual window,
+			 * color kill threshold to 2
+			 */
+	0x3e, 0x6a,	/* BLM optimization */
+	0x3f, 0xa0,	/* BGB optimization */
+	0x0e, 0x80,	/* Hidden space */
+	0x55, 0x81,	/* ADC configuration */
+	0x0e, 0x00,	/* User space */
+};
+
+static const unsigned char init_svideo_64[] = {
+	0x00, 0x08,	/* Insel = Y/C, Y = AIN3, C = AIN6 */
+	0x04, 0x57,	/* Enable SFL */
+	0x31, 0x02,	/* Clear NEWAV_MODE, SAV/EAV to
+			 * suit ADV video encoders
+			 */
+	0x3d, 0xa2,	/* MWE enable manual window,
+			 * color kill threshold to 2
+			 */
+	0x3e, 0x6a,	/* BLM optimization */
+	0x3f, 0xa0,	/* BGB optimization */
+	0x58, 0x04,	/* Mandatory write. This must be
+			 * performed for correct operation.
+			 */
+	0x0e, 0x80,	/* Hidden space */
+	0x55, 0x81,	/* ADC configuration */
+	0x0e, 0x00,	/* User space */
+};
+
+static const unsigned char init_ypbpr_64[] = {
+	0x00, 0x09,	/* INSEL = YPrPb, Y = AIN1, Pr = AIN4, Pb = AIN5 */
+	0x31, 0x02,	/* Clear NEWAV_MODE, SAV/EAV to suit ADV video encoders */
+	0x3d, 0xa2,	/* MWE enable manual window */
+	0x3e, 0x6a,	/* BLM optimization */
+	0x3f, 0xa0,	/* ADI recommended */
+	0x0e, 0x80,	/* Hidden space */
+	0x55, 0x81,	/* ADC configuration */
+	0x0e, 0x00,	/* User space */
+};
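
The three init tables above are flat (register, value) pair lists; a probe routine would stream one of them to the chip over SMBus. A sketch of that loop (the helper name is ours, not from the patch):

#include <linux/i2c.h>

static int adv7180_write_table(struct i2c_client *client,
			       const unsigned char *tbl, unsigned int len)
{
	unsigned int i;
	int ret;

	/* tbl holds register/value pairs, e.g. init_cvbs_64 */
	for (i = 0; i + 1 < len; i += 2) {
		ret = i2c_smbus_write_byte_data(client, tbl[i], tbl[i + 1]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* e.g.: adv7180_write_table(client, init_cvbs_64, ARRAY_SIZE(init_cvbs_64)); */
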
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch
new file mode 100644
index 0000000000..92e71fa31b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch
@@ -0,0 +1,130 @@
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index f8f86de..5d4cea2 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -676,6 +676,9 @@ static int psmouse_extensions(struct psmouse *psmouse,

 	if (touchkit_ps2_detect(psmouse, set_properties) == 0)
 		return PSMOUSE_TOUCHKIT_PS2;
+
+	if (elftouch_ps2_detect(psmouse, set_properties) == 0)
+		return PSMOUSE_ELFTOUCH_PS2;
 }

 /*
@@ -786,6 +789,12 @@ static const struct psmouse_protocol psmouse_protocols[] = {
 		.alias = "trackpoint",
 		.detect = trackpoint_detect,
 	},
+	{
+		.type = PSMOUSE_ELFTOUCH_PS2,
+		.name = "elftouchPS2",
+		.alias = "elftouch",
+		.detect = elftouch_ps2_detect,
+	},
 #endif
 #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
 	{
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 54ed267..8d1ba79 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -89,6 +89,7 @@ enum psmouse_type {
 	PSMOUSE_TRACKPOINT,
 	PSMOUSE_TOUCHKIT_PS2,
 	PSMOUSE_CORTRON,
+	PSMOUSE_ELFTOUCH_PS2,
 	PSMOUSE_HGPK,
 	PSMOUSE_ELANTECH,
 	PSMOUSE_AUTO /* This one should always be last */
diff --git a/drivers/input/mouse/touchkit_ps2.c b/drivers/input/mouse/touchkit_ps2.c
index 3fadb2a..e9c27f1 100644
--- a/drivers/input/mouse/touchkit_ps2.c
+++ b/drivers/input/mouse/touchkit_ps2.c
@@ -51,6 +51,11 @@
 #define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2])
 #define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4])

+#define ELFTOUCH_MAX_XC 0x0fff
+#define ELFTOUCH_MAX_YC 0x0fff
+#define ELFTOUCH_GET_X(packet) (((packet)[3] << 7) | (packet)[4])
+#define ELFTOUCH_GET_Y(packet) (((packet)[1] << 7) | (packet)[2])
+
 static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
 {
 	unsigned char *packet = psmouse->packet;
@@ -59,9 +64,15 @@ static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
 	if (psmouse->pktcnt != 5)
 		return PSMOUSE_GOOD_DATA;

-	input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
-	input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
+	if(psmouse->type==PSMOUSE_ELFTOUCH_PS2) {
+		input_report_abs(dev, ABS_X, ELFTOUCH_GET_X(packet));
+		input_report_abs(dev, ABS_Y, ELFTOUCH_GET_Y(packet));
+	} else {
+		input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
+		input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
+	}
 	input_report_key(dev, BTN_TOUCH, TOUCHKIT_GET_TOUCHED(packet));
+
 	input_sync(dev);

 	return PSMOUSE_FULL_PACKET;
@@ -98,3 +109,33 @@ int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties)

 	return 0;
 }
+
+int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties)
+{
+	struct input_dev *dev = psmouse->dev;
+	unsigned char param[16];
+	int command, res;
+
+	param[0]=0x0f4;
+	command = TOUCHKIT_SEND_PARMS(1, 0, TOUCHKIT_CMD);
+	res=ps2_command(&psmouse->ps2dev, param, command);
+	if(res) { return -ENODEV; }
+
+	param[0]=0x0b0;
+	command = TOUCHKIT_SEND_PARMS(1, 1, TOUCHKIT_CMD);
+	res=ps2_command(&psmouse->ps2dev, param, command);
+	if(res) { return -ENODEV; }
+
+	if (set_properties) {
+		dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+		set_bit(BTN_TOUCH, dev->keybit);
+		input_set_abs_params(dev, ABS_X, 0, ELFTOUCH_MAX_XC, 0, 0);
+		input_set_abs_params(dev, ABS_Y, 0, ELFTOUCH_MAX_YC, 0, 0);
+
+		psmouse->vendor = "ElfTouch";
+		psmouse->name = "Touchscreen";
+		psmouse->protocol_handler = touchkit_ps2_process_byte;
+		psmouse->pktsize = 5;
+	}
+	return 0;
+}
diff --git a/drivers/input/mouse/touchkit_ps2.h b/drivers/input/mouse/touchkit_ps2.h
index 8a0dd35..f32ef4c 100644
--- a/drivers/input/mouse/touchkit_ps2.h
+++ b/drivers/input/mouse/touchkit_ps2.h
@@ -14,12 +14,18 @@

 #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
 int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties);
+int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties);
 #else
 static inline int touchkit_ps2_detect(struct psmouse *psmouse,
 				      int set_properties)
 {
 	return -ENOSYS;
 }
+static inline int elftouch_ps2_detect(struct psmouse *psmouse,
+				      int set_properties)
+{
+	return -ENOSYS;
+}
 #endif /* CONFIG_MOUSE_PS2_TOUCHKIT */

 #endif
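
The ElfTouch variant reuses the TouchKit framing; the only difference the GET macros encode is which byte pair carries each axis. A worked decode on a hypothetical packet:

/* hypothetical 5-byte packet; bytes 1-4 are two 7-bit coordinate words */
unsigned char packet[5] = { 0x81, 0x05, 0x20, 0x03, 0x7f };
int tk_x  = (packet[1] << 7) | packet[2];	/* TOUCHKIT_GET_X: (5 << 7) | 32  = 672 */
int elf_x = (packet[3] << 7) | packet[4];	/* ELFTOUCH_GET_X: (3 << 7) | 127 = 511 */
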
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch
new file mode 100644
index 0000000000..a489339cbd
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch
@@ -0,0 +1,69 @@

Gitweb: http://git.kernel.org/linus/d6de2c80e9d758d2e36c21699117db6178c0f517
Commit: d6de2c80e9d758d2e36c21699117db6178c0f517
Parent: 7933a3cfba017330ebb25f9820cb25ec9cdd67cc
Author: Linus Torvalds <torvalds@linux-foundation.org>
AuthorDate: Fri Apr 10 12:17:41 2009 -0700
Committer: Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Sat Apr 11 12:44:49 2009 -0700

    async: Fix module loading async-work regression

    Several drivers use asynchronous work to do device discovery, and we
    synchronize with them in the compiled-in case before we actually try to
    mount root filesystems etc.

    However, when compiled as modules, that synchronization is missing - the
    module loading completes, but the driver hasn't actually finished
    probing for devices, and that means that any user mode that expects to
    use the devices after the 'insmod' is now potentially broken.

    We already saw one case of a similar issue in the ACPI battery code,
    where the kernel itself expected the module to be all done, and unmapped
    the init memory - but the async device discovery was still running.
    That got hacked around by just removing the "__init" (see commit
    5d38258ec026921a7b266f4047ebeaa75db358e5 "ACPI battery: fix async boot
    oops"), but the real fix is to just make the module loading wait for all
    async work to be completed.

    It will slow down module loading, but since common devices should be
    built in anyway, and since the bug is really annoying and hard to handle
    from user space (and caused several S3 resume regressions), the simple
    fix to wait is the right one.

    This fixes at least

    	http://bugzilla.kernel.org/show_bug.cgi?id=13063

    but probably a few other bugzilla entries too (12936, for example), and
    is confirmed to fix Rafael's storage driver breakage after resume bug
    report (no bugzilla entry).

    We should also be able to now revert that ACPI battery fix.

    Reported-and-tested-by: Rafael J. Wysocki <rjw@suse.com>
    Tested-by: Heinz Diehl <htd@fancy-poultry.org>
    Acked-by: Arjan van de Ven <arjan@linux.intel.com>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 kernel/module.c | 3 +++
 1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/kernel/module.c b/kernel/module.c
index 05f014e..e797812 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2388,6 +2388,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	blocking_notifier_call_chain(&module_notify_list,
 			MODULE_STATE_LIVE, mod);

+	/* We need to finish all async code before the module init sequence is done */
+	async_synchronize_full();
+
 	mutex_lock(&module_mutex);
 	/* Drop initial reference. */
 	module_put(mod);
--
To unsubscribe from this list: send the line "unsubscribe git-commits-head" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
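
For context, a sketch of the driver pattern this fix closes (a hypothetical module, not code from the patch): device discovery scheduled with async_schedule() used to outlive init_module(), so modprobe could return before the devices existed. With the added async_synchronize_full(), module load now waits for it.

#include <linux/async.h>
#include <linux/init.h>
#include <linux/module.h>

static void my_probe_async(void *data, async_cookie_t cookie)
{
	/* slow device discovery runs here, off the module init path */
}

static int __init my_driver_init(void)
{
	async_schedule(my_probe_async, NULL);
	return 0;	/* init_module() now waits for the work above */
}
module_init(my_driver_init);
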
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch
new file mode 100644
index 0000000000..3932a51ae0
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch
@@ -0,0 +1,139 @@
From: Rafael J. Wysocki <rjw@suse.com>
Organization: SUSE
To: Arjan van de Ven <arjan@linux.intel.com>
CC: Linus Torvalds <torvalds@linux-foundation.org>


OK, updated patch follows, with a changelog.

I've added this check to user.c too, because that code can be called
independently of the one in disk.c . Also, if resume is user space-driven,
it's a good idea to wait for all of the device probes to complete before
continuing.

Thanks,
Rafael

---
From: Rafael J. Wysocki <rjw@sisk.pl>
Subject: PM/Hibernate: Wait for SCSI devices scan to complete during resume

There is a race between resume from hibernation and the asynchronous
scanning of SCSI devices and to prevent it from happening we need to
call scsi_complete_async_scans() during resume from hibernation.

In addition, if the resume from hibernation is userland-driven, it's
better to wait for all device probes in the kernel to complete before
attempting to open the resume device.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
---
 drivers/scsi/scsi_priv.h      |  3 ---
 drivers/scsi/scsi_wait_scan.c |  2 +-
 include/scsi/scsi_scan.h      | 11 +++++++++++
 kernel/power/disk.c           |  8 ++++++++
 kernel/power/user.c           |  9 +++++++++
 5 files changed, 29 insertions(+), 4 deletions(-)

Index: linux-2.6/include/scsi/scsi_scan.h
===================================================================
--- /dev/null
+++ linux-2.6/include/scsi/scsi_scan.h
@@ -0,0 +1,11 @@
+#ifndef _SCSI_SCSI_SCAN_H
+#define _SCSI_SCSI_SCAN_H
+
+#ifdef CONFIG_SCSI
+/* drivers/scsi/scsi_scan.c */
+extern int scsi_complete_async_scans(void);
+#else
+static inline int scsi_complete_async_scans(void) { return 0; }
+#endif
+
+#endif /* _SCSI_SCSI_SCAN_H */
Index: linux-2.6/drivers/scsi/scsi_priv.h
===================================================================
--- linux-2.6.orig/drivers/scsi/scsi_priv.h
+++ linux-2.6/drivers/scsi/scsi_priv.h
@@ -38,9 +38,6 @@ static inline void scsi_log_completion(s
 { };
 #endif

-/* scsi_scan.c */
-int scsi_complete_async_scans(void);
-
 /* scsi_devinfo.c */
 extern int scsi_get_device_flags(struct scsi_device *sdev,
 				 const unsigned char *vendor,
Index: linux-2.6/drivers/scsi/scsi_wait_scan.c
===================================================================
--- linux-2.6.orig/drivers/scsi/scsi_wait_scan.c
+++ linux-2.6/drivers/scsi/scsi_wait_scan.c
@@ -11,7 +11,7 @@
 */

 #include <linux/module.h>
-#include "scsi_priv.h"
+#include <scsi/scsi_scan.h>

 static int __init wait_scan_init(void)
 {
Index: linux-2.6/kernel/power/disk.c
===================================================================
--- linux-2.6.orig/kernel/power/disk.c
+++ linux-2.6/kernel/power/disk.c
@@ -22,5 +22,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
+#include <scsi/scsi_scan.h>

 #include "power.h"
@@ -645,6 +646,13 @@ static int software_resume(void)
 		return 0;

 	/*
+	 * We can't depend on SCSI devices being available after loading one of
+	 * their modules if scsi_complete_async_scans() is not called and the
+	 * resume device usually is a SCSI one.
+	 */
+	scsi_complete_async_scans();
+
+	/*
 	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
 	 * is configured into the kernel. Since the regular hibernate
 	 * trigger path is via sysfs which takes a buffer mutex before
Index: linux-2.6/kernel/power/user.c
===================================================================
--- linux-2.6.orig/kernel/power/user.c
+++ linux-2.6/kernel/power/user.c
@@ -24,6 +24,7 @@
 #include <linux/cpu.h>
 #include <linux/freezer.h>
 #include <linux/smp_lock.h>
+#include <scsi/scsi_scan.h>

 #include <asm/uaccess.h>

@@ -92,6 +93,7 @@ static int snapshot_open(struct inode *i
 	filp->private_data = data;
 	memset(&data->handle, 0, sizeof(struct snapshot_handle));
 	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
+		/* Hibernating. The image device should be accessible. */
 		data->swap = swsusp_resume_device ?
 			swap_type_of(swsusp_resume_device, 0, NULL) : -1;
 		data->mode = O_RDONLY;
@@ -99,6 +101,13 @@ static int snapshot_open(struct inode *i
 		if (error)
 			pm_notifier_call_chain(PM_POST_HIBERNATION);
 	} else {
+		/*
+		 * Resuming. We may need to wait for the image device to
+		 * appear.
+		 */
+		wait_for_device_probe();
+		scsi_complete_async_scans();
+
 		data->swap = -1;
 		data->mode = O_WRONLY;
 		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.27.bb b/meta-moblin/packages/linux/linux-moblin_2.6.27.bb
deleted file mode 100644
index 82f7b435e6..0000000000
--- a/meta-moblin/packages/linux/linux-moblin_2.6.27.bb
+++ /dev/null
@@ -1,59 +0,0 @@
require linux-moblin.inc

PR = "r8"
PE = "1"

DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_netbook = "1"
DEFAULT_PREFERENCE_menlow = "1"

SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.27.tar.bz2 \
           file://0001-drm-remove-define-for-non-linux-systems.patch;patch=1 \
           file://0002-i915-remove-settable-use_mi_batchbuffer_start.patch;patch=1 \
           file://0003-i915-Ignore-X-server-provided-mmio-address.patch;patch=1 \
           file://0004-i915-Use-more-consistent-names-for-regs-and-store.patch;patch=1 \
           file://0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch;patch=1 \
           file://0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch;patch=1 \
           file://0007-i915-Initialize-hardware-status-page-at-device-load.patch;patch=1 \
           file://0008-Add-Intel-ACPI-IGD-OpRegion-support.patch;patch=1 \
           file://0009-drm-fix-sysfs-error-path.patch;patch=1 \
           file://0010-i915-separate-suspend-resume-functions.patch;patch=1 \
           file://0011-drm-vblank-rework.patch;patch=1 \
           file://0012-Export-shmem_file_setup-for-DRM-GEM.patch;patch=1 \
           file://0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch;patch=1 \
           file://0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch;patch=1 \
           file://0015-i915-Add-chip-set-ID-param.patch;patch=1 \
           file://0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch;patch=1 \
           file://0017-i915-Make-use-of-sarea_priv-conditional.patch;patch=1 \
           file://0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch;patch=1 \
           file://0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch;patch=1 \
           file://0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch;patch=1 \
           file://0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch;patch=1 \
           file://0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch;patch=1 \
           file://0023-drm-clean-up-many-sparse-warnings-in-i915.patch;patch=1 \
           file://0024-fastboot-create-a-asynchronous-initlevel.patch;patch=1 \
           file://0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch;patch=1 \
           file://0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch;patch=1 \
           file://0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch;patch=1 \
           file://0028-fastboot-sync-the-async-execution-before-late_initc.patch;patch=1 \
           file://0029-fastboot-make-fastboot-a-config-option.patch;patch=1 \
           file://0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch;patch=1 \
           file://0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch;patch=1 \
           file://0032-fastboot-remove-wait-for-all-devices-before-mounti.patch;patch=1 \
           file://0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch;patch=1 \
           file://0034-fastboot-fix-typo-in-init-Kconfig-text.patch;patch=1 \
           file://0035-fastboot-remove-duplicate-unpack_to_rootfs.patch;patch=1 \
           file://0036-warning-fix-init-do_mounts_md-c.patch;patch=1 \
           file://0037-init-initramfs.c-unused-function-when-compiling-wit.patch;patch=1 \
           file://0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch;patch=1 \
           file://0039-Add-a-script-to-visualize-the-kernel-boot-process.patch;patch=1 \
           file://0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch;patch=1 \
           file://0041-r8169-8101e.patch;patch=1 \
           file://0042-intelfb-945gme.patch;patch=1 \
           file://0043-superreadahead-patch.patch;patch=1 \
           file://defconfig-menlow \
           file://defconfig-netbook"

SRC_URI_append_menlow = " file://psb-driver.patch;patch=1"

S = "${WORKDIR}/linux-2.6.27"
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.28+2.6.29-rc2.bb b/meta-moblin/packages/linux/linux-moblin_2.6.28+2.6.29-rc2.bb
deleted file mode 100644
index 7607e528fb..0000000000
--- a/meta-moblin/packages/linux/linux-moblin_2.6.28+2.6.29-rc2.bb
+++ /dev/null
@@ -1,24 +0,0 @@
require linux-moblin.inc

PR = "r3"
PE = "1"

DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_netbook = "1"
DEFAULT_PREFERENCE_menlow = "1"

SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.28.tar.bz2 \
           ${KERNELORG_MIRROR}pub/linux/kernel/v2.6/testing/patch-2.6.29-rc2.bz2;patch=1 \
           file://0001-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch;patch=1 \
           file://0002-fastboot-remove-wait-for-all-devices-before-mounti.patch;patch=1 \
           file://0003-fastboot-remove-duplicate-unpack_to_rootfs.patch;patch=1 \
           file://0004-superreadahead-patch.patch;patch=1 \
           file://0005-fastboot-async-enable-default.patch;patch=1 \
           file://0006-Revert-drm-i915-GEM-on-PAE-has-problems-disable.patch;patch=1 \
           file://0007-acer-error-msg.patch;patch=1 \
           file://defconfig-menlow \
           file://defconfig-netbook"

SRC_URI_append_menlow = " file://i915_split.patch;patch=1 file://psb-driver.patch;patch=1"

S = "${WORKDIR}/linux-2.6.28"
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb b/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb
new file mode 100644
index 0000000000..d72c96f9cb
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb
@@ -0,0 +1,46 @@
require linux-moblin.inc

PR = "r10"

DEFAULT_PREFERENCE = "-1"
DEFAULT_PREFERENCE_netbook = "1"
DEFAULT_PREFERENCE_menlow = "1"

SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.29.1.tar.bz2 \
           file://linux-2.6-build-nonintconfig.patch;patch=1 \
           file://linux-2.6.29-retry-root-mount.patch;patch=1 \
           file://linux-2.6.29-dont-wait-for-mouse.patch;patch=1 \
           file://linux-2.6.29-fast-initrd.patch;patch=1 \
           file://linux-2.6.29-sreadahead.patch;patch=1 \
           file://linux-2.6.29-enable-async-by-default.patch;patch=1 \
           file://linux-2.6.29-drm-revert.patch;patch=1 \
           file://linux-2.6.19-modesetting-by-default.patch;patch=1 \
           file://linux-2.6.29-fast-kms.patch;patch=1 \
           file://linux-2.6.29-even-faster-kms.patch;patch=1 \
           file://linux-2.6.29-silence-acer-message.patch;patch=1 \
           file://linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch;patch=1 \
           file://linux-2.6.29-msiwind.patch;patch=1 \
           file://linux-2.6.29-flip-ide-net.patch;patch=1 \
           file://linux-2.6.29-kms-after-sata.patch;patch=1 \
           file://linux-2.6.29-jbd-longer-commit-interval.patch;patch=1 \
           file://linux-2.6.29-touchkit.patch;patch=1 \
           file://linux-2.6.30-fix-async.patch;patch=1 \
           file://linux-2.6.30-fix-suspend.patch;patch=1 \
           file://0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch;patch=1 \
           file://0002-drm-Add-a-tracker-for-global-objects.patch;patch=1 \
           file://0003-drm-Export-hash-table-functionality.patch;patch=1 \
           file://0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch;patch=1 \
           file://linux-2.6.29-psb-driver.patch;patch=1 \
           file://linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch;patch=1 \
           file://linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch;patch=1 \
           file://linux-2.6.29-pnv-agp.patch;patch=1 \
           file://linux-2.6.29-pnv-drm.patch;patch=1 \
           file://linux-2.6.29-pnv-fix-gtt-size.patch;patch=1 \
           file://linux-2.6.29-pnv-fix-i2c.patch;patch=1 \
           file://linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch;patch=1 \
           file://linux-2.6.29-timberdale.patch;patch=1 \
#           file://i915_split.patch;patch=1 \
           file://defconfig-menlow \
           file://defconfig-netbook"

S = "${WORKDIR}/linux-2.6.29.1"