summaryrefslogtreecommitdiffstats
path: root/meta-moblin
diff options
context:
space:
mode:
authorRichard Purdie <rpurdie@linux.intel.com>2009-01-16 15:39:26 +0000
committerRichard Purdie <rpurdie@linux.intel.com>2009-01-29 13:48:14 +0000
commitc9f94869681a36eb04d80f17db0ab9c94a3f847a (patch)
tree49fa2ccd0a2ec11a87f8a486015094bbf63219a3 /meta-moblin
parent837119e38e47366d85c61d8dcbc7567c6b117fc4 (diff)
downloadpoky-c9f94869681a36eb04d80f17db0ab9c94a3f847a.tar.gz
linux-moblin: Add patch to add the psb driver directly to the kernel instead of being standalone
Diffstat (limited to 'meta-moblin')
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow2113
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch34380
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.27.bb4
3 files changed, 35806 insertions, 691 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow
index aa5883a66e..8841ad3ea7 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27 3# Linux kernel version: 2.6.27
4# Wed Nov 5 17:17:12 2008 4# Mon Jan 12 17:53:54 2009
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
@@ -61,7 +61,7 @@ CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
61CONFIG_EXPERIMENTAL=y 61CONFIG_EXPERIMENTAL=y
62CONFIG_LOCK_KERNEL=y 62CONFIG_LOCK_KERNEL=y
63CONFIG_INIT_ENV_ARG_LIMIT=32 63CONFIG_INIT_ENV_ARG_LIMIT=32
64CONFIG_LOCALVERSION="-netbook" 64CONFIG_LOCALVERSION="-default"
65# CONFIG_LOCALVERSION_AUTO is not set 65# CONFIG_LOCALVERSION_AUTO is not set
66CONFIG_SWAP=y 66CONFIG_SWAP=y
67CONFIG_SYSVIPC=y 67CONFIG_SYSVIPC=y
@@ -71,41 +71,41 @@ CONFIG_BSD_PROCESS_ACCT=y
71CONFIG_BSD_PROCESS_ACCT_V3=y 71CONFIG_BSD_PROCESS_ACCT_V3=y
72CONFIG_TASKSTATS=y 72CONFIG_TASKSTATS=y
73CONFIG_TASK_DELAY_ACCT=y 73CONFIG_TASK_DELAY_ACCT=y
74CONFIG_TASK_XACCT=y 74# CONFIG_TASK_XACCT is not set
75CONFIG_TASK_IO_ACCOUNTING=y
76CONFIG_AUDIT=y 75CONFIG_AUDIT=y
77CONFIG_AUDITSYSCALL=y 76CONFIG_AUDITSYSCALL=y
78CONFIG_AUDIT_TREE=y 77CONFIG_AUDIT_TREE=y
79CONFIG_IKCONFIG=y 78CONFIG_IKCONFIG=y
80CONFIG_IKCONFIG_PROC=y 79CONFIG_IKCONFIG_PROC=y
81CONFIG_LOG_BUF_SHIFT=17 80CONFIG_LOG_BUF_SHIFT=15
82# CONFIG_CGROUPS is not set 81# CONFIG_CGROUPS is not set
83CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y 82CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
84# CONFIG_GROUP_SCHED is not set 83# CONFIG_GROUP_SCHED is not set
85# CONFIG_SYSFS_DEPRECATED_V2 is not set 84CONFIG_SYSFS_DEPRECATED=y
85CONFIG_SYSFS_DEPRECATED_V2=y
86CONFIG_RELAY=y 86CONFIG_RELAY=y
87CONFIG_NAMESPACES=y 87CONFIG_NAMESPACES=y
88# CONFIG_UTS_NS is not set 88# CONFIG_UTS_NS is not set
89# CONFIG_IPC_NS is not set 89# CONFIG_IPC_NS is not set
90CONFIG_USER_NS=y 90# CONFIG_USER_NS is not set
91# CONFIG_PID_NS is not set 91# CONFIG_PID_NS is not set
92CONFIG_BLK_DEV_INITRD=y 92CONFIG_BLK_DEV_INITRD=y
93CONFIG_INITRAMFS_SOURCE="" 93CONFIG_INITRAMFS_SOURCE=""
94CONFIG_CC_OPTIMIZE_FOR_SIZE=y 94CONFIG_CC_OPTIMIZE_FOR_SIZE=y
95CONFIG_FASTBOOT=y 95# CONFIG_FASTBOOT is not set
96CONFIG_SYSCTL=y 96CONFIG_SYSCTL=y
97# CONFIG_EMBEDDED is not set 97# CONFIG_EMBEDDED is not set
98CONFIG_UID16=y 98CONFIG_UID16=y
99CONFIG_SYSCTL_SYSCALL=y 99CONFIG_SYSCTL_SYSCALL=y
100CONFIG_KALLSYMS=y 100CONFIG_KALLSYMS=y
101CONFIG_KALLSYMS_ALL=y 101CONFIG_KALLSYMS_ALL=y
102CONFIG_KALLSYMS_EXTRA_PASS=y 102# CONFIG_KALLSYMS_EXTRA_PASS is not set
103CONFIG_HOTPLUG=y 103CONFIG_HOTPLUG=y
104CONFIG_PRINTK=y 104CONFIG_PRINTK=y
105CONFIG_BUG=y 105CONFIG_BUG=y
106CONFIG_ELF_CORE=y 106CONFIG_ELF_CORE=y
107CONFIG_PCSPKR_PLATFORM=y 107CONFIG_PCSPKR_PLATFORM=y
108# CONFIG_COMPAT_BRK is not set 108CONFIG_COMPAT_BRK=y
109CONFIG_BASE_FULL=y 109CONFIG_BASE_FULL=y
110CONFIG_FUTEX=y 110CONFIG_FUTEX=y
111CONFIG_ANON_INODES=y 111CONFIG_ANON_INODES=y
@@ -140,24 +140,24 @@ CONFIG_BASE_SMALL=0
140CONFIG_MODULES=y 140CONFIG_MODULES=y
141# CONFIG_MODULE_FORCE_LOAD is not set 141# CONFIG_MODULE_FORCE_LOAD is not set
142CONFIG_MODULE_UNLOAD=y 142CONFIG_MODULE_UNLOAD=y
143# CONFIG_MODULE_FORCE_UNLOAD is not set 143CONFIG_MODULE_FORCE_UNLOAD=y
144# CONFIG_MODVERSIONS is not set 144CONFIG_MODVERSIONS=y
145# CONFIG_MODULE_SRCVERSION_ALL is not set 145CONFIG_MODULE_SRCVERSION_ALL=y
146CONFIG_KMOD=y 146CONFIG_KMOD=y
147CONFIG_STOP_MACHINE=y 147CONFIG_STOP_MACHINE=y
148CONFIG_BLOCK=y 148CONFIG_BLOCK=y
149# CONFIG_LBD is not set 149CONFIG_LBD=y
150CONFIG_BLK_DEV_IO_TRACE=y 150CONFIG_BLK_DEV_IO_TRACE=y
151# CONFIG_LSF is not set 151CONFIG_LSF=y
152CONFIG_BLK_DEV_BSG=y 152# CONFIG_BLK_DEV_BSG is not set
153# CONFIG_BLK_DEV_INTEGRITY is not set 153# CONFIG_BLK_DEV_INTEGRITY is not set
154 154
155# 155#
156# IO Schedulers 156# IO Schedulers
157# 157#
158CONFIG_IOSCHED_NOOP=y 158CONFIG_IOSCHED_NOOP=y
159# CONFIG_IOSCHED_AS is not set 159CONFIG_IOSCHED_AS=y
160# CONFIG_IOSCHED_DEADLINE is not set 160CONFIG_IOSCHED_DEADLINE=y
161CONFIG_IOSCHED_CFQ=y 161CONFIG_IOSCHED_CFQ=y
162# CONFIG_DEFAULT_AS is not set 162# CONFIG_DEFAULT_AS is not set
163# CONFIG_DEFAULT_DEADLINE is not set 163# CONFIG_DEFAULT_DEADLINE is not set
@@ -176,21 +176,26 @@ CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
176CONFIG_SMP=y 176CONFIG_SMP=y
177CONFIG_X86_FIND_SMP_CONFIG=y 177CONFIG_X86_FIND_SMP_CONFIG=y
178CONFIG_X86_MPPARSE=y 178CONFIG_X86_MPPARSE=y
179CONFIG_X86_PC=y 179# CONFIG_X86_PC is not set
180# CONFIG_X86_ELAN is not set 180# CONFIG_X86_ELAN is not set
181# CONFIG_X86_VOYAGER is not set 181# CONFIG_X86_VOYAGER is not set
182# CONFIG_X86_GENERICARCH is not set 182CONFIG_X86_GENERICARCH=y
183# CONFIG_X86_NUMAQ is not set
184# CONFIG_X86_SUMMIT is not set
185# CONFIG_X86_ES7000 is not set
186# CONFIG_X86_BIGSMP is not set
183# CONFIG_X86_VSMP is not set 187# CONFIG_X86_VSMP is not set
184# CONFIG_X86_RDC321X is not set 188# CONFIG_X86_RDC321X is not set
185CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 189CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
186# CONFIG_PARAVIRT_GUEST is not set 190# CONFIG_PARAVIRT_GUEST is not set
187# CONFIG_MEMTEST is not set 191# CONFIG_MEMTEST is not set
192CONFIG_X86_CYCLONE_TIMER=y
188# CONFIG_M386 is not set 193# CONFIG_M386 is not set
189# CONFIG_M486 is not set 194# CONFIG_M486 is not set
190# CONFIG_M586 is not set 195CONFIG_M586=y
191# CONFIG_M586TSC is not set 196# CONFIG_M586TSC is not set
192# CONFIG_M586MMX is not set 197# CONFIG_M586MMX is not set
193CONFIG_M686=y 198# CONFIG_M686 is not set
194# CONFIG_MPENTIUMII is not set 199# CONFIG_MPENTIUMII is not set
195# CONFIG_MPENTIUMIII is not set 200# CONFIG_MPENTIUMIII is not set
196# CONFIG_MPENTIUMM is not set 201# CONFIG_MPENTIUMM is not set
@@ -211,27 +216,25 @@ CONFIG_M686=y
211# CONFIG_MPSC is not set 216# CONFIG_MPSC is not set
212# CONFIG_MCORE2 is not set 217# CONFIG_MCORE2 is not set
213# CONFIG_GENERIC_CPU is not set 218# CONFIG_GENERIC_CPU is not set
214# CONFIG_X86_GENERIC is not set 219CONFIG_X86_GENERIC=y
215CONFIG_X86_CPU=y 220CONFIG_X86_CPU=y
216CONFIG_X86_CMPXCHG=y 221CONFIG_X86_CMPXCHG=y
217CONFIG_X86_L1_CACHE_SHIFT=5 222CONFIG_X86_L1_CACHE_SHIFT=7
218CONFIG_X86_XADD=y 223CONFIG_X86_XADD=y
219# CONFIG_X86_PPRO_FENCE is not set 224CONFIG_X86_PPRO_FENCE=y
225CONFIG_X86_F00F_BUG=y
220CONFIG_X86_WP_WORKS_OK=y 226CONFIG_X86_WP_WORKS_OK=y
221CONFIG_X86_INVLPG=y 227CONFIG_X86_INVLPG=y
222CONFIG_X86_BSWAP=y 228CONFIG_X86_BSWAP=y
223CONFIG_X86_POPAD_OK=y 229CONFIG_X86_POPAD_OK=y
224CONFIG_X86_USE_PPRO_CHECKSUM=y 230CONFIG_X86_ALIGNMENT_16=y
225CONFIG_X86_TSC=y 231CONFIG_X86_INTEL_USERCOPY=y
226CONFIG_X86_CMOV=y
227CONFIG_X86_MINIMUM_CPU_FAMILY=4 232CONFIG_X86_MINIMUM_CPU_FAMILY=4
228CONFIG_X86_DEBUGCTLMSR=y
229CONFIG_HPET_TIMER=y 233CONFIG_HPET_TIMER=y
230CONFIG_HPET_EMULATE_RTC=y
231CONFIG_DMI=y 234CONFIG_DMI=y
232# CONFIG_IOMMU_HELPER is not set 235# CONFIG_IOMMU_HELPER is not set
233CONFIG_NR_CPUS=2 236CONFIG_NR_CPUS=8
234CONFIG_SCHED_SMT=y 237# CONFIG_SCHED_SMT is not set
235CONFIG_SCHED_MC=y 238CONFIG_SCHED_MC=y
236# CONFIG_PREEMPT_NONE is not set 239# CONFIG_PREEMPT_NONE is not set
237CONFIG_PREEMPT_VOLUNTARY=y 240CONFIG_PREEMPT_VOLUNTARY=y
@@ -239,63 +242,55 @@ CONFIG_PREEMPT_VOLUNTARY=y
239CONFIG_X86_LOCAL_APIC=y 242CONFIG_X86_LOCAL_APIC=y
240CONFIG_X86_IO_APIC=y 243CONFIG_X86_IO_APIC=y
241CONFIG_X86_MCE=y 244CONFIG_X86_MCE=y
242# CONFIG_X86_MCE_NONFATAL is not set 245CONFIG_X86_MCE_NONFATAL=y
243# CONFIG_X86_MCE_P4THERMAL is not set 246# CONFIG_X86_MCE_P4THERMAL is not set
244CONFIG_VM86=y 247CONFIG_VM86=y
245# CONFIG_TOSHIBA is not set 248# CONFIG_TOSHIBA is not set
246# CONFIG_I8K is not set 249# CONFIG_I8K is not set
247# CONFIG_X86_REBOOTFIXUPS is not set 250CONFIG_X86_REBOOTFIXUPS=y
248CONFIG_MICROCODE=y 251CONFIG_MICROCODE=m
249CONFIG_MICROCODE_OLD_INTERFACE=y 252CONFIG_MICROCODE_OLD_INTERFACE=y
250CONFIG_X86_MSR=y 253CONFIG_X86_MSR=m
251CONFIG_X86_CPUID=y 254CONFIG_X86_CPUID=m
252# CONFIG_NOHIGHMEM is not set 255# CONFIG_NOHIGHMEM is not set
253CONFIG_HIGHMEM4G=y 256CONFIG_HIGHMEM4G=y
254# CONFIG_HIGHMEM64G is not set 257# CONFIG_HIGHMEM64G is not set
255CONFIG_PAGE_OFFSET=0xC0000000 258CONFIG_PAGE_OFFSET=0xC0000000
256CONFIG_HIGHMEM=y 259CONFIG_HIGHMEM=y
257CONFIG_NEED_NODE_MEMMAP_SIZE=y
258CONFIG_ARCH_FLATMEM_ENABLE=y
259CONFIG_ARCH_SPARSEMEM_ENABLE=y
260CONFIG_ARCH_SELECT_MEMORY_MODEL=y
261CONFIG_SELECT_MEMORY_MODEL=y 260CONFIG_SELECT_MEMORY_MODEL=y
262# CONFIG_FLATMEM_MANUAL is not set 261CONFIG_FLATMEM_MANUAL=y
263# CONFIG_DISCONTIGMEM_MANUAL is not set 262# CONFIG_DISCONTIGMEM_MANUAL is not set
264CONFIG_SPARSEMEM_MANUAL=y 263# CONFIG_SPARSEMEM_MANUAL is not set
265CONFIG_SPARSEMEM=y 264CONFIG_FLATMEM=y
266CONFIG_HAVE_MEMORY_PRESENT=y 265CONFIG_FLAT_NODE_MEM_MAP=y
267CONFIG_SPARSEMEM_STATIC=y 266# CONFIG_SPARSEMEM_STATIC is not set
268# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set 267# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
269
270#
271# Memory hotplug is currently incompatible with Software Suspend
272#
273CONFIG_PAGEFLAGS_EXTENDED=y 268CONFIG_PAGEFLAGS_EXTENDED=y
274CONFIG_SPLIT_PTLOCK_CPUS=4 269CONFIG_SPLIT_PTLOCK_CPUS=4
275CONFIG_RESOURCES_64BIT=y 270# CONFIG_RESOURCES_64BIT is not set
276CONFIG_ZONE_DMA_FLAG=1 271CONFIG_ZONE_DMA_FLAG=1
277CONFIG_BOUNCE=y 272CONFIG_BOUNCE=y
278CONFIG_VIRT_TO_BUS=y 273CONFIG_VIRT_TO_BUS=y
279# CONFIG_HIGHPTE is not set 274CONFIG_HIGHPTE=y
280# CONFIG_MATH_EMULATION is not set 275# CONFIG_MATH_EMULATION is not set
281CONFIG_MTRR=y 276CONFIG_MTRR=y
282# CONFIG_MTRR_SANITIZER is not set 277# CONFIG_MTRR_SANITIZER is not set
283# CONFIG_X86_PAT is not set 278# CONFIG_X86_PAT is not set
284# CONFIG_EFI is not set 279CONFIG_EFI=y
285# CONFIG_IRQBALANCE is not set 280# CONFIG_IRQBALANCE is not set
286# CONFIG_SECCOMP is not set 281CONFIG_SECCOMP=y
287# CONFIG_HZ_100 is not set 282# CONFIG_HZ_100 is not set
288# CONFIG_HZ_250 is not set 283CONFIG_HZ_250=y
289# CONFIG_HZ_300 is not set 284# CONFIG_HZ_300 is not set
290CONFIG_HZ_1000=y 285# CONFIG_HZ_1000 is not set
291CONFIG_HZ=1000 286CONFIG_HZ=250
292CONFIG_SCHED_HRTICK=y 287CONFIG_SCHED_HRTICK=y
293CONFIG_KEXEC=y 288CONFIG_KEXEC=y
294CONFIG_CRASH_DUMP=y 289# CONFIG_CRASH_DUMP is not set
295# CONFIG_KEXEC_JUMP is not set 290# CONFIG_KEXEC_JUMP is not set
296CONFIG_PHYSICAL_START=0x400000 291CONFIG_PHYSICAL_START=0x100000
297CONFIG_RELOCATABLE=y 292# CONFIG_RELOCATABLE is not set
298CONFIG_PHYSICAL_ALIGN=0x200000 293CONFIG_PHYSICAL_ALIGN=0x100000
299CONFIG_HOTPLUG_CPU=y 294CONFIG_HOTPLUG_CPU=y
300CONFIG_COMPAT_VDSO=y 295CONFIG_COMPAT_VDSO=y
301CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 296CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
@@ -304,15 +299,10 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
304# Power management options 299# Power management options
305# 300#
306CONFIG_PM=y 301CONFIG_PM=y
307CONFIG_PM_DEBUG=y 302# CONFIG_PM_DEBUG is not set
308# CONFIG_PM_VERBOSE is not set
309CONFIG_CAN_PM_TRACE=y
310CONFIG_PM_TRACE=y
311CONFIG_PM_TRACE_RTC=y
312CONFIG_PM_SLEEP_SMP=y 303CONFIG_PM_SLEEP_SMP=y
313CONFIG_PM_SLEEP=y 304CONFIG_PM_SLEEP=y
314CONFIG_SUSPEND=y 305CONFIG_SUSPEND=y
315# CONFIG_PM_TEST_SUSPEND is not set
316CONFIG_SUSPEND_FREEZER=y 306CONFIG_SUSPEND_FREEZER=y
317CONFIG_HIBERNATION=y 307CONFIG_HIBERNATION=y
318CONFIG_PM_STD_PARTITION="" 308CONFIG_PM_STD_PARTITION=""
@@ -323,7 +313,7 @@ CONFIG_ACPI_PROCFS_POWER=y
323CONFIG_ACPI_SYSFS_POWER=y 313CONFIG_ACPI_SYSFS_POWER=y
324CONFIG_ACPI_PROC_EVENT=y 314CONFIG_ACPI_PROC_EVENT=y
325CONFIG_ACPI_AC=y 315CONFIG_ACPI_AC=y
326CONFIG_ACPI_BATTERY=m 316CONFIG_ACPI_BATTERY=y
327CONFIG_ACPI_BUTTON=y 317CONFIG_ACPI_BUTTON=y
328CONFIG_ACPI_VIDEO=y 318CONFIG_ACPI_VIDEO=y
329CONFIG_ACPI_FAN=y 319CONFIG_ACPI_FAN=y
@@ -332,11 +322,12 @@ CONFIG_ACPI_DOCK=y
332CONFIG_ACPI_PROCESSOR=y 322CONFIG_ACPI_PROCESSOR=y
333CONFIG_ACPI_HOTPLUG_CPU=y 323CONFIG_ACPI_HOTPLUG_CPU=y
334CONFIG_ACPI_THERMAL=y 324CONFIG_ACPI_THERMAL=y
335CONFIG_ACPI_WMI=m 325# CONFIG_ACPI_WMI is not set
336CONFIG_ACPI_ASUS=y 326# CONFIG_ACPI_ASUS is not set
337# CONFIG_ACPI_TOSHIBA is not set 327# CONFIG_ACPI_TOSHIBA is not set
328CONFIG_ACPI_CUSTOM_DSDT_FILE=""
338# CONFIG_ACPI_CUSTOM_DSDT is not set 329# CONFIG_ACPI_CUSTOM_DSDT is not set
339CONFIG_ACPI_BLACKLIST_YEAR=0 330CONFIG_ACPI_BLACKLIST_YEAR=2001
340# CONFIG_ACPI_DEBUG is not set 331# CONFIG_ACPI_DEBUG is not set
341CONFIG_ACPI_EC=y 332CONFIG_ACPI_EC=y
342# CONFIG_ACPI_PCI_SLOT is not set 333# CONFIG_ACPI_PCI_SLOT is not set
@@ -344,27 +335,34 @@ CONFIG_ACPI_POWER=y
344CONFIG_ACPI_SYSTEM=y 335CONFIG_ACPI_SYSTEM=y
345CONFIG_X86_PM_TIMER=y 336CONFIG_X86_PM_TIMER=y
346CONFIG_ACPI_CONTAINER=y 337CONFIG_ACPI_CONTAINER=y
347CONFIG_ACPI_SBS=m 338CONFIG_ACPI_SBS=y
348# CONFIG_APM is not set 339CONFIG_X86_APM_BOOT=y
340CONFIG_APM=y
341# CONFIG_APM_IGNORE_USER_SUSPEND is not set
342CONFIG_APM_DO_ENABLE=y
343# CONFIG_APM_CPU_IDLE is not set
344CONFIG_APM_DISPLAY_BLANK=y
345CONFIG_APM_ALLOW_INTS=y
346# CONFIG_APM_REAL_MODE_POWER_OFF is not set
349 347
350# 348#
351# CPU Frequency scaling 349# CPU Frequency scaling
352# 350#
353CONFIG_CPU_FREQ=y 351CONFIG_CPU_FREQ=y
354CONFIG_CPU_FREQ_TABLE=y 352CONFIG_CPU_FREQ_TABLE=y
355CONFIG_CPU_FREQ_DEBUG=y 353# CONFIG_CPU_FREQ_DEBUG is not set
356CONFIG_CPU_FREQ_STAT=m 354CONFIG_CPU_FREQ_STAT=m
357CONFIG_CPU_FREQ_STAT_DETAILS=y 355CONFIG_CPU_FREQ_STAT_DETAILS=y
358CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y 356# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
359# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set 357# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
360# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set 358CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
361# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set 359# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
362# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set 360# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
363CONFIG_CPU_FREQ_GOV_PERFORMANCE=y 361CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
364# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set 362CONFIG_CPU_FREQ_GOV_POWERSAVE=m
365CONFIG_CPU_FREQ_GOV_USERSPACE=y 363CONFIG_CPU_FREQ_GOV_USERSPACE=y
366CONFIG_CPU_FREQ_GOV_ONDEMAND=y 364CONFIG_CPU_FREQ_GOV_ONDEMAND=m
367# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set 365CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
368 366
369# 367#
370# CPUFreq processor drivers 368# CPUFreq processor drivers
@@ -374,10 +372,11 @@ CONFIG_X86_ACPI_CPUFREQ=y
374# CONFIG_X86_POWERNOW_K7 is not set 372# CONFIG_X86_POWERNOW_K7 is not set
375# CONFIG_X86_POWERNOW_K8 is not set 373# CONFIG_X86_POWERNOW_K8 is not set
376# CONFIG_X86_GX_SUSPMOD is not set 374# CONFIG_X86_GX_SUSPMOD is not set
377# CONFIG_X86_SPEEDSTEP_CENTRINO is not set 375CONFIG_X86_SPEEDSTEP_CENTRINO=m
378# CONFIG_X86_SPEEDSTEP_ICH is not set 376CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
379# CONFIG_X86_SPEEDSTEP_SMI is not set 377CONFIG_X86_SPEEDSTEP_ICH=m
380# CONFIG_X86_P4_CLOCKMOD is not set 378CONFIG_X86_SPEEDSTEP_SMI=m
379CONFIG_X86_P4_CLOCKMOD=m
381# CONFIG_X86_CPUFREQ_NFORCE2 is not set 380# CONFIG_X86_CPUFREQ_NFORCE2 is not set
382# CONFIG_X86_LONGRUN is not set 381# CONFIG_X86_LONGRUN is not set
383# CONFIG_X86_LONGHAUL is not set 382# CONFIG_X86_LONGHAUL is not set
@@ -387,7 +386,8 @@ CONFIG_X86_ACPI_CPUFREQ=y
387# shared options 386# shared options
388# 387#
389# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set 388# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
390# CONFIG_X86_SPEEDSTEP_LIB is not set 389CONFIG_X86_SPEEDSTEP_LIB=m
390CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y
391CONFIG_CPU_IDLE=y 391CONFIG_CPU_IDLE=y
392CONFIG_CPU_IDLE_GOV_LADDER=y 392CONFIG_CPU_IDLE_GOV_LADDER=y
393CONFIG_CPU_IDLE_GOV_MENU=y 393CONFIG_CPU_IDLE_GOV_MENU=y
@@ -406,52 +406,70 @@ CONFIG_PCI_DIRECT=y
406CONFIG_PCI_MMCONFIG=y 406CONFIG_PCI_MMCONFIG=y
407CONFIG_PCI_DOMAINS=y 407CONFIG_PCI_DOMAINS=y
408CONFIG_PCIEPORTBUS=y 408CONFIG_PCIEPORTBUS=y
409CONFIG_HOTPLUG_PCI_PCIE=m
409CONFIG_PCIEAER=y 410CONFIG_PCIEAER=y
410CONFIG_PCIEASPM=y 411# CONFIG_PCIEASPM is not set
411# CONFIG_PCIEASPM_DEBUG is not set
412CONFIG_ARCH_SUPPORTS_MSI=y 412CONFIG_ARCH_SUPPORTS_MSI=y
413CONFIG_PCI_MSI=y 413CONFIG_PCI_MSI=y
414CONFIG_PCI_LEGACY=y 414CONFIG_PCI_LEGACY=y
415# CONFIG_PCI_DEBUG is not set 415# CONFIG_PCI_DEBUG is not set
416CONFIG_HT_IRQ=y 416CONFIG_HT_IRQ=y
417CONFIG_ISA_DMA_API=y 417CONFIG_ISA_DMA_API=y
418# CONFIG_ISA is not set 418CONFIG_ISA=y
419# CONFIG_EISA is not set
419# CONFIG_MCA is not set 420# CONFIG_MCA is not set
420# CONFIG_SCx200 is not set 421# CONFIG_SCx200 is not set
421# CONFIG_OLPC is not set 422# CONFIG_OLPC is not set
422CONFIG_K8_NB=y
423# CONFIG_PCCARD is not set 423# CONFIG_PCCARD is not set
424# CONFIG_HOTPLUG_PCI is not set 424CONFIG_HOTPLUG_PCI=m
425CONFIG_HOTPLUG_PCI_FAKE=m
426# CONFIG_HOTPLUG_PCI_COMPAQ is not set
427# CONFIG_HOTPLUG_PCI_IBM is not set
428CONFIG_HOTPLUG_PCI_ACPI=m
429CONFIG_HOTPLUG_PCI_ACPI_IBM=m
430CONFIG_HOTPLUG_PCI_CPCI=y
431CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
432CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
433CONFIG_HOTPLUG_PCI_SHPC=m
425 434
426# 435#
427# Executable file formats / Emulations 436# Executable file formats / Emulations
428# 437#
429CONFIG_BINFMT_ELF=y 438CONFIG_BINFMT_ELF=y
430# CONFIG_BINFMT_AOUT is not set 439CONFIG_BINFMT_AOUT=m
431CONFIG_BINFMT_MISC=y 440CONFIG_BINFMT_MISC=m
432CONFIG_NET=y 441CONFIG_NET=y
433 442
434# 443#
435# Networking options 444# Networking options
436# 445#
437CONFIG_PACKET=y 446CONFIG_PACKET=m
438CONFIG_PACKET_MMAP=y 447CONFIG_PACKET_MMAP=y
439CONFIG_UNIX=y 448CONFIG_UNIX=y
440CONFIG_XFRM=y 449CONFIG_XFRM=y
441CONFIG_XFRM_USER=y 450CONFIG_XFRM_USER=m
442CONFIG_XFRM_SUB_POLICY=y 451# CONFIG_XFRM_SUB_POLICY is not set
443CONFIG_XFRM_MIGRATE=y 452# CONFIG_XFRM_MIGRATE is not set
444CONFIG_XFRM_STATISTICS=y 453# CONFIG_XFRM_STATISTICS is not set
445CONFIG_XFRM_IPCOMP=m 454CONFIG_XFRM_IPCOMP=m
446CONFIG_NET_KEY=m 455CONFIG_NET_KEY=m
447CONFIG_NET_KEY_MIGRATE=y 456# CONFIG_NET_KEY_MIGRATE is not set
448CONFIG_INET=y 457CONFIG_INET=y
449CONFIG_IP_MULTICAST=y 458CONFIG_IP_MULTICAST=y
450# CONFIG_IP_ADVANCED_ROUTER is not set 459CONFIG_IP_ADVANCED_ROUTER=y
460CONFIG_ASK_IP_FIB_HASH=y
461# CONFIG_IP_FIB_TRIE is not set
451CONFIG_IP_FIB_HASH=y 462CONFIG_IP_FIB_HASH=y
452# CONFIG_IP_PNP is not set 463CONFIG_IP_MULTIPLE_TABLES=y
453# CONFIG_NET_IPIP is not set 464CONFIG_IP_ROUTE_MULTIPATH=y
454# CONFIG_NET_IPGRE is not set 465CONFIG_IP_ROUTE_VERBOSE=y
466CONFIG_IP_PNP=y
467CONFIG_IP_PNP_DHCP=y
468CONFIG_IP_PNP_BOOTP=y
469CONFIG_IP_PNP_RARP=y
470CONFIG_NET_IPIP=m
471CONFIG_NET_IPGRE=m
472CONFIG_NET_IPGRE_BROADCAST=y
455CONFIG_IP_MROUTE=y 473CONFIG_IP_MROUTE=y
456CONFIG_IP_PIMSM_V1=y 474CONFIG_IP_PIMSM_V1=y
457CONFIG_IP_PIMSM_V2=y 475CONFIG_IP_PIMSM_V2=y
@@ -464,58 +482,87 @@ CONFIG_INET_XFRM_TUNNEL=m
464CONFIG_INET_TUNNEL=m 482CONFIG_INET_TUNNEL=m
465CONFIG_INET_XFRM_MODE_TRANSPORT=m 483CONFIG_INET_XFRM_MODE_TRANSPORT=m
466CONFIG_INET_XFRM_MODE_TUNNEL=m 484CONFIG_INET_XFRM_MODE_TUNNEL=m
467CONFIG_INET_XFRM_MODE_BEET=m 485CONFIG_INET_XFRM_MODE_BEET=y
468CONFIG_INET_LRO=y 486# CONFIG_INET_LRO is not set
469CONFIG_INET_DIAG=m 487CONFIG_INET_DIAG=m
470CONFIG_INET_TCP_DIAG=m 488CONFIG_INET_TCP_DIAG=m
471CONFIG_TCP_CONG_ADVANCED=y 489CONFIG_TCP_CONG_ADVANCED=y
472CONFIG_TCP_CONG_BIC=m 490CONFIG_TCP_CONG_BIC=m
473CONFIG_TCP_CONG_CUBIC=y 491CONFIG_TCP_CONG_CUBIC=m
474# CONFIG_TCP_CONG_WESTWOOD is not set 492CONFIG_TCP_CONG_WESTWOOD=m
475# CONFIG_TCP_CONG_HTCP is not set 493CONFIG_TCP_CONG_HTCP=m
476# CONFIG_TCP_CONG_HSTCP is not set 494CONFIG_TCP_CONG_HSTCP=m
477# CONFIG_TCP_CONG_HYBLA is not set 495CONFIG_TCP_CONG_HYBLA=m
478# CONFIG_TCP_CONG_VEGAS is not set 496CONFIG_TCP_CONG_VEGAS=m
479# CONFIG_TCP_CONG_SCALABLE is not set 497CONFIG_TCP_CONG_SCALABLE=m
480# CONFIG_TCP_CONG_LP is not set 498CONFIG_TCP_CONG_LP=m
481# CONFIG_TCP_CONG_VENO is not set 499CONFIG_TCP_CONG_VENO=m
482# CONFIG_TCP_CONG_YEAH is not set 500# CONFIG_TCP_CONG_YEAH is not set
483# CONFIG_TCP_CONG_ILLINOIS is not set 501# CONFIG_TCP_CONG_ILLINOIS is not set
484# CONFIG_DEFAULT_BIC is not set 502# CONFIG_DEFAULT_BIC is not set
485CONFIG_DEFAULT_CUBIC=y 503# CONFIG_DEFAULT_CUBIC is not set
486# CONFIG_DEFAULT_HTCP is not set 504# CONFIG_DEFAULT_HTCP is not set
487# CONFIG_DEFAULT_VEGAS is not set 505# CONFIG_DEFAULT_VEGAS is not set
488# CONFIG_DEFAULT_WESTWOOD is not set 506# CONFIG_DEFAULT_WESTWOOD is not set
489# CONFIG_DEFAULT_RENO is not set 507CONFIG_DEFAULT_RENO=y
490CONFIG_DEFAULT_TCP_CONG="cubic" 508CONFIG_DEFAULT_TCP_CONG="reno"
491CONFIG_TCP_MD5SIG=y 509# CONFIG_TCP_MD5SIG is not set
492# CONFIG_IP_VS is not set 510CONFIG_IP_VS=m
493CONFIG_IPV6=y 511# CONFIG_IP_VS_DEBUG is not set
512CONFIG_IP_VS_TAB_BITS=12
513
514#
515# IPVS transport protocol load balancing support
516#
517CONFIG_IP_VS_PROTO_TCP=y
518CONFIG_IP_VS_PROTO_UDP=y
519CONFIG_IP_VS_PROTO_ESP=y
520CONFIG_IP_VS_PROTO_AH=y
521
522#
523# IPVS scheduler
524#
525CONFIG_IP_VS_RR=m
526CONFIG_IP_VS_WRR=m
527CONFIG_IP_VS_LC=m
528CONFIG_IP_VS_WLC=m
529CONFIG_IP_VS_LBLC=m
530CONFIG_IP_VS_LBLCR=m
531CONFIG_IP_VS_DH=m
532CONFIG_IP_VS_SH=m
533CONFIG_IP_VS_SED=m
534CONFIG_IP_VS_NQ=m
535
536#
537# IPVS application helper
538#
539CONFIG_IP_VS_FTP=m
540CONFIG_IPV6=m
494CONFIG_IPV6_PRIVACY=y 541CONFIG_IPV6_PRIVACY=y
495CONFIG_IPV6_ROUTER_PREF=y 542CONFIG_IPV6_ROUTER_PREF=y
496CONFIG_IPV6_ROUTE_INFO=y 543CONFIG_IPV6_ROUTE_INFO=y
497CONFIG_IPV6_OPTIMISTIC_DAD=y 544# CONFIG_IPV6_OPTIMISTIC_DAD is not set
498CONFIG_INET6_AH=m 545CONFIG_INET6_AH=m
499CONFIG_INET6_ESP=m 546CONFIG_INET6_ESP=m
500CONFIG_INET6_IPCOMP=m 547CONFIG_INET6_IPCOMP=m
501CONFIG_IPV6_MIP6=m 548# CONFIG_IPV6_MIP6 is not set
502CONFIG_INET6_XFRM_TUNNEL=m 549CONFIG_INET6_XFRM_TUNNEL=m
503CONFIG_INET6_TUNNEL=m 550CONFIG_INET6_TUNNEL=m
504CONFIG_INET6_XFRM_MODE_TRANSPORT=m 551CONFIG_INET6_XFRM_MODE_TRANSPORT=m
505CONFIG_INET6_XFRM_MODE_TUNNEL=m 552CONFIG_INET6_XFRM_MODE_TUNNEL=m
506CONFIG_INET6_XFRM_MODE_BEET=m 553CONFIG_INET6_XFRM_MODE_BEET=m
507CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m 554# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
508CONFIG_IPV6_SIT=m 555CONFIG_IPV6_SIT=m
509CONFIG_IPV6_NDISC_NODETYPE=y 556CONFIG_IPV6_NDISC_NODETYPE=y
510CONFIG_IPV6_TUNNEL=m 557CONFIG_IPV6_TUNNEL=m
511CONFIG_IPV6_MULTIPLE_TABLES=y 558# CONFIG_IPV6_MULTIPLE_TABLES is not set
512CONFIG_IPV6_SUBTREES=y
513# CONFIG_IPV6_MROUTE is not set 559# CONFIG_IPV6_MROUTE is not set
514CONFIG_NETLABEL=y 560# CONFIG_NETLABEL is not set
515CONFIG_NETWORK_SECMARK=y 561CONFIG_NETWORK_SECMARK=y
516CONFIG_NETFILTER=y 562CONFIG_NETFILTER=y
517# CONFIG_NETFILTER_DEBUG is not set 563# CONFIG_NETFILTER_DEBUG is not set
518CONFIG_NETFILTER_ADVANCED=y 564CONFIG_NETFILTER_ADVANCED=y
565CONFIG_BRIDGE_NETFILTER=y
519 566
520# 567#
521# Core Netfilter Configuration 568# Core Netfilter Configuration
@@ -523,105 +570,60 @@ CONFIG_NETFILTER_ADVANCED=y
523CONFIG_NETFILTER_NETLINK=m 570CONFIG_NETFILTER_NETLINK=m
524CONFIG_NETFILTER_NETLINK_QUEUE=m 571CONFIG_NETFILTER_NETLINK_QUEUE=m
525CONFIG_NETFILTER_NETLINK_LOG=m 572CONFIG_NETFILTER_NETLINK_LOG=m
526CONFIG_NF_CONNTRACK=y 573# CONFIG_NF_CONNTRACK is not set
527CONFIG_NF_CT_ACCT=y 574CONFIG_NETFILTER_XTABLES=m
528CONFIG_NF_CONNTRACK_MARK=y
529CONFIG_NF_CONNTRACK_SECMARK=y
530CONFIG_NF_CONNTRACK_EVENTS=y
531# CONFIG_NF_CT_PROTO_DCCP is not set
532CONFIG_NF_CT_PROTO_GRE=m
533CONFIG_NF_CT_PROTO_SCTP=m
534CONFIG_NF_CT_PROTO_UDPLITE=m
535CONFIG_NF_CONNTRACK_AMANDA=m
536CONFIG_NF_CONNTRACK_FTP=m
537CONFIG_NF_CONNTRACK_H323=m
538CONFIG_NF_CONNTRACK_IRC=m
539CONFIG_NF_CONNTRACK_NETBIOS_NS=m
540CONFIG_NF_CONNTRACK_PPTP=m
541CONFIG_NF_CONNTRACK_SANE=m
542CONFIG_NF_CONNTRACK_SIP=m
543CONFIG_NF_CONNTRACK_TFTP=m
544CONFIG_NF_CT_NETLINK=m
545CONFIG_NETFILTER_XTABLES=y
546CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 575CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
547CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 576# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
548CONFIG_NETFILTER_XT_TARGET_DSCP=m
549CONFIG_NETFILTER_XT_TARGET_MARK=m 577CONFIG_NETFILTER_XT_TARGET_MARK=m
550CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 578CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
551CONFIG_NETFILTER_XT_TARGET_NFLOG=m 579# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
552CONFIG_NETFILTER_XT_TARGET_NOTRACK=m 580# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
553CONFIG_NETFILTER_XT_TARGET_RATEEST=m 581# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
554CONFIG_NETFILTER_XT_TARGET_TRACE=m
555CONFIG_NETFILTER_XT_TARGET_SECMARK=m 582CONFIG_NETFILTER_XT_TARGET_SECMARK=m
556CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m 583# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
557CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 584# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
558CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
559CONFIG_NETFILTER_XT_MATCH_COMMENT=m 585CONFIG_NETFILTER_XT_MATCH_COMMENT=m
560CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m 586CONFIG_NETFILTER_XT_MATCH_DCCP=m
561CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m 587# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
562CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
563CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
564# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
565CONFIG_NETFILTER_XT_MATCH_DSCP=m
566CONFIG_NETFILTER_XT_MATCH_ESP=m 588CONFIG_NETFILTER_XT_MATCH_ESP=m
567CONFIG_NETFILTER_XT_MATCH_HELPER=m 589# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
568CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
569CONFIG_NETFILTER_XT_MATCH_LENGTH=m 590CONFIG_NETFILTER_XT_MATCH_LENGTH=m
570CONFIG_NETFILTER_XT_MATCH_LIMIT=m 591CONFIG_NETFILTER_XT_MATCH_LIMIT=m
571CONFIG_NETFILTER_XT_MATCH_MAC=m 592CONFIG_NETFILTER_XT_MATCH_MAC=m
572CONFIG_NETFILTER_XT_MATCH_MARK=m 593CONFIG_NETFILTER_XT_MATCH_MARK=m
573CONFIG_NETFILTER_XT_MATCH_OWNER=m 594# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
574CONFIG_NETFILTER_XT_MATCH_POLICY=m 595CONFIG_NETFILTER_XT_MATCH_POLICY=m
575CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m 596CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
597CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
576CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 598CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
577CONFIG_NETFILTER_XT_MATCH_QUOTA=m 599CONFIG_NETFILTER_XT_MATCH_QUOTA=m
578CONFIG_NETFILTER_XT_MATCH_RATEEST=m 600# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
579CONFIG_NETFILTER_XT_MATCH_REALM=m 601CONFIG_NETFILTER_XT_MATCH_REALM=m
580CONFIG_NETFILTER_XT_MATCH_SCTP=m 602CONFIG_NETFILTER_XT_MATCH_SCTP=m
581CONFIG_NETFILTER_XT_MATCH_STATE=y
582CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 603CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
583CONFIG_NETFILTER_XT_MATCH_STRING=m 604CONFIG_NETFILTER_XT_MATCH_STRING=m
584CONFIG_NETFILTER_XT_MATCH_TCPMSS=m 605CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
585CONFIG_NETFILTER_XT_MATCH_TIME=m 606# CONFIG_NETFILTER_XT_MATCH_TIME is not set
586CONFIG_NETFILTER_XT_MATCH_U32=m 607# CONFIG_NETFILTER_XT_MATCH_U32 is not set
587CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m 608# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
588 609
589# 610#
590# IP: Netfilter Configuration 611# IP: Netfilter Configuration
591# 612#
592CONFIG_NF_CONNTRACK_IPV4=y
593# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
594CONFIG_IP_NF_QUEUE=m 613CONFIG_IP_NF_QUEUE=m
595CONFIG_IP_NF_IPTABLES=y 614CONFIG_IP_NF_IPTABLES=m
596CONFIG_IP_NF_MATCH_RECENT=m 615CONFIG_IP_NF_MATCH_RECENT=m
597CONFIG_IP_NF_MATCH_ECN=m 616CONFIG_IP_NF_MATCH_ECN=m
598CONFIG_IP_NF_MATCH_AH=m 617CONFIG_IP_NF_MATCH_AH=m
599CONFIG_IP_NF_MATCH_TTL=m 618CONFIG_IP_NF_MATCH_TTL=m
600CONFIG_IP_NF_MATCH_ADDRTYPE=m 619CONFIG_IP_NF_MATCH_ADDRTYPE=m
601CONFIG_IP_NF_FILTER=y 620CONFIG_IP_NF_FILTER=m
602CONFIG_IP_NF_TARGET_REJECT=y 621CONFIG_IP_NF_TARGET_REJECT=m
603CONFIG_IP_NF_TARGET_LOG=m 622CONFIG_IP_NF_TARGET_LOG=m
604CONFIG_IP_NF_TARGET_ULOG=m 623CONFIG_IP_NF_TARGET_ULOG=m
605CONFIG_NF_NAT=m
606CONFIG_NF_NAT_NEEDED=y
607CONFIG_IP_NF_TARGET_MASQUERADE=m
608CONFIG_IP_NF_TARGET_REDIRECT=m
609CONFIG_IP_NF_TARGET_NETMAP=m
610CONFIG_NF_NAT_SNMP_BASIC=m
611CONFIG_NF_NAT_PROTO_GRE=m
612CONFIG_NF_NAT_PROTO_UDPLITE=m
613CONFIG_NF_NAT_PROTO_SCTP=m
614CONFIG_NF_NAT_FTP=m
615CONFIG_NF_NAT_IRC=m
616CONFIG_NF_NAT_TFTP=m
617CONFIG_NF_NAT_AMANDA=m
618CONFIG_NF_NAT_PPTP=m
619CONFIG_NF_NAT_H323=m
620CONFIG_NF_NAT_SIP=m
621CONFIG_IP_NF_MANGLE=m 624CONFIG_IP_NF_MANGLE=m
622CONFIG_IP_NF_TARGET_ECN=m 625CONFIG_IP_NF_TARGET_ECN=m
623CONFIG_IP_NF_TARGET_TTL=m 626CONFIG_IP_NF_TARGET_TTL=m
624CONFIG_IP_NF_TARGET_CLUSTERIP=m
625CONFIG_IP_NF_RAW=m 627CONFIG_IP_NF_RAW=m
626# CONFIG_IP_NF_SECURITY is not set 628# CONFIG_IP_NF_SECURITY is not set
627CONFIG_IP_NF_ARPTABLES=m 629CONFIG_IP_NF_ARPTABLES=m
@@ -631,45 +633,162 @@ CONFIG_IP_NF_ARP_MANGLE=m
631# 633#
632# IPv6: Netfilter Configuration 634# IPv6: Netfilter Configuration
633# 635#
634CONFIG_NF_CONNTRACK_IPV6=y
635CONFIG_IP6_NF_QUEUE=m 636CONFIG_IP6_NF_QUEUE=m
636CONFIG_IP6_NF_IPTABLES=y 637CONFIG_IP6_NF_IPTABLES=m
637CONFIG_IP6_NF_MATCH_RT=m 638CONFIG_IP6_NF_MATCH_RT=m
638CONFIG_IP6_NF_MATCH_OPTS=m 639CONFIG_IP6_NF_MATCH_OPTS=m
639CONFIG_IP6_NF_MATCH_FRAG=m 640CONFIG_IP6_NF_MATCH_FRAG=m
640CONFIG_IP6_NF_MATCH_HL=m 641CONFIG_IP6_NF_MATCH_HL=m
641CONFIG_IP6_NF_MATCH_IPV6HEADER=m 642CONFIG_IP6_NF_MATCH_IPV6HEADER=m
642CONFIG_IP6_NF_MATCH_AH=m 643CONFIG_IP6_NF_MATCH_AH=m
643CONFIG_IP6_NF_MATCH_MH=m 644# CONFIG_IP6_NF_MATCH_MH is not set
644CONFIG_IP6_NF_MATCH_EUI64=m 645CONFIG_IP6_NF_MATCH_EUI64=m
645CONFIG_IP6_NF_FILTER=y 646CONFIG_IP6_NF_FILTER=m
646CONFIG_IP6_NF_TARGET_LOG=m 647CONFIG_IP6_NF_TARGET_LOG=m
647CONFIG_IP6_NF_TARGET_REJECT=y 648CONFIG_IP6_NF_TARGET_REJECT=m
648CONFIG_IP6_NF_MANGLE=m 649CONFIG_IP6_NF_MANGLE=m
649CONFIG_IP6_NF_TARGET_HL=m 650CONFIG_IP6_NF_TARGET_HL=m
650CONFIG_IP6_NF_RAW=m 651CONFIG_IP6_NF_RAW=m
651# CONFIG_IP6_NF_SECURITY is not set 652# CONFIG_IP6_NF_SECURITY is not set
652# CONFIG_IP_DCCP is not set 653
653# CONFIG_IP_SCTP is not set 654#
655# DECnet: Netfilter Configuration
656#
657CONFIG_DECNET_NF_GRABULATOR=m
658
659#
660# Bridge: Netfilter Configuration
661#
662CONFIG_BRIDGE_NF_EBTABLES=m
663CONFIG_BRIDGE_EBT_BROUTE=m
664CONFIG_BRIDGE_EBT_T_FILTER=m
665CONFIG_BRIDGE_EBT_T_NAT=m
666CONFIG_BRIDGE_EBT_802_3=m
667CONFIG_BRIDGE_EBT_AMONG=m
668CONFIG_BRIDGE_EBT_ARP=m
669CONFIG_BRIDGE_EBT_IP=m
670# CONFIG_BRIDGE_EBT_IP6 is not set
671CONFIG_BRIDGE_EBT_LIMIT=m
672CONFIG_BRIDGE_EBT_MARK=m
673CONFIG_BRIDGE_EBT_PKTTYPE=m
674CONFIG_BRIDGE_EBT_STP=m
675CONFIG_BRIDGE_EBT_VLAN=m
676CONFIG_BRIDGE_EBT_ARPREPLY=m
677CONFIG_BRIDGE_EBT_DNAT=m
678CONFIG_BRIDGE_EBT_MARK_T=m
679CONFIG_BRIDGE_EBT_REDIRECT=m
680CONFIG_BRIDGE_EBT_SNAT=m
681CONFIG_BRIDGE_EBT_LOG=m
682CONFIG_BRIDGE_EBT_ULOG=m
683# CONFIG_BRIDGE_EBT_NFLOG is not set
684CONFIG_IP_DCCP=m
685CONFIG_INET_DCCP_DIAG=m
686CONFIG_IP_DCCP_ACKVEC=y
687
688#
689# DCCP CCIDs Configuration (EXPERIMENTAL)
690#
691CONFIG_IP_DCCP_CCID2=m
692# CONFIG_IP_DCCP_CCID2_DEBUG is not set
693CONFIG_IP_DCCP_CCID3=m
694# CONFIG_IP_DCCP_CCID3_DEBUG is not set
695CONFIG_IP_DCCP_CCID3_RTO=100
696CONFIG_IP_DCCP_TFRC_LIB=m
697
698#
699# DCCP Kernel Hacking
700#
701# CONFIG_IP_DCCP_DEBUG is not set
702CONFIG_IP_SCTP=m
703# CONFIG_SCTP_DBG_MSG is not set
704# CONFIG_SCTP_DBG_OBJCNT is not set
705# CONFIG_SCTP_HMAC_NONE is not set
706# CONFIG_SCTP_HMAC_SHA1 is not set
707CONFIG_SCTP_HMAC_MD5=y
654# CONFIG_TIPC is not set 708# CONFIG_TIPC is not set
655# CONFIG_ATM is not set 709CONFIG_ATM=m
656# CONFIG_BRIDGE is not set 710CONFIG_ATM_CLIP=m
657# CONFIG_VLAN_8021Q is not set 711CONFIG_ATM_CLIP_NO_ICMP=y
658# CONFIG_DECNET is not set 712CONFIG_ATM_LANE=m
659# CONFIG_LLC2 is not set 713CONFIG_ATM_MPOA=m
660# CONFIG_IPX is not set 714CONFIG_ATM_BR2684=m
661# CONFIG_ATALK is not set 715# CONFIG_ATM_BR2684_IPFILTER is not set
662# CONFIG_X25 is not set 716CONFIG_STP=m
663# CONFIG_LAPB is not set 717CONFIG_BRIDGE=m
664# CONFIG_ECONET is not set 718CONFIG_VLAN_8021Q=m
665# CONFIG_WAN_ROUTER is not set 719# CONFIG_VLAN_8021Q_GVRP is not set
666# CONFIG_NET_SCHED is not set 720CONFIG_DECNET=m
721CONFIG_DECNET_ROUTER=y
722CONFIG_LLC=m
723CONFIG_LLC2=m
724CONFIG_IPX=m
725# CONFIG_IPX_INTERN is not set
726CONFIG_ATALK=m
727CONFIG_DEV_APPLETALK=m
728CONFIG_LTPC=m
729CONFIG_COPS=m
730CONFIG_COPS_DAYNA=y
731CONFIG_COPS_TANGENT=y
732CONFIG_IPDDP=m
733CONFIG_IPDDP_ENCAP=y
734CONFIG_IPDDP_DECAP=y
735CONFIG_X25=m
736CONFIG_LAPB=m
737CONFIG_ECONET=m
738# CONFIG_ECONET_AUNUDP is not set
739# CONFIG_ECONET_NATIVE is not set
740CONFIG_WAN_ROUTER=m
741CONFIG_NET_SCHED=y
742
743#
744# Queueing/Scheduling
745#
746CONFIG_NET_SCH_CBQ=m
747CONFIG_NET_SCH_HTB=m
748CONFIG_NET_SCH_HFSC=m
749CONFIG_NET_SCH_ATM=m
750CONFIG_NET_SCH_PRIO=m
751CONFIG_NET_SCH_RED=m
752CONFIG_NET_SCH_SFQ=m
753CONFIG_NET_SCH_TEQL=m
754CONFIG_NET_SCH_TBF=m
755CONFIG_NET_SCH_GRED=m
756CONFIG_NET_SCH_DSMARK=m
757CONFIG_NET_SCH_NETEM=m
758CONFIG_NET_SCH_INGRESS=m
759
760#
761# Classification
762#
763CONFIG_NET_CLS=y
764CONFIG_NET_CLS_BASIC=m
765CONFIG_NET_CLS_TCINDEX=m
766CONFIG_NET_CLS_ROUTE4=m
667CONFIG_NET_CLS_ROUTE=y 767CONFIG_NET_CLS_ROUTE=y
768CONFIG_NET_CLS_FW=m
769CONFIG_NET_CLS_U32=m
770CONFIG_CLS_U32_PERF=y
771CONFIG_CLS_U32_MARK=y
772CONFIG_NET_CLS_RSVP=m
773CONFIG_NET_CLS_RSVP6=m
774# CONFIG_NET_CLS_FLOW is not set
775# CONFIG_NET_EMATCH is not set
776CONFIG_NET_CLS_ACT=y
777CONFIG_NET_ACT_POLICE=m
778CONFIG_NET_ACT_GACT=m
779CONFIG_GACT_PROB=y
780CONFIG_NET_ACT_MIRRED=m
781CONFIG_NET_ACT_IPT=m
782# CONFIG_NET_ACT_NAT is not set
783CONFIG_NET_ACT_PEDIT=m
784CONFIG_NET_ACT_SIMP=m
785# CONFIG_NET_CLS_IND is not set
786CONFIG_NET_SCH_FIFO=y
668 787
669# 788#
670# Network testing 789# Network testing
671# 790#
672# CONFIG_NET_PKTGEN is not set 791CONFIG_NET_PKTGEN=m
673# CONFIG_HAMRADIO is not set 792# CONFIG_HAMRADIO is not set
674# CONFIG_CAN is not set 793# CONFIG_CAN is not set
675# CONFIG_IRDA is not set 794# CONFIG_IRDA is not set
@@ -679,9 +798,9 @@ CONFIG_BT_SCO=m
679CONFIG_BT_RFCOMM=m 798CONFIG_BT_RFCOMM=m
680CONFIG_BT_RFCOMM_TTY=y 799CONFIG_BT_RFCOMM_TTY=y
681CONFIG_BT_BNEP=m 800CONFIG_BT_BNEP=m
682# CONFIG_BT_BNEP_MC_FILTER is not set 801CONFIG_BT_BNEP_MC_FILTER=y
683# CONFIG_BT_BNEP_PROTO_FILTER is not set 802CONFIG_BT_BNEP_PROTO_FILTER=y
684# CONFIG_BT_HIDP is not set 803CONFIG_BT_HIDP=m
685 804
686# 805#
687# Bluetooth device drivers 806# Bluetooth device drivers
@@ -689,11 +808,11 @@ CONFIG_BT_BNEP=m
689CONFIG_BT_HCIUSB=m 808CONFIG_BT_HCIUSB=m
690CONFIG_BT_HCIUSB_SCO=y 809CONFIG_BT_HCIUSB_SCO=y
691# CONFIG_BT_HCIBTUSB is not set 810# CONFIG_BT_HCIBTUSB is not set
692CONFIG_BT_HCIBTSDIO=m 811# CONFIG_BT_HCIBTSDIO is not set
693CONFIG_BT_HCIUART=m 812CONFIG_BT_HCIUART=m
694CONFIG_BT_HCIUART_H4=y 813CONFIG_BT_HCIUART_H4=y
695CONFIG_BT_HCIUART_BCSP=y 814CONFIG_BT_HCIUART_BCSP=y
696CONFIG_BT_HCIUART_LL=y 815# CONFIG_BT_HCIUART_LL is not set
697CONFIG_BT_HCIBCM203X=m 816CONFIG_BT_HCIBCM203X=m
698CONFIG_BT_HCIBPA10X=m 817CONFIG_BT_HCIBPA10X=m
699CONFIG_BT_HCIBFUSB=m 818CONFIG_BT_HCIBFUSB=m
@@ -704,30 +823,16 @@ CONFIG_FIB_RULES=y
704# 823#
705# Wireless 824# Wireless
706# 825#
707CONFIG_CFG80211=m 826# CONFIG_CFG80211 is not set
708CONFIG_NL80211=y
709CONFIG_WIRELESS_EXT=y 827CONFIG_WIRELESS_EXT=y
710# CONFIG_WIRELESS_EXT_SYSFS is not set 828CONFIG_WIRELESS_EXT_SYSFS=y
711CONFIG_MAC80211=m 829# CONFIG_MAC80211 is not set
712
713#
714# Rate control algorithm selection
715#
716CONFIG_MAC80211_RC_PID=y
717CONFIG_MAC80211_RC_DEFAULT_PID=y
718CONFIG_MAC80211_RC_DEFAULT="pid"
719CONFIG_MAC80211_MESH=y
720CONFIG_MAC80211_LEDS=y
721CONFIG_MAC80211_DEBUGFS=y
722# CONFIG_MAC80211_DEBUG_MENU is not set
723CONFIG_IEEE80211=m 830CONFIG_IEEE80211=m
724# CONFIG_IEEE80211_DEBUG is not set 831# CONFIG_IEEE80211_DEBUG is not set
725CONFIG_IEEE80211_CRYPT_WEP=m 832CONFIG_IEEE80211_CRYPT_WEP=m
726CONFIG_IEEE80211_CRYPT_CCMP=m 833CONFIG_IEEE80211_CRYPT_CCMP=m
727CONFIG_IEEE80211_CRYPT_TKIP=m 834CONFIG_IEEE80211_CRYPT_TKIP=m
728CONFIG_RFKILL=m 835# CONFIG_RFKILL is not set
729CONFIG_RFKILL_INPUT=m
730CONFIG_RFKILL_LEDS=y
731# CONFIG_NET_9P is not set 836# CONFIG_NET_9P is not set
732 837
733# 838#
@@ -738,17 +843,146 @@ CONFIG_RFKILL_LEDS=y
738# Generic Driver Options 843# Generic Driver Options
739# 844#
740CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 845CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
741CONFIG_STANDALONE=y 846# CONFIG_STANDALONE is not set
742CONFIG_PREVENT_FIRMWARE_BUILD=y 847CONFIG_PREVENT_FIRMWARE_BUILD=y
743CONFIG_FW_LOADER=y 848CONFIG_FW_LOADER=y
744CONFIG_FIRMWARE_IN_KERNEL=y 849CONFIG_FIRMWARE_IN_KERNEL=y
745CONFIG_EXTRA_FIRMWARE="" 850CONFIG_EXTRA_FIRMWARE=""
746# CONFIG_DEBUG_DRIVER is not set 851# CONFIG_DEBUG_DRIVER is not set
747CONFIG_DEBUG_DEVRES=y 852# CONFIG_DEBUG_DEVRES is not set
748# CONFIG_SYS_HYPERVISOR is not set 853# CONFIG_SYS_HYPERVISOR is not set
749CONFIG_CONNECTOR=y 854CONFIG_CONNECTOR=y
750CONFIG_PROC_EVENTS=y 855CONFIG_PROC_EVENTS=y
751# CONFIG_MTD is not set 856CONFIG_MTD=m
857# CONFIG_MTD_DEBUG is not set
858CONFIG_MTD_CONCAT=m
859CONFIG_MTD_PARTITIONS=y
860CONFIG_MTD_REDBOOT_PARTS=m
861CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
862# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
863# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
864# CONFIG_MTD_AR7_PARTS is not set
865
866#
867# User Modules And Translation Layers
868#
869CONFIG_MTD_CHAR=m
870CONFIG_MTD_BLKDEVS=m
871CONFIG_MTD_BLOCK=m
872# CONFIG_MTD_BLOCK_RO is not set
873# CONFIG_FTL is not set
874# CONFIG_NFTL is not set
875# CONFIG_INFTL is not set
876CONFIG_RFD_FTL=m
877# CONFIG_SSFDC is not set
878# CONFIG_MTD_OOPS is not set
879
880#
881# RAM/ROM/Flash chip drivers
882#
883CONFIG_MTD_CFI=m
884CONFIG_MTD_JEDECPROBE=m
885CONFIG_MTD_GEN_PROBE=m
886CONFIG_MTD_CFI_ADV_OPTIONS=y
887CONFIG_MTD_CFI_NOSWAP=y
888# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
889# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
890# CONFIG_MTD_CFI_GEOMETRY is not set
891CONFIG_MTD_MAP_BANK_WIDTH_1=y
892CONFIG_MTD_MAP_BANK_WIDTH_2=y
893CONFIG_MTD_MAP_BANK_WIDTH_4=y
894# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
895# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
896# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
897CONFIG_MTD_CFI_I1=y
898CONFIG_MTD_CFI_I2=y
899# CONFIG_MTD_CFI_I4 is not set
900# CONFIG_MTD_CFI_I8 is not set
901# CONFIG_MTD_OTP is not set
902CONFIG_MTD_CFI_INTELEXT=m
903CONFIG_MTD_CFI_AMDSTD=m
904CONFIG_MTD_CFI_STAA=m
905CONFIG_MTD_CFI_UTIL=m
906# CONFIG_MTD_RAM is not set
907# CONFIG_MTD_ROM is not set
908CONFIG_MTD_ABSENT=m
909
910#
911# Mapping drivers for chip access
912#
913CONFIG_MTD_COMPLEX_MAPPINGS=y
914CONFIG_MTD_PHYSMAP=m
915CONFIG_MTD_PHYSMAP_START=0x8000000
916CONFIG_MTD_PHYSMAP_LEN=0x4000000
917CONFIG_MTD_PHYSMAP_BANKWIDTH=2
918CONFIG_MTD_SC520CDP=m
919CONFIG_MTD_NETSC520=m
920CONFIG_MTD_TS5500=m
921CONFIG_MTD_SBC_GXX=m
922CONFIG_MTD_AMD76XROM=m
923CONFIG_MTD_ICHXROM=m
924# CONFIG_MTD_ESB2ROM is not set
925# CONFIG_MTD_CK804XROM is not set
926CONFIG_MTD_SCB2_FLASH=m
927CONFIG_MTD_NETtel=m
928CONFIG_MTD_DILNETPC=m
929CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
930CONFIG_MTD_L440GX=m
931CONFIG_MTD_PCI=m
932# CONFIG_MTD_INTEL_VR_NOR is not set
933# CONFIG_MTD_PLATRAM is not set
934
935#
936# Self-contained MTD device drivers
937#
938CONFIG_MTD_PMC551=m
939CONFIG_MTD_PMC551_BUGFIX=y
940# CONFIG_MTD_PMC551_DEBUG is not set
941# CONFIG_MTD_DATAFLASH is not set
942# CONFIG_MTD_M25P80 is not set
943CONFIG_MTD_SLRAM=m
944CONFIG_MTD_PHRAM=m
945CONFIG_MTD_MTDRAM=m
946CONFIG_MTDRAM_TOTAL_SIZE=4096
947CONFIG_MTDRAM_ERASE_SIZE=128
948CONFIG_MTD_BLOCK2MTD=m
949
950#
951# Disk-On-Chip Device Drivers
952#
953CONFIG_MTD_DOC2000=m
954CONFIG_MTD_DOC2001=m
955CONFIG_MTD_DOC2001PLUS=m
956CONFIG_MTD_DOCPROBE=m
957CONFIG_MTD_DOCECC=m
958CONFIG_MTD_DOCPROBE_ADVANCED=y
959CONFIG_MTD_DOCPROBE_ADDRESS=0x0000
960CONFIG_MTD_DOCPROBE_HIGH=y
961CONFIG_MTD_DOCPROBE_55AA=y
962CONFIG_MTD_NAND=m
963# CONFIG_MTD_NAND_VERIFY_WRITE is not set
964CONFIG_MTD_NAND_ECC_SMC=y
965# CONFIG_MTD_NAND_MUSEUM_IDS is not set
966CONFIG_MTD_NAND_IDS=m
967CONFIG_MTD_NAND_DISKONCHIP=m
968# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
969CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
970CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
971# CONFIG_MTD_NAND_CAFE is not set
972CONFIG_MTD_NAND_CS553X=m
973CONFIG_MTD_NAND_NANDSIM=m
974# CONFIG_MTD_NAND_PLATFORM is not set
975# CONFIG_MTD_ALAUDA is not set
976CONFIG_MTD_ONENAND=m
977# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
978CONFIG_MTD_ONENAND_OTP=y
979# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
980# CONFIG_MTD_ONENAND_SIM is not set
981
982#
983# UBI - Unsorted block images
984#
985# CONFIG_MTD_UBI is not set
752# CONFIG_PARPORT is not set 986# CONFIG_PARPORT is not set
753CONFIG_PNP=y 987CONFIG_PNP=y
754# CONFIG_PNP_DEBUG is not set 988# CONFIG_PNP_DEBUG is not set
@@ -756,42 +990,49 @@ CONFIG_PNP=y
756# 990#
757# Protocols 991# Protocols
758# 992#
993# CONFIG_ISAPNP is not set
994CONFIG_PNPBIOS=y
995CONFIG_PNPBIOS_PROC_FS=y
759CONFIG_PNPACPI=y 996CONFIG_PNPACPI=y
760CONFIG_BLK_DEV=y 997CONFIG_BLK_DEV=y
761# CONFIG_BLK_DEV_FD is not set 998# CONFIG_BLK_DEV_FD is not set
762# CONFIG_BLK_CPQ_DA is not set 999CONFIG_BLK_DEV_XD=m
763# CONFIG_BLK_CPQ_CISS_DA is not set 1000CONFIG_BLK_CPQ_DA=m
764# CONFIG_BLK_DEV_DAC960 is not set 1001CONFIG_BLK_CPQ_CISS_DA=m
765# CONFIG_BLK_DEV_UMEM is not set 1002CONFIG_CISS_SCSI_TAPE=y
1003CONFIG_BLK_DEV_DAC960=m
1004CONFIG_BLK_DEV_UMEM=m
766# CONFIG_BLK_DEV_COW_COMMON is not set 1005# CONFIG_BLK_DEV_COW_COMMON is not set
767CONFIG_BLK_DEV_LOOP=y 1006CONFIG_BLK_DEV_LOOP=y
768CONFIG_BLK_DEV_CRYPTOLOOP=m 1007CONFIG_BLK_DEV_CRYPTOLOOP=m
769# CONFIG_BLK_DEV_NBD is not set 1008CONFIG_BLK_DEV_NBD=m
770# CONFIG_BLK_DEV_SX8 is not set 1009CONFIG_BLK_DEV_SX8=m
771# CONFIG_BLK_DEV_UB is not set 1010# CONFIG_BLK_DEV_UB is not set
772# CONFIG_BLK_DEV_RAM is not set 1011CONFIG_BLK_DEV_RAM=y
1012CONFIG_BLK_DEV_RAM_COUNT=16
1013CONFIG_BLK_DEV_RAM_SIZE=64000
1014# CONFIG_BLK_DEV_XIP is not set
773CONFIG_CDROM_PKTCDVD=m 1015CONFIG_CDROM_PKTCDVD=m
774CONFIG_CDROM_PKTCDVD_BUFFERS=8 1016CONFIG_CDROM_PKTCDVD_BUFFERS=8
775# CONFIG_CDROM_PKTCDVD_WCACHE is not set 1017CONFIG_CDROM_PKTCDVD_WCACHE=y
776# CONFIG_ATA_OVER_ETH is not set 1018CONFIG_ATA_OVER_ETH=m
777# CONFIG_BLK_DEV_HD is not set 1019# CONFIG_BLK_DEV_HD is not set
778CONFIG_MISC_DEVICES=y 1020CONFIG_MISC_DEVICES=y
779# CONFIG_IBM_ASM is not set 1021# CONFIG_IBM_ASM is not set
780# CONFIG_PHANTOM is not set 1022# CONFIG_PHANTOM is not set
781CONFIG_EEPROM_93CX6=m 1023# CONFIG_EEPROM_93CX6 is not set
782# CONFIG_SGI_IOC4 is not set 1024# CONFIG_SGI_IOC4 is not set
783CONFIG_TIFM_CORE=m 1025# CONFIG_TIFM_CORE is not set
784CONFIG_TIFM_7XX1=m
785# CONFIG_ACER_WMI is not set 1026# CONFIG_ACER_WMI is not set
1027# CONFIG_ASUS_LAPTOP is not set
786# CONFIG_FUJITSU_LAPTOP is not set 1028# CONFIG_FUJITSU_LAPTOP is not set
787# CONFIG_TC1100_WMI is not set 1029# CONFIG_TC1100_WMI is not set
788# CONFIG_HP_WMI is not set
789# CONFIG_MSI_LAPTOP is not set 1030# CONFIG_MSI_LAPTOP is not set
790# CONFIG_COMPAL_LAPTOP is not set 1031# CONFIG_COMPAL_LAPTOP is not set
791# CONFIG_SONY_LAPTOP is not set 1032# CONFIG_SONY_LAPTOP is not set
792# CONFIG_THINKPAD_ACPI is not set 1033# CONFIG_THINKPAD_ACPI is not set
793CONFIG_INTEL_MENLOW=y 1034# CONFIG_INTEL_MENLOW is not set
794CONFIG_EEEPC_LAPTOP=y 1035# CONFIG_EEEPC_LAPTOP is not set
795# CONFIG_ENCLOSURE_SERVICES is not set 1036# CONFIG_ENCLOSURE_SERVICES is not set
796# CONFIG_HP_ILO is not set 1037# CONFIG_HP_ILO is not set
797CONFIG_HAVE_IDE=y 1038CONFIG_HAVE_IDE=y
@@ -804,7 +1045,7 @@ CONFIG_RAID_ATTRS=m
804CONFIG_SCSI=y 1045CONFIG_SCSI=y
805CONFIG_SCSI_DMA=y 1046CONFIG_SCSI_DMA=y
806# CONFIG_SCSI_TGT is not set 1047# CONFIG_SCSI_TGT is not set
807# CONFIG_SCSI_NETLINK is not set 1048CONFIG_SCSI_NETLINK=y
808CONFIG_SCSI_PROC_FS=y 1049CONFIG_SCSI_PROC_FS=y
809 1050
810# 1051#
@@ -812,10 +1053,10 @@ CONFIG_SCSI_PROC_FS=y
812# 1053#
813CONFIG_BLK_DEV_SD=y 1054CONFIG_BLK_DEV_SD=y
814CONFIG_CHR_DEV_ST=m 1055CONFIG_CHR_DEV_ST=m
815# CONFIG_CHR_DEV_OSST is not set 1056CONFIG_CHR_DEV_OSST=m
816CONFIG_BLK_DEV_SR=y 1057CONFIG_BLK_DEV_SR=y
817CONFIG_BLK_DEV_SR_VENDOR=y 1058# CONFIG_BLK_DEV_SR_VENDOR is not set
818# CONFIG_CHR_DEV_SG is not set 1059CONFIG_CHR_DEV_SG=y
819CONFIG_CHR_DEV_SCH=m 1060CONFIG_CHR_DEV_SCH=m
820 1061
821# 1062#
@@ -824,23 +1065,25 @@ CONFIG_CHR_DEV_SCH=m
824CONFIG_SCSI_MULTI_LUN=y 1065CONFIG_SCSI_MULTI_LUN=y
825CONFIG_SCSI_CONSTANTS=y 1066CONFIG_SCSI_CONSTANTS=y
826CONFIG_SCSI_LOGGING=y 1067CONFIG_SCSI_LOGGING=y
827CONFIG_SCSI_SCAN_ASYNC=y 1068# CONFIG_SCSI_SCAN_ASYNC is not set
828CONFIG_SCSI_WAIT_SCAN=m 1069CONFIG_SCSI_WAIT_SCAN=m
829 1070
830# 1071#
831# SCSI Transports 1072# SCSI Transports
832# 1073#
833# CONFIG_SCSI_SPI_ATTRS is not set 1074CONFIG_SCSI_SPI_ATTRS=m
834# CONFIG_SCSI_FC_ATTRS is not set 1075CONFIG_SCSI_FC_ATTRS=m
835# CONFIG_SCSI_ISCSI_ATTRS is not set 1076CONFIG_SCSI_ISCSI_ATTRS=m
836# CONFIG_SCSI_SAS_ATTRS is not set
837# CONFIG_SCSI_SAS_LIBSAS is not set 1077# CONFIG_SCSI_SAS_LIBSAS is not set
838# CONFIG_SCSI_SRP_ATTRS is not set 1078# CONFIG_SCSI_SRP_ATTRS is not set
839CONFIG_SCSI_LOWLEVEL=y 1079CONFIG_SCSI_LOWLEVEL=y
840# CONFIG_ISCSI_TCP is not set 1080# CONFIG_ISCSI_TCP is not set
841# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 1081# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
842# CONFIG_SCSI_3W_9XXX is not set 1082# CONFIG_SCSI_3W_9XXX is not set
1083# CONFIG_SCSI_7000FASST is not set
843# CONFIG_SCSI_ACARD is not set 1084# CONFIG_SCSI_ACARD is not set
1085# CONFIG_SCSI_AHA152X is not set
1086# CONFIG_SCSI_AHA1542 is not set
844# CONFIG_SCSI_AACRAID is not set 1087# CONFIG_SCSI_AACRAID is not set
845# CONFIG_SCSI_AIC7XXX is not set 1088# CONFIG_SCSI_AIC7XXX is not set
846# CONFIG_SCSI_AIC7XXX_OLD is not set 1089# CONFIG_SCSI_AIC7XXX_OLD is not set
@@ -848,6 +1091,7 @@ CONFIG_SCSI_LOWLEVEL=y
848# CONFIG_SCSI_AIC94XX is not set 1091# CONFIG_SCSI_AIC94XX is not set
849# CONFIG_SCSI_DPT_I2O is not set 1092# CONFIG_SCSI_DPT_I2O is not set
850# CONFIG_SCSI_ADVANSYS is not set 1093# CONFIG_SCSI_ADVANSYS is not set
1094# CONFIG_SCSI_IN2000 is not set
851# CONFIG_SCSI_ARCMSR is not set 1095# CONFIG_SCSI_ARCMSR is not set
852# CONFIG_MEGARAID_NEWGEN is not set 1096# CONFIG_MEGARAID_NEWGEN is not set
853# CONFIG_MEGARAID_LEGACY is not set 1097# CONFIG_MEGARAID_LEGACY is not set
@@ -855,22 +1099,32 @@ CONFIG_SCSI_LOWLEVEL=y
855# CONFIG_SCSI_HPTIOP is not set 1099# CONFIG_SCSI_HPTIOP is not set
856# CONFIG_SCSI_BUSLOGIC is not set 1100# CONFIG_SCSI_BUSLOGIC is not set
857# CONFIG_SCSI_DMX3191D is not set 1101# CONFIG_SCSI_DMX3191D is not set
1102# CONFIG_SCSI_DTC3280 is not set
858# CONFIG_SCSI_EATA is not set 1103# CONFIG_SCSI_EATA is not set
859# CONFIG_SCSI_FUTURE_DOMAIN is not set 1104# CONFIG_SCSI_FUTURE_DOMAIN is not set
860# CONFIG_SCSI_GDTH is not set 1105CONFIG_SCSI_GDTH=m
1106# CONFIG_SCSI_GENERIC_NCR5380 is not set
1107# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
861# CONFIG_SCSI_IPS is not set 1108# CONFIG_SCSI_IPS is not set
862# CONFIG_SCSI_INITIO is not set 1109# CONFIG_SCSI_INITIO is not set
863# CONFIG_SCSI_INIA100 is not set 1110# CONFIG_SCSI_INIA100 is not set
864# CONFIG_SCSI_MVSAS is not set 1111# CONFIG_SCSI_MVSAS is not set
1112# CONFIG_SCSI_NCR53C406A is not set
865# CONFIG_SCSI_STEX is not set 1113# CONFIG_SCSI_STEX is not set
866# CONFIG_SCSI_SYM53C8XX_2 is not set 1114# CONFIG_SCSI_SYM53C8XX_2 is not set
867# CONFIG_SCSI_IPR is not set 1115# CONFIG_SCSI_IPR is not set
1116# CONFIG_SCSI_PAS16 is not set
1117# CONFIG_SCSI_QLOGIC_FAS is not set
868# CONFIG_SCSI_QLOGIC_1280 is not set 1118# CONFIG_SCSI_QLOGIC_1280 is not set
869# CONFIG_SCSI_QLA_FC is not set 1119# CONFIG_SCSI_QLA_FC is not set
870# CONFIG_SCSI_QLA_ISCSI is not set 1120# CONFIG_SCSI_QLA_ISCSI is not set
871# CONFIG_SCSI_LPFC is not set 1121# CONFIG_SCSI_LPFC is not set
1122# CONFIG_SCSI_SYM53C416 is not set
872# CONFIG_SCSI_DC395x is not set 1123# CONFIG_SCSI_DC395x is not set
873# CONFIG_SCSI_DC390T is not set 1124# CONFIG_SCSI_DC390T is not set
1125# CONFIG_SCSI_T128 is not set
1126# CONFIG_SCSI_U14_34F is not set
1127# CONFIG_SCSI_ULTRASTOR is not set
874# CONFIG_SCSI_NSP32 is not set 1128# CONFIG_SCSI_NSP32 is not set
875# CONFIG_SCSI_DEBUG is not set 1129# CONFIG_SCSI_DEBUG is not set
876# CONFIG_SCSI_SRP is not set 1130# CONFIG_SCSI_SRP is not set
@@ -878,8 +1132,8 @@ CONFIG_SCSI_LOWLEVEL=y
878CONFIG_ATA=y 1132CONFIG_ATA=y
879# CONFIG_ATA_NONSTANDARD is not set 1133# CONFIG_ATA_NONSTANDARD is not set
880CONFIG_ATA_ACPI=y 1134CONFIG_ATA_ACPI=y
881# CONFIG_SATA_PMP is not set 1135CONFIG_SATA_PMP=y
882CONFIG_SATA_AHCI=y 1136# CONFIG_SATA_AHCI is not set
883# CONFIG_SATA_SIL24 is not set 1137# CONFIG_SATA_SIL24 is not set
884CONFIG_ATA_SFF=y 1138CONFIG_ATA_SFF=y
885# CONFIG_SATA_SVW is not set 1139# CONFIG_SATA_SVW is not set
@@ -909,7 +1163,7 @@ CONFIG_ATA_PIIX=y
909# CONFIG_PATA_CS5536 is not set 1163# CONFIG_PATA_CS5536 is not set
910# CONFIG_PATA_CYPRESS is not set 1164# CONFIG_PATA_CYPRESS is not set
911# CONFIG_PATA_EFAR is not set 1165# CONFIG_PATA_EFAR is not set
912# CONFIG_ATA_GENERIC is not set 1166CONFIG_ATA_GENERIC=y
913# CONFIG_PATA_HPT366 is not set 1167# CONFIG_PATA_HPT366 is not set
914# CONFIG_PATA_HPT37X is not set 1168# CONFIG_PATA_HPT37X is not set
915# CONFIG_PATA_HPT3X2N is not set 1169# CONFIG_PATA_HPT3X2N is not set
@@ -917,9 +1171,10 @@ CONFIG_ATA_PIIX=y
917# CONFIG_PATA_IT821X is not set 1171# CONFIG_PATA_IT821X is not set
918# CONFIG_PATA_IT8213 is not set 1172# CONFIG_PATA_IT8213 is not set
919# CONFIG_PATA_JMICRON is not set 1173# CONFIG_PATA_JMICRON is not set
1174# CONFIG_PATA_LEGACY is not set
920# CONFIG_PATA_TRIFLEX is not set 1175# CONFIG_PATA_TRIFLEX is not set
921# CONFIG_PATA_MARVELL is not set 1176# CONFIG_PATA_MARVELL is not set
922# CONFIG_PATA_MPIIX is not set 1177CONFIG_PATA_MPIIX=y
923# CONFIG_PATA_OLDPIIX is not set 1178# CONFIG_PATA_OLDPIIX is not set
924# CONFIG_PATA_NETCELL is not set 1179# CONFIG_PATA_NETCELL is not set
925# CONFIG_PATA_NINJA32 is not set 1180# CONFIG_PATA_NINJA32 is not set
@@ -928,6 +1183,7 @@ CONFIG_ATA_PIIX=y
928# CONFIG_PATA_OPTI is not set 1183# CONFIG_PATA_OPTI is not set
929# CONFIG_PATA_OPTIDMA is not set 1184# CONFIG_PATA_OPTIDMA is not set
930# CONFIG_PATA_PDC_OLD is not set 1185# CONFIG_PATA_PDC_OLD is not set
1186# CONFIG_PATA_QDI is not set
931# CONFIG_PATA_RADISYS is not set 1187# CONFIG_PATA_RADISYS is not set
932# CONFIG_PATA_RZ1000 is not set 1188# CONFIG_PATA_RZ1000 is not set
933# CONFIG_PATA_SC1200 is not set 1189# CONFIG_PATA_SC1200 is not set
@@ -937,7 +1193,8 @@ CONFIG_ATA_PIIX=y
937# CONFIG_PATA_SIS is not set 1193# CONFIG_PATA_SIS is not set
938# CONFIG_PATA_VIA is not set 1194# CONFIG_PATA_VIA is not set
939# CONFIG_PATA_WINBOND is not set 1195# CONFIG_PATA_WINBOND is not set
940CONFIG_PATA_SCH=y 1196# CONFIG_PATA_WINBOND_VLB is not set
1197# CONFIG_PATA_SCH is not set
941# CONFIG_MD is not set 1198# CONFIG_MD is not set
942# CONFIG_FUSION is not set 1199# CONFIG_FUSION is not set
943 1200
@@ -949,15 +1206,34 @@ CONFIG_PATA_SCH=y
949# Enable only one of the two stacks, unless you know what you are doing 1206# Enable only one of the two stacks, unless you know what you are doing
950# 1207#
951# CONFIG_FIREWIRE is not set 1208# CONFIG_FIREWIRE is not set
952# CONFIG_IEEE1394 is not set 1209CONFIG_IEEE1394=m
953# CONFIG_I2O is not set 1210CONFIG_IEEE1394_OHCI1394=m
1211# CONFIG_IEEE1394_PCILYNX is not set
1212CONFIG_IEEE1394_SBP2=m
1213# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
1214CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
1215CONFIG_IEEE1394_ETH1394=m
1216CONFIG_IEEE1394_RAWIO=m
1217CONFIG_IEEE1394_VIDEO1394=m
1218CONFIG_IEEE1394_DV1394=m
1219# CONFIG_IEEE1394_VERBOSEDEBUG is not set
1220CONFIG_I2O=m
1221CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
1222CONFIG_I2O_EXT_ADAPTEC=y
1223CONFIG_I2O_CONFIG=m
1224CONFIG_I2O_CONFIG_OLD_IOCTL=y
1225CONFIG_I2O_BUS=m
1226CONFIG_I2O_BLOCK=m
1227CONFIG_I2O_SCSI=m
1228CONFIG_I2O_PROC=m
954# CONFIG_MACINTOSH_DRIVERS is not set 1229# CONFIG_MACINTOSH_DRIVERS is not set
955CONFIG_NETDEVICES=y 1230CONFIG_NETDEVICES=y
956# CONFIG_DUMMY is not set 1231CONFIG_IFB=m
957# CONFIG_BONDING is not set 1232CONFIG_DUMMY=m
958CONFIG_MACVLAN=m 1233CONFIG_BONDING=m
959# CONFIG_EQUALIZER is not set 1234# CONFIG_MACVLAN is not set
960# CONFIG_TUN is not set 1235CONFIG_EQUALIZER=m
1236CONFIG_TUN=m
961# CONFIG_VETH is not set 1237# CONFIG_VETH is not set
962# CONFIG_NET_SB1000 is not set 1238# CONFIG_NET_SB1000 is not set
963# CONFIG_ARCNET is not set 1239# CONFIG_ARCNET is not set
@@ -973,30 +1249,82 @@ CONFIG_LXT_PHY=m
973CONFIG_CICADA_PHY=m 1249CONFIG_CICADA_PHY=m
974CONFIG_VITESSE_PHY=m 1250CONFIG_VITESSE_PHY=m
975CONFIG_SMSC_PHY=m 1251CONFIG_SMSC_PHY=m
976CONFIG_BROADCOM_PHY=m 1252# CONFIG_BROADCOM_PHY is not set
977CONFIG_ICPLUS_PHY=m 1253# CONFIG_ICPLUS_PHY is not set
978CONFIG_REALTEK_PHY=m 1254# CONFIG_REALTEK_PHY is not set
979CONFIG_MDIO_BITBANG=m 1255# CONFIG_MDIO_BITBANG is not set
980CONFIG_NET_ETHERNET=y 1256CONFIG_NET_ETHERNET=y
981CONFIG_MII=m 1257CONFIG_MII=y
982CONFIG_HAPPYMEAL=m 1258# CONFIG_HAPPYMEAL is not set
983CONFIG_SUNGEM=m 1259# CONFIG_SUNGEM is not set
984CONFIG_CASSINI=m 1260# CONFIG_CASSINI is not set
985CONFIG_NET_VENDOR_3COM=y 1261CONFIG_NET_VENDOR_3COM=y
986# CONFIG_VORTEX is not set 1262CONFIG_EL1=m
987# CONFIG_TYPHOON is not set 1263CONFIG_EL2=m
988# CONFIG_NET_TULIP is not set 1264CONFIG_ELPLUS=m
1265CONFIG_EL16=m
1266CONFIG_EL3=m
1267CONFIG_3C515=m
1268CONFIG_VORTEX=m
1269CONFIG_TYPHOON=m
1270# CONFIG_LANCE is not set
1271CONFIG_NET_VENDOR_SMC=y
1272CONFIG_WD80x3=m
1273CONFIG_ULTRA=m
1274CONFIG_SMC9194=m
1275# CONFIG_ENC28J60 is not set
1276# CONFIG_NET_VENDOR_RACAL is not set
1277CONFIG_NET_TULIP=y
1278CONFIG_DE2104X=m
1279CONFIG_TULIP=m
1280# CONFIG_TULIP_MWI is not set
1281# CONFIG_TULIP_MMIO is not set
1282CONFIG_TULIP_NAPI=y
1283CONFIG_TULIP_NAPI_HW_MITIGATION=y
1284CONFIG_DE4X5=m
1285CONFIG_WINBOND_840=m
1286CONFIG_DM9102=m
1287CONFIG_ULI526X=m
1288# CONFIG_AT1700 is not set
1289# CONFIG_DEPCA is not set
989# CONFIG_HP100 is not set 1290# CONFIG_HP100 is not set
1291# CONFIG_NET_ISA is not set
990# CONFIG_IBM_NEW_EMAC_ZMII is not set 1292# CONFIG_IBM_NEW_EMAC_ZMII is not set
991# CONFIG_IBM_NEW_EMAC_RGMII is not set 1293# CONFIG_IBM_NEW_EMAC_RGMII is not set
992# CONFIG_IBM_NEW_EMAC_TAH is not set 1294# CONFIG_IBM_NEW_EMAC_TAH is not set
993# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 1295# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
994# CONFIG_NET_PCI is not set 1296CONFIG_NET_PCI=y
1297# CONFIG_PCNET32 is not set
1298# CONFIG_AMD8111_ETH is not set
1299# CONFIG_ADAPTEC_STARFIRE is not set
1300# CONFIG_AC3200 is not set
1301# CONFIG_APRICOT is not set
995# CONFIG_B44 is not set 1302# CONFIG_B44 is not set
1303# CONFIG_FORCEDETH is not set
1304# CONFIG_CS89x0 is not set
1305# CONFIG_EEPRO100 is not set
1306CONFIG_E100=m
1307# CONFIG_FEALNX is not set
1308# CONFIG_NATSEMI is not set
1309CONFIG_NE2K_PCI=m
1310CONFIG_8139CP=m
1311CONFIG_8139TOO=m
1312# CONFIG_8139TOO_PIO is not set
1313# CONFIG_8139TOO_TUNE_TWISTER is not set
1314CONFIG_8139TOO_8129=y
1315# CONFIG_8139_OLD_RX_RESET is not set
1316# CONFIG_R6040 is not set
1317# CONFIG_SIS900 is not set
1318CONFIG_EPIC100=m
1319# CONFIG_SUNDANCE is not set
1320# CONFIG_TLAN is not set
1321# CONFIG_VIA_RHINE is not set
1322# CONFIG_SC92031 is not set
996CONFIG_NETDEV_1000=y 1323CONFIG_NETDEV_1000=y
997# CONFIG_ACENIC is not set 1324# CONFIG_ACENIC is not set
998# CONFIG_DL2K is not set 1325# CONFIG_DL2K is not set
999# CONFIG_E1000 is not set 1326CONFIG_E1000=m
1327CONFIG_E1000_DISABLE_PACKET_SPLIT=y
1000# CONFIG_E1000E is not set 1328# CONFIG_E1000E is not set
1001# CONFIG_IP1000 is not set 1329# CONFIG_IP1000 is not set
1002# CONFIG_IGB is not set 1330# CONFIG_IGB is not set
@@ -1005,77 +1333,52 @@ CONFIG_NETDEV_1000=y
1005# CONFIG_YELLOWFIN is not set 1333# CONFIG_YELLOWFIN is not set
1006# CONFIG_R8169 is not set 1334# CONFIG_R8169 is not set
1007# CONFIG_SIS190 is not set 1335# CONFIG_SIS190 is not set
1008# CONFIG_SKGE is not set 1336CONFIG_SKGE=y
1009# CONFIG_SKY2 is not set 1337# CONFIG_SKGE_DEBUG is not set
1338CONFIG_SKY2=y
1339# CONFIG_SKY2_DEBUG is not set
1010# CONFIG_VIA_VELOCITY is not set 1340# CONFIG_VIA_VELOCITY is not set
1011# CONFIG_TIGON3 is not set 1341# CONFIG_TIGON3 is not set
1012# CONFIG_BNX2 is not set 1342# CONFIG_BNX2 is not set
1013# CONFIG_QLA3XXX is not set 1343# CONFIG_QLA3XXX is not set
1014CONFIG_ATL1=m 1344# CONFIG_ATL1 is not set
1015CONFIG_ATL1E=m 1345# CONFIG_ATL1E is not set
1016# CONFIG_NETDEV_10000 is not set 1346CONFIG_NETDEV_10000=y
1347# CONFIG_CHELSIO_T1 is not set
1348# CONFIG_CHELSIO_T3 is not set
1349# CONFIG_IXGBE is not set
1350CONFIG_IXGB=m
1351# CONFIG_S2IO is not set
1352# CONFIG_MYRI10GE is not set
1353# CONFIG_NETXEN_NIC is not set
1354# CONFIG_NIU is not set
1355# CONFIG_MLX4_CORE is not set
1356# CONFIG_TEHUTI is not set
1357# CONFIG_BNX2X is not set
1358# CONFIG_SFC is not set
1017# CONFIG_TR is not set 1359# CONFIG_TR is not set
1018 1360
1019# 1361#
1020# Wireless LAN 1362# Wireless LAN
1021# 1363#
1022CONFIG_WLAN_PRE80211=y 1364# CONFIG_WLAN_PRE80211 is not set
1023# CONFIG_STRIP is not set
1024CONFIG_WLAN_80211=y 1365CONFIG_WLAN_80211=y
1025# CONFIG_IPW2100 is not set 1366CONFIG_IPW2100=m
1026# CONFIG_IPW2200 is not set 1367# CONFIG_IPW2100_MONITOR is not set
1368# CONFIG_IPW2100_DEBUG is not set
1369CONFIG_IPW2200=m
1370# CONFIG_IPW2200_MONITOR is not set
1371# CONFIG_IPW2200_QOS is not set
1372# CONFIG_IPW2200_DEBUG is not set
1027# CONFIG_LIBERTAS is not set 1373# CONFIG_LIBERTAS is not set
1028# CONFIG_AIRO is not set 1374# CONFIG_AIRO is not set
1029# CONFIG_HERMES is not set 1375# CONFIG_HERMES is not set
1030# CONFIG_ATMEL is not set 1376# CONFIG_ATMEL is not set
1031# CONFIG_PRISM54 is not set 1377# CONFIG_PRISM54 is not set
1032CONFIG_USB_ZD1201=m 1378# CONFIG_USB_ZD1201 is not set
1033CONFIG_USB_NET_RNDIS_WLAN=m 1379# CONFIG_USB_NET_RNDIS_WLAN is not set
1034CONFIG_RTL8180=m
1035CONFIG_RTL8187=m
1036# CONFIG_ADM8211 is not set
1037# CONFIG_MAC80211_HWSIM is not set
1038# CONFIG_P54_COMMON is not set
1039CONFIG_ATH5K=m
1040# CONFIG_ATH5K_DEBUG is not set
1041# CONFIG_ATH9K is not set
1042CONFIG_IWLWIFI=m
1043CONFIG_IWLCORE=m
1044# CONFIG_IWLWIFI_LEDS is not set 1380# CONFIG_IWLWIFI_LEDS is not set
1045CONFIG_IWLWIFI_RFKILL=y
1046# CONFIG_IWLWIFI_DEBUG is not set
1047# CONFIG_IWLAGN is not set
1048CONFIG_IWL3945=m
1049CONFIG_IWL3945_RFKILL=y
1050# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set
1051# CONFIG_IWL3945_LEDS is not set
1052# CONFIG_IWL3945_DEBUG is not set
1053# CONFIG_HOSTAP is not set 1381# CONFIG_HOSTAP is not set
1054# CONFIG_B43 is not set
1055# CONFIG_B43LEGACY is not set
1056# CONFIG_ZD1211RW is not set
1057CONFIG_RT2X00=m
1058CONFIG_RT2X00_LIB=m
1059CONFIG_RT2X00_LIB_PCI=m
1060CONFIG_RT2X00_LIB_USB=m
1061CONFIG_RT2X00_LIB_FIRMWARE=y
1062CONFIG_RT2X00_LIB_RFKILL=y
1063CONFIG_RT2X00_LIB_LEDS=y
1064CONFIG_RT2400PCI=m
1065CONFIG_RT2400PCI_RFKILL=y
1066CONFIG_RT2400PCI_LEDS=y
1067CONFIG_RT2500PCI=m
1068CONFIG_RT2500PCI_RFKILL=y
1069CONFIG_RT2500PCI_LEDS=y
1070CONFIG_RT61PCI=m
1071CONFIG_RT61PCI_RFKILL=y
1072CONFIG_RT61PCI_LEDS=y
1073CONFIG_RT2500USB=m
1074CONFIG_RT2500USB_LEDS=y
1075CONFIG_RT73USB=m
1076CONFIG_RT73USB_LEDS=y
1077CONFIG_RT2X00_LIB_DEBUGFS=y
1078# CONFIG_RT2X00_DEBUG is not set
1079 1382
1080# 1383#
1081# USB Network Adapters 1384# USB Network Adapters
@@ -1084,14 +1387,14 @@ CONFIG_USB_CATC=m
1084CONFIG_USB_KAWETH=m 1387CONFIG_USB_KAWETH=m
1085CONFIG_USB_PEGASUS=m 1388CONFIG_USB_PEGASUS=m
1086CONFIG_USB_RTL8150=m 1389CONFIG_USB_RTL8150=m
1087CONFIG_USB_USBNET=m 1390CONFIG_USB_USBNET=y
1088CONFIG_USB_NET_AX8817X=m 1391CONFIG_USB_NET_AX8817X=y
1089CONFIG_USB_NET_CDCETHER=m 1392CONFIG_USB_NET_CDCETHER=m
1090CONFIG_USB_NET_DM9601=m 1393# CONFIG_USB_NET_DM9601 is not set
1091CONFIG_USB_NET_GL620A=m 1394CONFIG_USB_NET_GL620A=m
1092CONFIG_USB_NET_NET1080=m 1395CONFIG_USB_NET_NET1080=m
1093CONFIG_USB_NET_PLUSB=m 1396CONFIG_USB_NET_PLUSB=m
1094CONFIG_USB_NET_MCS7830=m 1397# CONFIG_USB_NET_MCS7830 is not set
1095CONFIG_USB_NET_RNDIS_HOST=m 1398CONFIG_USB_NET_RNDIS_HOST=m
1096CONFIG_USB_NET_CDC_SUBSET=m 1399CONFIG_USB_NET_CDC_SUBSET=m
1097CONFIG_USB_ALI_M5632=y 1400CONFIG_USB_ALI_M5632=y
@@ -1099,10 +1402,23 @@ CONFIG_USB_AN2720=y
1099CONFIG_USB_BELKIN=y 1402CONFIG_USB_BELKIN=y
1100CONFIG_USB_ARMLINUX=y 1403CONFIG_USB_ARMLINUX=y
1101CONFIG_USB_EPSON2888=y 1404CONFIG_USB_EPSON2888=y
1102CONFIG_USB_KC2190=y 1405# CONFIG_USB_KC2190 is not set
1103CONFIG_USB_NET_ZAURUS=m 1406CONFIG_USB_NET_ZAURUS=m
1104# CONFIG_USB_HSO is not set
1105# CONFIG_WAN is not set 1407# CONFIG_WAN is not set
1408CONFIG_ATM_DRIVERS=y
1409# CONFIG_ATM_DUMMY is not set
1410# CONFIG_ATM_TCP is not set
1411# CONFIG_ATM_LANAI is not set
1412# CONFIG_ATM_ENI is not set
1413# CONFIG_ATM_FIRESTREAM is not set
1414# CONFIG_ATM_ZATM is not set
1415# CONFIG_ATM_NICSTAR is not set
1416# CONFIG_ATM_IDT77252 is not set
1417# CONFIG_ATM_AMBASSADOR is not set
1418# CONFIG_ATM_HORIZON is not set
1419# CONFIG_ATM_IA is not set
1420# CONFIG_ATM_FORE200E is not set
1421# CONFIG_ATM_HE is not set
1106# CONFIG_FDDI is not set 1422# CONFIG_FDDI is not set
1107# CONFIG_HIPPI is not set 1423# CONFIG_HIPPI is not set
1108CONFIG_PPP=m 1424CONFIG_PPP=m
@@ -1111,20 +1427,25 @@ CONFIG_PPP_FILTER=y
1111CONFIG_PPP_ASYNC=m 1427CONFIG_PPP_ASYNC=m
1112CONFIG_PPP_SYNC_TTY=m 1428CONFIG_PPP_SYNC_TTY=m
1113CONFIG_PPP_DEFLATE=m 1429CONFIG_PPP_DEFLATE=m
1114# CONFIG_PPP_BSDCOMP is not set 1430CONFIG_PPP_BSDCOMP=m
1115CONFIG_PPP_MPPE=m 1431CONFIG_PPP_MPPE=m
1116CONFIG_PPPOE=m 1432CONFIG_PPPOE=m
1117CONFIG_PPPOL2TP=m 1433CONFIG_PPPOATM=m
1118# CONFIG_SLIP is not set 1434# CONFIG_PPPOL2TP is not set
1435CONFIG_SLIP=m
1436CONFIG_SLIP_COMPRESSED=y
1119CONFIG_SLHC=m 1437CONFIG_SLHC=m
1438CONFIG_SLIP_SMART=y
1439CONFIG_SLIP_MODE_SLIP6=y
1120CONFIG_NET_FC=y 1440CONFIG_NET_FC=y
1121CONFIG_NETCONSOLE=m 1441CONFIG_NETCONSOLE=m
1122CONFIG_NETCONSOLE_DYNAMIC=y 1442# CONFIG_NETCONSOLE_DYNAMIC is not set
1123CONFIG_NETPOLL=y 1443CONFIG_NETPOLL=y
1124CONFIG_NETPOLL_TRAP=y 1444CONFIG_NETPOLL_TRAP=y
1125CONFIG_NET_POLL_CONTROLLER=y 1445CONFIG_NET_POLL_CONTROLLER=y
1126# CONFIG_ISDN is not set 1446# CONFIG_ISDN is not set
1127# CONFIG_PHONE is not set 1447CONFIG_PHONE=m
1448# CONFIG_PHONE_IXJ is not set
1128 1449
1129# 1450#
1130# Input device support 1451# Input device support
@@ -1137,7 +1458,7 @@ CONFIG_INPUT_POLLDEV=m
1137# Userland interfaces 1458# Userland interfaces
1138# 1459#
1139CONFIG_INPUT_MOUSEDEV=y 1460CONFIG_INPUT_MOUSEDEV=y
1140# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 1461CONFIG_INPUT_MOUSEDEV_PSAUX=y
1141CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 1462CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
1142CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 1463CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
1143CONFIG_INPUT_JOYDEV=m 1464CONFIG_INPUT_JOYDEV=m
@@ -1149,10 +1470,10 @@ CONFIG_INPUT_EVDEV=y
1149# 1470#
1150CONFIG_INPUT_KEYBOARD=y 1471CONFIG_INPUT_KEYBOARD=y
1151CONFIG_KEYBOARD_ATKBD=y 1472CONFIG_KEYBOARD_ATKBD=y
1152# CONFIG_KEYBOARD_SUNKBD is not set 1473CONFIG_KEYBOARD_SUNKBD=m
1153# CONFIG_KEYBOARD_LKKBD is not set 1474# CONFIG_KEYBOARD_LKKBD is not set
1154# CONFIG_KEYBOARD_XTKBD is not set 1475CONFIG_KEYBOARD_XTKBD=m
1155# CONFIG_KEYBOARD_NEWTON is not set 1476CONFIG_KEYBOARD_NEWTON=m
1156# CONFIG_KEYBOARD_STOWAWAY is not set 1477# CONFIG_KEYBOARD_STOWAWAY is not set
1157CONFIG_INPUT_MOUSE=y 1478CONFIG_INPUT_MOUSE=y
1158CONFIG_MOUSE_PS2=y 1479CONFIG_MOUSE_PS2=y
@@ -1165,65 +1486,62 @@ CONFIG_MOUSE_PS2_TRACKPOINT=y
1165CONFIG_MOUSE_SERIAL=m 1486CONFIG_MOUSE_SERIAL=m
1166# CONFIG_MOUSE_APPLETOUCH is not set 1487# CONFIG_MOUSE_APPLETOUCH is not set
1167# CONFIG_MOUSE_BCM5974 is not set 1488# CONFIG_MOUSE_BCM5974 is not set
1168CONFIG_MOUSE_VSXXXAA=m 1489CONFIG_MOUSE_INPORT=m
1490CONFIG_MOUSE_ATIXL=y
1491CONFIG_MOUSE_LOGIBM=m
1492CONFIG_MOUSE_PC110PAD=m
1493# CONFIG_MOUSE_VSXXXAA is not set
1169CONFIG_INPUT_JOYSTICK=y 1494CONFIG_INPUT_JOYSTICK=y
1170# CONFIG_JOYSTICK_ANALOG is not set 1495CONFIG_JOYSTICK_ANALOG=m
1171# CONFIG_JOYSTICK_A3D is not set 1496CONFIG_JOYSTICK_A3D=m
1172# CONFIG_JOYSTICK_ADI is not set 1497CONFIG_JOYSTICK_ADI=m
1173# CONFIG_JOYSTICK_COBRA is not set 1498CONFIG_JOYSTICK_COBRA=m
1174# CONFIG_JOYSTICK_GF2K is not set 1499CONFIG_JOYSTICK_GF2K=m
1175# CONFIG_JOYSTICK_GRIP is not set 1500CONFIG_JOYSTICK_GRIP=m
1176# CONFIG_JOYSTICK_GRIP_MP is not set 1501CONFIG_JOYSTICK_GRIP_MP=m
1177# CONFIG_JOYSTICK_GUILLEMOT is not set 1502CONFIG_JOYSTICK_GUILLEMOT=m
1178# CONFIG_JOYSTICK_INTERACT is not set 1503CONFIG_JOYSTICK_INTERACT=m
1179# CONFIG_JOYSTICK_SIDEWINDER is not set 1504CONFIG_JOYSTICK_SIDEWINDER=m
1180# CONFIG_JOYSTICK_TMDC is not set 1505CONFIG_JOYSTICK_TMDC=m
1181# CONFIG_JOYSTICK_IFORCE is not set 1506CONFIG_JOYSTICK_IFORCE=m
1182# CONFIG_JOYSTICK_WARRIOR is not set 1507CONFIG_JOYSTICK_IFORCE_USB=y
1183# CONFIG_JOYSTICK_MAGELLAN is not set 1508CONFIG_JOYSTICK_IFORCE_232=y
1184# CONFIG_JOYSTICK_SPACEORB is not set 1509CONFIG_JOYSTICK_WARRIOR=m
1185# CONFIG_JOYSTICK_SPACEBALL is not set 1510CONFIG_JOYSTICK_MAGELLAN=m
1186# CONFIG_JOYSTICK_STINGER is not set 1511CONFIG_JOYSTICK_SPACEORB=m
1187# CONFIG_JOYSTICK_TWIDJOY is not set 1512CONFIG_JOYSTICK_SPACEBALL=m
1513CONFIG_JOYSTICK_STINGER=m
1514CONFIG_JOYSTICK_TWIDJOY=m
1188# CONFIG_JOYSTICK_ZHENHUA is not set 1515# CONFIG_JOYSTICK_ZHENHUA is not set
1189# CONFIG_JOYSTICK_JOYDUMP is not set 1516CONFIG_JOYSTICK_JOYDUMP=m
1190# CONFIG_JOYSTICK_XPAD is not set 1517# CONFIG_JOYSTICK_XPAD is not set
1191# CONFIG_INPUT_TABLET is not set 1518# CONFIG_INPUT_TABLET is not set
1192CONFIG_INPUT_TOUCHSCREEN=y 1519CONFIG_INPUT_TOUCHSCREEN=y
1193CONFIG_TOUCHSCREEN_FUJITSU=m 1520CONFIG_TOUCHSCREEN_ADS7846=m
1521# CONFIG_TOUCHSCREEN_FUJITSU is not set
1194CONFIG_TOUCHSCREEN_GUNZE=m 1522CONFIG_TOUCHSCREEN_GUNZE=m
1195CONFIG_TOUCHSCREEN_ELO=m 1523CONFIG_TOUCHSCREEN_ELO=m
1196CONFIG_TOUCHSCREEN_MTOUCH=m 1524CONFIG_TOUCHSCREEN_MTOUCH=m
1197CONFIG_TOUCHSCREEN_INEXIO=m 1525# CONFIG_TOUCHSCREEN_INEXIO is not set
1198CONFIG_TOUCHSCREEN_MK712=m 1526CONFIG_TOUCHSCREEN_MK712=m
1199CONFIG_TOUCHSCREEN_PENMOUNT=m 1527# CONFIG_TOUCHSCREEN_HTCPEN is not set
1200CONFIG_TOUCHSCREEN_TOUCHRIGHT=m 1528# CONFIG_TOUCHSCREEN_PENMOUNT is not set
1201CONFIG_TOUCHSCREEN_TOUCHWIN=m 1529# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
1202CONFIG_TOUCHSCREEN_UCB1400=m 1530# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
1531# CONFIG_TOUCHSCREEN_UCB1400 is not set
1203# CONFIG_TOUCHSCREEN_WM97XX is not set 1532# CONFIG_TOUCHSCREEN_WM97XX is not set
1204CONFIG_TOUCHSCREEN_USB_COMPOSITE=m 1533# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
1205CONFIG_TOUCHSCREEN_USB_EGALAX=y 1534# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
1206CONFIG_TOUCHSCREEN_USB_PANJIT=y
1207CONFIG_TOUCHSCREEN_USB_3M=y
1208CONFIG_TOUCHSCREEN_USB_ITM=y
1209CONFIG_TOUCHSCREEN_USB_ETURBO=y
1210CONFIG_TOUCHSCREEN_USB_GUNZE=y
1211CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
1212CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
1213CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
1214CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
1215CONFIG_TOUCHSCREEN_USB_GOTOP=y
1216CONFIG_TOUCHSCREEN_TOUCHIT213=m
1217CONFIG_INPUT_MISC=y 1535CONFIG_INPUT_MISC=y
1218# CONFIG_INPUT_PCSPKR is not set 1536CONFIG_INPUT_PCSPKR=y
1219# CONFIG_INPUT_APANEL is not set 1537# CONFIG_INPUT_APANEL is not set
1220# CONFIG_INPUT_WISTRON_BTNS is not set 1538CONFIG_INPUT_WISTRON_BTNS=m
1221CONFIG_INPUT_ATLAS_BTNS=m 1539# CONFIG_INPUT_ATLAS_BTNS is not set
1222CONFIG_INPUT_ATI_REMOTE=m 1540# CONFIG_INPUT_ATI_REMOTE is not set
1223CONFIG_INPUT_ATI_REMOTE2=m 1541# CONFIG_INPUT_ATI_REMOTE2 is not set
1224CONFIG_INPUT_KEYSPAN_REMOTE=m 1542# CONFIG_INPUT_KEYSPAN_REMOTE is not set
1225CONFIG_INPUT_POWERMATE=m 1543# CONFIG_INPUT_POWERMATE is not set
1226CONFIG_INPUT_YEALINK=m 1544# CONFIG_INPUT_YEALINK is not set
1227CONFIG_INPUT_UINPUT=m 1545CONFIG_INPUT_UINPUT=m
1228 1546
1229# 1547#
@@ -1231,12 +1549,16 @@ CONFIG_INPUT_UINPUT=m
1231# 1549#
1232CONFIG_SERIO=y 1550CONFIG_SERIO=y
1233CONFIG_SERIO_I8042=y 1551CONFIG_SERIO_I8042=y
1234CONFIG_SERIO_SERPORT=y 1552CONFIG_SERIO_SERPORT=m
1235# CONFIG_SERIO_CT82C710 is not set 1553CONFIG_SERIO_CT82C710=m
1236# CONFIG_SERIO_PCIPS2 is not set 1554CONFIG_SERIO_PCIPS2=m
1237CONFIG_SERIO_LIBPS2=y 1555CONFIG_SERIO_LIBPS2=y
1238CONFIG_SERIO_RAW=m 1556CONFIG_SERIO_RAW=m
1239# CONFIG_GAMEPORT is not set 1557CONFIG_GAMEPORT=m
1558CONFIG_GAMEPORT_NS558=m
1559CONFIG_GAMEPORT_L4=m
1560CONFIG_GAMEPORT_EMU10K1=m
1561CONFIG_GAMEPORT_FM801=m
1240 1562
1241# 1563#
1242# Character devices 1564# Character devices
@@ -1246,25 +1568,66 @@ CONFIG_CONSOLE_TRANSLATIONS=y
1246CONFIG_VT_CONSOLE=y 1568CONFIG_VT_CONSOLE=y
1247CONFIG_HW_CONSOLE=y 1569CONFIG_HW_CONSOLE=y
1248CONFIG_VT_HW_CONSOLE_BINDING=y 1570CONFIG_VT_HW_CONSOLE_BINDING=y
1249# CONFIG_DEVKMEM is not set 1571CONFIG_DEVKMEM=y
1250# CONFIG_SERIAL_NONSTANDARD is not set 1572CONFIG_SERIAL_NONSTANDARD=y
1573# CONFIG_COMPUTONE is not set
1574# CONFIG_ROCKETPORT is not set
1575# CONFIG_CYCLADES is not set
1576# CONFIG_DIGIEPCA is not set
1577# CONFIG_ESPSERIAL is not set
1578# CONFIG_MOXA_INTELLIO is not set
1579# CONFIG_MOXA_SMARTIO is not set
1580# CONFIG_ISI is not set
1581# CONFIG_SYNCLINK is not set
1582# CONFIG_SYNCLINKMP is not set
1583# CONFIG_SYNCLINK_GT is not set
1584# CONFIG_N_HDLC is not set
1585# CONFIG_RISCOM8 is not set
1586# CONFIG_SPECIALIX is not set
1587# CONFIG_SX is not set
1588# CONFIG_RIO is not set
1589# CONFIG_STALDRV is not set
1251# CONFIG_NOZOMI is not set 1590# CONFIG_NOZOMI is not set
1252 1591
1253# 1592#
1254# Serial drivers 1593# Serial drivers
1255# 1594#
1256# CONFIG_SERIAL_8250 is not set 1595CONFIG_SERIAL_8250=y
1596CONFIG_SERIAL_8250_CONSOLE=y
1257CONFIG_FIX_EARLYCON_MEM=y 1597CONFIG_FIX_EARLYCON_MEM=y
1598CONFIG_SERIAL_8250_PCI=y
1599CONFIG_SERIAL_8250_PNP=y
1600CONFIG_SERIAL_8250_NR_UARTS=8
1601CONFIG_SERIAL_8250_RUNTIME_UARTS=4
1602CONFIG_SERIAL_8250_EXTENDED=y
1603# CONFIG_SERIAL_8250_MANY_PORTS is not set
1604CONFIG_SERIAL_8250_SHARE_IRQ=y
1605# CONFIG_SERIAL_8250_DETECT_IRQ is not set
1606# CONFIG_SERIAL_8250_RSA is not set
1258 1607
1259# 1608#
1260# Non-8250 serial port support 1609# Non-8250 serial port support
1261# 1610#
1262# CONFIG_SERIAL_JSM is not set 1611CONFIG_SERIAL_CORE=y
1612CONFIG_SERIAL_CORE_CONSOLE=y
1613CONFIG_SERIAL_JSM=y
1263CONFIG_UNIX98_PTYS=y 1614CONFIG_UNIX98_PTYS=y
1264# CONFIG_LEGACY_PTYS is not set 1615CONFIG_LEGACY_PTYS=y
1265# CONFIG_IPMI_HANDLER is not set 1616CONFIG_LEGACY_PTY_COUNT=64
1266# CONFIG_HW_RANDOM is not set 1617CONFIG_IPMI_HANDLER=m
1267# CONFIG_NVRAM is not set 1618CONFIG_IPMI_PANIC_EVENT=y
1619CONFIG_IPMI_PANIC_STRING=y
1620CONFIG_IPMI_DEVICE_INTERFACE=m
1621CONFIG_IPMI_SI=m
1622CONFIG_IPMI_WATCHDOG=m
1623CONFIG_IPMI_POWEROFF=m
1624CONFIG_HW_RANDOM=y
1625CONFIG_HW_RANDOM_INTEL=m
1626# CONFIG_HW_RANDOM_AMD is not set
1627# CONFIG_HW_RANDOM_GEODE is not set
1628# CONFIG_HW_RANDOM_VIA is not set
1629CONFIG_NVRAM=m
1630# CONFIG_DTLK is not set
1268# CONFIG_R3964 is not set 1631# CONFIG_R3964 is not set
1269# CONFIG_APPLICOM is not set 1632# CONFIG_APPLICOM is not set
1270# CONFIG_SONYPI is not set 1633# CONFIG_SONYPI is not set
@@ -1272,18 +1635,20 @@ CONFIG_UNIX98_PTYS=y
1272# CONFIG_PC8736x_GPIO is not set 1635# CONFIG_PC8736x_GPIO is not set
1273# CONFIG_NSC_GPIO is not set 1636# CONFIG_NSC_GPIO is not set
1274# CONFIG_CS5535_GPIO is not set 1637# CONFIG_CS5535_GPIO is not set
1275# CONFIG_RAW_DRIVER is not set 1638CONFIG_RAW_DRIVER=m
1639CONFIG_MAX_RAW_DEVS=4096
1276CONFIG_HPET=y 1640CONFIG_HPET=y
1277# CONFIG_HPET_MMAP is not set 1641CONFIG_HPET_MMAP=y
1278# CONFIG_HANGCHECK_TIMER is not set 1642CONFIG_HANGCHECK_TIMER=m
1279# CONFIG_TCG_TPM is not set 1643# CONFIG_TCG_TPM is not set
1280# CONFIG_TELCLOCK is not set 1644# CONFIG_TELCLOCK is not set
1281CONFIG_DEVPORT=y 1645CONFIG_DEVPORT=y
1282CONFIG_I2C=y 1646CONFIG_I2C=m
1283CONFIG_I2C_BOARDINFO=y 1647CONFIG_I2C_BOARDINFO=y
1284# CONFIG_I2C_CHARDEV is not set 1648CONFIG_I2C_CHARDEV=m
1285CONFIG_I2C_HELPER_AUTO=y 1649CONFIG_I2C_HELPER_AUTO=y
1286CONFIG_I2C_ALGOBIT=y 1650CONFIG_I2C_ALGOBIT=m
1651CONFIG_I2C_ALGOPCA=m
1287 1652
1288# 1653#
1289# I2C Hardware Bus support 1654# I2C Hardware Bus support
@@ -1292,76 +1657,111 @@ CONFIG_I2C_ALGOBIT=y
1292# 1657#
1293# PC SMBus host controller drivers 1658# PC SMBus host controller drivers
1294# 1659#
1295# CONFIG_I2C_ALI1535 is not set 1660CONFIG_I2C_ALI1535=m
1296# CONFIG_I2C_ALI1563 is not set 1661CONFIG_I2C_ALI1563=m
1297# CONFIG_I2C_ALI15X3 is not set 1662CONFIG_I2C_ALI15X3=m
1298# CONFIG_I2C_AMD756 is not set 1663CONFIG_I2C_AMD756=m
1299# CONFIG_I2C_AMD8111 is not set 1664CONFIG_I2C_AMD756_S4882=m
1300# CONFIG_I2C_I801 is not set 1665CONFIG_I2C_AMD8111=m
1666CONFIG_I2C_I801=m
1301# CONFIG_I2C_ISCH is not set 1667# CONFIG_I2C_ISCH is not set
1302# CONFIG_I2C_PIIX4 is not set 1668CONFIG_I2C_PIIX4=m
1303# CONFIG_I2C_NFORCE2 is not set 1669CONFIG_I2C_NFORCE2=m
1304# CONFIG_I2C_SIS5595 is not set 1670# CONFIG_I2C_NFORCE2_S4985 is not set
1305# CONFIG_I2C_SIS630 is not set 1671CONFIG_I2C_SIS5595=m
1306# CONFIG_I2C_SIS96X is not set 1672CONFIG_I2C_SIS630=m
1307# CONFIG_I2C_VIA is not set 1673CONFIG_I2C_SIS96X=m
1308# CONFIG_I2C_VIAPRO is not set 1674CONFIG_I2C_VIA=m
1675CONFIG_I2C_VIAPRO=m
1309 1676
1310# 1677#
1311# I2C system bus drivers (mostly embedded / system-on-chip) 1678# I2C system bus drivers (mostly embedded / system-on-chip)
1312# 1679#
1313# CONFIG_I2C_OCORES is not set 1680CONFIG_I2C_OCORES=m
1314# CONFIG_I2C_SIMTEC is not set 1681# CONFIG_I2C_SIMTEC is not set
1315 1682
1316# 1683#
1317# External I2C/SMBus adapter drivers 1684# External I2C/SMBus adapter drivers
1318# 1685#
1319# CONFIG_I2C_PARPORT_LIGHT is not set 1686CONFIG_I2C_PARPORT_LIGHT=m
1320# CONFIG_I2C_TAOS_EVM is not set 1687# CONFIG_I2C_TAOS_EVM is not set
1321# CONFIG_I2C_TINY_USB is not set 1688# CONFIG_I2C_TINY_USB is not set
1322 1689
1323# 1690#
1324# Graphics adapter I2C/DDC channel drivers 1691# Graphics adapter I2C/DDC channel drivers
1325# 1692#
1326# CONFIG_I2C_VOODOO3 is not set 1693CONFIG_I2C_VOODOO3=m
1327 1694
1328# 1695#
1329# Other I2C/SMBus bus drivers 1696# Other I2C/SMBus bus drivers
1330# 1697#
1698CONFIG_I2C_PCA_ISA=m
1331# CONFIG_I2C_PCA_PLATFORM is not set 1699# CONFIG_I2C_PCA_PLATFORM is not set
1332# CONFIG_I2C_STUB is not set 1700CONFIG_I2C_STUB=m
1333# CONFIG_SCx200_ACB is not set 1701CONFIG_SCx200_ACB=m
1334 1702
1335# 1703#
1336# Miscellaneous I2C Chip support 1704# Miscellaneous I2C Chip support
1337# 1705#
1338# CONFIG_DS1682 is not set 1706# CONFIG_DS1682 is not set
1339# CONFIG_AT24 is not set 1707# CONFIG_AT24 is not set
1340# CONFIG_SENSORS_EEPROM is not set 1708CONFIG_SENSORS_EEPROM=m
1341# CONFIG_SENSORS_PCF8574 is not set 1709CONFIG_SENSORS_PCF8574=m
1342# CONFIG_PCF8575 is not set 1710# CONFIG_PCF8575 is not set
1343# CONFIG_SENSORS_PCA9539 is not set 1711CONFIG_SENSORS_PCA9539=m
1344# CONFIG_SENSORS_PCF8591 is not set 1712CONFIG_SENSORS_PCF8591=m
1345# CONFIG_SENSORS_MAX6875 is not set 1713CONFIG_SENSORS_MAX6875=m
1346# CONFIG_SENSORS_TSL2550 is not set 1714# CONFIG_SENSORS_TSL2550 is not set
1347# CONFIG_I2C_DEBUG_CORE is not set 1715# CONFIG_I2C_DEBUG_CORE is not set
1348# CONFIG_I2C_DEBUG_ALGO is not set 1716# CONFIG_I2C_DEBUG_ALGO is not set
1349# CONFIG_I2C_DEBUG_BUS is not set 1717# CONFIG_I2C_DEBUG_BUS is not set
1350# CONFIG_I2C_DEBUG_CHIP is not set 1718# CONFIG_I2C_DEBUG_CHIP is not set
1351# CONFIG_SPI is not set 1719CONFIG_SPI=y
1720# CONFIG_SPI_DEBUG is not set
1721CONFIG_SPI_MASTER=y
1722
1723#
1724# SPI Master Controller Drivers
1725#
1726CONFIG_SPI_BITBANG=m
1727
1728#
1729# SPI Protocol Masters
1730#
1731# CONFIG_SPI_AT25 is not set
1732# CONFIG_SPI_SPIDEV is not set
1733# CONFIG_SPI_TLE62X0 is not set
1352CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 1734CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
1353# CONFIG_GPIOLIB is not set 1735# CONFIG_GPIOLIB is not set
1354# CONFIG_W1 is not set 1736CONFIG_W1=m
1737CONFIG_W1_CON=y
1738
1739#
1740# 1-wire Bus Masters
1741#
1742CONFIG_W1_MASTER_MATROX=m
1743CONFIG_W1_MASTER_DS2490=m
1744CONFIG_W1_MASTER_DS2482=m
1745
1746#
1747# 1-wire Slaves
1748#
1749CONFIG_W1_SLAVE_THERM=m
1750CONFIG_W1_SLAVE_SMEM=m
1751CONFIG_W1_SLAVE_DS2433=m
1752CONFIG_W1_SLAVE_DS2433_CRC=y
1753# CONFIG_W1_SLAVE_DS2760 is not set
1355CONFIG_POWER_SUPPLY=y 1754CONFIG_POWER_SUPPLY=y
1356# CONFIG_POWER_SUPPLY_DEBUG is not set 1755# CONFIG_POWER_SUPPLY_DEBUG is not set
1357# CONFIG_PDA_POWER is not set 1756# CONFIG_PDA_POWER is not set
1358# CONFIG_BATTERY_DS2760 is not set 1757# CONFIG_BATTERY_DS2760 is not set
1359CONFIG_HWMON=y 1758CONFIG_HWMON=y
1360# CONFIG_HWMON_VID is not set 1759CONFIG_HWMON_VID=m
1361# CONFIG_SENSORS_ABITUGURU is not set 1760# CONFIG_SENSORS_ABITUGURU is not set
1362# CONFIG_SENSORS_ABITUGURU3 is not set 1761# CONFIG_SENSORS_ABITUGURU3 is not set
1363# CONFIG_SENSORS_AD7414 is not set 1762# CONFIG_SENSORS_AD7414 is not set
1364# CONFIG_SENSORS_AD7418 is not set 1763# CONFIG_SENSORS_AD7418 is not set
1764# CONFIG_SENSORS_ADCXX is not set
1365# CONFIG_SENSORS_ADM1021 is not set 1765# CONFIG_SENSORS_ADM1021 is not set
1366# CONFIG_SENSORS_ADM1025 is not set 1766# CONFIG_SENSORS_ADM1025 is not set
1367# CONFIG_SENSORS_ADM1026 is not set 1767# CONFIG_SENSORS_ADM1026 is not set
@@ -1384,14 +1784,17 @@ CONFIG_HWMON=y
1384# CONFIG_SENSORS_GL518SM is not set 1784# CONFIG_SENSORS_GL518SM is not set
1385# CONFIG_SENSORS_GL520SM is not set 1785# CONFIG_SENSORS_GL520SM is not set
1386# CONFIG_SENSORS_CORETEMP is not set 1786# CONFIG_SENSORS_CORETEMP is not set
1787# CONFIG_SENSORS_IBMAEM is not set
1788# CONFIG_SENSORS_IBMPEX is not set
1387# CONFIG_SENSORS_IT87 is not set 1789# CONFIG_SENSORS_IT87 is not set
1388# CONFIG_SENSORS_LM63 is not set 1790# CONFIG_SENSORS_LM63 is not set
1791# CONFIG_SENSORS_LM70 is not set
1389# CONFIG_SENSORS_LM75 is not set 1792# CONFIG_SENSORS_LM75 is not set
1390# CONFIG_SENSORS_LM77 is not set 1793# CONFIG_SENSORS_LM77 is not set
1391# CONFIG_SENSORS_LM78 is not set 1794# CONFIG_SENSORS_LM78 is not set
1392# CONFIG_SENSORS_LM80 is not set 1795# CONFIG_SENSORS_LM80 is not set
1393# CONFIG_SENSORS_LM83 is not set 1796# CONFIG_SENSORS_LM83 is not set
1394# CONFIG_SENSORS_LM85 is not set 1797CONFIG_SENSORS_LM85=m
1395# CONFIG_SENSORS_LM87 is not set 1798# CONFIG_SENSORS_LM87 is not set
1396# CONFIG_SENSORS_LM90 is not set 1799# CONFIG_SENSORS_LM90 is not set
1397# CONFIG_SENSORS_LM92 is not set 1800# CONFIG_SENSORS_LM92 is not set
@@ -1446,92 +1849,278 @@ CONFIG_SSB_POSSIBLE=y
1446# 1849#
1447# Multimedia core support 1850# Multimedia core support
1448# 1851#
1449CONFIG_VIDEO_DEV=y 1852CONFIG_VIDEO_DEV=m
1450CONFIG_VIDEO_V4L2_COMMON=y 1853CONFIG_VIDEO_V4L2_COMMON=m
1451# CONFIG_VIDEO_ALLOW_V4L1 is not set 1854CONFIG_VIDEO_ALLOW_V4L1=y
1452CONFIG_VIDEO_V4L1_COMPAT=y 1855CONFIG_VIDEO_V4L1_COMPAT=y
1453CONFIG_DVB_CORE=y 1856CONFIG_DVB_CORE=m
1454CONFIG_VIDEO_MEDIA=y 1857CONFIG_VIDEO_MEDIA=m
1455 1858
1456# 1859#
1457# Multimedia drivers 1860# Multimedia drivers
1458# 1861#
1459# CONFIG_MEDIA_ATTACH is not set 1862# CONFIG_MEDIA_ATTACH is not set
1460CONFIG_MEDIA_TUNER=y 1863CONFIG_MEDIA_TUNER=m
1461# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set 1864# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
1462CONFIG_MEDIA_TUNER_SIMPLE=y 1865CONFIG_MEDIA_TUNER_SIMPLE=m
1463CONFIG_MEDIA_TUNER_TDA8290=y 1866CONFIG_MEDIA_TUNER_TDA8290=m
1464CONFIG_MEDIA_TUNER_TDA9887=y 1867CONFIG_MEDIA_TUNER_TDA18271=m
1465CONFIG_MEDIA_TUNER_TEA5761=y 1868CONFIG_MEDIA_TUNER_TDA9887=m
1466CONFIG_MEDIA_TUNER_TEA5767=y 1869CONFIG_MEDIA_TUNER_TEA5761=m
1467CONFIG_MEDIA_TUNER_MT20XX=y 1870CONFIG_MEDIA_TUNER_TEA5767=m
1468CONFIG_MEDIA_TUNER_XC2028=y 1871CONFIG_MEDIA_TUNER_MT20XX=m
1469CONFIG_MEDIA_TUNER_XC5000=y 1872CONFIG_MEDIA_TUNER_MT2060=m
1470CONFIG_VIDEO_V4L2=y 1873CONFIG_MEDIA_TUNER_XC2028=m
1874CONFIG_MEDIA_TUNER_XC5000=m
1875CONFIG_VIDEO_V4L2=m
1876CONFIG_VIDEO_V4L1=m
1877CONFIG_VIDEOBUF_GEN=m
1878CONFIG_VIDEOBUF_VMALLOC=m
1879CONFIG_VIDEO_IR=m
1880CONFIG_VIDEO_TVEEPROM=m
1881CONFIG_VIDEO_TUNER=m
1471CONFIG_VIDEO_CAPTURE_DRIVERS=y 1882CONFIG_VIDEO_CAPTURE_DRIVERS=y
1472# CONFIG_VIDEO_ADV_DEBUG is not set 1883# CONFIG_VIDEO_ADV_DEBUG is not set
1473CONFIG_VIDEO_HELPER_CHIPS_AUTO=y 1884CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
1885CONFIG_VIDEO_IR_I2C=m
1886CONFIG_VIDEO_MSP3400=m
1887CONFIG_VIDEO_CS53L32A=m
1888CONFIG_VIDEO_WM8775=m
1889CONFIG_VIDEO_SAA711X=m
1890CONFIG_VIDEO_TVP5150=m
1891CONFIG_VIDEO_CX25840=m
1892CONFIG_VIDEO_CX2341X=m
1474# CONFIG_VIDEO_VIVI is not set 1893# CONFIG_VIDEO_VIVI is not set
1475# CONFIG_VIDEO_BT848 is not set 1894# CONFIG_VIDEO_BT848 is not set
1895# CONFIG_VIDEO_PMS is not set
1896# CONFIG_VIDEO_CPIA is not set
1897# CONFIG_VIDEO_CPIA2 is not set
1476# CONFIG_VIDEO_SAA5246A is not set 1898# CONFIG_VIDEO_SAA5246A is not set
1477# CONFIG_VIDEO_SAA5249 is not set 1899# CONFIG_VIDEO_SAA5249 is not set
1900# CONFIG_TUNER_3036 is not set
1901# CONFIG_VIDEO_STRADIS is not set
1902# CONFIG_VIDEO_ZORAN is not set
1478# CONFIG_VIDEO_SAA7134 is not set 1903# CONFIG_VIDEO_SAA7134 is not set
1904# CONFIG_VIDEO_MXB is not set
1905# CONFIG_VIDEO_DPC is not set
1479# CONFIG_VIDEO_HEXIUM_ORION is not set 1906# CONFIG_VIDEO_HEXIUM_ORION is not set
1480# CONFIG_VIDEO_HEXIUM_GEMINI is not set 1907# CONFIG_VIDEO_HEXIUM_GEMINI is not set
1481# CONFIG_VIDEO_CX88 is not set 1908# CONFIG_VIDEO_CX88 is not set
1482# CONFIG_VIDEO_CX23885 is not set 1909# CONFIG_VIDEO_CX23885 is not set
1483# CONFIG_VIDEO_AU0828 is not set 1910# CONFIG_VIDEO_AU0828 is not set
1911# CONFIG_VIDEO_IVTV is not set
1484# CONFIG_VIDEO_CX18 is not set 1912# CONFIG_VIDEO_CX18 is not set
1485# CONFIG_VIDEO_CAFE_CCIC is not set 1913# CONFIG_VIDEO_CAFE_CCIC is not set
1486CONFIG_V4L_USB_DRIVERS=y 1914CONFIG_V4L_USB_DRIVERS=y
1487CONFIG_USB_VIDEO_CLASS=m 1915# CONFIG_USB_VIDEO_CLASS is not set
1488CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
1489# CONFIG_USB_GSPCA is not set 1916# CONFIG_USB_GSPCA is not set
1490# CONFIG_VIDEO_PVRUSB2 is not set 1917CONFIG_VIDEO_PVRUSB2=m
1491# CONFIG_VIDEO_EM28XX is not set 1918CONFIG_VIDEO_PVRUSB2_SYSFS=y
1919CONFIG_VIDEO_PVRUSB2_DVB=y
1920# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
1921CONFIG_VIDEO_EM28XX=m
1922# CONFIG_VIDEO_EM28XX_ALSA is not set
1923# CONFIG_VIDEO_EM28XX_DVB is not set
1492# CONFIG_VIDEO_USBVISION is not set 1924# CONFIG_VIDEO_USBVISION is not set
1493# CONFIG_USB_ET61X251 is not set 1925CONFIG_VIDEO_USBVIDEO=m
1494# CONFIG_USB_SN9C102 is not set 1926CONFIG_USB_VICAM=m
1927CONFIG_USB_IBMCAM=m
1928CONFIG_USB_KONICAWC=m
1929CONFIG_USB_QUICKCAM_MESSENGER=m
1930CONFIG_USB_ET61X251=m
1931CONFIG_VIDEO_OVCAMCHIP=m
1932CONFIG_USB_W9968CF=m
1933CONFIG_USB_OV511=m
1934CONFIG_USB_SE401=m
1935CONFIG_USB_SN9C102=m
1936CONFIG_USB_STV680=m
1495# CONFIG_USB_ZC0301 is not set 1937# CONFIG_USB_ZC0301 is not set
1938CONFIG_USB_PWC=m
1939# CONFIG_USB_PWC_DEBUG is not set
1496# CONFIG_USB_ZR364XX is not set 1940# CONFIG_USB_ZR364XX is not set
1497# CONFIG_USB_STKWEBCAM is not set 1941# CONFIG_USB_STKWEBCAM is not set
1498# CONFIG_USB_S2255 is not set 1942# CONFIG_USB_S2255 is not set
1499# CONFIG_SOC_CAMERA is not set 1943# CONFIG_SOC_CAMERA is not set
1500# CONFIG_VIDEO_SH_MOBILE_CEU is not set 1944# CONFIG_VIDEO_SH_MOBILE_CEU is not set
1501# CONFIG_RADIO_ADAPTERS is not set 1945CONFIG_RADIO_ADAPTERS=y
1502# CONFIG_DVB_CAPTURE_DRIVERS is not set 1946# CONFIG_RADIO_CADET is not set
1503# CONFIG_DAB is not set 1947# CONFIG_RADIO_RTRACK is not set
1948# CONFIG_RADIO_RTRACK2 is not set
1949# CONFIG_RADIO_AZTECH is not set
1950# CONFIG_RADIO_GEMTEK is not set
1951# CONFIG_RADIO_GEMTEK_PCI is not set
1952# CONFIG_RADIO_MAXIRADIO is not set
1953# CONFIG_RADIO_MAESTRO is not set
1954# CONFIG_RADIO_SF16FMI is not set
1955# CONFIG_RADIO_SF16FMR2 is not set
1956# CONFIG_RADIO_TERRATEC is not set
1957# CONFIG_RADIO_TRUST is not set
1958# CONFIG_RADIO_TYPHOON is not set
1959# CONFIG_RADIO_ZOLTRIX is not set
1960# CONFIG_USB_DSBR is not set
1961# CONFIG_USB_SI470X is not set
1962CONFIG_DVB_CAPTURE_DRIVERS=y
1963
1964#
1965# Supported SAA7146 based PCI Adapters
1966#
1967# CONFIG_TTPCI_EEPROM is not set
1968# CONFIG_DVB_AV7110 is not set
1969# CONFIG_DVB_BUDGET_CORE is not set
1970
1971#
1972# Supported USB Adapters
1973#
1974CONFIG_DVB_USB=m
1975# CONFIG_DVB_USB_DEBUG is not set
1976CONFIG_DVB_USB_A800=m
1977CONFIG_DVB_USB_DIBUSB_MB=m
1978# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
1979CONFIG_DVB_USB_DIBUSB_MC=m
1980# CONFIG_DVB_USB_DIB0700 is not set
1981CONFIG_DVB_USB_UMT_010=m
1982# CONFIG_DVB_USB_CXUSB is not set
1983# CONFIG_DVB_USB_M920X is not set
1984# CONFIG_DVB_USB_GL861 is not set
1985# CONFIG_DVB_USB_AU6610 is not set
1986CONFIG_DVB_USB_DIGITV=m
1987CONFIG_DVB_USB_VP7045=m
1988CONFIG_DVB_USB_VP702X=m
1989CONFIG_DVB_USB_GP8PSK=m
1990CONFIG_DVB_USB_NOVA_T_USB2=m
1991# CONFIG_DVB_USB_TTUSB2 is not set
1992CONFIG_DVB_USB_DTT200U=m
1993# CONFIG_DVB_USB_OPERA1 is not set
1994# CONFIG_DVB_USB_AF9005 is not set
1995# CONFIG_DVB_USB_DW2102 is not set
1996# CONFIG_DVB_USB_ANYSEE is not set
1997# CONFIG_DVB_TTUSB_BUDGET is not set
1998# CONFIG_DVB_TTUSB_DEC is not set
1999# CONFIG_DVB_CINERGYT2 is not set
2000# CONFIG_DVB_SIANO_SMS1XXX is not set
2001
2002#
2003# Supported FlexCopII (B2C2) Adapters
2004#
2005# CONFIG_DVB_B2C2_FLEXCOP is not set
2006
2007#
2008# Supported BT878 Adapters
2009#
2010
2011#
2012# Supported Pluto2 Adapters
2013#
2014# CONFIG_DVB_PLUTO2 is not set
2015
2016#
2017# Supported DVB Frontends
2018#
2019
2020#
2021# Customise DVB Frontends
2022#
2023# CONFIG_DVB_FE_CUSTOMISE is not set
2024
2025#
2026# DVB-S (satellite) frontends
2027#
2028CONFIG_DVB_CX24110=m
2029CONFIG_DVB_CX24123=m
2030CONFIG_DVB_MT312=m
2031CONFIG_DVB_S5H1420=m
2032CONFIG_DVB_STV0299=m
2033CONFIG_DVB_TDA8083=m
2034CONFIG_DVB_TDA10086=m
2035CONFIG_DVB_VES1X93=m
2036# CONFIG_DVB_TUNER_ITD1000 is not set
2037CONFIG_DVB_TDA826X=m
2038CONFIG_DVB_TUA6100=m
2039
2040#
2041# DVB-T (terrestrial) frontends
2042#
2043CONFIG_DVB_SP8870=m
2044CONFIG_DVB_SP887X=m
2045CONFIG_DVB_CX22700=m
2046CONFIG_DVB_CX22702=m
2047# CONFIG_DVB_DRX397XD is not set
2048CONFIG_DVB_L64781=m
2049CONFIG_DVB_TDA1004X=m
2050CONFIG_DVB_NXT6000=m
2051CONFIG_DVB_MT352=m
2052CONFIG_DVB_ZL10353=m
2053CONFIG_DVB_DIB3000MB=m
2054CONFIG_DVB_DIB3000MC=m
2055# CONFIG_DVB_DIB7000M is not set
2056# CONFIG_DVB_DIB7000P is not set
2057CONFIG_DVB_TDA10048=m
2058
2059#
2060# DVB-C (cable) frontends
2061#
2062CONFIG_DVB_VES1820=m
2063CONFIG_DVB_TDA10021=m
2064# CONFIG_DVB_TDA10023 is not set
2065CONFIG_DVB_STV0297=m
2066
2067#
2068# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
2069#
2070CONFIG_DVB_NXT200X=m
2071CONFIG_DVB_OR51211=m
2072CONFIG_DVB_OR51132=m
2073CONFIG_DVB_BCM3510=m
2074CONFIG_DVB_LGDT330X=m
2075CONFIG_DVB_S5H1409=m
2076# CONFIG_DVB_AU8522 is not set
2077CONFIG_DVB_S5H1411=m
2078
2079#
2080# Digital terrestrial only tuners/PLL
2081#
2082CONFIG_DVB_PLL=m
2083# CONFIG_DVB_TUNER_DIB0070 is not set
2084
2085#
2086# SEC control devices for DVB-S
2087#
2088CONFIG_DVB_LNBP21=m
2089# CONFIG_DVB_ISL6405 is not set
2090CONFIG_DVB_ISL6421=m
2091CONFIG_DAB=y
2092CONFIG_USB_DABUSB=m
1504 2093
1505# 2094#
1506# Graphics support 2095# Graphics support
1507# 2096#
1508CONFIG_AGP=y 2097CONFIG_AGP=m
1509# CONFIG_AGP_ALI is not set 2098# CONFIG_AGP_ALI is not set
1510# CONFIG_AGP_ATI is not set 2099# CONFIG_AGP_ATI is not set
1511# CONFIG_AGP_AMD is not set 2100# CONFIG_AGP_AMD is not set
1512CONFIG_AGP_AMD64=y 2101# CONFIG_AGP_AMD64 is not set
1513CONFIG_AGP_INTEL=y 2102CONFIG_AGP_INTEL=m
1514# CONFIG_AGP_NVIDIA is not set 2103CONFIG_AGP_NVIDIA=m
1515# CONFIG_AGP_SIS is not set 2104# CONFIG_AGP_SIS is not set
1516# CONFIG_AGP_SWORKS is not set 2105# CONFIG_AGP_SWORKS is not set
1517# CONFIG_AGP_VIA is not set 2106# CONFIG_AGP_VIA is not set
1518# CONFIG_AGP_EFFICEON is not set 2107# CONFIG_AGP_EFFICEON is not set
1519CONFIG_DRM=y 2108CONFIG_DRM=m
1520# CONFIG_DRM_TDFX is not set 2109CONFIG_DRM_TDFX=m
1521# CONFIG_DRM_R128 is not set 2110# CONFIG_DRM_R128 is not set
1522# CONFIG_DRM_RADEON is not set 2111# CONFIG_DRM_RADEON is not set
1523CONFIG_DRM_I810=y 2112CONFIG_DRM_I810=m
1524CONFIG_DRM_I830=y 2113CONFIG_DRM_I830=m
1525CONFIG_DRM_I915=y 2114CONFIG_DRM_I915=m
1526# CONFIG_DRM_MGA is not set 2115# CONFIG_DRM_MGA is not set
1527# CONFIG_DRM_SIS is not set 2116CONFIG_DRM_SIS=m
1528# CONFIG_DRM_VIA is not set 2117# CONFIG_DRM_VIA is not set
1529# CONFIG_DRM_SAVAGE is not set 2118CONFIG_DRM_SAVAGE=m
1530# CONFIG_VGASTATE is not set 2119CONFIG_VGASTATE=m
1531CONFIG_VIDEO_OUTPUT_CONTROL=y 2120CONFIG_VIDEO_OUTPUT_CONTROL=y
1532CONFIG_FB=y 2121CONFIG_FB=y
1533CONFIG_FIRMWARE_EDID=y 2122CONFIG_FIRMWARE_EDID=y
1534CONFIG_FB_DDC=y 2123CONFIG_FB_DDC=m
1535CONFIG_FB_CFB_FILLRECT=y 2124CONFIG_FB_CFB_FILLRECT=y
1536CONFIG_FB_CFB_COPYAREA=y 2125CONFIG_FB_CFB_COPYAREA=y
1537CONFIG_FB_CFB_IMAGEBLIT=y 2126CONFIG_FB_CFB_IMAGEBLIT=y
@@ -1543,9 +2132,9 @@ CONFIG_FB_CFB_IMAGEBLIT=y
1543# CONFIG_FB_SYS_FOPS is not set 2132# CONFIG_FB_SYS_FOPS is not set
1544# CONFIG_FB_SVGALIB is not set 2133# CONFIG_FB_SVGALIB is not set
1545# CONFIG_FB_MACMODES is not set 2134# CONFIG_FB_MACMODES is not set
1546# CONFIG_FB_BACKLIGHT is not set 2135CONFIG_FB_BACKLIGHT=y
1547CONFIG_FB_MODE_HELPERS=y 2136CONFIG_FB_MODE_HELPERS=y
1548# CONFIG_FB_TILEBLITTING is not set 2137CONFIG_FB_TILEBLITTING=y
1549 2138
1550# 2139#
1551# Frame buffer hardware drivers 2140# Frame buffer hardware drivers
@@ -1556,24 +2145,40 @@ CONFIG_FB_MODE_HELPERS=y
1556# CONFIG_FB_ARC is not set 2145# CONFIG_FB_ARC is not set
1557# CONFIG_FB_ASILIANT is not set 2146# CONFIG_FB_ASILIANT is not set
1558# CONFIG_FB_IMSTT is not set 2147# CONFIG_FB_IMSTT is not set
1559CONFIG_FB_VGA16=y 2148CONFIG_FB_VGA16=m
1560CONFIG_FB_UVESA=y 2149# CONFIG_FB_UVESA is not set
1561CONFIG_FB_VESA=y 2150CONFIG_FB_VESA=y
1562# CONFIG_FB_EFI is not set 2151# CONFIG_FB_EFI is not set
2152# CONFIG_FB_IMAC is not set
1563# CONFIG_FB_N411 is not set 2153# CONFIG_FB_N411 is not set
1564# CONFIG_FB_HGA is not set 2154# CONFIG_FB_HGA is not set
1565# CONFIG_FB_S1D13XXX is not set 2155# CONFIG_FB_S1D13XXX is not set
1566# CONFIG_FB_NVIDIA is not set 2156CONFIG_FB_NVIDIA=m
1567# CONFIG_FB_RIVA is not set 2157CONFIG_FB_NVIDIA_I2C=y
1568CONFIG_FB_I810=y 2158# CONFIG_FB_NVIDIA_DEBUG is not set
2159CONFIG_FB_NVIDIA_BACKLIGHT=y
2160CONFIG_FB_RIVA=m
2161CONFIG_FB_RIVA_I2C=y
2162# CONFIG_FB_RIVA_DEBUG is not set
2163CONFIG_FB_RIVA_BACKLIGHT=y
2164CONFIG_FB_I810=m
2165CONFIG_FB_I810_GTF=y
2166CONFIG_FB_I810_I2C=y
1569# CONFIG_FB_LE80578 is not set 2167# CONFIG_FB_LE80578 is not set
1570CONFIG_FB_INTEL=y 2168CONFIG_FB_INTEL=m
1571CONFIG_FB_INTEL_DEBUG=y 2169# CONFIG_FB_INTEL_DEBUG is not set
1572CONFIG_FB_INTEL_I2C=y 2170CONFIG_FB_INTEL_I2C=y
1573# CONFIG_FB_MATROX is not set 2171# CONFIG_FB_MATROX is not set
1574# CONFIG_FB_RADEON is not set 2172CONFIG_FB_RADEON=m
2173CONFIG_FB_RADEON_I2C=y
2174CONFIG_FB_RADEON_BACKLIGHT=y
2175# CONFIG_FB_RADEON_DEBUG is not set
1575# CONFIG_FB_ATY128 is not set 2176# CONFIG_FB_ATY128 is not set
1576# CONFIG_FB_ATY is not set 2177CONFIG_FB_ATY=m
2178CONFIG_FB_ATY_CT=y
2179CONFIG_FB_ATY_GENERIC_LCD=y
2180CONFIG_FB_ATY_GX=y
2181CONFIG_FB_ATY_BACKLIGHT=y
1577# CONFIG_FB_S3 is not set 2182# CONFIG_FB_S3 is not set
1578# CONFIG_FB_SAVAGE is not set 2183# CONFIG_FB_SAVAGE is not set
1579# CONFIG_FB_SIS is not set 2184# CONFIG_FB_SIS is not set
@@ -1590,22 +2195,20 @@ CONFIG_FB_INTEL_I2C=y
1590# CONFIG_FB_GEODE is not set 2195# CONFIG_FB_GEODE is not set
1591# CONFIG_FB_VIRTUAL is not set 2196# CONFIG_FB_VIRTUAL is not set
1592CONFIG_BACKLIGHT_LCD_SUPPORT=y 2197CONFIG_BACKLIGHT_LCD_SUPPORT=y
1593CONFIG_LCD_CLASS_DEVICE=y 2198CONFIG_LCD_CLASS_DEVICE=m
2199# CONFIG_LCD_LTV350QV is not set
1594# CONFIG_LCD_ILI9320 is not set 2200# CONFIG_LCD_ILI9320 is not set
1595CONFIG_LCD_PLATFORM=y 2201# CONFIG_LCD_VGG2432A4 is not set
2202# CONFIG_LCD_PLATFORM is not set
1596CONFIG_BACKLIGHT_CLASS_DEVICE=y 2203CONFIG_BACKLIGHT_CLASS_DEVICE=y
1597# CONFIG_BACKLIGHT_CORGI is not set 2204# CONFIG_BACKLIGHT_CORGI is not set
1598# CONFIG_BACKLIGHT_PROGEAR is not set 2205# CONFIG_BACKLIGHT_PROGEAR is not set
1599CONFIG_BACKLIGHT_MBP_NVIDIA=y 2206# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
1600 2207
1601# 2208#
1602# Display device support 2209# Display device support
1603# 2210#
1604CONFIG_DISPLAY_SUPPORT=y 2211# CONFIG_DISPLAY_SUPPORT is not set
1605
1606#
1607# Display hardware drivers
1608#
1609 2212
1610# 2213#
1611# Console display driver support 2214# Console display driver support
@@ -1614,43 +2217,77 @@ CONFIG_VGA_CONSOLE=y
1614CONFIG_VGACON_SOFT_SCROLLBACK=y 2217CONFIG_VGACON_SOFT_SCROLLBACK=y
1615CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 2218CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
1616CONFIG_VIDEO_SELECT=y 2219CONFIG_VIDEO_SELECT=y
2220CONFIG_MDA_CONSOLE=m
1617CONFIG_DUMMY_CONSOLE=y 2221CONFIG_DUMMY_CONSOLE=y
1618CONFIG_FRAMEBUFFER_CONSOLE=y 2222CONFIG_FRAMEBUFFER_CONSOLE=y
1619# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set 2223# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
1620# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set 2224CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
1621# CONFIG_FONTS is not set 2225# CONFIG_FONTS is not set
1622CONFIG_FONT_8x8=y 2226CONFIG_FONT_8x8=y
1623CONFIG_FONT_8x16=y 2227CONFIG_FONT_8x16=y
1624# CONFIG_LOGO is not set 2228# CONFIG_LOGO is not set
1625CONFIG_SOUND=y 2229CONFIG_SOUND=m
1626CONFIG_SND=y 2230CONFIG_SND=m
1627CONFIG_SND_TIMER=y 2231CONFIG_SND_TIMER=m
1628CONFIG_SND_PCM=y 2232CONFIG_SND_PCM=m
1629CONFIG_SND_HWDEP=y 2233CONFIG_SND_HWDEP=m
1630CONFIG_SND_RAWMIDI=m 2234CONFIG_SND_RAWMIDI=m
1631CONFIG_SND_SEQUENCER=y 2235CONFIG_SND_SEQUENCER=m
1632CONFIG_SND_SEQ_DUMMY=y 2236CONFIG_SND_SEQ_DUMMY=m
1633# CONFIG_SND_MIXER_OSS is not set 2237CONFIG_SND_OSSEMUL=y
1634# CONFIG_SND_PCM_OSS is not set 2238CONFIG_SND_MIXER_OSS=m
1635# CONFIG_SND_SEQUENCER_OSS is not set 2239CONFIG_SND_PCM_OSS=m
2240CONFIG_SND_PCM_OSS_PLUGINS=y
2241CONFIG_SND_SEQUENCER_OSS=y
1636CONFIG_SND_DYNAMIC_MINORS=y 2242CONFIG_SND_DYNAMIC_MINORS=y
1637# CONFIG_SND_SUPPORT_OLD_API is not set 2243CONFIG_SND_SUPPORT_OLD_API=y
1638CONFIG_SND_VERBOSE_PROCFS=y 2244CONFIG_SND_VERBOSE_PROCFS=y
1639CONFIG_SND_VERBOSE_PRINTK=y 2245CONFIG_SND_VERBOSE_PRINTK=y
1640CONFIG_SND_DEBUG=y 2246CONFIG_SND_DEBUG=y
1641# CONFIG_SND_DEBUG_VERBOSE is not set 2247# CONFIG_SND_DEBUG_VERBOSE is not set
1642CONFIG_SND_PCM_XRUN_DEBUG=y 2248# CONFIG_SND_PCM_XRUN_DEBUG is not set
1643CONFIG_SND_VMASTER=y 2249CONFIG_SND_VMASTER=y
1644CONFIG_SND_AC97_CODEC=y 2250CONFIG_SND_MPU401_UART=m
2251CONFIG_SND_AC97_CODEC=m
1645CONFIG_SND_DRIVERS=y 2252CONFIG_SND_DRIVERS=y
1646# CONFIG_SND_PCSP is not set 2253CONFIG_SND_DUMMY=m
1647# CONFIG_SND_DUMMY is not set 2254CONFIG_SND_VIRMIDI=m
1648# CONFIG_SND_VIRMIDI is not set 2255CONFIG_SND_MTPAV=m
1649# CONFIG_SND_MTPAV is not set 2256CONFIG_SND_SERIAL_U16550=m
1650# CONFIG_SND_SERIAL_U16550 is not set 2257CONFIG_SND_MPU401=m
1651# CONFIG_SND_MPU401 is not set
1652CONFIG_SND_AC97_POWER_SAVE=y 2258CONFIG_SND_AC97_POWER_SAVE=y
1653CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 2259CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
2260CONFIG_SND_ISA=y
2261# CONFIG_SND_ADLIB is not set
2262# CONFIG_SND_AD1816A is not set
2263# CONFIG_SND_AD1848 is not set
2264# CONFIG_SND_ALS100 is not set
2265# CONFIG_SND_AZT2320 is not set
2266# CONFIG_SND_CMI8330 is not set
2267# CONFIG_SND_CS4231 is not set
2268# CONFIG_SND_CS4232 is not set
2269# CONFIG_SND_CS4236 is not set
2270# CONFIG_SND_DT019X is not set
2271# CONFIG_SND_ES968 is not set
2272# CONFIG_SND_ES1688 is not set
2273# CONFIG_SND_ES18XX is not set
2274# CONFIG_SND_SC6000 is not set
2275# CONFIG_SND_GUSCLASSIC is not set
2276# CONFIG_SND_GUSEXTREME is not set
2277# CONFIG_SND_GUSMAX is not set
2278# CONFIG_SND_INTERWAVE is not set
2279# CONFIG_SND_INTERWAVE_STB is not set
2280# CONFIG_SND_OPL3SA2 is not set
2281# CONFIG_SND_OPTI92X_AD1848 is not set
2282# CONFIG_SND_OPTI92X_CS4231 is not set
2283# CONFIG_SND_OPTI93X is not set
2284# CONFIG_SND_MIRO is not set
2285# CONFIG_SND_SB8 is not set
2286# CONFIG_SND_SB16 is not set
2287# CONFIG_SND_SBAWE is not set
2288# CONFIG_SND_SGALAXY is not set
2289# CONFIG_SND_SSCAPE is not set
2290# CONFIG_SND_WAVEFRONT is not set
1654CONFIG_SND_PCI=y 2291CONFIG_SND_PCI=y
1655# CONFIG_SND_AD1889 is not set 2292# CONFIG_SND_AD1889 is not set
1656# CONFIG_SND_ALS300 is not set 2293# CONFIG_SND_ALS300 is not set
@@ -1690,8 +2327,8 @@ CONFIG_SND_PCI=y
1690# CONFIG_SND_ES1938 is not set 2327# CONFIG_SND_ES1938 is not set
1691# CONFIG_SND_ES1968 is not set 2328# CONFIG_SND_ES1968 is not set
1692# CONFIG_SND_FM801 is not set 2329# CONFIG_SND_FM801 is not set
1693CONFIG_SND_HDA_INTEL=y 2330CONFIG_SND_HDA_INTEL=m
1694CONFIG_SND_HDA_HWDEP=y 2331# CONFIG_SND_HDA_HWDEP is not set
1695CONFIG_SND_HDA_CODEC_REALTEK=y 2332CONFIG_SND_HDA_CODEC_REALTEK=y
1696CONFIG_SND_HDA_CODEC_ANALOG=y 2333CONFIG_SND_HDA_CODEC_ANALOG=y
1697CONFIG_SND_HDA_CODEC_SIGMATEL=y 2334CONFIG_SND_HDA_CODEC_SIGMATEL=y
@@ -1701,15 +2338,14 @@ CONFIG_SND_HDA_CODEC_CONEXANT=y
1701CONFIG_SND_HDA_CODEC_CMEDIA=y 2338CONFIG_SND_HDA_CODEC_CMEDIA=y
1702CONFIG_SND_HDA_CODEC_SI3054=y 2339CONFIG_SND_HDA_CODEC_SI3054=y
1703CONFIG_SND_HDA_GENERIC=y 2340CONFIG_SND_HDA_GENERIC=y
1704CONFIG_SND_HDA_POWER_SAVE=y 2341# CONFIG_SND_HDA_POWER_SAVE is not set
1705CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
1706# CONFIG_SND_HDSP is not set 2342# CONFIG_SND_HDSP is not set
1707# CONFIG_SND_HDSPM is not set 2343# CONFIG_SND_HDSPM is not set
1708# CONFIG_SND_HIFIER is not set 2344# CONFIG_SND_HIFIER is not set
1709# CONFIG_SND_ICE1712 is not set 2345# CONFIG_SND_ICE1712 is not set
1710# CONFIG_SND_ICE1724 is not set 2346# CONFIG_SND_ICE1724 is not set
1711CONFIG_SND_INTEL8X0=y 2347CONFIG_SND_INTEL8X0=m
1712# CONFIG_SND_INTEL8X0M is not set 2348CONFIG_SND_INTEL8X0M=m
1713# CONFIG_SND_KORG1212 is not set 2349# CONFIG_SND_KORG1212 is not set
1714# CONFIG_SND_MAESTRO3 is not set 2350# CONFIG_SND_MAESTRO3 is not set
1715# CONFIG_SND_MIXART is not set 2351# CONFIG_SND_MIXART is not set
@@ -1727,18 +2363,18 @@ CONFIG_SND_INTEL8X0=y
1727# CONFIG_SND_VIRTUOSO is not set 2363# CONFIG_SND_VIRTUOSO is not set
1728# CONFIG_SND_VX222 is not set 2364# CONFIG_SND_VX222 is not set
1729# CONFIG_SND_YMFPCI is not set 2365# CONFIG_SND_YMFPCI is not set
2366CONFIG_SND_SPI=y
1730CONFIG_SND_USB=y 2367CONFIG_SND_USB=y
1731CONFIG_SND_USB_AUDIO=m 2368CONFIG_SND_USB_AUDIO=m
1732CONFIG_SND_USB_USX2Y=m 2369# CONFIG_SND_USB_USX2Y is not set
1733CONFIG_SND_USB_CAIAQ=m 2370# CONFIG_SND_USB_CAIAQ is not set
1734CONFIG_SND_USB_CAIAQ_INPUT=y
1735# CONFIG_SND_SOC is not set 2371# CONFIG_SND_SOC is not set
1736# CONFIG_SOUND_PRIME is not set 2372# CONFIG_SOUND_PRIME is not set
1737CONFIG_AC97_BUS=y 2373CONFIG_AC97_BUS=m
1738CONFIG_HID_SUPPORT=y 2374CONFIG_HID_SUPPORT=y
1739CONFIG_HID=y 2375CONFIG_HID=y
1740CONFIG_HID_DEBUG=y 2376# CONFIG_HID_DEBUG is not set
1741CONFIG_HIDRAW=y 2377# CONFIG_HIDRAW is not set
1742 2378
1743# 2379#
1744# USB Input Devices 2380# USB Input Devices
@@ -1749,9 +2385,9 @@ CONFIG_HID_FF=y
1749CONFIG_HID_PID=y 2385CONFIG_HID_PID=y
1750CONFIG_LOGITECH_FF=y 2386CONFIG_LOGITECH_FF=y
1751# CONFIG_LOGIRUMBLEPAD2_FF is not set 2387# CONFIG_LOGIRUMBLEPAD2_FF is not set
1752CONFIG_PANTHERLORD_FF=y 2388# CONFIG_PANTHERLORD_FF is not set
1753CONFIG_THRUSTMASTER_FF=y 2389CONFIG_THRUSTMASTER_FF=y
1754CONFIG_ZEROPLUS_FF=y 2390# CONFIG_ZEROPLUS_FF is not set
1755CONFIG_USB_HIDDEV=y 2391CONFIG_USB_HIDDEV=y
1756CONFIG_USB_SUPPORT=y 2392CONFIG_USB_SUPPORT=y
1757CONFIG_USB_ARCH_HAS_HCD=y 2393CONFIG_USB_ARCH_HAS_HCD=y
@@ -1759,13 +2395,13 @@ CONFIG_USB_ARCH_HAS_OHCI=y
1759CONFIG_USB_ARCH_HAS_EHCI=y 2395CONFIG_USB_ARCH_HAS_EHCI=y
1760CONFIG_USB=y 2396CONFIG_USB=y
1761# CONFIG_USB_DEBUG is not set 2397# CONFIG_USB_DEBUG is not set
1762CONFIG_USB_ANNOUNCE_NEW_DEVICES=y 2398# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
1763 2399
1764# 2400#
1765# Miscellaneous USB options 2401# Miscellaneous USB options
1766# 2402#
1767CONFIG_USB_DEVICEFS=y 2403CONFIG_USB_DEVICEFS=y
1768# CONFIG_USB_DEVICE_CLASS is not set 2404CONFIG_USB_DEVICE_CLASS=y
1769# CONFIG_USB_DYNAMIC_MINORS is not set 2405# CONFIG_USB_DYNAMIC_MINORS is not set
1770CONFIG_USB_SUSPEND=y 2406CONFIG_USB_SUSPEND=y
1771# CONFIG_USB_OTG is not set 2407# CONFIG_USB_OTG is not set
@@ -1780,14 +2416,14 @@ CONFIG_USB_EHCI_ROOT_HUB_TT=y
1780CONFIG_USB_EHCI_TT_NEWSCHED=y 2416CONFIG_USB_EHCI_TT_NEWSCHED=y
1781# CONFIG_USB_ISP116X_HCD is not set 2417# CONFIG_USB_ISP116X_HCD is not set
1782# CONFIG_USB_ISP1760_HCD is not set 2418# CONFIG_USB_ISP1760_HCD is not set
1783CONFIG_USB_OHCI_HCD=m 2419CONFIG_USB_OHCI_HCD=y
1784# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 2420# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
1785# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set 2421# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
1786CONFIG_USB_OHCI_LITTLE_ENDIAN=y 2422CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1787CONFIG_USB_UHCI_HCD=m 2423CONFIG_USB_UHCI_HCD=y
1788CONFIG_USB_U132_HCD=m 2424# CONFIG_USB_SL811_HCD is not set
1789CONFIG_USB_SL811_HCD=m
1790# CONFIG_USB_R8A66597_HCD is not set 2425# CONFIG_USB_R8A66597_HCD is not set
2426# CONFIG_USB_GADGET_MUSB_HDRC is not set
1791 2427
1792# 2428#
1793# USB Device Class drivers 2429# USB Device Class drivers
@@ -1807,7 +2443,7 @@ CONFIG_USB_STORAGE=y
1807# CONFIG_USB_STORAGE_DEBUG is not set 2443# CONFIG_USB_STORAGE_DEBUG is not set
1808CONFIG_USB_STORAGE_DATAFAB=y 2444CONFIG_USB_STORAGE_DATAFAB=y
1809CONFIG_USB_STORAGE_FREECOM=y 2445CONFIG_USB_STORAGE_FREECOM=y
1810CONFIG_USB_STORAGE_ISD200=y 2446# CONFIG_USB_STORAGE_ISD200 is not set
1811CONFIG_USB_STORAGE_DPCM=y 2447CONFIG_USB_STORAGE_DPCM=y
1812CONFIG_USB_STORAGE_USBAT=y 2448CONFIG_USB_STORAGE_USBAT=y
1813CONFIG_USB_STORAGE_SDDR09=y 2449CONFIG_USB_STORAGE_SDDR09=y
@@ -1815,7 +2451,7 @@ CONFIG_USB_STORAGE_SDDR55=y
1815CONFIG_USB_STORAGE_JUMPSHOT=y 2451CONFIG_USB_STORAGE_JUMPSHOT=y
1816CONFIG_USB_STORAGE_ALAUDA=y 2452CONFIG_USB_STORAGE_ALAUDA=y
1817# CONFIG_USB_STORAGE_ONETOUCH is not set 2453# CONFIG_USB_STORAGE_ONETOUCH is not set
1818CONFIG_USB_STORAGE_KARMA=y 2454# CONFIG_USB_STORAGE_KARMA is not set
1819# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set 2455# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
1820# CONFIG_USB_LIBUSUAL is not set 2456# CONFIG_USB_LIBUSUAL is not set
1821 2457
@@ -1831,10 +2467,10 @@ CONFIG_USB_MICROTEK=m
1831CONFIG_USB_SERIAL=m 2467CONFIG_USB_SERIAL=m
1832CONFIG_USB_EZUSB=y 2468CONFIG_USB_EZUSB=y
1833CONFIG_USB_SERIAL_GENERIC=y 2469CONFIG_USB_SERIAL_GENERIC=y
1834CONFIG_USB_SERIAL_AIRCABLE=m 2470# CONFIG_USB_SERIAL_AIRCABLE is not set
1835CONFIG_USB_SERIAL_ARK3116=m 2471CONFIG_USB_SERIAL_ARK3116=m
1836CONFIG_USB_SERIAL_BELKIN=m 2472CONFIG_USB_SERIAL_BELKIN=m
1837CONFIG_USB_SERIAL_CH341=m 2473# CONFIG_USB_SERIAL_CH341 is not set
1838CONFIG_USB_SERIAL_WHITEHEAT=m 2474CONFIG_USB_SERIAL_WHITEHEAT=m
1839CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m 2475CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
1840CONFIG_USB_SERIAL_CP2101=m 2476CONFIG_USB_SERIAL_CP2101=m
@@ -1849,7 +2485,7 @@ CONFIG_USB_SERIAL_EDGEPORT=m
1849CONFIG_USB_SERIAL_EDGEPORT_TI=m 2485CONFIG_USB_SERIAL_EDGEPORT_TI=m
1850CONFIG_USB_SERIAL_GARMIN=m 2486CONFIG_USB_SERIAL_GARMIN=m
1851CONFIG_USB_SERIAL_IPW=m 2487CONFIG_USB_SERIAL_IPW=m
1852CONFIG_USB_SERIAL_IUU=m 2488# CONFIG_USB_SERIAL_IUU is not set
1853CONFIG_USB_SERIAL_KEYSPAN_PDA=m 2489CONFIG_USB_SERIAL_KEYSPAN_PDA=m
1854CONFIG_USB_SERIAL_KEYSPAN=m 2490CONFIG_USB_SERIAL_KEYSPAN=m
1855CONFIG_USB_SERIAL_KEYSPAN_MPR=y 2491CONFIG_USB_SERIAL_KEYSPAN_MPR=y
@@ -1867,12 +2503,12 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
1867CONFIG_USB_SERIAL_KLSI=m 2503CONFIG_USB_SERIAL_KLSI=m
1868CONFIG_USB_SERIAL_KOBIL_SCT=m 2504CONFIG_USB_SERIAL_KOBIL_SCT=m
1869CONFIG_USB_SERIAL_MCT_U232=m 2505CONFIG_USB_SERIAL_MCT_U232=m
1870CONFIG_USB_SERIAL_MOS7720=m 2506# CONFIG_USB_SERIAL_MOS7720 is not set
1871CONFIG_USB_SERIAL_MOS7840=m 2507# CONFIG_USB_SERIAL_MOS7840 is not set
1872# CONFIG_USB_SERIAL_MOTOROLA is not set 2508# CONFIG_USB_SERIAL_MOTOROLA is not set
1873CONFIG_USB_SERIAL_NAVMAN=m 2509CONFIG_USB_SERIAL_NAVMAN=m
1874CONFIG_USB_SERIAL_PL2303=m 2510CONFIG_USB_SERIAL_PL2303=m
1875CONFIG_USB_SERIAL_OTI6858=m 2511# CONFIG_USB_SERIAL_OTI6858 is not set
1876# CONFIG_USB_SERIAL_SPCP8X5 is not set 2512# CONFIG_USB_SERIAL_SPCP8X5 is not set
1877CONFIG_USB_SERIAL_HP4X=m 2513CONFIG_USB_SERIAL_HP4X=m
1878CONFIG_USB_SERIAL_SAFE=m 2514CONFIG_USB_SERIAL_SAFE=m
@@ -1883,69 +2519,87 @@ CONFIG_USB_SERIAL_CYBERJACK=m
1883CONFIG_USB_SERIAL_XIRCOM=m 2519CONFIG_USB_SERIAL_XIRCOM=m
1884CONFIG_USB_SERIAL_OPTION=m 2520CONFIG_USB_SERIAL_OPTION=m
1885CONFIG_USB_SERIAL_OMNINET=m 2521CONFIG_USB_SERIAL_OMNINET=m
1886CONFIG_USB_SERIAL_DEBUG=m 2522# CONFIG_USB_SERIAL_DEBUG is not set
1887 2523
1888# 2524#
1889# USB Miscellaneous drivers 2525# USB Miscellaneous drivers
1890# 2526#
1891CONFIG_USB_EMI62=m 2527CONFIG_USB_EMI62=m
1892CONFIG_USB_EMI26=m 2528CONFIG_USB_EMI26=m
1893CONFIG_USB_ADUTUX=m 2529# CONFIG_USB_ADUTUX is not set
1894# CONFIG_USB_RIO500 is not set 2530CONFIG_USB_RIO500=m
1895CONFIG_USB_LEGOTOWER=m 2531CONFIG_USB_LEGOTOWER=m
1896CONFIG_USB_LCD=m 2532CONFIG_USB_LCD=m
1897CONFIG_USB_BERRY_CHARGE=m 2533# CONFIG_USB_BERRY_CHARGE is not set
1898CONFIG_USB_LED=m 2534CONFIG_USB_LED=m
1899# CONFIG_USB_CYPRESS_CY7C63 is not set 2535CONFIG_USB_CYPRESS_CY7C63=m
1900# CONFIG_USB_CYTHERM is not set 2536CONFIG_USB_CYTHERM=m
1901CONFIG_USB_PHIDGET=m 2537# CONFIG_USB_PHIDGET is not set
1902CONFIG_USB_PHIDGETKIT=m
1903CONFIG_USB_PHIDGETMOTORCONTROL=m
1904CONFIG_USB_PHIDGETSERVO=m
1905CONFIG_USB_IDMOUSE=m 2538CONFIG_USB_IDMOUSE=m
1906CONFIG_USB_FTDI_ELAN=m 2539# CONFIG_USB_FTDI_ELAN is not set
1907CONFIG_USB_APPLEDISPLAY=m 2540CONFIG_USB_APPLEDISPLAY=m
1908CONFIG_USB_SISUSBVGA=m 2541CONFIG_USB_SISUSBVGA=m
1909CONFIG_USB_SISUSBVGA_CON=y 2542CONFIG_USB_SISUSBVGA_CON=y
1910CONFIG_USB_LD=m 2543CONFIG_USB_LD=m
1911CONFIG_USB_TRANCEVIBRATOR=m 2544# CONFIG_USB_TRANCEVIBRATOR is not set
1912CONFIG_USB_IOWARRIOR=m 2545# CONFIG_USB_IOWARRIOR is not set
1913# CONFIG_USB_TEST is not set 2546# CONFIG_USB_TEST is not set
1914# CONFIG_USB_ISIGHTFW is not set 2547# CONFIG_USB_ISIGHTFW is not set
1915# CONFIG_USB_GADGET is not set 2548CONFIG_USB_ATM=m
1916CONFIG_MMC=m 2549CONFIG_USB_SPEEDTOUCH=m
2550CONFIG_USB_CXACRU=m
2551CONFIG_USB_UEAGLEATM=m
2552CONFIG_USB_XUSBATM=m
2553CONFIG_USB_GADGET=y
2554# CONFIG_USB_GADGET_DEBUG is not set
2555CONFIG_USB_GADGET_DEBUG_FILES=y
2556# CONFIG_USB_GADGET_DEBUG_FS is not set
2557CONFIG_USB_GADGET_SELECTED=y
2558CONFIG_USB_GADGET_AMD5536UDC=y
2559CONFIG_USB_AMD5536UDC=y
2560# CONFIG_USB_GADGET_ATMEL_USBA is not set
2561# CONFIG_USB_GADGET_FSL_USB2 is not set
2562# CONFIG_USB_GADGET_NET2280 is not set
2563# CONFIG_USB_GADGET_PXA25X is not set
2564# CONFIG_USB_GADGET_M66592 is not set
2565# CONFIG_USB_GADGET_PXA27X is not set
2566# CONFIG_USB_GADGET_GOKU is not set
2567# CONFIG_USB_GADGET_LH7A40X is not set
2568# CONFIG_USB_GADGET_OMAP is not set
2569# CONFIG_USB_GADGET_S3C2410 is not set
2570# CONFIG_USB_GADGET_AT91 is not set
2571# CONFIG_USB_GADGET_DUMMY_HCD is not set
2572CONFIG_USB_GADGET_DUALSPEED=y
2573# CONFIG_USB_ZERO is not set
2574CONFIG_USB_ETH=m
2575CONFIG_USB_ETH_RNDIS=y
2576# CONFIG_USB_GADGETFS is not set
2577CONFIG_USB_FILE_STORAGE=m
2578CONFIG_USB_FILE_STORAGE_TEST=y
2579# CONFIG_USB_G_SERIAL is not set
2580# CONFIG_USB_MIDI_GADGET is not set
2581# CONFIG_USB_G_PRINTER is not set
2582# CONFIG_USB_CDC_COMPOSITE is not set
2583CONFIG_MMC=y
1917# CONFIG_MMC_DEBUG is not set 2584# CONFIG_MMC_DEBUG is not set
1918# CONFIG_MMC_UNSAFE_RESUME is not set 2585CONFIG_MMC_UNSAFE_RESUME=y
1919 2586
1920# 2587#
1921# MMC/SD Card Drivers 2588# MMC/SD Card Drivers
1922# 2589#
1923CONFIG_MMC_BLOCK=m 2590CONFIG_MMC_BLOCK=y
1924CONFIG_MMC_BLOCK_BOUNCE=y 2591CONFIG_MMC_BLOCK_BOUNCE=y
1925CONFIG_SDIO_UART=m 2592# CONFIG_SDIO_UART is not set
1926# CONFIG_MMC_TEST is not set 2593# CONFIG_MMC_TEST is not set
1927 2594
1928# 2595#
1929# MMC/SD Host Controller Drivers 2596# MMC/SD Host Controller Drivers
1930# 2597#
1931CONFIG_MMC_SDHCI=m 2598CONFIG_MMC_SDHCI=y
1932# CONFIG_MMC_SDHCI_PCI is not set 2599# CONFIG_MMC_SDHCI_PCI is not set
1933CONFIG_MMC_WBSD=m 2600# CONFIG_MMC_WBSD is not set
1934CONFIG_MMC_TIFM_SD=m 2601# CONFIG_MMC_TIFM_SD is not set
1935CONFIG_MEMSTICK=m 2602# CONFIG_MEMSTICK is not set
1936CONFIG_MEMSTICK_DEBUG=y
1937
1938#
1939# MemoryStick drivers
1940#
1941# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
1942CONFIG_MSPRO_BLOCK=m
1943
1944#
1945# MemoryStick Host Controller Drivers
1946#
1947# CONFIG_MEMSTICK_TIFM_MS is not set
1948# CONFIG_MEMSTICK_JMICRON_38X is not set
1949CONFIG_NEW_LEDS=y 2603CONFIG_NEW_LEDS=y
1950CONFIG_LEDS_CLASS=m 2604CONFIG_LEDS_CLASS=m
1951 2605
@@ -1960,16 +2614,14 @@ CONFIG_LEDS_CLASS=m
1960# LED Triggers 2614# LED Triggers
1961# 2615#
1962CONFIG_LEDS_TRIGGERS=y 2616CONFIG_LEDS_TRIGGERS=y
1963# CONFIG_LEDS_TRIGGER_TIMER is not set 2617CONFIG_LEDS_TRIGGER_TIMER=m
1964# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set 2618CONFIG_LEDS_TRIGGER_HEARTBEAT=m
1965# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set 2619# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
1966# CONFIG_ACCESSIBILITY is not set 2620# CONFIG_ACCESSIBILITY is not set
1967# CONFIG_INFINIBAND is not set 2621# CONFIG_INFINIBAND is not set
1968# CONFIG_EDAC is not set 2622# CONFIG_EDAC is not set
1969CONFIG_RTC_LIB=y 2623CONFIG_RTC_LIB=m
1970CONFIG_RTC_CLASS=y 2624CONFIG_RTC_CLASS=m
1971# CONFIG_RTC_HCTOSYS is not set
1972# CONFIG_RTC_DEBUG is not set
1973 2625
1974# 2626#
1975# RTC interfaces 2627# RTC interfaces
@@ -1977,21 +2629,21 @@ CONFIG_RTC_CLASS=y
1977CONFIG_RTC_INTF_SYSFS=y 2629CONFIG_RTC_INTF_SYSFS=y
1978CONFIG_RTC_INTF_PROC=y 2630CONFIG_RTC_INTF_PROC=y
1979CONFIG_RTC_INTF_DEV=y 2631CONFIG_RTC_INTF_DEV=y
1980# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set 2632CONFIG_RTC_INTF_DEV_UIE_EMUL=y
1981# CONFIG_RTC_DRV_TEST is not set 2633CONFIG_RTC_DRV_TEST=m
1982 2634
1983# 2635#
1984# I2C RTC drivers 2636# I2C RTC drivers
1985# 2637#
1986# CONFIG_RTC_DRV_DS1307 is not set 2638CONFIG_RTC_DRV_DS1307=m
1987# CONFIG_RTC_DRV_DS1374 is not set 2639# CONFIG_RTC_DRV_DS1374 is not set
1988# CONFIG_RTC_DRV_DS1672 is not set 2640CONFIG_RTC_DRV_DS1672=m
1989# CONFIG_RTC_DRV_MAX6900 is not set 2641# CONFIG_RTC_DRV_MAX6900 is not set
1990# CONFIG_RTC_DRV_RS5C372 is not set 2642CONFIG_RTC_DRV_RS5C372=m
1991# CONFIG_RTC_DRV_ISL1208 is not set 2643CONFIG_RTC_DRV_ISL1208=m
1992# CONFIG_RTC_DRV_X1205 is not set 2644CONFIG_RTC_DRV_X1205=m
1993# CONFIG_RTC_DRV_PCF8563 is not set 2645CONFIG_RTC_DRV_PCF8563=m
1994# CONFIG_RTC_DRV_PCF8583 is not set 2646CONFIG_RTC_DRV_PCF8583=m
1995# CONFIG_RTC_DRV_M41T80 is not set 2647# CONFIG_RTC_DRV_M41T80 is not set
1996# CONFIG_RTC_DRV_S35390A is not set 2648# CONFIG_RTC_DRV_S35390A is not set
1997# CONFIG_RTC_DRV_FM3130 is not set 2649# CONFIG_RTC_DRV_FM3130 is not set
@@ -1999,18 +2651,23 @@ CONFIG_RTC_INTF_DEV=y
1999# 2651#
2000# SPI RTC drivers 2652# SPI RTC drivers
2001# 2653#
2654# CONFIG_RTC_DRV_M41T94 is not set
2655# CONFIG_RTC_DRV_DS1305 is not set
2656CONFIG_RTC_DRV_MAX6902=m
2657# CONFIG_RTC_DRV_R9701 is not set
2658CONFIG_RTC_DRV_RS5C348=m
2002 2659
2003# 2660#
2004# Platform RTC drivers 2661# Platform RTC drivers
2005# 2662#
2006CONFIG_RTC_DRV_CMOS=y 2663# CONFIG_RTC_DRV_CMOS is not set
2007# CONFIG_RTC_DRV_DS1511 is not set 2664# CONFIG_RTC_DRV_DS1511 is not set
2008# CONFIG_RTC_DRV_DS1553 is not set 2665CONFIG_RTC_DRV_DS1553=m
2009# CONFIG_RTC_DRV_DS1742 is not set 2666CONFIG_RTC_DRV_DS1742=m
2010# CONFIG_RTC_DRV_STK17TA8 is not set 2667# CONFIG_RTC_DRV_STK17TA8 is not set
2011# CONFIG_RTC_DRV_M48T86 is not set 2668CONFIG_RTC_DRV_M48T86=m
2012# CONFIG_RTC_DRV_M48T59 is not set 2669# CONFIG_RTC_DRV_M48T59 is not set
2013# CONFIG_RTC_DRV_V3020 is not set 2670CONFIG_RTC_DRV_V3020=m
2014 2671
2015# 2672#
2016# on-CPU RTC drivers 2673# on-CPU RTC drivers
@@ -2021,17 +2678,23 @@ CONFIG_RTC_DRV_CMOS=y
2021# 2678#
2022# Firmware Drivers 2679# Firmware Drivers
2023# 2680#
2024# CONFIG_EDD is not set 2681CONFIG_EDD=m
2682# CONFIG_EDD_OFF is not set
2025CONFIG_FIRMWARE_MEMMAP=y 2683CONFIG_FIRMWARE_MEMMAP=y
2684# CONFIG_EFI_VARS is not set
2026# CONFIG_DELL_RBU is not set 2685# CONFIG_DELL_RBU is not set
2027# CONFIG_DCDBAS is not set 2686# CONFIG_DCDBAS is not set
2028# CONFIG_DMIID is not set 2687CONFIG_DMIID=y
2029# CONFIG_ISCSI_IBFT_FIND is not set 2688# CONFIG_ISCSI_IBFT_FIND is not set
2030 2689
2031# 2690#
2032# File systems 2691# File systems
2033# 2692#
2034CONFIG_EXT2_FS=m 2693CONFIG_EXT2_FS=y
2694CONFIG_EXT2_FS_XATTR=y
2695CONFIG_EXT2_FS_POSIX_ACL=y
2696CONFIG_EXT2_FS_SECURITY=y
2697# CONFIG_EXT2_FS_XIP is not set
2035CONFIG_EXT3_FS=y 2698CONFIG_EXT3_FS=y
2036CONFIG_EXT3_FS_XATTR=y 2699CONFIG_EXT3_FS_XATTR=y
2037CONFIG_EXT3_FS_POSIX_ACL=y 2700CONFIG_EXT3_FS_POSIX_ACL=y
@@ -2040,22 +2703,32 @@ CONFIG_EXT3_FS_SECURITY=y
2040CONFIG_JBD=y 2703CONFIG_JBD=y
2041# CONFIG_JBD_DEBUG is not set 2704# CONFIG_JBD_DEBUG is not set
2042CONFIG_FS_MBCACHE=y 2705CONFIG_FS_MBCACHE=y
2043# CONFIG_REISERFS_FS is not set 2706CONFIG_REISERFS_FS=m
2044# CONFIG_JFS_FS is not set 2707# CONFIG_REISERFS_CHECK is not set
2708# CONFIG_REISERFS_PROC_INFO is not set
2709CONFIG_REISERFS_FS_XATTR=y
2710CONFIG_REISERFS_FS_POSIX_ACL=y
2711CONFIG_REISERFS_FS_SECURITY=y
2712CONFIG_JFS_FS=m
2713CONFIG_JFS_POSIX_ACL=y
2714CONFIG_JFS_SECURITY=y
2715# CONFIG_JFS_DEBUG is not set
2716CONFIG_JFS_STATISTICS=y
2045CONFIG_FS_POSIX_ACL=y 2717CONFIG_FS_POSIX_ACL=y
2046# CONFIG_XFS_FS is not set 2718# CONFIG_XFS_FS is not set
2719# CONFIG_GFS2_FS is not set
2047# CONFIG_OCFS2_FS is not set 2720# CONFIG_OCFS2_FS is not set
2048CONFIG_DNOTIFY=y 2721CONFIG_DNOTIFY=y
2049CONFIG_INOTIFY=y 2722CONFIG_INOTIFY=y
2050CONFIG_INOTIFY_USER=y 2723CONFIG_INOTIFY_USER=y
2051CONFIG_QUOTA=y 2724CONFIG_QUOTA=y
2052CONFIG_QUOTA_NETLINK_INTERFACE=y 2725# CONFIG_QUOTA_NETLINK_INTERFACE is not set
2053# CONFIG_PRINT_QUOTA_WARNING is not set 2726CONFIG_PRINT_QUOTA_WARNING=y
2054# CONFIG_QFMT_V1 is not set 2727CONFIG_QFMT_V1=m
2055CONFIG_QFMT_V2=y 2728CONFIG_QFMT_V2=m
2056CONFIG_QUOTACTL=y 2729CONFIG_QUOTACTL=y
2057# CONFIG_AUTOFS_FS is not set 2730CONFIG_AUTOFS_FS=m
2058# CONFIG_AUTOFS4_FS is not set 2731CONFIG_AUTOFS4_FS=m
2059CONFIG_FUSE_FS=m 2732CONFIG_FUSE_FS=m
2060CONFIG_GENERIC_ACL=y 2733CONFIG_GENERIC_ACL=y
2061 2734
@@ -2075,15 +2748,16 @@ CONFIG_FAT_FS=y
2075CONFIG_MSDOS_FS=y 2748CONFIG_MSDOS_FS=y
2076CONFIG_VFAT_FS=y 2749CONFIG_VFAT_FS=y
2077CONFIG_FAT_DEFAULT_CODEPAGE=437 2750CONFIG_FAT_DEFAULT_CODEPAGE=437
2078CONFIG_FAT_DEFAULT_IOCHARSET="ascii" 2751CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
2079# CONFIG_NTFS_FS is not set 2752CONFIG_NTFS_FS=m
2753# CONFIG_NTFS_DEBUG is not set
2754CONFIG_NTFS_RW=y
2080 2755
2081# 2756#
2082# Pseudo filesystems 2757# Pseudo filesystems
2083# 2758#
2084CONFIG_PROC_FS=y 2759CONFIG_PROC_FS=y
2085CONFIG_PROC_KCORE=y 2760CONFIG_PROC_KCORE=y
2086CONFIG_PROC_VMCORE=y
2087CONFIG_PROC_SYSCTL=y 2761CONFIG_PROC_SYSCTL=y
2088CONFIG_SYSFS=y 2762CONFIG_SYSFS=y
2089CONFIG_TMPFS=y 2763CONFIG_TMPFS=y
@@ -2095,28 +2769,74 @@ CONFIG_CONFIGFS_FS=m
2095# 2769#
2096# Miscellaneous filesystems 2770# Miscellaneous filesystems
2097# 2771#
2098# CONFIG_ADFS_FS is not set 2772CONFIG_ADFS_FS=m
2099# CONFIG_AFFS_FS is not set 2773# CONFIG_ADFS_FS_RW is not set
2774CONFIG_AFFS_FS=m
2100# CONFIG_ECRYPT_FS is not set 2775# CONFIG_ECRYPT_FS is not set
2101# CONFIG_HFS_FS is not set 2776CONFIG_HFS_FS=m
2102# CONFIG_HFSPLUS_FS is not set 2777CONFIG_HFSPLUS_FS=m
2103# CONFIG_BEFS_FS is not set 2778CONFIG_BEFS_FS=m
2104# CONFIG_BFS_FS is not set 2779# CONFIG_BEFS_DEBUG is not set
2105# CONFIG_EFS_FS is not set 2780CONFIG_BFS_FS=m
2106# CONFIG_CRAMFS is not set 2781CONFIG_EFS_FS=m
2107# CONFIG_VXFS_FS is not set 2782CONFIG_JFFS2_FS=m
2783CONFIG_JFFS2_FS_DEBUG=0
2784CONFIG_JFFS2_FS_WRITEBUFFER=y
2785# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
2786CONFIG_JFFS2_SUMMARY=y
2787CONFIG_JFFS2_FS_XATTR=y
2788CONFIG_JFFS2_FS_POSIX_ACL=y
2789CONFIG_JFFS2_FS_SECURITY=y
2790CONFIG_JFFS2_COMPRESSION_OPTIONS=y
2791CONFIG_JFFS2_ZLIB=y
2792# CONFIG_JFFS2_LZO is not set
2793CONFIG_JFFS2_RTIME=y
2794# CONFIG_JFFS2_RUBIN is not set
2795# CONFIG_JFFS2_CMODE_NONE is not set
2796CONFIG_JFFS2_CMODE_PRIORITY=y
2797# CONFIG_JFFS2_CMODE_SIZE is not set
2798# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
2799CONFIG_CRAMFS=y
2800CONFIG_VXFS_FS=m
2108# CONFIG_MINIX_FS is not set 2801# CONFIG_MINIX_FS is not set
2109# CONFIG_OMFS_FS is not set 2802# CONFIG_OMFS_FS is not set
2110# CONFIG_HPFS_FS is not set 2803CONFIG_HPFS_FS=m
2111# CONFIG_QNX4FS_FS is not set 2804CONFIG_QNX4FS_FS=m
2112# CONFIG_ROMFS_FS is not set 2805CONFIG_ROMFS_FS=m
2113# CONFIG_SYSV_FS is not set 2806CONFIG_SYSV_FS=m
2114# CONFIG_UFS_FS is not set 2807CONFIG_UFS_FS=m
2808CONFIG_UFS_FS_WRITE=y
2809# CONFIG_UFS_DEBUG is not set
2115CONFIG_NETWORK_FILESYSTEMS=y 2810CONFIG_NETWORK_FILESYSTEMS=y
2116# CONFIG_NFS_FS is not set 2811CONFIG_NFS_FS=m
2117# CONFIG_NFSD is not set 2812CONFIG_NFS_V3=y
2118# CONFIG_SMB_FS is not set 2813CONFIG_NFS_V3_ACL=y
2119# CONFIG_CIFS is not set 2814CONFIG_NFS_V4=y
2815CONFIG_NFSD=m
2816CONFIG_NFSD_V2_ACL=y
2817CONFIG_NFSD_V3=y
2818CONFIG_NFSD_V3_ACL=y
2819CONFIG_NFSD_V4=y
2820CONFIG_LOCKD=m
2821CONFIG_LOCKD_V4=y
2822CONFIG_EXPORTFS=m
2823CONFIG_NFS_ACL_SUPPORT=m
2824CONFIG_NFS_COMMON=y
2825CONFIG_SUNRPC=m
2826CONFIG_SUNRPC_GSS=m
2827CONFIG_RPCSEC_GSS_KRB5=m
2828CONFIG_RPCSEC_GSS_SPKM3=m
2829CONFIG_SMB_FS=y
2830# CONFIG_SMB_NLS_DEFAULT is not set
2831CONFIG_CIFS=m
2832CONFIG_CIFS_STATS=y
2833CONFIG_CIFS_STATS2=y
2834CONFIG_CIFS_WEAK_PW_HASH=y
2835# CONFIG_CIFS_UPCALL is not set
2836CONFIG_CIFS_XATTR=y
2837CONFIG_CIFS_POSIX=y
2838# CONFIG_CIFS_DEBUG2 is not set
2839# CONFIG_CIFS_EXPERIMENTAL is not set
2120# CONFIG_NCP_FS is not set 2840# CONFIG_NCP_FS is not set
2121# CONFIG_CODA_FS is not set 2841# CONFIG_CODA_FS is not set
2122# CONFIG_AFS_FS is not set 2842# CONFIG_AFS_FS is not set
@@ -2127,17 +2847,18 @@ CONFIG_NETWORK_FILESYSTEMS=y
2127CONFIG_PARTITION_ADVANCED=y 2847CONFIG_PARTITION_ADVANCED=y
2128# CONFIG_ACORN_PARTITION is not set 2848# CONFIG_ACORN_PARTITION is not set
2129CONFIG_OSF_PARTITION=y 2849CONFIG_OSF_PARTITION=y
2130CONFIG_AMIGA_PARTITION=y 2850# CONFIG_AMIGA_PARTITION is not set
2131# CONFIG_ATARI_PARTITION is not set 2851CONFIG_ATARI_PARTITION=y
2132CONFIG_MAC_PARTITION=y 2852CONFIG_MAC_PARTITION=y
2133CONFIG_MSDOS_PARTITION=y 2853CONFIG_MSDOS_PARTITION=y
2134CONFIG_BSD_DISKLABEL=y 2854CONFIG_BSD_DISKLABEL=y
2135CONFIG_MINIX_SUBPARTITION=y 2855# CONFIG_MINIX_SUBPARTITION is not set
2136CONFIG_SOLARIS_X86_PARTITION=y 2856CONFIG_SOLARIS_X86_PARTITION=y
2137CONFIG_UNIXWARE_DISKLABEL=y 2857CONFIG_UNIXWARE_DISKLABEL=y
2138# CONFIG_LDM_PARTITION is not set 2858CONFIG_LDM_PARTITION=y
2859# CONFIG_LDM_DEBUG is not set
2139CONFIG_SGI_PARTITION=y 2860CONFIG_SGI_PARTITION=y
2140# CONFIG_ULTRIX_PARTITION is not set 2861CONFIG_ULTRIX_PARTITION=y
2141CONFIG_SUN_PARTITION=y 2862CONFIG_SUN_PARTITION=y
2142CONFIG_KARMA_PARTITION=y 2863CONFIG_KARMA_PARTITION=y
2143CONFIG_EFI_PARTITION=y 2864CONFIG_EFI_PARTITION=y
@@ -2168,7 +2889,7 @@ CONFIG_NLS_ISO8859_8=m
2168CONFIG_NLS_CODEPAGE_1250=m 2889CONFIG_NLS_CODEPAGE_1250=m
2169CONFIG_NLS_CODEPAGE_1251=m 2890CONFIG_NLS_CODEPAGE_1251=m
2170CONFIG_NLS_ASCII=y 2891CONFIG_NLS_ASCII=y
2171CONFIG_NLS_ISO8859_1=m 2892CONFIG_NLS_ISO8859_1=y
2172CONFIG_NLS_ISO8859_2=m 2893CONFIG_NLS_ISO8859_2=m
2173CONFIG_NLS_ISO8859_3=m 2894CONFIG_NLS_ISO8859_3=m
2174CONFIG_NLS_ISO8859_4=m 2895CONFIG_NLS_ISO8859_4=m
@@ -2188,21 +2909,21 @@ CONFIG_NLS_UTF8=m
2188# Kernel hacking 2909# Kernel hacking
2189# 2910#
2190CONFIG_TRACE_IRQFLAGS_SUPPORT=y 2911CONFIG_TRACE_IRQFLAGS_SUPPORT=y
2191CONFIG_PRINTK_TIME=y 2912# CONFIG_PRINTK_TIME is not set
2192# CONFIG_ENABLE_WARN_DEPRECATED is not set 2913CONFIG_ENABLE_WARN_DEPRECATED=y
2193CONFIG_ENABLE_MUST_CHECK=y 2914CONFIG_ENABLE_MUST_CHECK=y
2194CONFIG_FRAME_WARN=1024 2915CONFIG_FRAME_WARN=1024
2195CONFIG_MAGIC_SYSRQ=y 2916CONFIG_MAGIC_SYSRQ=y
2196CONFIG_UNUSED_SYMBOLS=y 2917# CONFIG_UNUSED_SYMBOLS is not set
2197CONFIG_DEBUG_FS=y 2918CONFIG_DEBUG_FS=y
2198# CONFIG_HEADERS_CHECK is not set 2919# CONFIG_HEADERS_CHECK is not set
2199CONFIG_DEBUG_KERNEL=y 2920CONFIG_DEBUG_KERNEL=y
2200CONFIG_DEBUG_SHIRQ=y 2921# CONFIG_DEBUG_SHIRQ is not set
2201CONFIG_DETECT_SOFTLOCKUP=y 2922CONFIG_DETECT_SOFTLOCKUP=y
2202# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 2923# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
2203CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 2924CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
2204CONFIG_SCHED_DEBUG=y 2925CONFIG_SCHED_DEBUG=y
2205CONFIG_SCHEDSTATS=y 2926# CONFIG_SCHEDSTATS is not set
2206CONFIG_TIMER_STATS=y 2927CONFIG_TIMER_STATS=y
2207# CONFIG_DEBUG_OBJECTS is not set 2928# CONFIG_DEBUG_OBJECTS is not set
2208# CONFIG_DEBUG_SLAB is not set 2929# CONFIG_DEBUG_SLAB is not set
@@ -2213,9 +2934,8 @@ CONFIG_TIMER_STATS=y
2213# CONFIG_DEBUG_LOCK_ALLOC is not set 2934# CONFIG_DEBUG_LOCK_ALLOC is not set
2214# CONFIG_PROVE_LOCKING is not set 2935# CONFIG_PROVE_LOCKING is not set
2215# CONFIG_LOCK_STAT is not set 2936# CONFIG_LOCK_STAT is not set
2216CONFIG_DEBUG_SPINLOCK_SLEEP=y 2937# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
2217# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 2938# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
2218CONFIG_STACKTRACE=y
2219# CONFIG_DEBUG_KOBJECT is not set 2939# CONFIG_DEBUG_KOBJECT is not set
2220# CONFIG_DEBUG_HIGHMEM is not set 2940# CONFIG_DEBUG_HIGHMEM is not set
2221CONFIG_DEBUG_BUGVERBOSE=y 2941CONFIG_DEBUG_BUGVERBOSE=y
@@ -2223,24 +2943,22 @@ CONFIG_DEBUG_BUGVERBOSE=y
2223# CONFIG_DEBUG_VM is not set 2943# CONFIG_DEBUG_VM is not set
2224# CONFIG_DEBUG_WRITECOUNT is not set 2944# CONFIG_DEBUG_WRITECOUNT is not set
2225CONFIG_DEBUG_MEMORY_INIT=y 2945CONFIG_DEBUG_MEMORY_INIT=y
2226CONFIG_DEBUG_LIST=y 2946# CONFIG_DEBUG_LIST is not set
2227# CONFIG_DEBUG_SG is not set 2947# CONFIG_DEBUG_SG is not set
2228CONFIG_FRAME_POINTER=y 2948# CONFIG_FRAME_POINTER is not set
2229CONFIG_BOOT_PRINTK_DELAY=y 2949# CONFIG_BOOT_PRINTK_DELAY is not set
2230# CONFIG_RCU_TORTURE_TEST is not set 2950# CONFIG_RCU_TORTURE_TEST is not set
2231# CONFIG_BACKTRACE_SELF_TEST is not set 2951# CONFIG_BACKTRACE_SELF_TEST is not set
2232# CONFIG_FAULT_INJECTION is not set 2952# CONFIG_FAULT_INJECTION is not set
2233CONFIG_LATENCYTOP=y 2953# CONFIG_LATENCYTOP is not set
2234CONFIG_SYSCTL_SYSCALL_CHECK=y 2954# CONFIG_SYSCTL_SYSCALL_CHECK is not set
2235CONFIG_HAVE_FTRACE=y 2955CONFIG_HAVE_FTRACE=y
2236CONFIG_HAVE_DYNAMIC_FTRACE=y 2956CONFIG_HAVE_DYNAMIC_FTRACE=y
2237CONFIG_TRACING=y
2238# CONFIG_FTRACE is not set 2957# CONFIG_FTRACE is not set
2239# CONFIG_IRQSOFF_TRACER is not set 2958# CONFIG_IRQSOFF_TRACER is not set
2240CONFIG_SYSPROF_TRACER=y 2959# CONFIG_SYSPROF_TRACER is not set
2241# CONFIG_SCHED_TRACER is not set 2960# CONFIG_SCHED_TRACER is not set
2242# CONFIG_CONTEXT_SWITCH_TRACER is not set 2961# CONFIG_CONTEXT_SWITCH_TRACER is not set
2243# CONFIG_FTRACE_STARTUP_TEST is not set
2244# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set 2962# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
2245# CONFIG_SAMPLES is not set 2963# CONFIG_SAMPLES is not set
2246CONFIG_HAVE_ARCH_KGDB=y 2964CONFIG_HAVE_ARCH_KGDB=y
@@ -2252,9 +2970,8 @@ CONFIG_EARLY_PRINTK=y
2252# CONFIG_DEBUG_STACK_USAGE is not set 2970# CONFIG_DEBUG_STACK_USAGE is not set
2253# CONFIG_DEBUG_PAGEALLOC is not set 2971# CONFIG_DEBUG_PAGEALLOC is not set
2254# CONFIG_DEBUG_PER_CPU_MAPS is not set 2972# CONFIG_DEBUG_PER_CPU_MAPS is not set
2255CONFIG_X86_PTDUMP=y 2973# CONFIG_X86_PTDUMP is not set
2256CONFIG_DEBUG_RODATA=y 2974# CONFIG_DEBUG_RODATA is not set
2257# CONFIG_DEBUG_RODATA_TEST is not set
2258# CONFIG_DEBUG_NX_TEST is not set 2975# CONFIG_DEBUG_NX_TEST is not set
2259# CONFIG_4KSTACKS is not set 2976# CONFIG_4KSTACKS is not set
2260CONFIG_DOUBLEFAULT=y 2977CONFIG_DOUBLEFAULT=y
@@ -2268,7 +2985,7 @@ CONFIG_IO_DELAY_0X80=y
2268# CONFIG_IO_DELAY_UDELAY is not set 2985# CONFIG_IO_DELAY_UDELAY is not set
2269# CONFIG_IO_DELAY_NONE is not set 2986# CONFIG_IO_DELAY_NONE is not set
2270CONFIG_DEFAULT_IO_DELAY_TYPE=0 2987CONFIG_DEFAULT_IO_DELAY_TYPE=0
2271CONFIG_DEBUG_BOOT_PARAMS=y 2988# CONFIG_DEBUG_BOOT_PARAMS is not set
2272# CONFIG_CPA_DEBUG is not set 2989# CONFIG_CPA_DEBUG is not set
2273# CONFIG_OPTIMIZE_INLINING is not set 2990# CONFIG_OPTIMIZE_INLINING is not set
2274 2991
@@ -2279,12 +2996,19 @@ CONFIG_KEYS=y
2279CONFIG_KEYS_DEBUG_PROC_KEYS=y 2996CONFIG_KEYS_DEBUG_PROC_KEYS=y
2280CONFIG_SECURITY=y 2997CONFIG_SECURITY=y
2281CONFIG_SECURITY_NETWORK=y 2998CONFIG_SECURITY_NETWORK=y
2282CONFIG_SECURITY_NETWORK_XFRM=y 2999# CONFIG_SECURITY_NETWORK_XFRM is not set
2283CONFIG_SECURITY_FILE_CAPABILITIES=y 3000# CONFIG_SECURITY_FILE_CAPABILITIES is not set
2284# CONFIG_SECURITY_ROOTPLUG is not set 3001# CONFIG_SECURITY_ROOTPLUG is not set
2285CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536 3002CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
2286# CONFIG_SECURITY_SELINUX is not set 3003CONFIG_SECURITY_SELINUX=y
2287# CONFIG_SECURITY_SMACK is not set 3004CONFIG_SECURITY_SELINUX_BOOTPARAM=y
3005CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
3006CONFIG_SECURITY_SELINUX_DISABLE=y
3007CONFIG_SECURITY_SELINUX_DEVELOP=y
3008CONFIG_SECURITY_SELINUX_AVC_STATS=y
3009CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
3010# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
3011# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
2288CONFIG_CRYPTO=y 3012CONFIG_CRYPTO=y
2289 3013
2290# 3014#
@@ -2292,10 +3016,10 @@ CONFIG_CRYPTO=y
2292# 3016#
2293CONFIG_CRYPTO_ALGAPI=y 3017CONFIG_CRYPTO_ALGAPI=y
2294CONFIG_CRYPTO_AEAD=m 3018CONFIG_CRYPTO_AEAD=m
2295CONFIG_CRYPTO_BLKCIPHER=m 3019CONFIG_CRYPTO_BLKCIPHER=y
2296CONFIG_CRYPTO_HASH=y 3020CONFIG_CRYPTO_HASH=y
2297CONFIG_CRYPTO_MANAGER=y 3021CONFIG_CRYPTO_MANAGER=y
2298CONFIG_CRYPTO_GF128MUL=m 3022# CONFIG_CRYPTO_GF128MUL is not set
2299CONFIG_CRYPTO_NULL=m 3023CONFIG_CRYPTO_NULL=m
2300# CONFIG_CRYPTO_CRYPTD is not set 3024# CONFIG_CRYPTO_CRYPTD is not set
2301CONFIG_CRYPTO_AUTHENC=m 3025CONFIG_CRYPTO_AUTHENC=m
@@ -2304,26 +3028,26 @@ CONFIG_CRYPTO_TEST=m
2304# 3028#
2305# Authenticated Encryption with Associated Data 3029# Authenticated Encryption with Associated Data
2306# 3030#
2307CONFIG_CRYPTO_CCM=m 3031# CONFIG_CRYPTO_CCM is not set
2308CONFIG_CRYPTO_GCM=m 3032# CONFIG_CRYPTO_GCM is not set
2309CONFIG_CRYPTO_SEQIV=m 3033# CONFIG_CRYPTO_SEQIV is not set
2310 3034
2311# 3035#
2312# Block modes 3036# Block modes
2313# 3037#
2314CONFIG_CRYPTO_CBC=m 3038CONFIG_CRYPTO_CBC=y
2315CONFIG_CRYPTO_CTR=m 3039# CONFIG_CRYPTO_CTR is not set
2316# CONFIG_CRYPTO_CTS is not set 3040# CONFIG_CRYPTO_CTS is not set
2317CONFIG_CRYPTO_ECB=m 3041CONFIG_CRYPTO_ECB=m
2318CONFIG_CRYPTO_LRW=m 3042# CONFIG_CRYPTO_LRW is not set
2319CONFIG_CRYPTO_PCBC=m 3043CONFIG_CRYPTO_PCBC=m
2320CONFIG_CRYPTO_XTS=m 3044# CONFIG_CRYPTO_XTS is not set
2321 3045
2322# 3046#
2323# Hash modes 3047# Hash modes
2324# 3048#
2325CONFIG_CRYPTO_HMAC=y 3049CONFIG_CRYPTO_HMAC=y
2326CONFIG_CRYPTO_XCBC=m 3050# CONFIG_CRYPTO_XCBC is not set
2327 3051
2328# 3052#
2329# Digest 3053# Digest
@@ -2336,7 +3060,7 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
2336# CONFIG_CRYPTO_RMD160 is not set 3060# CONFIG_CRYPTO_RMD160 is not set
2337# CONFIG_CRYPTO_RMD256 is not set 3061# CONFIG_CRYPTO_RMD256 is not set
2338# CONFIG_CRYPTO_RMD320 is not set 3062# CONFIG_CRYPTO_RMD320 is not set
2339CONFIG_CRYPTO_SHA1=y 3063CONFIG_CRYPTO_SHA1=m
2340CONFIG_CRYPTO_SHA256=m 3064CONFIG_CRYPTO_SHA256=m
2341CONFIG_CRYPTO_SHA512=m 3065CONFIG_CRYPTO_SHA512=m
2342CONFIG_CRYPTO_TGR192=m 3066CONFIG_CRYPTO_TGR192=m
@@ -2346,19 +3070,19 @@ CONFIG_CRYPTO_WP512=m
2346# Ciphers 3070# Ciphers
2347# 3071#
2348CONFIG_CRYPTO_AES=m 3072CONFIG_CRYPTO_AES=m
2349# CONFIG_CRYPTO_AES_586 is not set 3073CONFIG_CRYPTO_AES_586=m
2350CONFIG_CRYPTO_ANUBIS=m 3074CONFIG_CRYPTO_ANUBIS=m
2351CONFIG_CRYPTO_ARC4=m 3075CONFIG_CRYPTO_ARC4=m
2352CONFIG_CRYPTO_BLOWFISH=m 3076CONFIG_CRYPTO_BLOWFISH=m
2353CONFIG_CRYPTO_CAMELLIA=m 3077# CONFIG_CRYPTO_CAMELLIA is not set
2354CONFIG_CRYPTO_CAST5=m 3078CONFIG_CRYPTO_CAST5=y
2355CONFIG_CRYPTO_CAST6=m 3079CONFIG_CRYPTO_CAST6=m
2356CONFIG_CRYPTO_DES=m 3080CONFIG_CRYPTO_DES=y
2357CONFIG_CRYPTO_FCRYPT=m 3081# CONFIG_CRYPTO_FCRYPT is not set
2358CONFIG_CRYPTO_KHAZAD=m 3082CONFIG_CRYPTO_KHAZAD=m
2359CONFIG_CRYPTO_SALSA20=m 3083# CONFIG_CRYPTO_SALSA20 is not set
2360# CONFIG_CRYPTO_SALSA20_586 is not set 3084# CONFIG_CRYPTO_SALSA20_586 is not set
2361CONFIG_CRYPTO_SEED=m 3085# CONFIG_CRYPTO_SEED is not set
2362CONFIG_CRYPTO_SERPENT=m 3086CONFIG_CRYPTO_SERPENT=m
2363CONFIG_CRYPTO_TEA=m 3087CONFIG_CRYPTO_TEA=m
2364CONFIG_CRYPTO_TWOFISH=m 3088CONFIG_CRYPTO_TWOFISH=m
@@ -2371,11 +3095,17 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m
2371CONFIG_CRYPTO_DEFLATE=m 3095CONFIG_CRYPTO_DEFLATE=m
2372# CONFIG_CRYPTO_LZO is not set 3096# CONFIG_CRYPTO_LZO is not set
2373CONFIG_CRYPTO_HW=y 3097CONFIG_CRYPTO_HW=y
2374# CONFIG_CRYPTO_DEV_PADLOCK is not set 3098CONFIG_CRYPTO_DEV_PADLOCK=m
2375# CONFIG_CRYPTO_DEV_GEODE is not set 3099CONFIG_CRYPTO_DEV_PADLOCK_AES=m
3100CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
3101CONFIG_CRYPTO_DEV_GEODE=m
2376# CONFIG_CRYPTO_DEV_HIFN_795X is not set 3102# CONFIG_CRYPTO_DEV_HIFN_795X is not set
2377CONFIG_HAVE_KVM=y 3103CONFIG_HAVE_KVM=y
2378# CONFIG_VIRTUALIZATION is not set 3104CONFIG_VIRTUALIZATION=y
3105# CONFIG_KVM is not set
3106# CONFIG_LGUEST is not set
3107# CONFIG_VIRTIO_PCI is not set
3108# CONFIG_VIRTIO_BALLOON is not set
2379 3109
2380# 3110#
2381# Library routines 3111# Library routines
@@ -2385,7 +3115,7 @@ CONFIG_GENERIC_FIND_FIRST_BIT=y
2385CONFIG_GENERIC_FIND_NEXT_BIT=y 3115CONFIG_GENERIC_FIND_NEXT_BIT=y
2386CONFIG_CRC_CCITT=m 3116CONFIG_CRC_CCITT=m
2387CONFIG_CRC16=m 3117CONFIG_CRC16=m
2388CONFIG_CRC_T10DIF=y 3118# CONFIG_CRC_T10DIF is not set
2389CONFIG_CRC_ITU_T=m 3119CONFIG_CRC_ITU_T=m
2390CONFIG_CRC32=y 3120CONFIG_CRC32=y
2391# CONFIG_CRC7 is not set 3121# CONFIG_CRC7 is not set
@@ -2393,6 +3123,8 @@ CONFIG_LIBCRC32C=m
2393CONFIG_AUDIT_GENERIC=y 3123CONFIG_AUDIT_GENERIC=y
2394CONFIG_ZLIB_INFLATE=y 3124CONFIG_ZLIB_INFLATE=y
2395CONFIG_ZLIB_DEFLATE=m 3125CONFIG_ZLIB_DEFLATE=m
3126CONFIG_REED_SOLOMON=m
3127CONFIG_REED_SOLOMON_DEC16=y
2396CONFIG_TEXTSEARCH=y 3128CONFIG_TEXTSEARCH=y
2397CONFIG_TEXTSEARCH_KMP=m 3129CONFIG_TEXTSEARCH_KMP=m
2398CONFIG_TEXTSEARCH_BM=m 3130CONFIG_TEXTSEARCH_BM=m
@@ -2401,3 +3133,4 @@ CONFIG_PLIST=y
2401CONFIG_HAS_IOMEM=y 3133CONFIG_HAS_IOMEM=y
2402CONFIG_HAS_IOPORT=y 3134CONFIG_HAS_IOPORT=y
2403CONFIG_HAS_DMA=y 3135CONFIG_HAS_DMA=y
3136CONFIG_CHECK_SIGNATURE=y
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch
new file mode 100644
index 0000000000..64deeeaf54
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch
@@ -0,0 +1,34380 @@
1Index: linux-2.6.27/include/drm/drm.h
2===================================================================
3--- linux-2.6.27.orig/include/drm/drm.h 2009-01-14 11:54:35.000000000 +0000
4+++ linux-2.6.27/include/drm/drm.h 2009-01-14 11:58:01.000000000 +0000
5@@ -173,6 +173,7 @@
6 _DRM_AGP = 3, /**< AGP/GART */
7 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
8 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
9+ _DRM_TTM = 6
10 };
11
12 /**
13@@ -598,6 +599,400 @@
14 uint64_t size;
15 };
16
17+#define DRM_FENCE_FLAG_EMIT 0x00000001
18+#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
19+#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
20+#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
21+#define DRM_FENCE_FLAG_NO_USER 0x00000010
22+
23+/* Reserved for driver use */
24+#define DRM_FENCE_MASK_DRIVER 0xFF000000
25+
26+#define DRM_FENCE_TYPE_EXE 0x00000001
27+
28+struct drm_fence_arg {
29+ unsigned int handle;
30+ unsigned int fence_class;
31+ unsigned int type;
32+ unsigned int flags;
33+ unsigned int signaled;
34+ unsigned int error;
35+ unsigned int sequence;
36+ unsigned int pad64;
37+ uint64_t expand_pad[2]; /*Future expansion */
38+};
39+
40+/* Buffer permissions, referring to how the GPU uses the buffers.
41+ * these translate to fence types used for the buffers.
42+ * Typically a texture buffer is read, A destination buffer is write and
43+ * a command (batch-) buffer is exe. Can be or-ed together.
44+ */
45+
46+#define DRM_BO_FLAG_READ (1ULL << 0)
47+#define DRM_BO_FLAG_WRITE (1ULL << 1)
48+#define DRM_BO_FLAG_EXE (1ULL << 2)
49+
50+/*
51+ * Status flags. Can be read to determine the actual state of a buffer.
52+ * Can also be set in the buffer mask before validation.
53+ */
54+
55+/*
56+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
57+ * available to root and must be manually removed before buffer manager shutdown
58+ * or lock.
59+ * Flags: Acknowledge
60+ */
61+#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
62+
63+/*
64+ * Mask: Require that the buffer is placed in mappable memory when validated.
65+ * If not set the buffer may or may not be in mappable memory when validated.
66+ * Flags: If set, the buffer is in mappable memory.
67+ */
68+#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
69+
70+/* Mask: The buffer should be shareable with other processes.
71+ * Flags: The buffer is shareable with other processes.
72+ */
73+#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
74+
75+/* Mask: If set, place the buffer in cache-coherent memory if available.
76+ * If clear, never place the buffer in cache coherent memory if validated.
77+ * Flags: The buffer is currently in cache-coherent memory.
78+ */
79+#define DRM_BO_FLAG_CACHED (1ULL << 7)
80+
81+/* Mask: Make sure that every time this buffer is validated,
82+ * it ends up on the same location provided that the memory mask is the same.
83+ * The buffer will also not be evicted when claiming space for
84+ * other buffers. Basically a pinned buffer but it may be thrown out as
85+ * part of buffer manager shutdown or locking.
86+ * Flags: Acknowledge.
87+ */
88+#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
89+
90+/* Mask: Make sure the buffer is in cached memory when mapped
91+ * Flags: Acknowledge.
92+ * Buffers allocated with this flag should not be used for suballocators
93+ * This type may have issues on CPUs with over-aggressive caching
94+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
95+ */
96+#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
97+
98+
99+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
100+ * Flags: Acknowledge.
101+ */
102+#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
103+
104+/*
105+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
106+ * Flags: Acknowledge.
107+ */
108+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
109+#define DRM_BO_FLAG_TILE (1ULL << 15)
110+
111+/*
112+ * Memory type flags that can be or'ed together in the mask, but only
113+ * one appears in flags.
114+ */
115+
116+/* System memory */
117+#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
118+/* Translation table memory */
119+#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
120+/* Vram memory */
121+#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
122+/* Up to the driver to define. */
123+#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
124+#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
125+#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
126+#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
127+#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
128+/* We can add more of these now with a 64-bit flag type */
129+
130+/* Memory flag mask */
131+#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
132+#define DRM_BO_MASK_MEMTYPE 0x00000000FF0800A0ULL
133+
134+/* Driver-private flags */
135+#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
136+
137+/* Don't block on validate and map */
138+#define DRM_BO_HINT_DONT_BLOCK 0x00000002
139+/* Don't place this buffer on the unfenced list.*/
140+#define DRM_BO_HINT_DONT_FENCE 0x00000004
141+#define DRM_BO_HINT_WAIT_LAZY 0x00000008
142+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
143+
144+#define DRM_BO_INIT_MAGIC 0xfe769812
145+#define DRM_BO_INIT_MAJOR 1
146+#define DRM_BO_INIT_MINOR 0
147+#define DRM_BO_INIT_PATCH 0
148+
149+
150+struct drm_bo_info_req {
151+ uint64_t mask;
152+ uint64_t flags;
153+ unsigned int handle;
154+ unsigned int hint;
155+ unsigned int fence_class;
156+ unsigned int desired_tile_stride;
157+ unsigned int tile_info;
158+ unsigned int pad64;
159+ uint64_t presumed_offset;
160+};
161+
162+struct drm_bo_create_req {
163+ uint64_t mask;
164+ uint64_t size;
165+ uint64_t buffer_start;
166+ unsigned int hint;
167+ unsigned int page_alignment;
168+};
169+
170+
171+/*
172+ * Reply flags
173+ */
174+
175+#define DRM_BO_REP_BUSY 0x00000001
176+
177+struct drm_bo_info_rep {
178+ uint64_t flags;
179+ uint64_t mask;
180+ uint64_t size;
181+ uint64_t offset;
182+ uint64_t arg_handle;
183+ uint64_t buffer_start;
184+ unsigned int handle;
185+ unsigned int fence_flags;
186+ unsigned int rep_flags;
187+ unsigned int page_alignment;
188+ unsigned int desired_tile_stride;
189+ unsigned int hw_tile_stride;
190+ unsigned int tile_info;
191+ unsigned int pad64;
192+ uint64_t expand_pad[4]; /*Future expansion */
193+};
194+
195+struct drm_bo_arg_rep {
196+ struct drm_bo_info_rep bo_info;
197+ int ret;
198+ unsigned int pad64;
199+};
200+
201+struct drm_bo_create_arg {
202+ union {
203+ struct drm_bo_create_req req;
204+ struct drm_bo_info_rep rep;
205+ } d;
206+};
207+
208+struct drm_bo_handle_arg {
209+ unsigned int handle;
210+};
211+
212+struct drm_bo_reference_info_arg {
213+ union {
214+ struct drm_bo_handle_arg req;
215+ struct drm_bo_info_rep rep;
216+ } d;
217+};
218+
219+struct drm_bo_map_wait_idle_arg {
220+ union {
221+ struct drm_bo_info_req req;
222+ struct drm_bo_info_rep rep;
223+ } d;
224+};
225+
226+struct drm_bo_op_req {
227+ enum {
228+ drm_bo_validate,
229+ drm_bo_fence,
230+ drm_bo_ref_fence,
231+ } op;
232+ unsigned int arg_handle;
233+ struct drm_bo_info_req bo_req;
234+};
235+
236+
237+struct drm_bo_op_arg {
238+ uint64_t next;
239+ union {
240+ struct drm_bo_op_req req;
241+ struct drm_bo_arg_rep rep;
242+ } d;
243+ int handled;
244+ unsigned int pad64;
245+};
246+
247+
248+#define DRM_BO_MEM_LOCAL 0
249+#define DRM_BO_MEM_TT 1
250+#define DRM_BO_MEM_VRAM 2
251+#define DRM_BO_MEM_PRIV0 3
252+#define DRM_BO_MEM_PRIV1 4
253+#define DRM_BO_MEM_PRIV2 5
254+#define DRM_BO_MEM_PRIV3 6
255+#define DRM_BO_MEM_PRIV4 7
256+
257+#define DRM_BO_MEM_TYPES 8 /* For now. */
258+
259+#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
260+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
261+
262+struct drm_bo_version_arg {
263+ uint32_t major;
264+ uint32_t minor;
265+ uint32_t patchlevel;
266+};
267+
268+struct drm_mm_type_arg {
269+ unsigned int mem_type;
270+ unsigned int lock_flags;
271+};
272+
273+struct drm_mm_init_arg {
274+ unsigned int magic;
275+ unsigned int major;
276+ unsigned int minor;
277+ unsigned int mem_type;
278+ uint64_t p_offset;
279+ uint64_t p_size;
280+};
281+
282+/*
283+ * Drm mode setting
284+ */
285+#define DRM_DISPLAY_INFO_LEN 32
286+#define DRM_OUTPUT_NAME_LEN 32
287+#define DRM_DISPLAY_MODE_LEN 32
288+#define DRM_PROP_NAME_LEN 32
289+
290+#define DRM_MODE_TYPE_BUILTIN (1<<0)
291+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
292+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
293+#define DRM_MODE_TYPE_PREFERRED (1<<3)
294+#define DRM_MODE_TYPE_DEFAULT (1<<4)
295+#define DRM_MODE_TYPE_USERDEF (1<<5)
296+#define DRM_MODE_TYPE_DRIVER (1<<6)
297+#define DRM_MODE_TYPE_USERPREF (1<<7)
298+
299+struct drm_mode_modeinfo {
300+
301+ unsigned int id;
302+
303+ unsigned int clock;
304+ unsigned short hdisplay, hsync_start, hsync_end, htotal, hskew;
305+ unsigned short vdisplay, vsync_start, vsync_end, vtotal, vscan;
306+
307+ unsigned int vrefresh; /* vertical refresh * 1000 */
308+
309+ unsigned int flags;
310+ unsigned int type;
311+ char name[DRM_DISPLAY_MODE_LEN];
312+};
313+
314+struct drm_mode_card_res {
315+
316+ int count_fbs;
317+ unsigned int __user *fb_id;
318+
319+ int count_crtcs;
320+ unsigned int __user *crtc_id;
321+
322+ int count_outputs;
323+ unsigned int __user *output_id;
324+
325+ int count_modes;
326+ struct drm_mode_modeinfo __user *modes;
327+
328+};
329+
330+struct drm_mode_crtc {
331+ unsigned int crtc_id; /**< Id */
332+ unsigned int fb_id; /**< Id of framebuffer */
333+
334+ int x, y; /**< Position on the frameuffer */
335+
336+ unsigned int mode; /**< Current mode used */
337+
338+ int count_outputs;
339+ unsigned int outputs; /**< Outputs that are connected */
340+
341+ int count_possibles;
342+ unsigned int possibles; /**< Outputs that can be connected */
343+
344+ unsigned int __user *set_outputs; /**< Outputs to be connected */
345+
346+ int gamma_size;
347+
348+};
349+
350+struct drm_mode_get_output {
351+
352+ unsigned int output; /**< Id */
353+ unsigned int crtc; /**< Id of crtc */
354+ unsigned char name[DRM_OUTPUT_NAME_LEN];
355+
356+ unsigned int connection;
357+ unsigned int mm_width, mm_height; /**< HxW in millimeters */
358+ unsigned int subpixel;
359+
360+ int count_crtcs;
361+ unsigned int crtcs; /**< possible crtc to connect to */
362+
363+ int count_clones;
364+ unsigned int clones; /**< list of clones */
365+
366+ int count_modes;
367+ unsigned int __user *modes; /**< list of modes it supports */
368+
369+ int count_props;
370+ unsigned int __user *props;
371+ unsigned int __user *prop_values;
372+};
373+
374+#define DRM_MODE_PROP_PENDING (1<<0)
375+#define DRM_MODE_PROP_RANGE (1<<1)
376+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
377+#define DRM_MODE_PROP_ENUM (1<<3) // enumerated type with text strings
378+
379+struct drm_mode_property_enum {
380+ uint32_t value;
381+ unsigned char name[DRM_PROP_NAME_LEN];
382+};
383+
384+struct drm_mode_get_property {
385+
386+ unsigned int prop_id;
387+ unsigned int flags;
388+ unsigned char name[DRM_PROP_NAME_LEN];
389+
390+ int count_values;
391+ uint32_t __user *values;
392+
393+ int count_enums;
394+ struct drm_mode_property_enum *enums;
395+};
396+
397+struct drm_mode_fb_cmd {
398+ unsigned int buffer_id;
399+ unsigned int width, height;
400+ unsigned int pitch;
401+ unsigned int bpp;
402+ unsigned int handle;
403+ unsigned int depth;
404+};
405+
406+struct drm_mode_mode_cmd {
407+ unsigned int output_id;
408+ unsigned int mode_id;
409+};
410+
411 #define DRM_IOCTL_BASE 'd'
412 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
413 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
414@@ -664,6 +1059,47 @@
415
416 #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
417
418+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
419+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
420+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
421+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
422+
423+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
424+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
425+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
426+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
427+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
428+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
429+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
430+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
431+
432+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
433+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
434+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
435+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
436+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
437+#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
438+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
439+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
440+#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
441+
442+
443+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
444+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
445+#define DRM_IOCTL_MODE_GETOUTPUT DRM_IOWR(0xA2, struct drm_mode_get_output)
446+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA3, struct drm_mode_crtc)
447+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xA4, struct drm_mode_fb_cmd)
448+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xA5, unsigned int)
449+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xA6, struct drm_mode_fb_cmd)
450+
451+#define DRM_IOCTL_MODE_ADDMODE DRM_IOWR(0xA7, struct drm_mode_modeinfo)
452+#define DRM_IOCTL_MODE_RMMODE DRM_IOWR(0xA8, unsigned int)
453+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
454+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xAA, struct drm_mode_mode_cmd)
455+
456+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAB, struct drm_mode_get_property)
457+/*@}*/
458+
459 /**
460 * Device specific ioctls should only be in their respective headers
461 * The device specific ioctl range is from 0x40 to 0x99.
462@@ -718,6 +1154,11 @@
463 typedef struct drm_agp_info drm_agp_info_t;
464 typedef struct drm_scatter_gather drm_scatter_gather_t;
465 typedef struct drm_set_version drm_set_version_t;
466+
467+typedef struct drm_fence_arg drm_fence_arg_t;
468+typedef struct drm_mm_type_arg drm_mm_type_arg_t;
469+typedef struct drm_mm_init_arg drm_mm_init_arg_t;
470+typedef enum drm_bo_type drm_bo_type_t;
471 #endif
472
473 #endif
474Index: linux-2.6.27/include/drm/drmP.h
475===================================================================
476--- linux-2.6.27.orig/include/drm/drmP.h 2009-01-14 11:54:35.000000000 +0000
477+++ linux-2.6.27/include/drm/drmP.h 2009-01-14 11:58:31.000000000 +0000
478@@ -57,6 +57,7 @@
479 #include <linux/dma-mapping.h>
480 #include <linux/mm.h>
481 #include <linux/cdev.h>
482+#include <linux/i2c.h>
483 #include <linux/mutex.h>
484 #if defined(__alpha__) || defined(__powerpc__)
485 #include <asm/pgtable.h> /* For pte_wrprotect */
486@@ -146,9 +147,24 @@
487 #define DRM_MEM_CTXLIST 21
488 #define DRM_MEM_MM 22
489 #define DRM_MEM_HASHTAB 23
490+#define DRM_MEM_OBJECTS 24
491+#define DRM_MEM_FENCE 25
492+#define DRM_MEM_TTM 26
493+#define DRM_MEM_BUFOBJ 27
494
495 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
496 #define DRM_MAP_HASH_OFFSET 0x10000000
497+#define DRM_MAP_HASH_ORDER 12
498+#define DRM_OBJECT_HASH_ORDER 12
499+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
500+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
501+/*
502+ * This should be small enough to allow the use of kmalloc for hash tables
503+ * instead of vmalloc.
504+ */
505+
506+#define DRM_FILE_HASH_ORDER 8
507+#define DRM_MM_INIT_MAX_PAGES 256
508
509 /*@}*/
510
511@@ -376,6 +392,14 @@
512 struct drm_freelist freelist;
513 };
514
515+
516+enum drm_ref_type {
517+ _DRM_REF_USE = 0,
518+ _DRM_REF_TYPE1,
519+ _DRM_NO_REF_TYPES
520+};
521+
522+
523 /** File private data */
524 struct drm_file {
525 int authenticated;
526@@ -388,12 +412,26 @@
527 struct drm_minor *minor;
528 int remove_auth_on_close;
529 unsigned long lock_count;
530+
531 /** Mapping of mm object handles to object pointers. */
532 struct idr object_idr;
533 /** Lock for synchronization of access to object_idr. */
534 spinlock_t table_lock;
535+
536+ /*
537+ * The user object hash table is global and resides in the
538+ * drm_device structure. We protect the lists and hash tables with the
539+ * device struct_mutex. A bit coarse-grained but probably the best
540+ * option.
541+ */
542+
543+ struct list_head refd_objects;
544+
545+ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
546 struct file *filp;
547 void *driver_priv;
548+
549+ struct list_head fbs;
550 };
551
552 /** Wait queue */
553@@ -523,6 +561,7 @@
554 struct drm_hash_item hash;
555 struct drm_map *map; /**< mapping */
556 uint64_t user_token;
557+ struct drm_mm_node *file_offset_node;
558 };
559
560 typedef struct drm_map drm_local_map_t;
561@@ -612,6 +651,11 @@
562 void *driver_private;
563 };
564
565+
566+#include "drm_objects.h"
567+#include "drm_edid.h"
568+#include "drm_crtc.h"
569+
570 /**
571 * DRM driver structure. This structure represent the common code for
572 * a family of cards. There will one drm_device for each card present
573@@ -637,50 +681,8 @@
574 void (*kernel_context_switch_unlock) (struct drm_device *dev);
575 int (*dri_library_name) (struct drm_device *dev, char *buf);
576
577- /**
578- * get_vblank_counter - get raw hardware vblank counter
579- * @dev: DRM device
580- * @crtc: counter to fetch
581- *
582- * Driver callback for fetching a raw hardware vblank counter
583- * for @crtc. If a device doesn't have a hardware counter, the
584- * driver can simply return the value of drm_vblank_count and
585- * make the enable_vblank() and disable_vblank() hooks into no-ops,
586- * leaving interrupts enabled at all times.
587- *
588- * Wraparound handling and loss of events due to modesetting is dealt
589- * with in the DRM core code.
590- *
591- * RETURNS
592- * Raw vblank counter value.
593- */
594- u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
595-
596- /**
597- * enable_vblank - enable vblank interrupt events
598- * @dev: DRM device
599- * @crtc: which irq to enable
600- *
601- * Enable vblank interrupts for @crtc. If the device doesn't have
602- * a hardware vblank counter, this routine should be a no-op, since
603- * interrupts will have to stay on to keep the count accurate.
604- *
605- * RETURNS
606- * Zero on success, appropriate errno if the given @crtc's vblank
607- * interrupt cannot be enabled.
608- */
609- int (*enable_vblank) (struct drm_device *dev, int crtc);
610-
611- /**
612- * disable_vblank - disable vblank interrupt events
613- * @dev: DRM device
614- * @crtc: which irq to enable
615- *
616- * Disable vblank interrupts for @crtc. If the device doesn't have
617- * a hardware vblank counter, this routine should be a no-op, since
618- * interrupts will have to stay on to keep the count accurate.
619- */
620- void (*disable_vblank) (struct drm_device *dev, int crtc);
621+ int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
622+ int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
623
624 /**
625 * Called by \c drm_device_is_agp. Typically used to determine if a
626@@ -715,6 +717,13 @@
627 int (*proc_init)(struct drm_minor *minor);
628 void (*proc_cleanup)(struct drm_minor *minor);
629
630+ /* FB routines, if present */
631+ int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
632+ int (*fb_remove)(struct drm_device *dev, struct drm_crtc *crtc);
633+
634+ struct drm_fence_driver *fence_driver;
635+ struct drm_bo_driver *bo_driver;
636+
637 /**
638 * Driver-specific constructor for drm_gem_objects, to set up
639 * obj->driver_private.
640@@ -800,6 +809,10 @@
641 struct list_head maplist; /**< Linked list of regions */
642 int map_count; /**< Number of mappable regions */
643 struct drm_open_hash map_hash; /**< User token hash table for maps */
644+ struct drm_mm offset_manager; /**< User token manager */
645+ struct drm_open_hash object_hash; /**< User token hash table for objects */
646+ struct address_space *dev_mapping; /**< For unmap_mapping_range() */
647+ struct page *ttm_dummy_page;
648
649 /** \name Context handle management */
650 /*@{ */
651@@ -848,20 +861,13 @@
652 */
653 int vblank_disable_allowed;
654
655- wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
656- atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
657+ wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
658+ atomic_t vbl_received;
659+ atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
660 spinlock_t vbl_lock;
661- struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
662- atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
663- atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */
664- u32 *last_vblank; /* protected by dev->vbl_lock, used */
665- /* for wraparound handling */
666- int *vblank_enabled; /* so we don't call enable more than
667- once per disable */
668- int *vblank_inmodeset; /* Display driver is setting mode */
669- struct timer_list vblank_disable_timer;
670-
671- u32 max_vblank_count; /**< size of vblank counter register */
672+ struct list_head vbl_sigs; /**< signal list to send on VBLANK */
673+ struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
674+ unsigned int vbl_pending;
675 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
676 void (*locked_tasklet_func)(struct drm_device *dev);
677
678@@ -892,12 +898,18 @@
679 unsigned int agp_buffer_token;
680 struct drm_minor *primary; /**< render type primary screen head */
681
682+ struct drm_fence_manager fm;
683+ struct drm_buffer_manager bm;
684+
685 /** \name Drawable information */
686 /*@{ */
687 spinlock_t drw_lock;
688 struct idr drw_idr;
689 /*@} */
690
691+ /* DRM mode setting */
692+ struct drm_mode_config mode_config;
693+
694 /** \name GEM information */
695 /*@{ */
696 spinlock_t object_name_lock;
697@@ -915,6 +927,27 @@
698
699 };
700
701+#if __OS_HAS_AGP
702+struct drm_agp_ttm_backend {
703+ struct drm_ttm_backend backend;
704+ DRM_AGP_MEM *mem;
705+ struct agp_bridge_data *bridge;
706+ int populated;
707+};
708+#endif
709+
710+typedef struct ati_pcigart_ttm_backend {
711+ struct drm_ttm_backend backend;
712+ int populated;
713+ void (*gart_flush_fn)(struct drm_device *dev);
714+ struct drm_ati_pcigart_info *gart_info;
715+ unsigned long offset;
716+ struct page **pages;
717+ int num_pages;
718+ int bound;
719+ struct drm_device *dev;
720+} ati_pcigart_ttm_backend_t;
721+
722 static __inline__ int drm_core_check_feature(struct drm_device *dev,
723 int feature)
724 {
725@@ -979,8 +1012,12 @@
726 /*@{*/
727
728 /* Driver support (drm_drv.h) */
729-extern int drm_init(struct drm_driver *driver);
730+extern int drm_init(struct drm_driver *driver,
731+ struct pci_device_id *pciidlist);
732 extern void drm_exit(struct drm_driver *driver);
733+extern void drm_cleanup_pci(struct pci_dev *pdev);
734+extern void drm_vbl_send_signals(struct drm_device *dev);
735+extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
736 extern int drm_ioctl(struct inode *inode, struct file *filp,
737 unsigned int cmd, unsigned long arg);
738 extern long drm_compat_ioctl(struct file *filp,
739Index: linux-2.6.27/include/drm/drm_pciids.h
740===================================================================
741--- linux-2.6.27.orig/include/drm/drm_pciids.h 2009-01-14 11:54:35.000000000 +0000
742+++ linux-2.6.27/include/drm/drm_pciids.h 2009-01-14 11:58:01.000000000 +0000
743@@ -413,3 +413,9 @@
744 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
745 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
746 {0, 0, 0}
747+
748+#define psb_PCI_IDS \
749+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
750+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
751+ {0, 0, 0}
752+
753Index: linux-2.6.27/drivers/gpu/drm/Makefile
754===================================================================
755--- linux-2.6.27.orig/drivers/gpu/drm/Makefile 2009-01-14 11:54:35.000000000 +0000
756+++ linux-2.6.27/drivers/gpu/drm/Makefile 2009-01-14 12:11:06.000000000 +0000
757@@ -9,11 +9,14 @@
758 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
759 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
760 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
761- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
762+ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
763+ drm_fence.o drm_object.o drm_crtc.o drm_ttm.o drm_bo.o \
764+ drm_bo_lock.o drm_bo_move.o drm_edid.o drm_modes.o drm_regman.o
765
766 drm-$(CONFIG_COMPAT) += drm_ioc32.o
767
768 obj-$(CONFIG_DRM) += drm.o
769+obj-$(CONFIG_DRM_PSB) += psb/
770 obj-$(CONFIG_DRM_TDFX) += tdfx/
771 obj-$(CONFIG_DRM_R128) += r128/
772 obj-$(CONFIG_DRM_RADEON)+= radeon/
773@@ -24,4 +27,3 @@
774 obj-$(CONFIG_DRM_SIS) += sis/
775 obj-$(CONFIG_DRM_SAVAGE)+= savage/
776 obj-$(CONFIG_DRM_VIA) +=via/
777-
778Index: linux-2.6.27/drivers/gpu/drm/drm_agpsupport.c
779===================================================================
780--- linux-2.6.27.orig/drivers/gpu/drm/drm_agpsupport.c 2009-01-14 11:54:35.000000000 +0000
781+++ linux-2.6.27/drivers/gpu/drm/drm_agpsupport.c 2009-01-14 11:58:01.000000000 +0000
782@@ -453,47 +453,158 @@
783 return agp_unbind_memory(handle);
784 }
785
786-/**
787- * Binds a collection of pages into AGP memory at the given offset, returning
788- * the AGP memory structure containing them.
789- *
790- * No reference is held on the pages during this time -- it is up to the
791- * caller to handle that.
792+
793+
794+/*
795+ * AGP ttm backend interface.
796 */
797-DRM_AGP_MEM *
798-drm_agp_bind_pages(struct drm_device *dev,
799- struct page **pages,
800- unsigned long num_pages,
801- uint32_t gtt_offset)
802+
803+#ifndef AGP_USER_TYPES
804+#define AGP_USER_TYPES (1 << 16)
805+#define AGP_USER_MEMORY (AGP_USER_TYPES)
806+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
807+#endif
808+#define AGP_REQUIRED_MAJOR 0
809+#define AGP_REQUIRED_MINOR 102
810+
811+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
812 {
813+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
814+}
815+
816+
817+static int drm_agp_populate(struct drm_ttm_backend *backend,
818+ unsigned long num_pages, struct page **pages)
819+{
820+ struct drm_agp_ttm_backend *agp_be =
821+ container_of(backend, struct drm_agp_ttm_backend, backend);
822+ struct page **cur_page, **last_page = pages + num_pages;
823 DRM_AGP_MEM *mem;
824- int ret, i;
825
826- DRM_DEBUG("\n");
827+ DRM_DEBUG("drm_agp_populate_ttm\n");
828+ mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
829+ if (!mem)
830+ return -ENOMEM;
831+
832+ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
833+ mem->page_count = 0;
834+ for (cur_page = pages; cur_page < last_page; ++cur_page)
835+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
836+ agp_be->mem = mem;
837+ return 0;
838+}
839+
840+static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
841+ struct drm_bo_mem_reg *bo_mem)
842+{
843+ struct drm_agp_ttm_backend *agp_be =
844+ container_of(backend, struct drm_agp_ttm_backend, backend);
845+ DRM_AGP_MEM *mem = agp_be->mem;
846+ int ret;
847+ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
848+
849+ DRM_DEBUG("drm_agp_bind_ttm\n");
850+ mem->is_flushed = 1;
851+ mem->type = AGP_USER_MEMORY;
852+ /* CACHED MAPPED implies not snooped memory */
853+ if (snooped)
854+ mem->type = AGP_USER_CACHED_MEMORY;
855+
856+ ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
857+ if (ret)
858+ DRM_ERROR("AGP Bind memory failed\n");
859+
860+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
861+ DRM_BE_FLAG_BOUND_CACHED : 0,
862+ DRM_BE_FLAG_BOUND_CACHED);
863+ return ret;
864+}
865+
866+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
867+{
868+ struct drm_agp_ttm_backend *agp_be =
869+ container_of(backend, struct drm_agp_ttm_backend, backend);
870+
871+ DRM_DEBUG("drm_agp_unbind_ttm\n");
872+ if (agp_be->mem->is_bound)
873+ return drm_agp_unbind_memory(agp_be->mem);
874+ else
875+ return 0;
876+}
877+
878+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
879+{
880+ struct drm_agp_ttm_backend *agp_be =
881+ container_of(backend, struct drm_agp_ttm_backend, backend);
882+ DRM_AGP_MEM *mem = agp_be->mem;
883+
884+ DRM_DEBUG("drm_agp_clear_ttm\n");
885+ if (mem) {
886+ backend->func->unbind(backend);
887+ agp_free_memory(mem);
888+ }
889+ agp_be->mem = NULL;
890+}
891+
892+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
893+{
894+ struct drm_agp_ttm_backend *agp_be;
895+
896+ if (backend) {
897+ DRM_DEBUG("drm_agp_destroy_ttm\n");
898+ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
899+ if (agp_be && agp_be->mem)
900+ backend->func->clear(backend);
901+ }
902+}
903+
904+static struct drm_ttm_backend_func agp_ttm_backend = {
905+ .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
906+ .populate = drm_agp_populate,
907+ .clear = drm_agp_clear_ttm,
908+ .bind = drm_agp_bind_ttm,
909+ .unbind = drm_agp_unbind_ttm,
910+ .destroy = drm_agp_destroy_ttm,
911+};
912
913- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
914- AGP_USER_MEMORY);
915- if (mem == NULL) {
916- DRM_ERROR("Failed to allocate memory for %ld pages\n",
917- num_pages);
918+struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
919+{
920+
921+ struct drm_agp_ttm_backend *agp_be;
922+ struct agp_kern_info *info;
923+
924+ if (!dev->agp) {
925+ DRM_ERROR("AGP is not initialized.\n");
926 return NULL;
927 }
928+ info = &dev->agp->agp_info;
929
930- for (i = 0; i < num_pages; i++)
931- mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
932- mem->page_count = num_pages;
933-
934- mem->is_flushed = true;
935- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
936- if (ret != 0) {
937- DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
938- agp_free_memory(mem);
939+ if (info->version.major != AGP_REQUIRED_MAJOR ||
940+ info->version.minor < AGP_REQUIRED_MINOR) {
941+ DRM_ERROR("Wrong agpgart version %d.%d\n"
942+ "\tYou need at least version %d.%d.\n",
943+ info->version.major,
944+ info->version.minor,
945+ AGP_REQUIRED_MAJOR,
946+ AGP_REQUIRED_MINOR);
947 return NULL;
948 }
949
950- return mem;
951+
952+ agp_be = drm_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
953+ if (!agp_be)
954+ return NULL;
955+
956+ agp_be->mem = NULL;
957+
958+ agp_be->bridge = dev->agp->bridge;
959+ agp_be->populated = 0;
960+ agp_be->backend.func = &agp_ttm_backend;
961+ agp_be->backend.dev = dev;
962+
963+ return &agp_be->backend;
964 }
965-EXPORT_SYMBOL(drm_agp_bind_pages);
966+EXPORT_SYMBOL(drm_agp_init_ttm);
967
968 void drm_agp_chipset_flush(struct drm_device *dev)
969 {
970Index: linux-2.6.27/drivers/gpu/drm/drm_bo.c
971===================================================================
972--- /dev/null 1970-01-01 00:00:00.000000000 +0000
973+++ linux-2.6.27/drivers/gpu/drm/drm_bo.c 2009-01-14 11:58:01.000000000 +0000
974@@ -0,0 +1,2660 @@
975+/**************************************************************************
976+ *
977+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
978+ * All Rights Reserved.
979+ *
980+ * Permission is hereby granted, free of charge, to any person obtaining a
981+ * copy of this software and associated documentation files (the
982+ * "Software"), to deal in the Software without restriction, including
983+ * without limitation the rights to use, copy, modify, merge, publish,
984+ * distribute, sub license, and/or sell copies of the Software, and to
985+ * permit persons to whom the Software is furnished to do so, subject to
986+ * the following conditions:
987+ *
988+ * The above copyright notice and this permission notice (including the
989+ * next paragraph) shall be included in all copies or substantial portions
990+ * of the Software.
991+ *
992+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
993+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
994+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
995+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
996+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
997+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
998+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
999+ *
1000+ **************************************************************************/
1001+/*
1002+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
1003+ */
1004+
1005+#include "drmP.h"
1006+
1007+/*
1008+ * Locking may look a bit complicated but isn't really:
1009+ *
1010+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
1011+ * when there is a chance that it can be zero before or after the operation.
1012+ *
1013+ * dev->struct_mutex also protects all lists and list heads,
1014+ * Hash tables and hash heads.
1015+ *
1016+ * bo->mutex protects the buffer object itself excluding the usage field.
1017+ * bo->mutex does also protect the buffer list heads, so to manipulate those,
1018+ * we need both the bo->mutex and the dev->struct_mutex.
1019+ *
1020+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
1021+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
1022+ * the list traversal will, in general, need to be restarted.
1023+ *
1024+ */
1025+
1026+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
1027+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
1028+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
1029+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
1030+
1031+static inline uint64_t drm_bo_type_flags(unsigned type)
1032+{
1033+ return (1ULL << (24 + type));
1034+}
1035+
1036+/*
1037+ * bo locked. dev->struct_mutex locked.
1038+ */
1039+
1040+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
1041+{
1042+ struct drm_mem_type_manager *man;
1043+
1044+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
1045+ DRM_ASSERT_LOCKED(&bo->mutex);
1046+
1047+ man = &bo->dev->bm.man[bo->pinned_mem_type];
1048+ list_add_tail(&bo->pinned_lru, &man->pinned);
1049+}
1050+
1051+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
1052+{
1053+ struct drm_mem_type_manager *man;
1054+
1055+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
1056+
1057+ if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
1058+ || bo->mem.mem_type != bo->pinned_mem_type) {
1059+ man = &bo->dev->bm.man[bo->mem.mem_type];
1060+ list_add_tail(&bo->lru, &man->lru);
1061+ } else {
1062+ INIT_LIST_HEAD(&bo->lru);
1063+ }
1064+}
1065+
1066+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
1067+{
1068+#ifdef DRM_ODD_MM_COMPAT
1069+ int ret;
1070+
1071+ if (!bo->map_list.map)
1072+ return 0;
1073+
1074+ ret = drm_bo_lock_kmm(bo);
1075+ if (ret)
1076+ return ret;
1077+ drm_bo_unmap_virtual(bo);
1078+ if (old_is_pci)
1079+ drm_bo_finish_unmap(bo);
1080+#else
1081+ if (!bo->map_list.map)
1082+ return 0;
1083+
1084+ drm_bo_unmap_virtual(bo);
1085+#endif
1086+ return 0;
1087+}
1088+
1089+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
1090+{
1091+#ifdef DRM_ODD_MM_COMPAT
1092+ int ret;
1093+
1094+ if (!bo->map_list.map)
1095+ return;
1096+
1097+ ret = drm_bo_remap_bound(bo);
1098+ if (ret) {
1099+ DRM_ERROR("Failed to remap a bound buffer object.\n"
1100+ "\tThis might cause a sigbus later.\n");
1101+ }
1102+ drm_bo_unlock_kmm(bo);
1103+#endif
1104+}
1105+
1106+/*
1107+ * Call bo->mutex locked.
1108+ */
1109+
1110+static int drm_bo_add_ttm(struct drm_buffer_object *bo)
1111+{
1112+ struct drm_device *dev = bo->dev;
1113+ int ret = 0;
1114+
1115+ DRM_ASSERT_LOCKED(&bo->mutex);
1116+ bo->ttm = NULL;
1117+
1118+ switch (bo->type) {
1119+ case drm_bo_type_dc:
1120+ case drm_bo_type_kernel:
1121+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
1122+ if (!bo->ttm)
1123+ ret = -ENOMEM;
1124+ break;
1125+ case drm_bo_type_user:
1126+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
1127+ if (!bo->ttm)
1128+ ret = -ENOMEM;
1129+
1130+ ret = drm_ttm_set_user(bo->ttm, current,
1131+ bo->mem.mask & DRM_BO_FLAG_WRITE,
1132+ bo->buffer_start,
1133+ bo->num_pages,
1134+ dev->bm.dummy_read_page);
1135+ if (ret)
1136+ return ret;
1137+
1138+ break;
1139+ default:
1140+ DRM_ERROR("Illegal buffer object type\n");
1141+ ret = -EINVAL;
1142+ break;
1143+ }
1144+
1145+ return ret;
1146+}
1147+
1148+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
1149+ struct drm_bo_mem_reg *mem,
1150+ int evict, int no_wait)
1151+{
1152+ struct drm_device *dev = bo->dev;
1153+ struct drm_buffer_manager *bm = &dev->bm;
1154+ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
1155+ int new_is_pci = drm_mem_reg_is_pci(dev, mem);
1156+ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
1157+ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
1158+ int ret = 0;
1159+
1160+ if (old_is_pci || new_is_pci ||
1161+ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
1162+ ret = drm_bo_vm_pre_move(bo, old_is_pci);
1163+ if (ret)
1164+ return ret;
1165+
1166+ /*
1167+ * Create and bind a ttm if required.
1168+ */
1169+
1170+ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
1171+ ret = drm_bo_add_ttm(bo);
1172+ if (ret)
1173+ goto out_err;
1174+
1175+ if (mem->mem_type != DRM_BO_MEM_LOCAL) {
1176+ ret = drm_bind_ttm(bo->ttm, mem);
1177+ if (ret)
1178+ goto out_err;
1179+ }
1180+
1181+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
1182+
1183+ struct drm_bo_mem_reg *old_mem = &bo->mem;
1184+ uint64_t save_flags = old_mem->flags;
1185+ uint64_t save_mask = old_mem->mask;
1186+
1187+ *old_mem = *mem;
1188+ mem->mm_node = NULL;
1189+ old_mem->mask = save_mask;
1190+ DRM_FLAG_MASKED(save_flags, mem->flags,
1191+ DRM_BO_MASK_MEMTYPE);
1192+ goto moved;
1193+ }
1194+
1195+ }
1196+
1197+ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
1198+ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
1199+
1200+ ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
1201+
1202+ } else if (dev->driver->bo_driver->move) {
1203+ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
1204+
1205+ } else {
1206+
1207+ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
1208+
1209+ }
1210+
1211+ if (ret)
1212+ goto out_err;
1213+
1214+moved:
1215+ if (old_is_pci || new_is_pci)
1216+ drm_bo_vm_post_move(bo);
1217+
1218+ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
1219+ ret =
1220+ dev->driver->bo_driver->invalidate_caches(dev,
1221+ bo->mem.flags);
1222+ if (ret)
1223+ DRM_ERROR("Can not flush read caches\n");
1224+ }
1225+
1226+ DRM_FLAG_MASKED(bo->priv_flags,
1227+ (evict) ? _DRM_BO_FLAG_EVICTED : 0,
1228+ _DRM_BO_FLAG_EVICTED);
1229+
1230+ if (bo->mem.mm_node)
1231+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
1232+ bm->man[bo->mem.mem_type].gpu_offset;
1233+
1234+
1235+ return 0;
1236+
1237+out_err:
1238+ if (old_is_pci || new_is_pci)
1239+ drm_bo_vm_post_move(bo);
1240+
1241+ new_man = &bm->man[bo->mem.mem_type];
1242+ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
1243+ drm_ttm_unbind(bo->ttm);
1244+ drm_destroy_ttm(bo->ttm);
1245+ bo->ttm = NULL;
1246+ }
1247+
1248+ return ret;
1249+}
1250+
1251+/*
1252+ * Call bo->mutex locked.
1253+ * Wait until the buffer is idle.
1254+ */
1255+
1256+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
1257+ int no_wait)
1258+{
1259+ int ret;
1260+
1261+ DRM_ASSERT_LOCKED(&bo->mutex);
1262+
1263+ if (bo->fence) {
1264+ if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
1265+ drm_fence_usage_deref_unlocked(&bo->fence);
1266+ return 0;
1267+ }
1268+ if (no_wait)
1269+ return -EBUSY;
1270+
1271+ ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
1272+ bo->fence_type);
1273+ if (ret)
1274+ return ret;
1275+
1276+ drm_fence_usage_deref_unlocked(&bo->fence);
1277+ }
1278+ return 0;
1279+}
1280+EXPORT_SYMBOL(drm_bo_wait);
1281+
1282+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
1283+{
1284+ struct drm_device *dev = bo->dev;
1285+ struct drm_buffer_manager *bm = &dev->bm;
1286+
1287+ if (bo->fence) {
1288+ if (bm->nice_mode) {
1289+ unsigned long _end = jiffies + 3 * DRM_HZ;
1290+ int ret;
1291+ do {
1292+ ret = drm_bo_wait(bo, 0, 1, 0);
1293+ if (ret && allow_errors)
1294+ return ret;
1295+
1296+ } while (ret && !time_after_eq(jiffies, _end));
1297+
1298+ if (bo->fence) {
1299+ bm->nice_mode = 0;
1300+ DRM_ERROR("Detected GPU lockup or "
1301+ "fence driver was taken down. "
1302+ "Evicting buffer.\n");
1303+ }
1304+ }
1305+ if (bo->fence)
1306+ drm_fence_usage_deref_unlocked(&bo->fence);
1307+ }
1308+ return 0;
1309+}
1310+
1311+/*
1312+ * Call dev->struct_mutex locked.
1313+ * Attempts to remove all private references to a buffer by expiring its
1314+ * fence object and removing from lru lists and memory managers.
1315+ */
1316+
1317+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
1318+{
1319+ struct drm_device *dev = bo->dev;
1320+ struct drm_buffer_manager *bm = &dev->bm;
1321+
1322+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
1323+
1324+ atomic_inc(&bo->usage);
1325+ mutex_unlock(&dev->struct_mutex);
1326+ mutex_lock(&bo->mutex);
1327+
1328+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1329+
1330+ if (bo->fence && drm_fence_object_signaled(bo->fence,
1331+ bo->fence_type))
1332+ drm_fence_usage_deref_unlocked(&bo->fence);
1333+
1334+ if (bo->fence && remove_all)
1335+ (void)drm_bo_expire_fence(bo, 0);
1336+
1337+ mutex_lock(&dev->struct_mutex);
1338+
1339+ if (!atomic_dec_and_test(&bo->usage))
1340+ goto out;
1341+
1342+ if (!bo->fence) {
1343+ list_del_init(&bo->lru);
1344+ if (bo->mem.mm_node) {
1345+ drm_mm_put_block(bo->mem.mm_node);
1346+ if (bo->pinned_node == bo->mem.mm_node)
1347+ bo->pinned_node = NULL;
1348+ bo->mem.mm_node = NULL;
1349+ }
1350+ list_del_init(&bo->pinned_lru);
1351+ if (bo->pinned_node) {
1352+ drm_mm_put_block(bo->pinned_node);
1353+ bo->pinned_node = NULL;
1354+ }
1355+ list_del_init(&bo->ddestroy);
1356+ mutex_unlock(&bo->mutex);
1357+ drm_bo_destroy_locked(bo);
1358+ return;
1359+ }
1360+
1361+ if (list_empty(&bo->ddestroy)) {
1362+ drm_fence_object_flush(bo->fence, bo->fence_type);
1363+ list_add_tail(&bo->ddestroy, &bm->ddestroy);
1364+ schedule_delayed_work(&bm->wq,
1365+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
1366+ }
1367+
1368+out:
1369+ mutex_unlock(&bo->mutex);
1370+ return;
1371+}
1372+
1373+static void drm_bo_unreserve_size(unsigned long size)
1374+{
1375+ //drm_free_memctl(size);
1376+}
1377+
1378+/*
1379+ * Verify that refcount is 0 and that there are no internal references
1380+ * to the buffer object. Then destroy it.
1381+ */
1382+
1383+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
1384+{
1385+ struct drm_device *dev = bo->dev;
1386+ struct drm_buffer_manager *bm = &dev->bm;
1387+ unsigned long reserved_size;
1388+
1389+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
1390+
1391+ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
1392+ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
1393+ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
1394+ if (bo->fence != NULL) {
1395+ DRM_ERROR("Fence was non-zero.\n");
1396+ drm_bo_cleanup_refs(bo, 0);
1397+ return;
1398+ }
1399+
1400+#ifdef DRM_ODD_MM_COMPAT
1401+ BUG_ON(!list_empty(&bo->vma_list));
1402+ BUG_ON(!list_empty(&bo->p_mm_list));
1403+#endif
1404+
1405+ if (bo->ttm) {
1406+ drm_ttm_unbind(bo->ttm);
1407+ drm_destroy_ttm(bo->ttm);
1408+ bo->ttm = NULL;
1409+ }
1410+
1411+ atomic_dec(&bm->count);
1412+
1413+ reserved_size = bo->reserved_size;
1414+
1415+ drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
1416+ drm_bo_unreserve_size(reserved_size);
1417+
1418+ return;
1419+ }
1420+
1421+ /*
1422+ * Some stuff is still trying to reference the buffer object.
1423+ * Get rid of those references.
1424+ */
1425+
1426+ drm_bo_cleanup_refs(bo, 0);
1427+
1428+ return;
1429+}
1430+
1431+/*
1432+ * Call dev->struct_mutex locked.
1433+ */
1434+
1435+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
1436+{
1437+ struct drm_buffer_manager *bm = &dev->bm;
1438+
1439+ struct drm_buffer_object *entry, *nentry;
1440+ struct list_head *list, *next;
1441+
1442+ list_for_each_safe(list, next, &bm->ddestroy) {
1443+ entry = list_entry(list, struct drm_buffer_object, ddestroy);
1444+
1445+ nentry = NULL;
1446+ if (next != &bm->ddestroy) {
1447+ nentry = list_entry(next, struct drm_buffer_object,
1448+ ddestroy);
1449+ atomic_inc(&nentry->usage);
1450+ }
1451+
1452+ drm_bo_cleanup_refs(entry, remove_all);
1453+
1454+ if (nentry)
1455+ atomic_dec(&nentry->usage);
1456+ }
1457+}
1458+
1459+static void drm_bo_delayed_workqueue(struct work_struct *work)
1460+{
1461+ struct drm_buffer_manager *bm =
1462+ container_of(work, struct drm_buffer_manager, wq.work);
1463+ struct drm_device *dev = container_of(bm, struct drm_device, bm);
1464+
1465+ DRM_DEBUG("Delayed delete Worker\n");
1466+
1467+ mutex_lock(&dev->struct_mutex);
1468+ if (!bm->initialized) {
1469+ mutex_unlock(&dev->struct_mutex);
1470+ return;
1471+ }
1472+ drm_bo_delayed_delete(dev, 0);
1473+ if (bm->initialized && !list_empty(&bm->ddestroy)) {
1474+ schedule_delayed_work(&bm->wq,
1475+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
1476+ }
1477+ mutex_unlock(&dev->struct_mutex);
1478+}
1479+
1480+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
1481+{
1482+ struct drm_buffer_object *tmp_bo = *bo;
1483+ bo = NULL;
1484+
1485+ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
1486+
1487+ if (atomic_dec_and_test(&tmp_bo->usage))
1488+ drm_bo_destroy_locked(tmp_bo);
1489+}
1490+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
1491+
1492+static void drm_bo_base_deref_locked(struct drm_file *file_priv,
1493+ struct drm_user_object *uo)
1494+{
1495+ struct drm_buffer_object *bo =
1496+ drm_user_object_entry(uo, struct drm_buffer_object, base);
1497+
1498+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
1499+
1500+ drm_bo_takedown_vm_locked(bo);
1501+ drm_bo_usage_deref_locked(&bo);
1502+}
1503+
1504+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
1505+{
1506+ struct drm_buffer_object *tmp_bo = *bo;
1507+ struct drm_device *dev = tmp_bo->dev;
1508+
1509+ *bo = NULL;
1510+ if (atomic_dec_and_test(&tmp_bo->usage)) {
1511+ mutex_lock(&dev->struct_mutex);
1512+ if (atomic_read(&tmp_bo->usage) == 0)
1513+ drm_bo_destroy_locked(tmp_bo);
1514+ mutex_unlock(&dev->struct_mutex);
1515+ }
1516+}
1517+EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
1518+
1519+void drm_putback_buffer_objects(struct drm_device *dev)
1520+{
1521+ struct drm_buffer_manager *bm = &dev->bm;
1522+ struct list_head *list = &bm->unfenced;
1523+ struct drm_buffer_object *entry, *next;
1524+
1525+ mutex_lock(&dev->struct_mutex);
1526+ list_for_each_entry_safe(entry, next, list, lru) {
1527+ atomic_inc(&entry->usage);
1528+ mutex_unlock(&dev->struct_mutex);
1529+
1530+ mutex_lock(&entry->mutex);
1531+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
1532+ mutex_lock(&dev->struct_mutex);
1533+
1534+ list_del_init(&entry->lru);
1535+ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1536+ wake_up_all(&entry->event_queue);
1537+
1538+ /*
1539+ * FIXME: Might want to put back on head of list
1540+ * instead of tail here.
1541+ */
1542+
1543+ drm_bo_add_to_lru(entry);
1544+ mutex_unlock(&entry->mutex);
1545+ drm_bo_usage_deref_locked(&entry);
1546+ }
1547+ mutex_unlock(&dev->struct_mutex);
1548+}
1549+EXPORT_SYMBOL(drm_putback_buffer_objects);
1550+
1551+
1552+/*
1553+ * Note. The caller has to register (if applicable)
1554+ * and deregister fence object usage.
1555+ */
1556+
1557+int drm_fence_buffer_objects(struct drm_device *dev,
1558+ struct list_head *list,
1559+ uint32_t fence_flags,
1560+ struct drm_fence_object *fence,
1561+ struct drm_fence_object **used_fence)
1562+{
1563+ struct drm_buffer_manager *bm = &dev->bm;
1564+ struct drm_buffer_object *entry;
1565+ uint32_t fence_type = 0;
1566+ uint32_t fence_class = ~0;
1567+ int count = 0;
1568+ int ret = 0;
1569+ struct list_head *l;
1570+
1571+ mutex_lock(&dev->struct_mutex);
1572+
1573+ if (!list)
1574+ list = &bm->unfenced;
1575+
1576+ if (fence)
1577+ fence_class = fence->fence_class;
1578+
1579+ list_for_each_entry(entry, list, lru) {
1580+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
1581+ fence_type |= entry->new_fence_type;
1582+ if (fence_class == ~0)
1583+ fence_class = entry->new_fence_class;
1584+ else if (entry->new_fence_class != fence_class) {
1585+ DRM_ERROR("Unmatching fence classes on unfenced list: "
1586+ "%d and %d.\n",
1587+ fence_class,
1588+ entry->new_fence_class);
1589+ ret = -EINVAL;
1590+ goto out;
1591+ }
1592+ count++;
1593+ }
1594+
1595+ if (!count) {
1596+ ret = -EINVAL;
1597+ goto out;
1598+ }
1599+
1600+ if (fence) {
1601+ if ((fence_type & fence->type) != fence_type ||
1602+ (fence->fence_class != fence_class)) {
1603+ DRM_ERROR("Given fence doesn't match buffers "
1604+ "on unfenced list.\n");
1605+ ret = -EINVAL;
1606+ goto out;
1607+ }
1608+ } else {
1609+ mutex_unlock(&dev->struct_mutex);
1610+ ret = drm_fence_object_create(dev, fence_class, fence_type,
1611+ fence_flags | DRM_FENCE_FLAG_EMIT,
1612+ &fence);
1613+ mutex_lock(&dev->struct_mutex);
1614+ if (ret)
1615+ goto out;
1616+ }
1617+
1618+ count = 0;
1619+ l = list->next;
1620+ while (l != list) {
1621+ prefetch(l->next);
1622+ entry = list_entry(l, struct drm_buffer_object, lru);
1623+ atomic_inc(&entry->usage);
1624+ mutex_unlock(&dev->struct_mutex);
1625+ mutex_lock(&entry->mutex);
1626+ mutex_lock(&dev->struct_mutex);
1627+ list_del_init(l);
1628+ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1629+ count++;
1630+ if (entry->fence)
1631+ drm_fence_usage_deref_locked(&entry->fence);
1632+ entry->fence = drm_fence_reference_locked(fence);
1633+ entry->fence_class = entry->new_fence_class;
1634+ entry->fence_type = entry->new_fence_type;
1635+ DRM_FLAG_MASKED(entry->priv_flags, 0,
1636+ _DRM_BO_FLAG_UNFENCED);
1637+ wake_up_all(&entry->event_queue);
1638+ drm_bo_add_to_lru(entry);
1639+ }
1640+ mutex_unlock(&entry->mutex);
1641+ drm_bo_usage_deref_locked(&entry);
1642+ l = list->next;
1643+ }
1644+ DRM_DEBUG("Fenced %d buffers\n", count);
1645+out:
1646+ mutex_unlock(&dev->struct_mutex);
1647+ *used_fence = fence;
1648+ return ret;
1649+}
1650+EXPORT_SYMBOL(drm_fence_buffer_objects);
1651+
1652+/*
1653+ * bo->mutex locked
1654+ */
1655+
1656+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
1657+ int no_wait)
1658+{
1659+ int ret = 0;
1660+ struct drm_device *dev = bo->dev;
1661+ struct drm_bo_mem_reg evict_mem;
1662+
1663+ /*
1664+ * Someone might have modified the buffer before we took the
1665+ * buffer mutex.
1666+ */
1667+
1668+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
1669+ goto out;
1670+ if (bo->mem.mem_type != mem_type)
1671+ goto out;
1672+
1673+ ret = drm_bo_wait(bo, 0, 0, no_wait);
1674+
1675+ if (ret && ret != -EAGAIN) {
1676+ DRM_ERROR("Failed to expire fence before "
1677+ "buffer eviction.\n");
1678+ goto out;
1679+ }
1680+
1681+ evict_mem = bo->mem;
1682+ evict_mem.mm_node = NULL;
1683+
1684+ evict_mem = bo->mem;
1685+ evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
1686+ ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
1687+
1688+ if (ret) {
1689+ if (ret != -EAGAIN)
1690+ DRM_ERROR("Failed to find memory space for "
1691+ "buffer 0x%p eviction.\n", bo);
1692+ goto out;
1693+ }
1694+
1695+ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
1696+
1697+ if (ret) {
1698+ if (ret != -EAGAIN)
1699+ DRM_ERROR("Buffer eviction failed\n");
1700+ goto out;
1701+ }
1702+
1703+ mutex_lock(&dev->struct_mutex);
1704+ if (evict_mem.mm_node) {
1705+ if (evict_mem.mm_node != bo->pinned_node)
1706+ drm_mm_put_block(evict_mem.mm_node);
1707+ evict_mem.mm_node = NULL;
1708+ }
1709+ list_del(&bo->lru);
1710+ drm_bo_add_to_lru(bo);
1711+ mutex_unlock(&dev->struct_mutex);
1712+
1713+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
1714+ _DRM_BO_FLAG_EVICTED);
1715+
1716+out:
1717+ return ret;
1718+}
1719+
1720+/**
1721+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
1722+ * space, or we've evicted everything and there isn't enough space.
1723+ */
1724+static int drm_bo_mem_force_space(struct drm_device *dev,
1725+ struct drm_bo_mem_reg *mem,
1726+ uint32_t mem_type, int no_wait)
1727+{
1728+ struct drm_mm_node *node;
1729+ struct drm_buffer_manager *bm = &dev->bm;
1730+ struct drm_buffer_object *entry;
1731+ struct drm_mem_type_manager *man = &bm->man[mem_type];
1732+ struct list_head *lru;
1733+ unsigned long num_pages = mem->num_pages;
1734+ int ret;
1735+
1736+ mutex_lock(&dev->struct_mutex);
1737+ do {
1738+ node = drm_mm_search_free(&man->manager, num_pages,
1739+ mem->page_alignment, 1);
1740+ if (node)
1741+ break;
1742+
1743+ lru = &man->lru;
1744+ if (lru->next == lru)
1745+ break;
1746+
1747+ entry = list_entry(lru->next, struct drm_buffer_object, lru);
1748+ atomic_inc(&entry->usage);
1749+ mutex_unlock(&dev->struct_mutex);
1750+ mutex_lock(&entry->mutex);
1751+ BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
1752+
1753+ ret = drm_bo_evict(entry, mem_type, no_wait);
1754+ mutex_unlock(&entry->mutex);
1755+ drm_bo_usage_deref_unlocked(&entry);
1756+ if (ret)
1757+ return ret;
1758+ mutex_lock(&dev->struct_mutex);
1759+ } while (1);
1760+
1761+ if (!node) {
1762+ mutex_unlock(&dev->struct_mutex);
1763+ return -ENOMEM;
1764+ }
1765+
1766+ node = drm_mm_get_block(node, num_pages, mem->page_alignment);
1767+ if (!node) {
1768+ mutex_unlock(&dev->struct_mutex);
1769+ return -ENOMEM;
1770+ }
1771+
1772+ mutex_unlock(&dev->struct_mutex);
1773+ mem->mm_node = node;
1774+ mem->mem_type = mem_type;
1775+ return 0;
1776+}
1777+
1778+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
1779+ int disallow_fixed,
1780+ uint32_t mem_type,
1781+ uint64_t mask, uint32_t *res_mask)
1782+{
1783+ uint64_t cur_flags = drm_bo_type_flags(mem_type);
1784+ uint64_t flag_diff;
1785+
1786+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
1787+ return 0;
1788+ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
1789+ cur_flags |= DRM_BO_FLAG_CACHED;
1790+ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
1791+ cur_flags |= DRM_BO_FLAG_MAPPABLE;
1792+ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
1793+ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
1794+
1795+ if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
1796+ return 0;
1797+
1798+ if (mem_type == DRM_BO_MEM_LOCAL) {
1799+ *res_mask = cur_flags;
1800+ return 1;
1801+ }
1802+
1803+ flag_diff = (mask ^ cur_flags);
1804+ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
1805+ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
1806+
1807+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1808+ (!(mask & DRM_BO_FLAG_CACHED) ||
1809+ (mask & DRM_BO_FLAG_FORCE_CACHING)))
1810+ return 0;
1811+
1812+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1813+ ((mask & DRM_BO_FLAG_MAPPABLE) ||
1814+ (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1815+ return 0;
1816+
1817+ *res_mask = cur_flags;
1818+ return 1;
1819+}
1820+
1821+/**
1822+ * Creates space for memory region @mem according to its type.
1823+ *
1824+ * This function first searches for free space in compatible memory types in
1825+ * the priority order defined by the driver. If free space isn't found, then
1826+ * drm_bo_mem_force_space is attempted in priority order to evict and find
1827+ * space.
1828+ */
1829+int drm_bo_mem_space(struct drm_buffer_object *bo,
1830+ struct drm_bo_mem_reg *mem, int no_wait)
1831+{
1832+ struct drm_device *dev = bo->dev;
1833+ struct drm_buffer_manager *bm = &dev->bm;
1834+ struct drm_mem_type_manager *man;
1835+
1836+ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1837+ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1838+ uint32_t i;
1839+ uint32_t mem_type = DRM_BO_MEM_LOCAL;
1840+ uint32_t cur_flags;
1841+ int type_found = 0;
1842+ int type_ok = 0;
1843+ int has_eagain = 0;
1844+ struct drm_mm_node *node = NULL;
1845+ int ret;
1846+
1847+ mem->mm_node = NULL;
1848+ for (i = 0; i < num_prios; ++i) {
1849+ mem_type = prios[i];
1850+ man = &bm->man[mem_type];
1851+
1852+ type_ok = drm_bo_mt_compatible(man,
1853+ bo->type == drm_bo_type_user,
1854+ mem_type, mem->mask,
1855+ &cur_flags);
1856+
1857+ if (!type_ok)
1858+ continue;
1859+
1860+ if (mem_type == DRM_BO_MEM_LOCAL)
1861+ break;
1862+
1863+ if ((mem_type == bo->pinned_mem_type) &&
1864+ (bo->pinned_node != NULL)) {
1865+ node = bo->pinned_node;
1866+ break;
1867+ }
1868+
1869+ mutex_lock(&dev->struct_mutex);
1870+ if (man->has_type && man->use_type) {
1871+ type_found = 1;
1872+ node = drm_mm_search_free(&man->manager, mem->num_pages,
1873+ mem->page_alignment, 1);
1874+ if (node)
1875+ node = drm_mm_get_block(node, mem->num_pages,
1876+ mem->page_alignment);
1877+ }
1878+ mutex_unlock(&dev->struct_mutex);
1879+ if (node)
1880+ break;
1881+ }
1882+
1883+ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
1884+ mem->mm_node = node;
1885+ mem->mem_type = mem_type;
1886+ mem->flags = cur_flags;
1887+ return 0;
1888+ }
1889+
1890+ if (!type_found)
1891+ return -EINVAL;
1892+
1893+ num_prios = dev->driver->bo_driver->num_mem_busy_prio;
1894+ prios = dev->driver->bo_driver->mem_busy_prio;
1895+
1896+ for (i = 0; i < num_prios; ++i) {
1897+ mem_type = prios[i];
1898+ man = &bm->man[mem_type];
1899+
1900+ if (!man->has_type)
1901+ continue;
1902+
1903+ if (!drm_bo_mt_compatible(man,
1904+ bo->type == drm_bo_type_user,
1905+ mem_type,
1906+ mem->mask,
1907+ &cur_flags))
1908+ continue;
1909+
1910+ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
1911+
1912+ if (ret == 0 && mem->mm_node) {
1913+ mem->flags = cur_flags;
1914+ return 0;
1915+ }
1916+
1917+ if (ret == -EAGAIN)
1918+ has_eagain = 1;
1919+ }
1920+
1921+ ret = (has_eagain) ? -EAGAIN : -ENOMEM;
1922+ return ret;
1923+}
1924+EXPORT_SYMBOL(drm_bo_mem_space);
1925+
1926+static int drm_bo_new_mask(struct drm_buffer_object *bo,
1927+ uint64_t new_flags, uint64_t used_mask)
1928+{
1929+ uint32_t new_props;
1930+
1931+ if (bo->type == drm_bo_type_user &&
1932+ ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
1933+ (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
1934+ DRM_ERROR("User buffers require cache-coherent memory.\n");
1935+ return -EINVAL;
1936+ }
1937+
1938+ if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1939+ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
1940+ return -EPERM;
1941+ }
1942+
1943+ if (likely(used_mask & DRM_BO_MASK_MEM) &&
1944+ (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
1945+ !DRM_SUSER(DRM_CURPROC)) {
1946+ if (likely(bo->mem.flags & new_flags & used_mask &
1947+ DRM_BO_MASK_MEM))
1948+ new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
1949+ (bo->mem.flags & DRM_BO_MASK_MEM);
1950+ else {
1951+ DRM_ERROR("Incompatible memory type specification "
1952+ "for NO_EVICT buffer.\n");
1953+ return -EPERM;
1954+ }
1955+ }
1956+
1957+ if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1958+ DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1959+ return -EPERM;
1960+ }
1961+
1962+ new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1963+ DRM_BO_FLAG_READ);
1964+
1965+ if (!new_props) {
1966+ DRM_ERROR("Invalid buffer object rwx properties\n");
1967+ return -EINVAL;
1968+ }
1969+
1970+ bo->mem.mask = new_flags;
1971+ return 0;
1972+}
1973+
1974+/*
1975+ * Call dev->struct_mutex locked.
1976+ */
1977+
1978+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1979+ uint32_t handle, int check_owner)
1980+{
1981+ struct drm_user_object *uo;
1982+ struct drm_buffer_object *bo;
1983+
1984+ uo = drm_lookup_user_object(file_priv, handle);
1985+
1986+ if (!uo || (uo->type != drm_buffer_type)) {
1987+ DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1988+ return NULL;
1989+ }
1990+
1991+ if (check_owner && file_priv != uo->owner) {
1992+ if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1993+ return NULL;
1994+ }
1995+
1996+ bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1997+ atomic_inc(&bo->usage);
1998+ return bo;
1999+}
2000+EXPORT_SYMBOL(drm_lookup_buffer_object);
2001+
2002+/*
2003+ * Call bo->mutex locked.
2004+ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
2005+ * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
2006+ */
2007+
2008+static int drm_bo_quick_busy(struct drm_buffer_object *bo)
2009+{
2010+ struct drm_fence_object *fence = bo->fence;
2011+
2012+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2013+ if (fence) {
2014+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
2015+ drm_fence_usage_deref_unlocked(&bo->fence);
2016+ return 0;
2017+ }
2018+ return 1;
2019+ }
2020+ return 0;
2021+}
2022+
2023+/*
2024+ * Call bo->mutex locked.
2025+ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
2026+ */
2027+
2028+static int drm_bo_busy(struct drm_buffer_object *bo)
2029+{
2030+ struct drm_fence_object *fence = bo->fence;
2031+
2032+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2033+ if (fence) {
2034+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
2035+ drm_fence_usage_deref_unlocked(&bo->fence);
2036+ return 0;
2037+ }
2038+ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
2039+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
2040+ drm_fence_usage_deref_unlocked(&bo->fence);
2041+ return 0;
2042+ }
2043+ return 1;
2044+ }
2045+ return 0;
2046+}
2047+
2048+static int drm_bo_evict_cached(struct drm_buffer_object *bo)
2049+{
2050+ int ret = 0;
2051+
2052+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2053+ if (bo->mem.mm_node)
2054+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
2055+ return ret;
2056+}
2057+
2058+/*
2059+ * Wait until a buffer is unmapped.
2060+ */
2061+
2062+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
2063+{
2064+ int ret = 0;
2065+
2066+ if ((atomic_read(&bo->mapped) >= 0) && no_wait)
2067+ return -EBUSY;
2068+
2069+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
2070+ atomic_read(&bo->mapped) == -1);
2071+
2072+ if (ret == -EINTR)
2073+ ret = -EAGAIN;
2074+
2075+ return ret;
2076+}
2077+
2078+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
2079+{
2080+ int ret;
2081+
2082+ mutex_lock(&bo->mutex);
2083+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2084+ mutex_unlock(&bo->mutex);
2085+ return ret;
2086+}
2087+
2088+/*
2089+ * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
2090+ * Until then, we cannot really do anything with it except delete it.
2091+ */
2092+
2093+static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
2094+ int eagain_if_wait)
2095+{
2096+ int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2097+
2098+ if (ret && no_wait)
2099+ return -EBUSY;
2100+ else if (!ret)
2101+ return 0;
2102+
2103+ ret = 0;
2104+ mutex_unlock(&bo->mutex);
2105+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
2106+ !drm_bo_check_unfenced(bo));
2107+ mutex_lock(&bo->mutex);
2108+ if (ret == -EINTR)
2109+ return -EAGAIN;
2110+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
2111+ if (ret) {
2112+ DRM_ERROR("Timeout waiting for buffer to become fenced\n");
2113+ return -EBUSY;
2114+ }
2115+ if (eagain_if_wait)
2116+ return -EAGAIN;
2117+
2118+ return 0;
2119+}
2120+
2121+/*
2122+ * Fill in the ioctl reply argument with buffer info.
2123+ * Bo locked.
2124+ */
2125+
2126+void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
2127+ struct drm_bo_info_rep *rep)
2128+{
2129+ if (!rep)
2130+ return;
2131+
2132+ rep->handle = bo->base.hash.key;
2133+ rep->flags = bo->mem.flags;
2134+ rep->size = bo->num_pages * PAGE_SIZE;
2135+ rep->offset = bo->offset;
2136+
2137+ if (bo->type == drm_bo_type_dc)
2138+ rep->arg_handle = bo->map_list.user_token;
2139+ else
2140+ rep->arg_handle = 0;
2141+
2142+ rep->mask = bo->mem.mask;
2143+ rep->buffer_start = bo->buffer_start;
2144+ rep->fence_flags = bo->fence_type;
2145+ rep->rep_flags = 0;
2146+ rep->page_alignment = bo->mem.page_alignment;
2147+
2148+ if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
2149+ DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
2150+ DRM_BO_REP_BUSY);
2151+ }
2152+}
2153+EXPORT_SYMBOL(drm_bo_fill_rep_arg);
2154+
2155+/*
2156+ * Wait for buffer idle and register that we've mapped the buffer.
2157+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
2158+ * so that if the client dies, the mapping is automatically
2159+ * unregistered.
2160+ */
2161+
2162+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
2163+ uint32_t map_flags, unsigned hint,
2164+ struct drm_bo_info_rep *rep)
2165+{
2166+ struct drm_buffer_object *bo;
2167+ struct drm_device *dev = file_priv->minor->dev;
2168+ int ret = 0;
2169+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
2170+
2171+ mutex_lock(&dev->struct_mutex);
2172+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2173+ mutex_unlock(&dev->struct_mutex);
2174+
2175+ if (!bo)
2176+ return -EINVAL;
2177+
2178+ mutex_lock(&bo->mutex);
2179+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
2180+ if (ret)
2181+ goto out;
2182+
2183+ /*
2184+ * If this returns true, we are currently unmapped.
2185+ * We need to do this test, because unmapping can
2186+ * be done without the bo->mutex held.
2187+ */
2188+
2189+ while (1) {
2190+ if (atomic_inc_and_test(&bo->mapped)) {
2191+ if (no_wait && drm_bo_busy(bo)) {
2192+ atomic_dec(&bo->mapped);
2193+ ret = -EBUSY;
2194+ goto out;
2195+ }
2196+ ret = drm_bo_wait(bo, 0, 0, no_wait);
2197+ if (ret) {
2198+ atomic_dec(&bo->mapped);
2199+ goto out;
2200+ }
2201+
2202+ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
2203+ drm_bo_evict_cached(bo);
2204+
2205+ break;
2206+ } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
2207+
2208+ /*
2209+ * We are already mapped with different flags.
2210+ * need to wait for unmap.
2211+ */
2212+
2213+ ret = drm_bo_wait_unmapped(bo, no_wait);
2214+ if (ret)
2215+ goto out;
2216+
2217+ continue;
2218+ }
2219+ break;
2220+ }
2221+
2222+ mutex_lock(&dev->struct_mutex);
2223+ ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
2224+ mutex_unlock(&dev->struct_mutex);
2225+ if (ret) {
2226+ if (atomic_add_negative(-1, &bo->mapped))
2227+ wake_up_all(&bo->event_queue);
2228+
2229+ } else
2230+ drm_bo_fill_rep_arg(bo, rep);
2231+out:
2232+ mutex_unlock(&bo->mutex);
2233+ drm_bo_usage_deref_unlocked(&bo);
2234+ return ret;
2235+}
2236+
2237+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
2238+{
2239+ struct drm_device *dev = file_priv->minor->dev;
2240+ struct drm_buffer_object *bo;
2241+ struct drm_ref_object *ro;
2242+ int ret = 0;
2243+
2244+ mutex_lock(&dev->struct_mutex);
2245+
2246+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2247+ if (!bo) {
2248+ ret = -EINVAL;
2249+ goto out;
2250+ }
2251+
2252+ ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
2253+ if (!ro) {
2254+ ret = -EINVAL;
2255+ goto out;
2256+ }
2257+
2258+ drm_remove_ref_object(file_priv, ro);
2259+ drm_bo_usage_deref_locked(&bo);
2260+out:
2261+ mutex_unlock(&dev->struct_mutex);
2262+ return ret;
2263+}
2264+
2265+/*
2266+ * Call struct-sem locked.
2267+ */
2268+
2269+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
2270+ struct drm_user_object *uo,
2271+ enum drm_ref_type action)
2272+{
2273+ struct drm_buffer_object *bo =
2274+ drm_user_object_entry(uo, struct drm_buffer_object, base);
2275+
2276+ /*
2277+ * We DON'T want to take the bo->lock here, because we want to
2278+ * hold it when we wait for unmapped buffer.
2279+ */
2280+
2281+ BUG_ON(action != _DRM_REF_TYPE1);
2282+
2283+ if (atomic_add_negative(-1, &bo->mapped))
2284+ wake_up_all(&bo->event_queue);
2285+}
2286+
2287+/*
2288+ * bo->mutex locked.
2289+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
2290+ */
2291+
2292+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
2293+ int no_wait, int move_unfenced)
2294+{
2295+ struct drm_device *dev = bo->dev;
2296+ struct drm_buffer_manager *bm = &dev->bm;
2297+ int ret = 0;
2298+ struct drm_bo_mem_reg mem;
2299+ /*
2300+ * Flush outstanding fences.
2301+ */
2302+
2303+ drm_bo_busy(bo);
2304+
2305+ /*
2306+ * Wait for outstanding fences.
2307+ */
2308+
2309+ ret = drm_bo_wait(bo, 0, 0, no_wait);
2310+ if (ret)
2311+ return ret;
2312+
2313+ mem.num_pages = bo->num_pages;
2314+ mem.size = mem.num_pages << PAGE_SHIFT;
2315+ mem.mask = new_mem_flags;
2316+ mem.page_alignment = bo->mem.page_alignment;
2317+
2318+ mutex_lock(&bm->evict_mutex);
2319+ mutex_lock(&dev->struct_mutex);
2320+ list_del_init(&bo->lru);
2321+ mutex_unlock(&dev->struct_mutex);
2322+
2323+ /*
2324+ * Determine where to move the buffer.
2325+ */
2326+ ret = drm_bo_mem_space(bo, &mem, no_wait);
2327+ if (ret)
2328+ goto out_unlock;
2329+
2330+ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
2331+
2332+out_unlock:
2333+ mutex_lock(&dev->struct_mutex);
2334+ if (ret || !move_unfenced) {
2335+ if (mem.mm_node) {
2336+ if (mem.mm_node != bo->pinned_node)
2337+ drm_mm_put_block(mem.mm_node);
2338+ mem.mm_node = NULL;
2339+ }
2340+ drm_bo_add_to_lru(bo);
2341+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
2342+ wake_up_all(&bo->event_queue);
2343+ DRM_FLAG_MASKED(bo->priv_flags, 0,
2344+ _DRM_BO_FLAG_UNFENCED);
2345+ }
2346+ } else {
2347+ list_add_tail(&bo->lru, &bm->unfenced);
2348+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
2349+ _DRM_BO_FLAG_UNFENCED);
2350+ }
2351+ mutex_unlock(&dev->struct_mutex);
2352+ mutex_unlock(&bm->evict_mutex);
2353+ return ret;
2354+}
2355+
2356+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
2357+{
2358+ uint32_t flag_diff = (mem->mask ^ mem->flags);
2359+
2360+ if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
2361+ return 0;
2362+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
2363+ (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
2364+ (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
2365+ return 0;
2366+
2367+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
2368+ ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
2369+ (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
2370+ return 0;
2371+ return 1;
2372+}
2373+
2374+/*
2375+ * bo locked.
2376+ */
2377+
2378+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
2379+ uint32_t fence_class,
2380+ int move_unfenced, int no_wait)
2381+{
2382+ struct drm_device *dev = bo->dev;
2383+ struct drm_buffer_manager *bm = &dev->bm;
2384+ struct drm_bo_driver *driver = dev->driver->bo_driver;
2385+ uint32_t ftype;
2386+ int ret;
2387+
2388+ DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
2389+ (unsigned long long) bo->mem.mask,
2390+ (unsigned long long) bo->mem.flags);
2391+
2392+ ret = driver->fence_type(bo, &fence_class, &ftype);
2393+
2394+ if (ret) {
2395+ DRM_ERROR("Driver did not support given buffer permissions\n");
2396+ return ret;
2397+ }
2398+
2399+ /*
2400+ * We're switching command submission mechanism,
2401+ * or cannot simply rely on the hardware serializing for us.
2402+ *
2403+ * Insert a driver-dependant barrier or wait for buffer idle.
2404+ */
2405+
2406+ if ((fence_class != bo->fence_class) ||
2407+ ((ftype ^ bo->fence_type) & bo->fence_type)) {
2408+
2409+ ret = -EINVAL;
2410+ if (driver->command_stream_barrier) {
2411+ ret = driver->command_stream_barrier(bo,
2412+ fence_class,
2413+ ftype,
2414+ no_wait);
2415+ }
2416+ if (ret)
2417+ ret = drm_bo_wait(bo, 0, 0, no_wait);
2418+
2419+ if (ret)
2420+ return ret;
2421+
2422+ }
2423+
2424+ bo->new_fence_class = fence_class;
2425+ bo->new_fence_type = ftype;
2426+
2427+ ret = drm_bo_wait_unmapped(bo, no_wait);
2428+ if (ret) {
2429+ DRM_ERROR("Timed out waiting for buffer unmap.\n");
2430+ return ret;
2431+ }
2432+
2433+ /*
2434+ * Check whether we need to move buffer.
2435+ */
2436+
2437+ if (!drm_bo_mem_compat(&bo->mem)) {
2438+ ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
2439+ move_unfenced);
2440+ if (ret) {
2441+ if (ret != -EAGAIN)
2442+ DRM_ERROR("Failed moving buffer.\n");
2443+ if (ret == -ENOMEM)
2444+ DRM_ERROR("Out of aperture space.\n");
2445+ return ret;
2446+ }
2447+ }
2448+
2449+ /*
2450+ * Pinned buffers.
2451+ */
2452+
2453+ if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
2454+ bo->pinned_mem_type = bo->mem.mem_type;
2455+ mutex_lock(&dev->struct_mutex);
2456+ list_del_init(&bo->pinned_lru);
2457+ drm_bo_add_to_pinned_lru(bo);
2458+
2459+ if (bo->pinned_node != bo->mem.mm_node) {
2460+ if (bo->pinned_node != NULL)
2461+ drm_mm_put_block(bo->pinned_node);
2462+ bo->pinned_node = bo->mem.mm_node;
2463+ }
2464+
2465+ mutex_unlock(&dev->struct_mutex);
2466+
2467+ } else if (bo->pinned_node != NULL) {
2468+
2469+ mutex_lock(&dev->struct_mutex);
2470+
2471+ if (bo->pinned_node != bo->mem.mm_node)
2472+ drm_mm_put_block(bo->pinned_node);
2473+
2474+ list_del_init(&bo->pinned_lru);
2475+ bo->pinned_node = NULL;
2476+ mutex_unlock(&dev->struct_mutex);
2477+
2478+ }
2479+
2480+ /*
2481+ * We might need to add a TTM.
2482+ */
2483+
2484+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
2485+ ret = drm_bo_add_ttm(bo);
2486+ if (ret)
2487+ return ret;
2488+ }
2489+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
2490+
2491+ /*
2492+ * Finally, adjust lru to be sure.
2493+ */
2494+
2495+ mutex_lock(&dev->struct_mutex);
2496+ list_del(&bo->lru);
2497+ if (move_unfenced) {
2498+ list_add_tail(&bo->lru, &bm->unfenced);
2499+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
2500+ _DRM_BO_FLAG_UNFENCED);
2501+ } else {
2502+ drm_bo_add_to_lru(bo);
2503+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
2504+ wake_up_all(&bo->event_queue);
2505+ DRM_FLAG_MASKED(bo->priv_flags, 0,
2506+ _DRM_BO_FLAG_UNFENCED);
2507+ }
2508+ }
2509+ mutex_unlock(&dev->struct_mutex);
2510+
2511+ return 0;
2512+}
2513+
2514+int drm_bo_do_validate(struct drm_buffer_object *bo,
2515+ uint64_t flags, uint64_t mask, uint32_t hint,
2516+ uint32_t fence_class,
2517+ int no_wait,
2518+ struct drm_bo_info_rep *rep)
2519+{
2520+ int ret;
2521+
2522+ mutex_lock(&bo->mutex);
2523+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
2524+
2525+ if (ret)
2526+ goto out;
2527+
2528+ DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
2529+ ret = drm_bo_new_mask(bo, flags, mask);
2530+ if (ret)
2531+ goto out;
2532+
2533+ ret = drm_buffer_object_validate(bo,
2534+ fence_class,
2535+ !(hint & DRM_BO_HINT_DONT_FENCE),
2536+ no_wait);
2537+out:
2538+ if (rep)
2539+ drm_bo_fill_rep_arg(bo, rep);
2540+
2541+ mutex_unlock(&bo->mutex);
2542+ return ret;
2543+}
2544+EXPORT_SYMBOL(drm_bo_do_validate);
2545+
2546+
2547+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
2548+ uint32_t fence_class,
2549+ uint64_t flags, uint64_t mask,
2550+ uint32_t hint,
2551+ int use_old_fence_class,
2552+ struct drm_bo_info_rep *rep,
2553+ struct drm_buffer_object **bo_rep)
2554+{
2555+ struct drm_device *dev = file_priv->minor->dev;
2556+ struct drm_buffer_object *bo;
2557+ int ret;
2558+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
2559+
2560+ mutex_lock(&dev->struct_mutex);
2561+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2562+ mutex_unlock(&dev->struct_mutex);
2563+
2564+ if (!bo)
2565+ return -EINVAL;
2566+
2567+ if (use_old_fence_class)
2568+ fence_class = bo->fence_class;
2569+
2570+ /*
2571+ * Only allow creator to change shared buffer mask.
2572+ */
2573+
2574+ if (bo->base.owner != file_priv)
2575+ mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
2576+
2577+
2578+ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
2579+ no_wait, rep);
2580+
2581+ if (!ret && bo_rep)
2582+ *bo_rep = bo;
2583+ else
2584+ drm_bo_usage_deref_unlocked(&bo);
2585+
2586+ return ret;
2587+}
2588+EXPORT_SYMBOL(drm_bo_handle_validate);
2589+
2590+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
2591+ struct drm_bo_info_rep *rep)
2592+{
2593+ struct drm_device *dev = file_priv->minor->dev;
2594+ struct drm_buffer_object *bo;
2595+
2596+ mutex_lock(&dev->struct_mutex);
2597+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2598+ mutex_unlock(&dev->struct_mutex);
2599+
2600+ if (!bo)
2601+ return -EINVAL;
2602+
2603+ mutex_lock(&bo->mutex);
2604+ if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
2605+ (void)drm_bo_busy(bo);
2606+ drm_bo_fill_rep_arg(bo, rep);
2607+ mutex_unlock(&bo->mutex);
2608+ drm_bo_usage_deref_unlocked(&bo);
2609+ return 0;
2610+}
2611+
2612+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
2613+ uint32_t hint,
2614+ struct drm_bo_info_rep *rep)
2615+{
2616+ struct drm_device *dev = file_priv->minor->dev;
2617+ struct drm_buffer_object *bo;
2618+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
2619+ int ret;
2620+
2621+ mutex_lock(&dev->struct_mutex);
2622+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
2623+ mutex_unlock(&dev->struct_mutex);
2624+
2625+ if (!bo)
2626+ return -EINVAL;
2627+
2628+ mutex_lock(&bo->mutex);
2629+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
2630+ if (ret)
2631+ goto out;
2632+ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
2633+ if (ret)
2634+ goto out;
2635+
2636+ drm_bo_fill_rep_arg(bo, rep);
2637+
2638+out:
2639+ mutex_unlock(&bo->mutex);
2640+ drm_bo_usage_deref_unlocked(&bo);
2641+ return ret;
2642+}
2643+
2644+static inline size_t drm_size_align(size_t size)
2645+{
2646+ size_t tmpSize = 4;
2647+ if (size > PAGE_SIZE)
2648+ return PAGE_ALIGN(size);
2649+ while (tmpSize < size)
2650+ tmpSize <<= 1;
2651+
2652+ return (size_t) tmpSize;
2653+}
2654+
2655+static int drm_bo_reserve_size(struct drm_device *dev,
2656+ int user_bo,
2657+ unsigned long num_pages,
2658+ unsigned long *size)
2659+{
2660+ struct drm_bo_driver *driver = dev->driver->bo_driver;
2661+
2662+ *size = drm_size_align(sizeof(struct drm_buffer_object)) +
2663+ /* Always account for a TTM, even for fixed memory types */
2664+ drm_ttm_size(dev, num_pages, user_bo) +
2665+ /* user space mapping structure */
2666+ drm_size_align(sizeof(drm_local_map_t)) +
2667+ /* file offset space, aperture space, pinned space */
2668+ 3*drm_size_align(sizeof(struct drm_mm_node *)) +
2669+ /* ttm backend */
2670+ driver->backend_size(dev, num_pages);
2671+
2672+ // FIXME - ENOMEM?
2673+ return 0;
2674+}
2675+
2676+int drm_buffer_object_create(struct drm_device *dev,
2677+ unsigned long size,
2678+ enum drm_bo_type type,
2679+ uint64_t mask,
2680+ uint32_t hint,
2681+ uint32_t page_alignment,
2682+ unsigned long buffer_start,
2683+ struct drm_buffer_object **buf_obj)
2684+{
2685+ struct drm_buffer_manager *bm = &dev->bm;
2686+ struct drm_buffer_object *bo;
2687+ int ret = 0;
2688+ unsigned long num_pages;
2689+ unsigned long reserved_size;
2690+
2691+ size += buffer_start & ~PAGE_MASK;
2692+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2693+ if (num_pages == 0) {
2694+ DRM_ERROR("Illegal buffer object size.\n");
2695+ return -EINVAL;
2696+ }
2697+
2698+ ret = drm_bo_reserve_size(dev, type == drm_bo_type_user,
2699+ num_pages, &reserved_size);
2700+
2701+ if (ret) {
2702+ DRM_DEBUG("Failed reserving space for buffer object.\n");
2703+ return ret;
2704+ }
2705+
2706+ bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
2707+
2708+ if (!bo) {
2709+ drm_bo_unreserve_size(num_pages);
2710+ return -ENOMEM;
2711+ }
2712+
2713+ mutex_init(&bo->mutex);
2714+ mutex_lock(&bo->mutex);
2715+
2716+ bo->reserved_size = reserved_size;
2717+ atomic_set(&bo->usage, 1);
2718+ atomic_set(&bo->mapped, -1);
2719+ DRM_INIT_WAITQUEUE(&bo->event_queue);
2720+ INIT_LIST_HEAD(&bo->lru);
2721+ INIT_LIST_HEAD(&bo->pinned_lru);
2722+ INIT_LIST_HEAD(&bo->ddestroy);
2723+#ifdef DRM_ODD_MM_COMPAT
2724+ INIT_LIST_HEAD(&bo->p_mm_list);
2725+ INIT_LIST_HEAD(&bo->vma_list);
2726+#endif
2727+ bo->dev = dev;
2728+ bo->type = type;
2729+ bo->num_pages = num_pages;
2730+ bo->mem.mem_type = DRM_BO_MEM_LOCAL;
2731+ bo->mem.num_pages = bo->num_pages;
2732+ bo->mem.mm_node = NULL;
2733+ bo->mem.page_alignment = page_alignment;
2734+ bo->buffer_start = buffer_start & PAGE_MASK;
2735+ bo->priv_flags = 0;
2736+ bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
2737+ DRM_BO_FLAG_MAPPABLE;
2738+ bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
2739+ DRM_BO_FLAG_MAPPABLE;
2740+ atomic_inc(&bm->count);
2741+ ret = drm_bo_new_mask(bo, mask, mask);
2742+ if (ret)
2743+ goto out_err;
2744+
2745+ if (bo->type == drm_bo_type_dc) {
2746+ mutex_lock(&dev->struct_mutex);
2747+ ret = drm_bo_setup_vm_locked(bo);
2748+ mutex_unlock(&dev->struct_mutex);
2749+ if (ret)
2750+ goto out_err;
2751+ }
2752+
2753+ ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
2754+ if (ret)
2755+ goto out_err;
2756+
2757+ mutex_unlock(&bo->mutex);
2758+ *buf_obj = bo;
2759+ return 0;
2760+
2761+out_err:
2762+ mutex_unlock(&bo->mutex);
2763+
2764+ drm_bo_usage_deref_unlocked(&bo);
2765+ return ret;
2766+}
2767+EXPORT_SYMBOL(drm_buffer_object_create);
2768+
2769+
2770+static int drm_bo_add_user_object(struct drm_file *file_priv,
2771+ struct drm_buffer_object *bo, int shareable)
2772+{
2773+ struct drm_device *dev = file_priv->minor->dev;
2774+ int ret;
2775+
2776+ mutex_lock(&dev->struct_mutex);
2777+ ret = drm_add_user_object(file_priv, &bo->base, shareable);
2778+ if (ret)
2779+ goto out;
2780+
2781+ bo->base.remove = drm_bo_base_deref_locked;
2782+ bo->base.type = drm_buffer_type;
2783+ bo->base.ref_struct_locked = NULL;
2784+ bo->base.unref = drm_buffer_user_object_unmap;
2785+
2786+out:
2787+ mutex_unlock(&dev->struct_mutex);
2788+ return ret;
2789+}
2790+
2791+int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2792+{
2793+ struct drm_bo_create_arg *arg = data;
2794+ struct drm_bo_create_req *req = &arg->d.req;
2795+ struct drm_bo_info_rep *rep = &arg->d.rep;
2796+ struct drm_buffer_object *entry;
2797+ enum drm_bo_type bo_type;
2798+ int ret = 0;
2799+
2800+ DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
2801+ (int)(req->size / 1024), req->page_alignment * 4);
2802+
2803+ if (!dev->bm.initialized) {
2804+ DRM_ERROR("Buffer object manager is not initialized.\n");
2805+ return -EINVAL;
2806+ }
2807+
2808+ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
2809+
2810+ if (bo_type == drm_bo_type_user)
2811+ req->mask &= ~DRM_BO_FLAG_SHAREABLE;
2812+
2813+ ret = drm_buffer_object_create(file_priv->minor->dev,
2814+ req->size, bo_type, req->mask,
2815+ req->hint, req->page_alignment,
2816+ req->buffer_start, &entry);
2817+ if (ret)
2818+ goto out;
2819+
2820+ ret = drm_bo_add_user_object(file_priv, entry,
2821+ req->mask & DRM_BO_FLAG_SHAREABLE);
2822+ if (ret) {
2823+ drm_bo_usage_deref_unlocked(&entry);
2824+ goto out;
2825+ }
2826+
2827+ mutex_lock(&entry->mutex);
2828+ drm_bo_fill_rep_arg(entry, rep);
2829+ mutex_unlock(&entry->mutex);
2830+
2831+out:
2832+ return ret;
2833+}
2834+
2835+int drm_bo_setstatus_ioctl(struct drm_device *dev,
2836+ void *data, struct drm_file *file_priv)
2837+{
2838+ struct drm_bo_map_wait_idle_arg *arg = data;
2839+ struct drm_bo_info_req *req = &arg->d.req;
2840+ struct drm_bo_info_rep *rep = &arg->d.rep;
2841+ int ret;
2842+
2843+ if (!dev->bm.initialized) {
2844+ DRM_ERROR("Buffer object manager is not initialized.\n");
2845+ return -EINVAL;
2846+ }
2847+
2848+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
2849+ if (ret)
2850+ return ret;
2851+
2852+ ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
2853+ req->flags,
2854+ req->mask,
2855+ req->hint | DRM_BO_HINT_DONT_FENCE,
2856+ 1,
2857+ rep, NULL);
2858+
2859+ (void) drm_bo_read_unlock(&dev->bm.bm_lock);
2860+ if (ret)
2861+ return ret;
2862+
2863+ return 0;
2864+}
2865+
2866+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2867+{
2868+ struct drm_bo_map_wait_idle_arg *arg = data;
2869+ struct drm_bo_info_req *req = &arg->d.req;
2870+ struct drm_bo_info_rep *rep = &arg->d.rep;
2871+ int ret;
2872+ if (!dev->bm.initialized) {
2873+ DRM_ERROR("Buffer object manager is not initialized.\n");
2874+ return -EINVAL;
2875+ }
2876+
2877+ ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
2878+ req->hint, rep);
2879+ if (ret)
2880+ return ret;
2881+
2882+ return 0;
2883+}
2884+
2885+int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2886+{
2887+ struct drm_bo_handle_arg *arg = data;
2888+ int ret;
2889+ if (!dev->bm.initialized) {
2890+ DRM_ERROR("Buffer object manager is not initialized.\n");
2891+ return -EINVAL;
2892+ }
2893+
2894+ ret = drm_buffer_object_unmap(file_priv, arg->handle);
2895+ return ret;
2896+}
2897+
2898+
2899+int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2900+{
2901+ struct drm_bo_reference_info_arg *arg = data;
2902+ struct drm_bo_handle_arg *req = &arg->d.req;
2903+ struct drm_bo_info_rep *rep = &arg->d.rep;
2904+ struct drm_user_object *uo;
2905+ int ret;
2906+
2907+ if (!dev->bm.initialized) {
2908+ DRM_ERROR("Buffer object manager is not initialized.\n");
2909+ return -EINVAL;
2910+ }
2911+
2912+ ret = drm_user_object_ref(file_priv, req->handle,
2913+ drm_buffer_type, &uo);
2914+ if (ret)
2915+ return ret;
2916+
2917+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
2918+ if (ret)
2919+ return ret;
2920+
2921+ return 0;
2922+}
2923+
2924+int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2925+{
2926+ struct drm_bo_handle_arg *arg = data;
2927+ int ret = 0;
2928+
2929+ if (!dev->bm.initialized) {
2930+ DRM_ERROR("Buffer object manager is not initialized.\n");
2931+ return -EINVAL;
2932+ }
2933+
2934+ ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2935+ return ret;
2936+}
2937+
2938+int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2939+{
2940+ struct drm_bo_reference_info_arg *arg = data;
2941+ struct drm_bo_handle_arg *req = &arg->d.req;
2942+ struct drm_bo_info_rep *rep = &arg->d.rep;
2943+ int ret;
2944+
2945+ if (!dev->bm.initialized) {
2946+ DRM_ERROR("Buffer object manager is not initialized.\n");
2947+ return -EINVAL;
2948+ }
2949+
2950+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
2951+ if (ret)
2952+ return ret;
2953+
2954+ return 0;
2955+}
2956+
2957+int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2958+{
2959+ struct drm_bo_map_wait_idle_arg *arg = data;
2960+ struct drm_bo_info_req *req = &arg->d.req;
2961+ struct drm_bo_info_rep *rep = &arg->d.rep;
2962+ int ret;
2963+ if (!dev->bm.initialized) {
2964+ DRM_ERROR("Buffer object manager is not initialized.\n");
2965+ return -EINVAL;
2966+ }
2967+
2968+ ret = drm_bo_handle_wait(file_priv, req->handle,
2969+ req->hint, rep);
2970+ if (ret)
2971+ return ret;
2972+
2973+ return 0;
2974+}
2975+
2976+static int drm_bo_leave_list(struct drm_buffer_object *bo,
2977+ uint32_t mem_type,
2978+ int free_pinned,
2979+ int allow_errors)
2980+{
2981+ struct drm_device *dev = bo->dev;
2982+ int ret = 0;
2983+
2984+ mutex_lock(&bo->mutex);
2985+
2986+ ret = drm_bo_expire_fence(bo, allow_errors);
2987+ if (ret)
2988+ goto out;
2989+
2990+ if (free_pinned) {
2991+ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2992+ mutex_lock(&dev->struct_mutex);
2993+ list_del_init(&bo->pinned_lru);
2994+ if (bo->pinned_node == bo->mem.mm_node)
2995+ bo->pinned_node = NULL;
2996+ if (bo->pinned_node != NULL) {
2997+ drm_mm_put_block(bo->pinned_node);
2998+ bo->pinned_node = NULL;
2999+ }
3000+ mutex_unlock(&dev->struct_mutex);
3001+ }
3002+
3003+ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
3004+ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
3005+ "cleanup. Removing flag and evicting.\n");
3006+ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
3007+ bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
3008+ }
3009+
3010+ if (bo->mem.mem_type == mem_type)
3011+ ret = drm_bo_evict(bo, mem_type, 0);
3012+
3013+ if (ret) {
3014+ if (allow_errors) {
3015+ goto out;
3016+ } else {
3017+ ret = 0;
3018+ DRM_ERROR("Cleanup eviction failed\n");
3019+ }
3020+ }
3021+
3022+out:
3023+ mutex_unlock(&bo->mutex);
3024+ return ret;
3025+}
3026+
3027+
3028+static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
3029+ int pinned_list)
3030+{
3031+ if (pinned_list)
3032+ return list_entry(list, struct drm_buffer_object, pinned_lru);
3033+ else
3034+ return list_entry(list, struct drm_buffer_object, lru);
3035+}
3036+
3037+/*
3038+ * dev->struct_mutex locked.
3039+ */
3040+
3041+static int drm_bo_force_list_clean(struct drm_device *dev,
3042+ struct list_head *head,
3043+ unsigned mem_type,
3044+ int free_pinned,
3045+ int allow_errors,
3046+ int pinned_list)
3047+{
3048+ struct list_head *list, *next, *prev;
3049+ struct drm_buffer_object *entry, *nentry;
3050+ int ret;
3051+ int do_restart;
3052+
3053+ /*
3054+ * The list traversal is a bit odd here, because an item may
3055+ * disappear from the list when we release the struct_mutex or
3056+ * when we decrease the usage count. Also we're not guaranteed
3057+ * to drain pinned lists, so we can't always restart.
3058+ */
3059+
3060+restart:
3061+ nentry = NULL;
3062+ list_for_each_safe(list, next, head) {
3063+ prev = list->prev;
3064+
3065+ entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
3066+ atomic_inc(&entry->usage);
3067+ if (nentry) {
3068+ atomic_dec(&nentry->usage);
3069+ nentry = NULL;
3070+ }
3071+
3072+ /*
3073+ * Protect the next item from destruction, so we can check
3074+ * its list pointers later on.
3075+ */
3076+
3077+ if (next != head) {
3078+ nentry = drm_bo_entry(next, pinned_list);
3079+ atomic_inc(&nentry->usage);
3080+ }
3081+ mutex_unlock(&dev->struct_mutex);
3082+
3083+ ret = drm_bo_leave_list(entry, mem_type, free_pinned,
3084+ allow_errors);
3085+ mutex_lock(&dev->struct_mutex);
3086+
3087+ drm_bo_usage_deref_locked(&entry);
3088+ if (ret)
3089+ return ret;
3090+
3091+ /*
3092+ * Has the next item disappeared from the list?
3093+ */
3094+
3095+ do_restart = ((next->prev != list) && (next->prev != prev));
3096+
3097+ if (nentry != NULL && do_restart)
3098+ drm_bo_usage_deref_locked(&nentry);
3099+
3100+ if (do_restart)
3101+ goto restart;
3102+ }
3103+ return 0;
3104+}
3105+
3106+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
3107+{
3108+ struct drm_buffer_manager *bm = &dev->bm;
3109+ struct drm_mem_type_manager *man = &bm->man[mem_type];
3110+ int ret = -EINVAL;
3111+
3112+ if (mem_type >= DRM_BO_MEM_TYPES) {
3113+ DRM_ERROR("Illegal memory type %d\n", mem_type);
3114+ return ret;
3115+ }
3116+
3117+ if (!man->has_type) {
3118+ DRM_ERROR("Trying to take down uninitialized "
3119+ "memory manager type %u\n", mem_type);
3120+ return ret;
3121+ }
3122+ man->use_type = 0;
3123+ man->has_type = 0;
3124+
3125+ ret = 0;
3126+ if (mem_type > 0) {
3127+ BUG_ON(!list_empty(&bm->unfenced));
3128+ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
3129+ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
3130+
3131+ if (drm_mm_clean(&man->manager)) {
3132+ drm_mm_takedown(&man->manager);
3133+ } else {
3134+ ret = -EBUSY;
3135+ }
3136+ }
3137+
3138+ return ret;
3139+}
3140+EXPORT_SYMBOL(drm_bo_clean_mm);
3141+
3142+/**
3143+ *Evict all buffers of a particular mem_type, but leave memory manager
3144+ *regions for NO_MOVE buffers intact. New buffers cannot be added at this
3145+ *point since we have the hardware lock.
3146+ */
3147+
3148+static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
3149+{
3150+ int ret;
3151+ struct drm_buffer_manager *bm = &dev->bm;
3152+ struct drm_mem_type_manager *man = &bm->man[mem_type];
3153+
3154+ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
3155+ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
3156+ return -EINVAL;
3157+ }
3158+
3159+ if (!man->has_type) {
3160+ DRM_ERROR("Memory type %u has not been initialized.\n",
3161+ mem_type);
3162+ return 0;
3163+ }
3164+
3165+ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
3166+ if (ret)
3167+ return ret;
3168+ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
3169+
3170+ return ret;
3171+}
3172+
3173+int drm_bo_init_mm(struct drm_device *dev,
3174+ unsigned type,
3175+ unsigned long p_offset, unsigned long p_size)
3176+{
3177+ struct drm_buffer_manager *bm = &dev->bm;
3178+ int ret = -EINVAL;
3179+ struct drm_mem_type_manager *man;
3180+
3181+ if (type >= DRM_BO_MEM_TYPES) {
3182+ DRM_ERROR("Illegal memory type %d\n", type);
3183+ return ret;
3184+ }
3185+
3186+ man = &bm->man[type];
3187+ if (man->has_type) {
3188+ DRM_ERROR("Memory manager already initialized for type %d\n",
3189+ type);
3190+ return ret;
3191+ }
3192+
3193+ ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
3194+ if (ret)
3195+ return ret;
3196+
3197+ ret = 0;
3198+ if (type != DRM_BO_MEM_LOCAL) {
3199+ if (!p_size) {
3200+ DRM_ERROR("Zero size memory manager type %d\n", type);
3201+ return ret;
3202+ }
3203+ ret = drm_mm_init(&man->manager, p_offset, p_size);
3204+ if (ret)
3205+ return ret;
3206+ }
3207+ man->has_type = 1;
3208+ man->use_type = 1;
3209+
3210+ INIT_LIST_HEAD(&man->lru);
3211+ INIT_LIST_HEAD(&man->pinned);
3212+
3213+ return 0;
3214+}
3215+EXPORT_SYMBOL(drm_bo_init_mm);
3216+
3217+/*
3218+ * This function is intended to be called on drm driver unload.
3219+ * If you decide to call it from lastclose, you must protect the call
3220+ * from a potentially racing drm_bo_driver_init in firstopen.
3221+ * (This may happen on X server restart).
3222+ */
3223+
3224+int drm_bo_driver_finish(struct drm_device *dev)
3225+{
3226+ struct drm_buffer_manager *bm = &dev->bm;
3227+ int ret = 0;
3228+ unsigned i = DRM_BO_MEM_TYPES;
3229+ struct drm_mem_type_manager *man;
3230+
3231+ mutex_lock(&dev->struct_mutex);
3232+
3233+ if (!bm->initialized)
3234+ goto out;
3235+ bm->initialized = 0;
3236+
3237+ while (i--) {
3238+ man = &bm->man[i];
3239+ if (man->has_type) {
3240+ man->use_type = 0;
3241+ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
3242+ ret = -EBUSY;
3243+ DRM_ERROR("DRM memory manager type %d "
3244+ "is not clean.\n", i);
3245+ }
3246+ man->has_type = 0;
3247+ }
3248+ }
3249+ mutex_unlock(&dev->struct_mutex);
3250+
3251+ if (!cancel_delayed_work(&bm->wq))
3252+ flush_scheduled_work();
3253+
3254+ mutex_lock(&dev->struct_mutex);
3255+ drm_bo_delayed_delete(dev, 1);
3256+ if (list_empty(&bm->ddestroy))
3257+ DRM_DEBUG("Delayed destroy list was clean\n");
3258+
3259+ if (list_empty(&bm->man[0].lru))
3260+ DRM_DEBUG("Swap list was clean\n");
3261+
3262+ if (list_empty(&bm->man[0].pinned))
3263+ DRM_DEBUG("NO_MOVE list was clean\n");
3264+
3265+ if (list_empty(&bm->unfenced))
3266+ DRM_DEBUG("Unfenced list was clean\n");
3267+
3268+ __free_page(bm->dummy_read_page);
3269+
3270+out:
3271+ mutex_unlock(&dev->struct_mutex);
3272+ return ret;
3273+}
3274+EXPORT_SYMBOL(drm_bo_driver_finish);
3275+
3276+/*
3277+ * This function is intended to be called on drm driver load.
3278+ * If you decide to call it from firstopen, you must protect the call
3279+ * from a potentially racing drm_bo_driver_finish in lastclose.
3280+ * (This may happen on X server restart).
3281+ */
3282+
3283+int drm_bo_driver_init(struct drm_device *dev)
3284+{
3285+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3286+ struct drm_buffer_manager *bm = &dev->bm;
3287+ int ret = -EINVAL;
3288+
3289+ bm->dummy_read_page = NULL;
3290+ drm_bo_init_lock(&bm->bm_lock);
3291+ mutex_lock(&dev->struct_mutex);
3292+ if (!driver)
3293+ goto out_unlock;
3294+
3295+ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
3296+ if (!bm->dummy_read_page) {
3297+ ret = -ENOMEM;
3298+ goto out_unlock;
3299+ }
3300+
3301+
3302+ /*
3303+ * Initialize the system memory buffer type.
3304+ * Other types need to be driver / IOCTL initialized.
3305+ */
3306+ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
3307+ if (ret)
3308+ goto out_unlock;
3309+
3310+ INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
3311+
3312+ bm->initialized = 1;
3313+ bm->nice_mode = 1;
3314+ atomic_set(&bm->count, 0);
3315+ bm->cur_pages = 0;
3316+ INIT_LIST_HEAD(&bm->unfenced);
3317+ INIT_LIST_HEAD(&bm->ddestroy);
3318+out_unlock:
3319+ mutex_unlock(&dev->struct_mutex);
3320+ return ret;
3321+}
3322+EXPORT_SYMBOL(drm_bo_driver_init);
3323+
3324+int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3325+{
3326+ struct drm_mm_init_arg *arg = data;
3327+ struct drm_buffer_manager *bm = &dev->bm;
3328+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3329+ int ret;
3330+
3331+ if (!driver) {
3332+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3333+ return -EINVAL;
3334+ }
3335+
3336+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
3337+ if (ret)
3338+ return ret;
3339+
3340+ ret = -EINVAL;
3341+ if (arg->magic != DRM_BO_INIT_MAGIC) {
3342+ DRM_ERROR("You are using an old libdrm that is not compatible with\n"
3343+ "\tthe kernel DRM module. Please upgrade your libdrm.\n");
3344+ return -EINVAL;
3345+ }
3346+ if (arg->major != DRM_BO_INIT_MAJOR) {
3347+ DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
3348+ "\tversion don't match. Got %d, expected %d.\n",
3349+ arg->major, DRM_BO_INIT_MAJOR);
3350+ return -EINVAL;
3351+ }
3352+
3353+ mutex_lock(&dev->struct_mutex);
3354+ if (!bm->initialized) {
3355+ DRM_ERROR("DRM memory manager was not initialized.\n");
3356+ goto out;
3357+ }
3358+ if (arg->mem_type == 0) {
3359+ DRM_ERROR("System memory buffers already initialized.\n");
3360+ goto out;
3361+ }
3362+ ret = drm_bo_init_mm(dev, arg->mem_type,
3363+ arg->p_offset, arg->p_size);
3364+
3365+out:
3366+ mutex_unlock(&dev->struct_mutex);
3367+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
3368+
3369+ if (ret)
3370+ return ret;
3371+
3372+ return 0;
3373+}
3374+
3375+int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3376+{
3377+ struct drm_mm_type_arg *arg = data;
3378+ struct drm_buffer_manager *bm = &dev->bm;
3379+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3380+ int ret;
3381+
3382+ if (!driver) {
3383+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3384+ return -EINVAL;
3385+ }
3386+
3387+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
3388+ if (ret)
3389+ return ret;
3390+
3391+ mutex_lock(&dev->struct_mutex);
3392+ ret = -EINVAL;
3393+ if (!bm->initialized) {
3394+ DRM_ERROR("DRM memory manager was not initialized\n");
3395+ goto out;
3396+ }
3397+ if (arg->mem_type == 0) {
3398+ DRM_ERROR("No takedown for System memory buffers.\n");
3399+ goto out;
3400+ }
3401+ ret = 0;
3402+ if (drm_bo_clean_mm(dev, arg->mem_type)) {
3403+ DRM_ERROR("Memory manager type %d not clean. "
3404+ "Delaying takedown\n", arg->mem_type);
3405+ }
3406+out:
3407+ mutex_unlock(&dev->struct_mutex);
3408+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
3409+
3410+ if (ret)
3411+ return ret;
3412+
3413+ return 0;
3414+}
3415+
3416+int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
3417+{
3418+ struct drm_mm_type_arg *arg = data;
3419+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3420+ int ret;
3421+
3422+ if (!driver) {
3423+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3424+ return -EINVAL;
3425+ }
3426+
3427+ if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
3428+ DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
3429+ return -EINVAL;
3430+ }
3431+
3432+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
3433+ ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
3434+ if (ret)
3435+ return ret;
3436+ }
3437+
3438+ mutex_lock(&dev->struct_mutex);
3439+ ret = drm_bo_lock_mm(dev, arg->mem_type);
3440+ mutex_unlock(&dev->struct_mutex);
3441+ if (ret) {
3442+ (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
3443+ return ret;
3444+ }
3445+
3446+ return 0;
3447+}
3448+
3449+int drm_mm_unlock_ioctl(struct drm_device *dev,
3450+ void *data,
3451+ struct drm_file *file_priv)
3452+{
3453+ struct drm_mm_type_arg *arg = data;
3454+ struct drm_bo_driver *driver = dev->driver->bo_driver;
3455+ int ret;
3456+
3457+ if (!driver) {
3458+ DRM_ERROR("Buffer objects are not supported by this driver\n");
3459+ return -EINVAL;
3460+ }
3461+
3462+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
3463+ ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
3464+ if (ret)
3465+ return ret;
3466+ }
3467+
3468+ return 0;
3469+}
3470+
3471+/*
3472+ * buffer object vm functions.
3473+ */
3474+
3475+int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
3476+{
3477+ struct drm_buffer_manager *bm = &dev->bm;
3478+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3479+
3480+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
3481+ if (mem->mem_type == DRM_BO_MEM_LOCAL)
3482+ return 0;
3483+
3484+ if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
3485+ return 0;
3486+
3487+ if (mem->flags & DRM_BO_FLAG_CACHED)
3488+ return 0;
3489+ }
3490+ return 1;
3491+}
3492+EXPORT_SYMBOL(drm_mem_reg_is_pci);
3493+
3494+/**
3495+ * \c Get the PCI offset for the buffer object memory.
3496+ *
3497+ * \param bo The buffer object.
3498+ * \param bus_base On return the base of the PCI region
3499+ * \param bus_offset On return the byte offset into the PCI region
3500+ * \param bus_size On return the byte size of the buffer object or zero if
3501+ * the buffer object memory is not accessible through a PCI region.
3502+ * \return Failure indication.
3503+ *
3504+ * Returns -EINVAL if the buffer object is currently not mappable.
3505+ * Otherwise returns zero.
3506+ */
3507+
3508+int drm_bo_pci_offset(struct drm_device *dev,
3509+ struct drm_bo_mem_reg *mem,
3510+ unsigned long *bus_base,
3511+ unsigned long *bus_offset, unsigned long *bus_size)
3512+{
3513+ struct drm_buffer_manager *bm = &dev->bm;
3514+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3515+
3516+ *bus_size = 0;
3517+ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
3518+ return -EINVAL;
3519+
3520+ if (drm_mem_reg_is_pci(dev, mem)) {
3521+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
3522+ *bus_size = mem->num_pages << PAGE_SHIFT;
3523+ *bus_base = man->io_offset;
3524+ }
3525+
3526+ return 0;
3527+}
3528+
3529+/**
3530+ * \c Kill all user-space virtual mappings of this buffer object.
3531+ *
3532+ * \param bo The buffer object.
3533+ *
3534+ * Call bo->mutex locked.
3535+ */
3536+
3537+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
3538+{
3539+ struct drm_device *dev = bo->dev;
3540+ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
3541+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
3542+
3543+ if (!dev->dev_mapping)
3544+ return;
3545+
3546+ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
3547+}
3548+
3549+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
3550+{
3551+ struct drm_map_list *list;
3552+ drm_local_map_t *map;
3553+ struct drm_device *dev = bo->dev;
3554+
3555+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
3556+ if (bo->type != drm_bo_type_dc)
3557+ return;
3558+
3559+ list = &bo->map_list;
3560+ if (list->user_token) {
3561+ drm_ht_remove_item(&dev->map_hash, &list->hash);
3562+ list->user_token = 0;
3563+ }
3564+ if (list->file_offset_node) {
3565+ drm_mm_put_block(list->file_offset_node);
3566+ list->file_offset_node = NULL;
3567+ }
3568+
3569+ map = list->map;
3570+ if (!map)
3571+ return;
3572+
3573+ drm_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
3574+ list->map = NULL;
3575+ list->user_token = 0ULL;
3576+ drm_bo_usage_deref_locked(&bo);
3577+}
3578+
3579+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
3580+{
3581+ struct drm_map_list *list = &bo->map_list;
3582+ drm_local_map_t *map;
3583+ struct drm_device *dev = bo->dev;
3584+
3585+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
3586+ list->map = drm_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
3587+ if (!list->map)
3588+ return -ENOMEM;
3589+
3590+ map = list->map;
3591+ map->offset = 0;
3592+ map->type = _DRM_TTM;
3593+ map->flags = _DRM_REMOVABLE;
3594+ map->size = bo->mem.num_pages * PAGE_SIZE;
3595+ atomic_inc(&bo->usage);
3596+ map->handle = (void *)bo;
3597+
3598+ list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
3599+ bo->mem.num_pages, 0, 0);
3600+
3601+ if (!list->file_offset_node) {
3602+ drm_bo_takedown_vm_locked(bo);
3603+ return -ENOMEM;
3604+ }
3605+
3606+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
3607+ bo->mem.num_pages, 0);
3608+ if (!list->file_offset_node) {
3609+ drm_bo_takedown_vm_locked(bo);
3610+ return -ENOMEM;
3611+ }
3612+
3613+ list->hash.key = list->file_offset_node->start;
3614+ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
3615+ drm_bo_takedown_vm_locked(bo);
3616+ return -ENOMEM;
3617+ }
3618+
3619+ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
3620+
3621+ return 0;
3622+}
3623+
3624+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
3625+ struct drm_file *file_priv)
3626+{
3627+ struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
3628+
3629+ arg->major = DRM_BO_INIT_MAJOR;
3630+ arg->minor = DRM_BO_INIT_MINOR;
3631+ arg->patchlevel = DRM_BO_INIT_PATCH;
3632+
3633+ return 0;
3634+}
3635Index: linux-2.6.27/drivers/gpu/drm/drm_bo_lock.c
3636===================================================================
3637--- /dev/null 1970-01-01 00:00:00.000000000 +0000
3638+++ linux-2.6.27/drivers/gpu/drm/drm_bo_lock.c 2009-01-14 11:58:01.000000000 +0000
3639@@ -0,0 +1,175 @@
3640+/**************************************************************************
3641+ *
3642+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
3643+ * All Rights Reserved.
3644+ *
3645+ * Permission is hereby granted, free of charge, to any person obtaining a
3646+ * copy of this software and associated documentation files (the
3647+ * "Software"), to deal in the Software without restriction, including
3648+ * without limitation the rights to use, copy, modify, merge, publish,
3649+ * distribute, sub license, and/or sell copies of the Software, and to
3650+ * permit persons to whom the Software is furnished to do so, subject to
3651+ * the following conditions:
3652+ *
3653+ * The above copyright notice and this permission notice (including the
3654+ * next paragraph) shall be included in all copies or substantial portions
3655+ * of the Software.
3656+ *
3657+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3658+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3659+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
3660+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
3661+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
3662+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
3663+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
3664+ *
3665+ **************************************************************************/
3666+/*
3667+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
3668+ */
3669+
3670+/*
3671+ * This file implements a simple replacement for the buffer manager use
3672+ * of the heavyweight hardware lock.
3673+ * The lock is a read-write lock. Taking it in read mode is fast, and
3674+ * intended for in-kernel use only.
3675+ * Taking it in write mode is slow.
3676+ *
3677+ * The write mode is used only when there is a need to block all
3678+ * user-space processes from allocating a
3679+ * new memory area.
3680+ * Typical use in write mode is X server VT switching, and it's allowed
3681+ * to leave kernel space with the write lock held. If a user-space process
3682+ * dies while having the write-lock, it will be released during the file
3683+ * descriptor release.
3684+ *
3685+ * The read lock is typically placed at the start of an IOCTL- or
3686+ * user-space callable function that may end up allocating a memory area.
3687+ * This includes setstatus, super-ioctls and no_pfn; the latter may move
3688+ * unmappable regions to mappable. It's a bug to leave kernel space with the
3689+ * read lock held.
3690+ *
3691+ * Both read- and write lock taking is interruptible for low signal-delivery
3692+ * latency. The locking functions will return -EAGAIN if interrupted by a
3693+ * signal.
3694+ *
3695+ * Locking order: The lock should be taken BEFORE any kernel mutexes
3696+ * or spinlocks.
3697+ */
3698+
3699+#include "drmP.h"
3700+
3701+void drm_bo_init_lock(struct drm_bo_lock *lock)
3702+{
3703+ DRM_INIT_WAITQUEUE(&lock->queue);
3704+ atomic_set(&lock->write_lock_pending, 0);
3705+ atomic_set(&lock->readers, 0);
3706+}
3707+
3708+void drm_bo_read_unlock(struct drm_bo_lock *lock)
3709+{
3710+ if (unlikely(atomic_add_negative(-1, &lock->readers)))
3711+ BUG();
3712+ if (atomic_read(&lock->readers) == 0)
3713+ wake_up_interruptible(&lock->queue);
3714+}
3715+EXPORT_SYMBOL(drm_bo_read_unlock);
3716+
3717+int drm_bo_read_lock(struct drm_bo_lock *lock)
3718+{
3719+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
3720+ int ret;
3721+ ret = wait_event_interruptible
3722+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
3723+ if (ret)
3724+ return -EAGAIN;
3725+ }
3726+
3727+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
3728+ int ret;
3729+ ret = wait_event_interruptible
3730+ (lock->queue, atomic_add_unless(&lock->readers, 1, -1));
3731+ if (ret)
3732+ return -EAGAIN;
3733+ }
3734+ return 0;
3735+}
3736+EXPORT_SYMBOL(drm_bo_read_lock);
3737+
3738+static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
3739+{
3740+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
3741+ return -EINVAL;
3742+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1))
3743+ return -EINVAL;
3744+ wake_up_interruptible(&lock->queue);
3745+ return 0;
3746+}
3747+
3748+static void drm_bo_write_lock_remove(struct drm_file *file_priv,
3749+ struct drm_user_object *item)
3750+{
3751+ struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
3752+ int ret;
3753+
3754+ ret = __drm_bo_write_unlock(lock);
3755+ BUG_ON(ret);
3756+}
3757+
3758+int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
3759+{
3760+ int ret = 0;
3761+ struct drm_device *dev;
3762+
3763+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
3764+ return -EINVAL;
3765+
3766+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
3767+ ret = wait_event_interruptible
3768+ (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0);
3769+
3770+ if (ret) {
3771+ atomic_set(&lock->write_lock_pending, 0);
3772+ wake_up_interruptible(&lock->queue);
3773+ return -EAGAIN;
3774+ }
3775+ }
3776+
3777+ /*
3778+ * Add a dummy user-object, the destructor of which will
3779+ * make sure the lock is released if the client dies
3780+ * while holding it.
3781+ */
3782+
3783+ dev = file_priv->minor->dev;
3784+ mutex_lock(&dev->struct_mutex);
3785+ ret = drm_add_user_object(file_priv, &lock->base, 0);
3786+ lock->base.remove = &drm_bo_write_lock_remove;
3787+ lock->base.type = drm_lock_type;
3788+ if (ret)
3789+ (void)__drm_bo_write_unlock(lock);
3790+
3791+ mutex_unlock(&dev->struct_mutex);
3792+
3793+ return ret;
3794+}
3795+
3796+int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
3797+{
3798+ struct drm_device *dev = file_priv->minor->dev;
3799+ struct drm_ref_object *ro;
3800+
3801+ mutex_lock(&dev->struct_mutex);
3802+
3803+ if (lock->base.owner != file_priv) {
3804+ mutex_unlock(&dev->struct_mutex);
3805+ return -EINVAL;
3806+ }
3807+ ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
3808+ BUG_ON(!ro);
3809+ drm_remove_ref_object(file_priv, ro);
3810+ lock->base.owner = NULL;
3811+
3812+ mutex_unlock(&dev->struct_mutex);
3813+ return 0;
3814+}
3815Index: linux-2.6.27/drivers/gpu/drm/drm_bo_move.c
3816===================================================================
3817--- /dev/null 1970-01-01 00:00:00.000000000 +0000
3818+++ linux-2.6.27/drivers/gpu/drm/drm_bo_move.c 2009-01-14 11:58:01.000000000 +0000
3819@@ -0,0 +1,597 @@
3820+/**************************************************************************
3821+ *
3822+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
3823+ * All Rights Reserved.
3824+ *
3825+ * Permission is hereby granted, free of charge, to any person obtaining a
3826+ * copy of this software and associated documentation files (the
3827+ * "Software"), to deal in the Software without restriction, including
3828+ * without limitation the rights to use, copy, modify, merge, publish,
3829+ * distribute, sub license, and/or sell copies of the Software, and to
3830+ * permit persons to whom the Software is furnished to do so, subject to
3831+ * the following conditions:
3832+ *
3833+ * The above copyright notice and this permission notice (including the
3834+ * next paragraph) shall be included in all copies or substantial portions
3835+ * of the Software.
3836+ *
3837+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3838+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3839+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
3840+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
3841+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
3842+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
3843+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
3844+ *
3845+ **************************************************************************/
3846+/*
3847+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
3848+ */
3849+
3850+#include "drmP.h"
3851+
3852+/**
3853+ * Free the old memory node unless it's a pinned region and we
3854+ * have not been requested to free also pinned regions.
3855+ */
3856+
3857+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
3858+{
3859+ struct drm_bo_mem_reg *old_mem = &bo->mem;
3860+
3861+ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
3862+ mutex_lock(&bo->dev->struct_mutex);
3863+ drm_mm_put_block(old_mem->mm_node);
3864+ mutex_unlock(&bo->dev->struct_mutex);
3865+ }
3866+ old_mem->mm_node = NULL;
3867+}
3868+
3869+int drm_bo_move_ttm(struct drm_buffer_object *bo,
3870+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
3871+{
3872+ struct drm_ttm *ttm = bo->ttm;
3873+ struct drm_bo_mem_reg *old_mem = &bo->mem;
3874+ uint64_t save_flags = old_mem->flags;
3875+ uint64_t save_mask = old_mem->mask;
3876+ int ret;
3877+
3878+ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
3879+ if (evict)
3880+ drm_ttm_evict(ttm);
3881+ else
3882+ drm_ttm_unbind(ttm);
3883+
3884+ drm_bo_free_old_node(bo);
3885+ DRM_FLAG_MASKED(old_mem->flags,
3886+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
3887+ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
3888+ old_mem->mem_type = DRM_BO_MEM_LOCAL;
3889+ save_flags = old_mem->flags;
3890+ }
3891+ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
3892+ ret = drm_bind_ttm(ttm, new_mem);
3893+ if (ret)
3894+ return ret;
3895+ }
3896+
3897+ *old_mem = *new_mem;
3898+ new_mem->mm_node = NULL;
3899+ old_mem->mask = save_mask;
3900+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
3901+ return 0;
3902+}
3903+EXPORT_SYMBOL(drm_bo_move_ttm);
3904+
3905+/**
3906+ * \c Return a kernel virtual address to the buffer object PCI memory.
3907+ *
3908+ * \param bo The buffer object.
3909+ * \return Failure indication.
3910+ *
3911+ * Returns -EINVAL if the buffer object is currently not mappable.
3912+ * Returns -ENOMEM if the ioremap operation failed.
3913+ * Otherwise returns zero.
3914+ *
3915+ * After a successfull call, bo->iomap contains the virtual address, or NULL
3916+ * if the buffer object content is not accessible through PCI space.
3917+ * Call bo->mutex locked.
3918+ */
3919+
3920+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
3921+ void **virtual)
3922+{
3923+ struct drm_buffer_manager *bm = &dev->bm;
3924+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
3925+ unsigned long bus_offset;
3926+ unsigned long bus_size;
3927+ unsigned long bus_base;
3928+ int ret;
3929+ void *addr;
3930+
3931+ *virtual = NULL;
3932+ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
3933+ if (ret || bus_size == 0)
3934+ return ret;
3935+
3936+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
3937+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
3938+ else {
3939+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
3940+ if (!addr)
3941+ return -ENOMEM;
3942+ }
3943+ *virtual = addr;
3944+ return 0;
3945+}
3946+EXPORT_SYMBOL(drm_mem_reg_ioremap);
3947+
3948+/**
3949+ * \c Unmap mapping obtained using drm_bo_ioremap
3950+ *
3951+ * \param bo The buffer object.
3952+ *
3953+ * Call bo->mutex locked.
3954+ */
3955+
3956+void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
3957+ void *virtual)
3958+{
3959+ struct drm_buffer_manager *bm;
3960+ struct drm_mem_type_manager *man;
3961+
3962+ bm = &dev->bm;
3963+ man = &bm->man[mem->mem_type];
3964+
3965+ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
3966+ iounmap(virtual);
3967+}
3968+EXPORT_SYMBOL(drm_mem_reg_iounmap);
3969+
3970+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
3971+{
3972+ uint32_t *dstP =
3973+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
3974+ uint32_t *srcP =
3975+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
3976+
3977+ int i;
3978+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
3979+ iowrite32(ioread32(srcP++), dstP++);
3980+ return 0;
3981+}
3982+
3983+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
3984+ unsigned long page)
3985+{
3986+ struct page *d = drm_ttm_get_page(ttm, page);
3987+ void *dst;
3988+
3989+ if (!d)
3990+ return -ENOMEM;
3991+
3992+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
3993+ dst = kmap(d);
3994+ if (!dst)
3995+ return -ENOMEM;
3996+
3997+ memcpy_fromio(dst, src, PAGE_SIZE);
3998+ kunmap(d);
3999+ return 0;
4000+}
4001+
4002+static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
4003+{
4004+ struct page *s = drm_ttm_get_page(ttm, page);
4005+ void *src;
4006+
4007+ if (!s)
4008+ return -ENOMEM;
4009+
4010+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
4011+ src = kmap(s);
4012+ if (!src)
4013+ return -ENOMEM;
4014+
4015+ memcpy_toio(dst, src, PAGE_SIZE);
4016+ kunmap(s);
4017+ return 0;
4018+}
4019+
4020+int drm_bo_move_memcpy(struct drm_buffer_object *bo,
4021+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
4022+{
4023+ struct drm_device *dev = bo->dev;
4024+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
4025+ struct drm_ttm *ttm = bo->ttm;
4026+ struct drm_bo_mem_reg *old_mem = &bo->mem;
4027+ struct drm_bo_mem_reg old_copy = *old_mem;
4028+ void *old_iomap;
4029+ void *new_iomap;
4030+ int ret;
4031+ uint64_t save_flags = old_mem->flags;
4032+ uint64_t save_mask = old_mem->mask;
4033+ unsigned long i;
4034+ unsigned long page;
4035+ unsigned long add = 0;
4036+ int dir;
4037+
4038+ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
4039+ if (ret)
4040+ return ret;
4041+ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
4042+ if (ret)
4043+ goto out;
4044+
4045+ if (old_iomap == NULL && new_iomap == NULL)
4046+ goto out2;
4047+ if (old_iomap == NULL && ttm == NULL)
4048+ goto out2;
4049+
4050+ add = 0;
4051+ dir = 1;
4052+
4053+ if ((old_mem->mem_type == new_mem->mem_type) &&
4054+ (new_mem->mm_node->start <
4055+ old_mem->mm_node->start + old_mem->mm_node->size)) {
4056+ dir = -1;
4057+ add = new_mem->num_pages - 1;
4058+ }
4059+
4060+ for (i = 0; i < new_mem->num_pages; ++i) {
4061+ page = i * dir + add;
4062+ if (old_iomap == NULL)
4063+ ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
4064+ else if (new_iomap == NULL)
4065+ ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
4066+ else
4067+ ret = drm_copy_io_page(new_iomap, old_iomap, page);
4068+ if (ret)
4069+ goto out1;
4070+ }
4071+ mb();
4072+out2:
4073+ drm_bo_free_old_node(bo);
4074+
4075+ *old_mem = *new_mem;
4076+ new_mem->mm_node = NULL;
4077+ old_mem->mask = save_mask;
4078+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
4079+
4080+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
4081+ drm_ttm_unbind(ttm);
4082+ drm_destroy_ttm(ttm);
4083+ bo->ttm = NULL;
4084+ }
4085+
4086+out1:
4087+ drm_mem_reg_iounmap(dev, new_mem, new_iomap);
4088+out:
4089+ drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
4090+ return ret;
4091+}
4092+EXPORT_SYMBOL(drm_bo_move_memcpy);
4093+
4094+/*
4095+ * Transfer a buffer object's memory and LRU status to a newly
4096+ * created object. User-space references remains with the old
4097+ * object. Call bo->mutex locked.
4098+ */
4099+
4100+int drm_buffer_object_transfer(struct drm_buffer_object *bo,
4101+ struct drm_buffer_object **new_obj)
4102+{
4103+ struct drm_buffer_object *fbo;
4104+ struct drm_device *dev = bo->dev;
4105+ struct drm_buffer_manager *bm = &dev->bm;
4106+
4107+ fbo = drm_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
4108+ if (!fbo)
4109+ return -ENOMEM;
4110+
4111+ *fbo = *bo;
4112+ mutex_init(&fbo->mutex);
4113+ mutex_lock(&fbo->mutex);
4114+ mutex_lock(&dev->struct_mutex);
4115+
4116+ DRM_INIT_WAITQUEUE(&bo->event_queue);
4117+ INIT_LIST_HEAD(&fbo->ddestroy);
4118+ INIT_LIST_HEAD(&fbo->lru);
4119+ INIT_LIST_HEAD(&fbo->pinned_lru);
4120+#ifdef DRM_ODD_MM_COMPAT
4121+ INIT_LIST_HEAD(&fbo->vma_list);
4122+ INIT_LIST_HEAD(&fbo->p_mm_list);
4123+#endif
4124+
4125+ fbo->fence = drm_fence_reference_locked(bo->fence);
4126+ fbo->pinned_node = NULL;
4127+ fbo->mem.mm_node->private = (void *)fbo;
4128+ atomic_set(&fbo->usage, 1);
4129+ atomic_inc(&bm->count);
4130+ mutex_unlock(&dev->struct_mutex);
4131+ mutex_unlock(&fbo->mutex);
4132+ bo->reserved_size = 0;
4133+ *new_obj = fbo;
4134+ return 0;
4135+}
4136+
4137+/*
4138+ * Since move is underway, we need to block signals in this function.
4139+ * We cannot restart until it has finished.
4140+ */
4141+
4142+int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
4143+ int evict, int no_wait, uint32_t fence_class,
4144+ uint32_t fence_type, uint32_t fence_flags,
4145+ struct drm_bo_mem_reg *new_mem)
4146+{
4147+ struct drm_device *dev = bo->dev;
4148+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
4149+ struct drm_bo_mem_reg *old_mem = &bo->mem;
4150+ int ret;
4151+ uint64_t save_flags = old_mem->flags;
4152+ uint64_t save_mask = old_mem->mask;
4153+ struct drm_buffer_object *old_obj;
4154+
4155+ if (bo->fence)
4156+ drm_fence_usage_deref_unlocked(&bo->fence);
4157+ ret = drm_fence_object_create(dev, fence_class, fence_type,
4158+ fence_flags | DRM_FENCE_FLAG_EMIT,
4159+ &bo->fence);
4160+ bo->fence_type = fence_type;
4161+ if (ret)
4162+ return ret;
4163+
4164+#ifdef DRM_ODD_MM_COMPAT
4165+ /*
4166+ * In this mode, we don't allow pipelining a copy blit,
4167+ * since the buffer will be accessible from user space
4168+ * the moment we return and rebuild the page tables.
4169+ *
4170+ * With normal vm operation, page tables are rebuilt
4171+ * on demand using fault(), which waits for buffer idle.
4172+ */
4173+ if (1)
4174+#else
4175+ if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
4176+ bo->mem.mm_node != NULL))
4177+#endif
4178+ {
4179+ ret = drm_bo_wait(bo, 0, 1, 0);
4180+ if (ret)
4181+ return ret;
4182+
4183+ drm_bo_free_old_node(bo);
4184+
4185+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
4186+ drm_ttm_unbind(bo->ttm);
4187+ drm_destroy_ttm(bo->ttm);
4188+ bo->ttm = NULL;
4189+ }
4190+ } else {
4191+
4192+ /* This should help pipeline ordinary buffer moves.
4193+ *
4194+ * Hang old buffer memory on a new buffer object,
4195+ * and leave it to be released when the GPU
4196+ * operation has completed.
4197+ */
4198+
4199+ ret = drm_buffer_object_transfer(bo, &old_obj);
4200+
4201+ if (ret)
4202+ return ret;
4203+
4204+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
4205+ old_obj->ttm = NULL;
4206+ else
4207+ bo->ttm = NULL;
4208+
4209+ mutex_lock(&dev->struct_mutex);
4210+ list_del_init(&old_obj->lru);
4211+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
4212+ drm_bo_add_to_lru(old_obj);
4213+
4214+ drm_bo_usage_deref_locked(&old_obj);
4215+ mutex_unlock(&dev->struct_mutex);
4216+
4217+ }
4218+
4219+ *old_mem = *new_mem;
4220+ new_mem->mm_node = NULL;
4221+ old_mem->mask = save_mask;
4222+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
4223+ return 0;
4224+}
4225+EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
4226+
4227+int drm_bo_same_page(unsigned long offset,
4228+ unsigned long offset2)
4229+{
4230+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
4231+}
4232+EXPORT_SYMBOL(drm_bo_same_page);
4233+
4234+unsigned long drm_bo_offset_end(unsigned long offset,
4235+ unsigned long end)
4236+{
4237+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
4238+ return (end < offset) ? end : offset;
4239+}
4240+EXPORT_SYMBOL(drm_bo_offset_end);
4241+
4242+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
4243+{
4244+ pgprot_t tmp = PAGE_KERNEL;
4245+
4246+#if defined(__i386__) || defined(__x86_64__)
4247+#ifdef USE_PAT_WC
4248+#warning using pat
4249+ if (drm_use_pat() && map_type == _DRM_TTM) {
4250+ pgprot_val(tmp) |= _PAGE_PAT;
4251+ return tmp;
4252+ }
4253+#endif
4254+ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
4255+ pgprot_val(tmp) |= _PAGE_PCD;
4256+ pgprot_val(tmp) &= ~_PAGE_PWT;
4257+ }
4258+#elif defined(__powerpc__)
4259+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
4260+ if (map_type == _DRM_REGISTERS)
4261+ pgprot_val(tmp) |= _PAGE_GUARDED;
4262+#endif
4263+#if defined(__ia64__)
4264+ if (map_type == _DRM_TTM)
4265+ tmp = pgprot_writecombine(tmp);
4266+ else
4267+ tmp = pgprot_noncached(tmp);
4268+#endif
4269+ return tmp;
4270+}
4271+
4272+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
4273+ unsigned long bus_offset, unsigned long bus_size,
4274+ struct drm_bo_kmap_obj *map)
4275+{
4276+ struct drm_device *dev = bo->dev;
4277+ struct drm_bo_mem_reg *mem = &bo->mem;
4278+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
4279+
4280+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
4281+ map->bo_kmap_type = bo_map_premapped;
4282+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
4283+ } else {
4284+ map->bo_kmap_type = bo_map_iomap;
4285+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
4286+ }
4287+ return (!map->virtual) ? -ENOMEM : 0;
4288+}
4289+
4290+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
4291+ unsigned long start_page, unsigned long num_pages,
4292+ struct drm_bo_kmap_obj *map)
4293+{
4294+ struct drm_device *dev = bo->dev;
4295+ struct drm_bo_mem_reg *mem = &bo->mem;
4296+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
4297+ pgprot_t prot;
4298+ struct drm_ttm *ttm = bo->ttm;
4299+ struct page *d;
4300+ int i;
4301+
4302+ BUG_ON(!ttm);
4303+
4304+ if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
4305+
4306+ /*
4307+ * We're mapping a single page, and the desired
4308+ * page protection is consistent with the bo.
4309+ */
4310+
4311+ map->bo_kmap_type = bo_map_kmap;
4312+ map->page = drm_ttm_get_page(ttm, start_page);
4313+ map->virtual = kmap(map->page);
4314+ } else {
4315+ /*
4316+ * Populate the part we're mapping;
4317+ */
4318+
4319+ for (i = start_page; i < start_page + num_pages; ++i) {
4320+ d = drm_ttm_get_page(ttm, i);
4321+ if (!d)
4322+ return -ENOMEM;
4323+ }
4324+
4325+ /*
4326+ * We need to use vmap to get the desired page protection
4327+ * or to make the buffer object look contigous.
4328+ */
4329+
4330+ prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
4331+ PAGE_KERNEL :
4332+ drm_kernel_io_prot(man->drm_bus_maptype);
4333+ map->bo_kmap_type = bo_map_vmap;
4334+ map->virtual = vmap(ttm->pages + start_page,
4335+ num_pages, 0, prot);
4336+ }
4337+ return (!map->virtual) ? -ENOMEM : 0;
4338+}
4339+
4340+/*
4341+ * This function is to be used for kernel mapping of buffer objects.
4342+ * It chooses the appropriate mapping method depending on the memory type
4343+ * and caching policy the buffer currently has.
4344+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
4345+ * consumes vmalloc space. Be restrictive with such mappings.
4346+ * Mapping single pages usually returns the logical kernel address,
4347+ * (which is fast)
4348+ * BUG may use slower temporary mappings for high memory pages or
4349+ * uncached / write-combined pages.
4350+ *
4351+ * The function fills in a drm_bo_kmap_obj which can be used to return the
4352+ * kernel virtual address of the buffer.
4353+ *
4354+ * Code servicing a non-priviliged user request is only allowed to map one
4355+ * page at a time. We might need to implement a better scheme to stop such
4356+ * processes from consuming all vmalloc space.
4357+ */
4358+
4359+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
4360+ unsigned long num_pages, struct drm_bo_kmap_obj *map)
4361+{
4362+ int ret;
4363+ unsigned long bus_base;
4364+ unsigned long bus_offset;
4365+ unsigned long bus_size;
4366+
4367+ map->virtual = NULL;
4368+
4369+ if (num_pages > bo->num_pages)
4370+ return -EINVAL;
4371+ if (start_page > bo->num_pages)
4372+ return -EINVAL;
4373+#if 0
4374+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
4375+ return -EPERM;
4376+#endif
4377+ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
4378+ &bus_offset, &bus_size);
4379+
4380+ if (ret)
4381+ return ret;
4382+
4383+ if (bus_size == 0) {
4384+ return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
4385+ } else {
4386+ bus_offset += start_page << PAGE_SHIFT;
4387+ bus_size = num_pages << PAGE_SHIFT;
4388+ return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
4389+ }
4390+}
4391+EXPORT_SYMBOL(drm_bo_kmap);
4392+
4393+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
4394+{
4395+ if (!map->virtual)
4396+ return;
4397+
4398+ switch (map->bo_kmap_type) {
4399+ case bo_map_iomap:
4400+ iounmap(map->virtual);
4401+ break;
4402+ case bo_map_vmap:
4403+ vunmap(map->virtual);
4404+ break;
4405+ case bo_map_kmap:
4406+ kunmap(map->page);
4407+ break;
4408+ case bo_map_premapped:
4409+ break;
4410+ default:
4411+ BUG();
4412+ }
4413+ map->virtual = NULL;
4414+ map->page = NULL;
4415+}
4416+EXPORT_SYMBOL(drm_bo_kunmap);
4417Index: linux-2.6.27/drivers/gpu/drm/drm_bufs.c
4418===================================================================
4419--- linux-2.6.27.orig/drivers/gpu/drm/drm_bufs.c 2009-01-14 11:54:35.000000000 +0000
4420+++ linux-2.6.27/drivers/gpu/drm/drm_bufs.c 2009-01-14 11:58:01.000000000 +0000
4421@@ -409,6 +409,7 @@
4422 break;
4423 case _DRM_SHM:
4424 vfree(map->handle);
4425+ dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
4426 break;
4427 case _DRM_AGP:
4428 case _DRM_SCATTER_GATHER:
4429@@ -419,6 +420,8 @@
4430 dmah.size = map->size;
4431 __drm_pci_free(dev, &dmah);
4432 break;
4433+ case _DRM_TTM:
4434+ BUG_ON(1);
4435 }
4436 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
4437
4438Index: linux-2.6.27/drivers/gpu/drm/drm_crtc.c
4439===================================================================
4440--- /dev/null 1970-01-01 00:00:00.000000000 +0000
4441+++ linux-2.6.27/drivers/gpu/drm/drm_crtc.c 2009-01-14 11:58:01.000000000 +0000
4442@@ -0,0 +1,2170 @@
4443+/*
4444+ * Copyright (c) 2006-2007 Intel Corporation
4445+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
4446+ *
4447+ * DRM core CRTC related functions
4448+ *
4449+ * Permission to use, copy, modify, distribute, and sell this software and its
4450+ * documentation for any purpose is hereby granted without fee, provided that
4451+ * the above copyright notice appear in all copies and that both that copyright
4452+ * notice and this permission notice appear in supporting documentation, and
4453+ * that the name of the copyright holders not be used in advertising or
4454+ * publicity pertaining to distribution of the software without specific,
4455+ * written prior permission. The copyright holders make no representations
4456+ * about the suitability of this software for any purpose. It is provided "as
4457+ * is" without express or implied warranty.
4458+ *
4459+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
4460+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
4461+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
4462+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
4463+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
4464+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
4465+ * OF THIS SOFTWARE.
4466+ *
4467+ * Authors:
4468+ * Keith Packard
4469+ * Eric Anholt <eric@anholt.net>
4470+ * Dave Airlie <airlied@linux.ie>
4471+ * Jesse Barnes <jesse.barnes@intel.com>
4472+ */
4473+#include <linux/list.h>
4474+#include "drm.h"
4475+#include "drmP.h"
4476+#include "drm_crtc.h"
4477+
4478+/**
4479+ * drm_idr_get - allocate a new identifier
4480+ * @dev: DRM device
4481+ * @ptr: object pointer, used to generate unique ID
4482+ *
4483+ * LOCKING:
4484+ * Caller must hold DRM mode_config lock.
4485+ *
4486+ * Create a unique identifier based on @ptr in @dev's identifier space. Used
4487+ * for tracking modes, CRTCs and outputs.
4488+ *
4489+ * RETURNS:
4490+ * New unique (relative to other objects in @dev) integer identifier for the
4491+ * object.
4492+ */
4493+int drm_idr_get(struct drm_device *dev, void *ptr)
4494+{
4495+ int new_id = 0;
4496+ int ret;
4497+again:
4498+ if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
4499+ DRM_ERROR("Ran out memory getting a mode number\n");
4500+ return 0;
4501+ }
4502+
4503+ ret = idr_get_new_above(&dev->mode_config.crtc_idr, ptr, 1, &new_id);
4504+ if (ret == -EAGAIN)
4505+ goto again;
4506+
4507+ return new_id;
4508+}
4509+
4510+/**
4511+ * drm_idr_put - free an identifer
4512+ * @dev: DRM device
4513+ * @id: ID to free
4514+ *
4515+ * LOCKING:
4516+ * Caller must hold DRM mode_config lock.
4517+ *
4518+ * Free @id from @dev's unique identifier pool.
4519+ */
4520+void drm_idr_put(struct drm_device *dev, int id)
4521+{
4522+ idr_remove(&dev->mode_config.crtc_idr, id);
4523+}
4524+
4525+/**
4526+ * drm_crtc_from_fb - find the CRTC structure associated with an fb
4527+ * @dev: DRM device
4528+ * @fb: framebuffer in question
4529+ *
4530+ * LOCKING:
4531+ * Caller must hold mode_config lock.
4532+ *
4533+ * Find CRTC in the mode_config structure that matches @fb.
4534+ *
4535+ * RETURNS:
4536+ * Pointer to the CRTC or NULL if it wasn't found.
4537+ */
4538+struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
4539+ struct drm_framebuffer *fb)
4540+{
4541+ struct drm_crtc *crtc;
4542+
4543+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4544+ if (crtc->fb == fb)
4545+ return crtc;
4546+ }
4547+ return NULL;
4548+}
4549+
4550+/**
4551+ * drm_framebuffer_create - create a new framebuffer object
4552+ * @dev: DRM device
4553+ *
4554+ * LOCKING:
4555+ * Caller must hold mode config lock.
4556+ *
4557+ * Creates a new framebuffer objects and adds it to @dev's DRM mode_config.
4558+ *
4559+ * RETURNS:
4560+ * Pointer to new framebuffer or NULL on error.
4561+ */
4562+struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev)
4563+{
4564+ struct drm_framebuffer *fb;
4565+
4566+ /* Limit to single framebuffer for now */
4567+ if (dev->mode_config.num_fb > 1) {
4568+ mutex_unlock(&dev->mode_config.mutex);
4569+ DRM_ERROR("Attempt to add multiple framebuffers failed\n");
4570+ return NULL;
4571+ }
4572+
4573+ fb = kzalloc(sizeof(struct drm_framebuffer), GFP_KERNEL);
4574+ if (!fb)
4575+ return NULL;
4576+
4577+ fb->id = drm_idr_get(dev, fb);
4578+ fb->dev = dev;
4579+ dev->mode_config.num_fb++;
4580+ list_add(&fb->head, &dev->mode_config.fb_list);
4581+
4582+ return fb;
4583+}
4584+EXPORT_SYMBOL(drm_framebuffer_create);
4585+
4586+/**
4587+ * drm_framebuffer_destroy - remove a framebuffer object
4588+ * @fb: framebuffer to remove
4589+ *
4590+ * LOCKING:
4591+ * Caller must hold mode config lock.
4592+ *
4593+ * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
4594+ * it, setting it to NULL.
4595+ */
4596+void drm_framebuffer_destroy(struct drm_framebuffer *fb)
4597+{
4598+ struct drm_device *dev = fb->dev;
4599+ struct drm_crtc *crtc;
4600+
4601+ /* remove from any CRTC */
4602+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4603+ if (crtc->fb == fb)
4604+ crtc->fb = NULL;
4605+ }
4606+
4607+ drm_idr_put(dev, fb->id);
4608+ list_del(&fb->head);
4609+ dev->mode_config.num_fb--;
4610+
4611+ kfree(fb);
4612+}
4613+EXPORT_SYMBOL(drm_framebuffer_destroy);
4614+
4615+/**
4616+ * drm_crtc_create - create a new CRTC object
4617+ * @dev: DRM device
4618+ * @funcs: callbacks for the new CRTC
4619+ *
4620+ * LOCKING:
4621+ * Caller must hold mode config lock.
4622+ *
4623+ * Creates a new CRTC object and adds it to @dev's mode_config structure.
4624+ *
4625+ * RETURNS:
4626+ * Pointer to new CRTC object or NULL on error.
4627+ */
4628+struct drm_crtc *drm_crtc_create(struct drm_device *dev,
4629+ const struct drm_crtc_funcs *funcs)
4630+{
4631+ struct drm_crtc *crtc;
4632+
4633+ crtc = kzalloc(sizeof(struct drm_crtc), GFP_KERNEL);
4634+ if (!crtc)
4635+ return NULL;
4636+
4637+ crtc->dev = dev;
4638+ crtc->funcs = funcs;
4639+
4640+ crtc->id = drm_idr_get(dev, crtc);
4641+
4642+ list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
4643+ dev->mode_config.num_crtc++;
4644+
4645+ return crtc;
4646+}
4647+EXPORT_SYMBOL(drm_crtc_create);
4648+
4649+/**
4650+ * drm_crtc_destroy - remove a CRTC object
4651+ * @crtc: CRTC to remove
4652+ *
4653+ * LOCKING:
4654+ * Caller must hold mode config lock.
4655+ *
4656+ * Cleanup @crtc. Calls @crtc's cleanup function, then removes @crtc from
4657+ * its associated DRM device's mode_config. Frees it afterwards.
4658+ */
4659+void drm_crtc_destroy(struct drm_crtc *crtc)
4660+{
4661+ struct drm_device *dev = crtc->dev;
4662+
4663+ if (crtc->funcs->cleanup)
4664+ (*crtc->funcs->cleanup)(crtc);
4665+
4666+ drm_idr_put(dev, crtc->id);
4667+ list_del(&crtc->head);
4668+ dev->mode_config.num_crtc--;
4669+ kfree(crtc);
4670+}
4671+EXPORT_SYMBOL(drm_crtc_destroy);
4672+
4673+/**
4674+ * drm_crtc_in_use - check if a given CRTC is in a mode_config
4675+ * @crtc: CRTC to check
4676+ *
4677+ * LOCKING:
4678+ * Caller must hold mode config lock.
4679+ *
4680+ * Walk @crtc's DRM device's mode_config and see if it's in use.
4681+ *
4682+ * RETURNS:
4683+ * True if @crtc is part of the mode_config, false otherwise.
4684+ */
4685+bool drm_crtc_in_use(struct drm_crtc *crtc)
4686+{
4687+ struct drm_output *output;
4688+ struct drm_device *dev = crtc->dev;
4689+ /* FIXME: Locking around list access? */
4690+ list_for_each_entry(output, &dev->mode_config.output_list, head)
4691+ if (output->crtc == crtc)
4692+ return true;
4693+ return false;
4694+}
4695+EXPORT_SYMBOL(drm_crtc_in_use);
4696+
4697+/*
4698+ * Detailed mode info for a standard 640x480@60Hz monitor
4699+ */
4700+static struct drm_display_mode std_mode[] = {
4701+ { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 25200, 640, 656,
4702+ 752, 800, 0, 480, 490, 492, 525, 0,
4703+ V_NHSYNC | V_NVSYNC) }, /* 640x480@60Hz */
4704+};
4705+
4706+/**
4707+ * drm_crtc_probe_output_modes - get complete set of display modes
4708+ * @dev: DRM device
4709+ * @maxX: max width for modes
4710+ * @maxY: max height for modes
4711+ *
4712+ * LOCKING:
4713+ * Caller must hold mode config lock.
4714+ *
4715+ * Based on @dev's mode_config layout, scan all the outputs and try to detect
4716+ * modes on them. Modes will first be added to the output's probed_modes
4717+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
4718+ * put into the normal modes list.
4719+ *
4720+ * Intended to be used either at bootup time or when major configuration
4721+ * changes have occurred.
4722+ *
4723+ * FIXME: take into account monitor limits
4724+ */
4725+void drm_crtc_probe_output_modes(struct drm_device *dev, int maxX, int maxY)
4726+{
4727+ struct drm_output *output;
4728+ struct drm_display_mode *mode, *t;
4729+ int ret;
4730+ //if (maxX == 0 || maxY == 0)
4731+ // TODO
4732+
4733+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4734+
4735+ /* set all modes to the unverified state */
4736+ list_for_each_entry_safe(mode, t, &output->modes, head)
4737+ mode->status = MODE_UNVERIFIED;
4738+
4739+ output->status = (*output->funcs->detect)(output);
4740+
4741+ if (output->status == output_status_disconnected) {
4742+ DRM_DEBUG("%s is disconnected\n", output->name);
4743+ /* TODO set EDID to NULL */
4744+ continue;
4745+ }
4746+
4747+ ret = (*output->funcs->get_modes)(output);
4748+
4749+ if (ret) {
4750+ drm_mode_output_list_update(output);
4751+ }
4752+
4753+ if (maxX && maxY)
4754+ drm_mode_validate_size(dev, &output->modes, maxX,
4755+ maxY, 0);
4756+ list_for_each_entry_safe(mode, t, &output->modes, head) {
4757+ if (mode->status == MODE_OK)
4758+ mode->status = (*output->funcs->mode_valid)(output,mode);
4759+ }
4760+
4761+
4762+ drm_mode_prune_invalid(dev, &output->modes, 1);
4763+
4764+ if (list_empty(&output->modes)) {
4765+ struct drm_display_mode *stdmode;
4766+
4767+ DRM_DEBUG("No valid modes on %s\n", output->name);
4768+
4769+ /* Should we do this here ???
4770+ * When no valid EDID modes are available we end up
4771+ * here and bailed in the past, now we add a standard
4772+ * 640x480@60Hz mode and carry on.
4773+ */
4774+ stdmode = drm_mode_duplicate(dev, &std_mode[0]);
4775+ drm_mode_probed_add(output, stdmode);
4776+ drm_mode_list_concat(&output->probed_modes,
4777+ &output->modes);
4778+
4779+ DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n",
4780+ output->name);
4781+ }
4782+
4783+ drm_mode_sort(&output->modes);
4784+
4785+ DRM_DEBUG("Probed modes for %s\n", output->name);
4786+ list_for_each_entry_safe(mode, t, &output->modes, head) {
4787+ mode->vrefresh = drm_mode_vrefresh(mode);
4788+
4789+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
4790+ drm_mode_debug_printmodeline(dev, mode);
4791+ }
4792+ }
4793+}
4794+EXPORT_SYMBOL(drm_crtc_probe_output_modes);
4795+
4796+/**
4797+ * drm_crtc_set_mode - set a mode
4798+ * @crtc: CRTC to program
4799+ * @mode: mode to use
4800+ * @x: width of mode
4801+ * @y: height of mode
4802+ *
4803+ * LOCKING:
4804+ * Caller must hold mode config lock.
4805+ *
4806+ * Try to set @mode on @crtc. Give @crtc and its associated outputs a chance
4807+ * to fixup or reject the mode prior to trying to set it.
4808+ *
4809+ * RETURNS:
4810+ * True if the mode was set successfully, or false otherwise.
4811+ */
4812+bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
4813+ int x, int y)
4814+{
4815+ struct drm_device *dev = crtc->dev;
4816+ struct drm_display_mode *adjusted_mode, saved_mode;
4817+ int saved_x, saved_y;
4818+ bool didLock = false;
4819+ bool ret = false;
4820+ struct drm_output *output;
4821+
4822+ adjusted_mode = drm_mode_duplicate(dev, mode);
4823+
4824+ crtc->enabled = drm_crtc_in_use(crtc);
4825+
4826+ if (!crtc->enabled) {
4827+ return true;
4828+ }
4829+
4830+ didLock = crtc->funcs->lock(crtc);
4831+
4832+ saved_mode = crtc->mode;
4833+ saved_x = crtc->x;
4834+ saved_y = crtc->y;
4835+
4836+ /* Update crtc values up front so the driver can rely on them for mode
4837+ * setting.
4838+ */
4839+ crtc->mode = *mode;
4840+ crtc->x = x;
4841+ crtc->y = y;
4842+
4843+ /* XXX short-circuit changes to base location only */
4844+
4845+ /* Pass our mode to the outputs and the CRTC to give them a chance to
4846+ * adjust it according to limitations or output properties, and also
4847+ * a chance to reject the mode entirely.
4848+ */
4849+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4850+
4851+ if (output->crtc != crtc)
4852+ continue;
4853+
4854+ if (!output->funcs->mode_fixup(output, mode, adjusted_mode)) {
4855+ goto done;
4856+ }
4857+ }
4858+
4859+ if (!crtc->funcs->mode_fixup(crtc, mode, adjusted_mode)) {
4860+ goto done;
4861+ }
4862+
4863+ /* Prepare the outputs and CRTCs before setting the mode. */
4864+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4865+
4866+ if (output->crtc != crtc)
4867+ continue;
4868+
4869+ /* Disable the output as the first thing we do. */
4870+ output->funcs->prepare(output);
4871+ }
4872+
4873+ crtc->funcs->prepare(crtc);
4874+
4875+ /* Set up the DPLL and any output state that needs to adjust or depend
4876+ * on the DPLL.
4877+ */
4878+ crtc->funcs->mode_set(crtc, mode, adjusted_mode, x, y);
4879+
4880+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4881+
4882+ if (output->crtc != crtc)
4883+ continue;
4884+
4885+ DRM_INFO("%s: set mode %s %x\n", output->name, mode->name, mode->mode_id);
4886+
4887+ output->funcs->mode_set(output, mode, adjusted_mode);
4888+ }
4889+
4890+ /* Now, enable the clocks, plane, pipe, and outputs that we set up. */
4891+ crtc->funcs->commit(crtc);
4892+
4893+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4894+
4895+ if (output->crtc != crtc)
4896+ continue;
4897+
4898+ output->funcs->commit(output);
4899+
4900+#if 0 // TODO def RANDR_12_INTERFACE
4901+ if (output->randr_output)
4902+ RRPostPendingProperties (output->randr_output);
4903+#endif
4904+ }
4905+
4906+ /* XXX free adjustedmode */
4907+ drm_mode_destroy(dev, adjusted_mode);
4908+ ret = 1;
4909+ /* TODO */
4910+// if (scrn->pScreen)
4911+// drm_crtc_set_screen_sub_pixel_order(dev);
4912+
4913+done:
4914+ if (!ret) {
4915+ crtc->x = saved_x;
4916+ crtc->y = saved_y;
4917+ crtc->mode = saved_mode;
4918+ }
4919+
4920+ if (didLock)
4921+ crtc->funcs->unlock (crtc);
4922+
4923+ return ret;
4924+}
4925+EXPORT_SYMBOL(drm_crtc_set_mode);
4926+
4927+/**
4928+ * drm_disable_unused_functions - disable unused objects
4929+ * @dev: DRM device
4930+ *
4931+ * LOCKING:
4932+ * Caller must hold mode config lock.
4933+ *
4934+ * If an output or CRTC isn't part of @dev's mode_config, it can be disabled
4935+ * by calling its dpms function, which should power it off.
4936+ */
4937+void drm_disable_unused_functions(struct drm_device *dev)
4938+{
4939+ struct drm_output *output;
4940+ struct drm_crtc *crtc;
4941+
4942+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
4943+ if (!output->crtc)
4944+ (*output->funcs->dpms)(output, DPMSModeOff);
4945+ }
4946+
4947+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4948+ if (!crtc->enabled)
4949+ crtc->funcs->dpms(crtc, DPMSModeOff);
4950+ }
4951+}
4952+EXPORT_SYMBOL(drm_disable_unused_functions);
4953+
4954+/**
4955+ * drm_mode_probed_add - add a mode to the specified output's probed mode list
4956+ * @output: output the new mode
4957+ * @mode: mode data
4958+ *
4959+ * LOCKING:
4960+ * Caller must hold mode config lock.
4961+ *
4962+ * Add @mode to @output's mode list for later use.
4963+ */
4964+void drm_mode_probed_add(struct drm_output *output,
4965+ struct drm_display_mode *mode)
4966+{
4967+ list_add(&mode->head, &output->probed_modes);
4968+}
4969+EXPORT_SYMBOL(drm_mode_probed_add);
4970+
4971+/**
4972+ * drm_mode_remove - remove and free a mode
4973+ * @output: output list to modify
4974+ * @mode: mode to remove
4975+ *
4976+ * LOCKING:
4977+ * Caller must hold mode config lock.
4978+ *
4979+ * Remove @mode from @output's mode list, then free it.
4980+ */
4981+void drm_mode_remove(struct drm_output *output, struct drm_display_mode *mode)
4982+{
4983+ list_del(&mode->head);
4984+ kfree(mode);
4985+}
4986+EXPORT_SYMBOL(drm_mode_remove);
4987+
4988+/**
4989+ * drm_output_create - create a new output
4990+ * @dev: DRM device
4991+ * @funcs: callbacks for this output
4992+ * @name: user visible name of the output
4993+ *
4994+ * LOCKING:
4995+ * Caller must hold @dev's mode_config lock.
4996+ *
4997+ * Creates a new drm_output structure and adds it to @dev's mode_config
4998+ * structure.
4999+ *
5000+ * RETURNS:
5001+ * Pointer to the new output or NULL on error.
5002+ */
5003+struct drm_output *drm_output_create(struct drm_device *dev,
5004+ const struct drm_output_funcs *funcs,
5005+ const char *name)
5006+{
5007+ struct drm_output *output = NULL;
5008+
5009+ output = kzalloc(sizeof(struct drm_output), GFP_KERNEL);
5010+ if (!output)
5011+ return NULL;
5012+
5013+ output->dev = dev;
5014+ output->funcs = funcs;
5015+ output->id = drm_idr_get(dev, output);
5016+ if (name)
5017+ strncpy(output->name, name, DRM_OUTPUT_LEN);
5018+ output->name[DRM_OUTPUT_LEN - 1] = 0;
5019+ output->subpixel_order = SubPixelUnknown;
5020+ INIT_LIST_HEAD(&output->probed_modes);
5021+ INIT_LIST_HEAD(&output->modes);
5022+ /* randr_output? */
5023+ /* output_set_monitor(output)? */
5024+ /* check for output_ignored(output)? */
5025+
5026+ mutex_lock(&dev->mode_config.mutex);
5027+ list_add_tail(&output->head, &dev->mode_config.output_list);
5028+ dev->mode_config.num_output++;
5029+
5030+ mutex_unlock(&dev->mode_config.mutex);
5031+
5032+ return output;
5033+
5034+}
5035+EXPORT_SYMBOL(drm_output_create);
5036+
5037+/**
5038+ * drm_output_destroy - remove an output
5039+ * @output: output to remove
5040+ *
5041+ * LOCKING:
5042+ * Caller must hold @dev's mode_config lock.
5043+ *
5044+ * Call @output's cleanup function, then remove the output from the DRM
5045+ * mode_config after freeing @output's modes.
5046+ */
5047+void drm_output_destroy(struct drm_output *output)
5048+{
5049+ struct drm_device *dev = output->dev;
5050+ struct drm_display_mode *mode, *t;
5051+
5052+ if (*output->funcs->cleanup)
5053+ (*output->funcs->cleanup)(output);
5054+
5055+ list_for_each_entry_safe(mode, t, &output->probed_modes, head)
5056+ drm_mode_remove(output, mode);
5057+
5058+ list_for_each_entry_safe(mode, t, &output->modes, head)
5059+ drm_mode_remove(output, mode);
5060+
5061+ mutex_lock(&dev->mode_config.mutex);
5062+ drm_idr_put(dev, output->id);
5063+ list_del(&output->head);
5064+ mutex_unlock(&dev->mode_config.mutex);
5065+ kfree(output);
5066+}
5067+EXPORT_SYMBOL(drm_output_destroy);
5068+
5069+/**
5070+ * drm_output_rename - rename an output
5071+ * @output: output to rename
5072+ * @name: new user visible name
5073+ *
5074+ * LOCKING:
5075+ * None.
5076+ *
5077+ * Simply stuff a new name into @output's name field, based on @name.
5078+ *
5079+ * RETURNS:
5080+ * True if the name was changed, false otherwise.
5081+ */
5082+bool drm_output_rename(struct drm_output *output, const char *name)
5083+{
5084+ if (!name)
5085+ return false;
5086+
5087+ strncpy(output->name, name, DRM_OUTPUT_LEN);
5088+ output->name[DRM_OUTPUT_LEN - 1] = 0;
5089+
5090+ DRM_DEBUG("Changed name to %s\n", output->name);
5091+// drm_output_set_monitor(output);
5092+// if (drm_output_ignored(output))
5093+// return FALSE;
5094+
5095+ return 1;
5096+}
5097+EXPORT_SYMBOL(drm_output_rename);
5098+
5099+/**
5100+ * drm_mode_create - create a new display mode
5101+ * @dev: DRM device
5102+ *
5103+ * LOCKING:
5104+ * None.
5105+ *
5106+ * Create a new drm_display_mode, give it an ID, and return it.
5107+ *
5108+ * RETURNS:
5109+ * Pointer to new mode on success, NULL on error.
5110+ */
5111+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
5112+{
5113+ struct drm_display_mode *nmode;
5114+
5115+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
5116+ if (!nmode)
5117+ return NULL;
5118+
5119+ nmode->mode_id = drm_idr_get(dev, nmode);
5120+ return nmode;
5121+}
5122+EXPORT_SYMBOL(drm_mode_create);
5123+
5124+/**
5125+ * drm_mode_destroy - remove a mode
5126+ * @dev: DRM device
5127+ * @mode: mode to remove
5128+ *
5129+ * LOCKING:
5130+ * Caller must hold mode config lock.
5131+ *
5132+ * Free @mode's unique identifier, then free it.
5133+ */
5134+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
5135+{
5136+ drm_idr_put(dev, mode->mode_id);
5137+
5138+ kfree(mode);
5139+}
5140+EXPORT_SYMBOL(drm_mode_destroy);
5141+
5142+/**
5143+ * drm_mode_config_init - initialize DRM mode_configuration structure
5144+ * @dev: DRM device
5145+ *
5146+ * LOCKING:
5147+ * None, should happen single threaded at init time.
5148+ *
5149+ * Initialize @dev's mode_config structure, used for tracking the graphics
5150+ * configuration of @dev.
5151+ */
5152+void drm_mode_config_init(struct drm_device *dev)
5153+{
5154+ mutex_init(&dev->mode_config.mutex);
5155+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
5156+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
5157+ INIT_LIST_HEAD(&dev->mode_config.output_list);
5158+ INIT_LIST_HEAD(&dev->mode_config.property_list);
5159+ INIT_LIST_HEAD(&dev->mode_config.usermode_list);
5160+ idr_init(&dev->mode_config.crtc_idr);
5161+}
5162+EXPORT_SYMBOL(drm_mode_config_init);
5163+
5164+/**
5165+ * drm_get_buffer_object - find the buffer object for a given handle
5166+ * @dev: DRM device
5167+ * @bo: pointer to caller's buffer_object pointer
5168+ * @handle: handle to lookup
5169+ *
5170+ * LOCKING:
5171+ * Must take @dev's struct_mutex to protect buffer object lookup.
5172+ *
5173+ * Given @handle, lookup the buffer object in @dev and put it in the caller's
5174+ * @bo pointer.
5175+ *
5176+ * RETURNS:
5177+ * Zero on success, -EINVAL if the handle couldn't be found.
5178+ */
5179+static int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
5180+{
5181+ struct drm_user_object *uo;
5182+ struct drm_hash_item *hash;
5183+ int ret;
5184+
5185+ *bo = NULL;
5186+
5187+ mutex_lock(&dev->struct_mutex);
5188+ ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
5189+ if (ret) {
5190+ DRM_ERROR("Couldn't find handle.\n");
5191+ ret = -EINVAL;
5192+ goto out_err;
5193+ }
5194+
5195+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
5196+ if (uo->type != drm_buffer_type) {
5197+ ret = -EINVAL;
5198+ goto out_err;
5199+ }
5200+
5201+ *bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
5202+ ret = 0;
5203+out_err:
5204+ mutex_unlock(&dev->struct_mutex);
5205+ return ret;
5206+}
5207+
5208+char drm_init_mode[32];
5209+int drm_init_xres;
5210+int drm_init_yres;
5211+EXPORT_SYMBOL(drm_init_mode);
5212+EXPORT_SYMBOL(drm_init_xres);
5213+EXPORT_SYMBOL(drm_init_yres);
5214+
5215+/**
5216+ * drm_pick_crtcs - pick crtcs for output devices
5217+ * @dev: DRM device
5218+ *
5219+ * LOCKING:
5220+ * Caller must hold mode config lock.
5221+ */
5222+static void drm_pick_crtcs (struct drm_device *dev)
5223+{
5224+ int c, o, assigned;
5225+ struct drm_output *output, *output_equal;
5226+ struct drm_crtc *crtc;
5227+ struct drm_display_mode *des_mode = NULL, *modes, *modes_equal;
5228+
5229+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5230+ output->crtc = NULL;
5231+
5232+ /* Don't hook up outputs that are disconnected ??
5233+ *
5234+ * This is debatable. Do we want fixed /dev/fbX or
5235+ * dynamic on hotplug (need mode code for that though) ?
5236+ *
5237+ * If we don't hook up outputs now, then we only create
5238+ * /dev/fbX for the output that's enabled, that's good as
5239+ * the users console will be on that output.
5240+ *
5241+ * If we do hook up outputs that are disconnected now, then
5242+ * the user may end up having to muck about with the fbcon
5243+ * map flags to assign his console to the enabled output. Ugh.
5244+ */
5245+ if (output->status != output_status_connected)
5246+ continue;
5247+
5248+ des_mode = NULL;
5249+ list_for_each_entry(des_mode, &output->modes, head) {
5250+ if (/* !strcmp(des_mode->name, drm_init_mode) || */
5251+ des_mode->hdisplay==drm_init_xres
5252+ && des_mode->vdisplay==drm_init_yres) {
5253+ des_mode->type |= DRM_MODE_TYPE_USERPREF;
5254+ break;
5255+ }
5256+
5257+ }
5258+ /* No userdef mode (initial mode set from module parameter) */
5259+ if (!des_mode || !(des_mode->type & DRM_MODE_TYPE_USERPREF)) {
5260+ list_for_each_entry(des_mode, &output->modes, head) {
5261+ if (des_mode->type & DRM_MODE_TYPE_PREFERRED)
5262+ break;
5263+ }
5264+ }
5265+
5266+ /* No preferred mode, and no default mode, let's just
5267+ select the first available */
5268+ if (!des_mode || (!(des_mode->type & DRM_MODE_TYPE_PREFERRED)
5269+ && !(des_mode->type & DRM_MODE_TYPE_USERPREF))) {
5270+ list_for_each_entry(des_mode, &output->modes, head) {
5271+ if (des_mode)
5272+ break;
5273+ }
5274+ }
5275+
5276+ c = -1;
5277+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5278+ assigned = 0;
5279+
5280+ c++;
5281+ if ((output->possible_crtcs & (1 << c)) == 0)
5282+ continue;
5283+
5284+ list_for_each_entry(output_equal, &dev->mode_config.output_list, head) {
5285+ if (output->id == output_equal->id)
5286+ continue;
5287+
5288+ /* Find out if crtc has been assigned before */
5289+ if (output_equal->crtc == crtc)
5290+ assigned = 1;
5291+ }
5292+
5293+#if 1 /* continue for now */
5294+ if (assigned)
5295+ continue;
5296+#endif
5297+
5298+ o = -1;
5299+ list_for_each_entry(output_equal, &dev->mode_config.output_list, head) {
5300+ o++;
5301+ if (output->id == output_equal->id)
5302+ continue;
5303+
5304+ list_for_each_entry(modes, &output->modes, head) {
5305+ list_for_each_entry(modes_equal, &output_equal->modes, head) {
5306+ if (drm_mode_equal (modes, modes_equal)) {
5307+ if ((output->possible_clones & output_equal->possible_clones) && (output_equal->crtc == crtc)) {
5308+ printk("Cloning %s (0x%lx) to %s (0x%lx)\n",output->name,output->possible_clones,output_equal->name,output_equal->possible_clones);
5309+ assigned = 0;
5310+ goto clone;
5311+ }
5312+ }
5313+ }
5314+ }
5315+ }
5316+
5317+clone:
5318+ /* crtc has been assigned, skip it */
5319+ if (assigned)
5320+ continue;
5321+
5322+ /* Found a CRTC to attach to, do it ! */
5323+ output->crtc = crtc;
5324+ output->crtc->desired_mode = des_mode;
5325+ output->initial_x = 0;
5326+ output->initial_y = 0;
5327+ DRM_DEBUG("Desired mode for CRTC %d is 0x%x:%s\n",c,des_mode->mode_id, des_mode->name);
5328+ break;
5329+ }
5330+ }
5331+}
5332+EXPORT_SYMBOL(drm_pick_crtcs);
5333+
5334+/**
5335+ * drm_initial_config - setup a sane initial output configuration
5336+ * @dev: DRM device
5337+ * @can_grow: this configuration is growable
5338+ *
5339+ * LOCKING:
5340+ * Called at init time, must take mode config lock.
5341+ *
5342+ * Scan the CRTCs and outputs and try to put together an initial setup.
5343+ * At the moment, this is a cloned configuration across all heads with
5344+ * a new framebuffer object as the backing store.
5345+ *
5346+ * RETURNS:
5347+ * Zero if everything went ok, nonzero otherwise.
5348+ */
5349+bool drm_initial_config(struct drm_device *dev, bool can_grow)
5350+{
5351+ struct drm_output *output;
5352+ struct drm_crtc *crtc;
5353+ int ret = false;
5354+
5355+ mutex_lock(&dev->mode_config.mutex);
5356+
5357+ drm_crtc_probe_output_modes(dev, 2048, 2048);
5358+
5359+ drm_pick_crtcs(dev);
5360+
5361+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5362+
5363+ /* can't setup the crtc if there's no assigned mode */
5364+ if (!crtc->desired_mode)
5365+ continue;
5366+
5367+ /* Now setup the fbdev for attached crtcs */
5368+ dev->driver->fb_probe(dev, crtc);
5369+ }
5370+
5371+ /* This is a little screwy, as we've already walked the outputs
5372+ * above, but it's a little bit of magic too. There's the potential
5373+ * for things not to get setup above if an existing device gets
5374+ * re-assigned thus confusing the hardware. By walking the outputs
5375+ * this fixes up their crtc's.
5376+ */
5377+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5378+
5379+ /* can't setup the output if there's no assigned mode */
5380+ if (!output->crtc || !output->crtc->desired_mode)
5381+ continue;
5382+
5383+ /* and needs an attached fb */
5384+ if (output->crtc->fb)
5385+ drm_crtc_set_mode(output->crtc, output->crtc->desired_mode, 0, 0);
5386+ }
5387+
5388+ drm_disable_unused_functions(dev);
5389+
5390+ mutex_unlock(&dev->mode_config.mutex);
5391+ return ret;
5392+}
5393+EXPORT_SYMBOL(drm_initial_config);
5394+
5395+/**
5396+ * drm_mode_config_cleanup - free up DRM mode_config info
5397+ * @dev: DRM device
5398+ *
5399+ * LOCKING:
5400+ * Caller must hold mode config lock.
5401+ *
5402+ * Free up all the outputs and CRTCs associated with this DRM device, then
5403+ * free up the framebuffers and associated buffer objects.
5404+ *
5405+ * FIXME: cleanup any dangling user buffer objects too
5406+ */
5407+void drm_mode_config_cleanup(struct drm_device *dev)
5408+{
5409+ struct drm_output *output, *ot;
5410+ struct drm_crtc *crtc, *ct;
5411+ struct drm_framebuffer *fb, *fbt;
5412+ struct drm_display_mode *mode, *mt;
5413+ struct drm_property *property, *pt;
5414+
5415+ list_for_each_entry_safe(output, ot, &dev->mode_config.output_list, head) {
5416+ drm_output_destroy(output);
5417+ }
5418+
5419+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) {
5420+ drm_property_destroy(dev, property);
5421+ }
5422+
5423+ list_for_each_entry_safe(mode, mt, &dev->mode_config.usermode_list, head) {
5424+ drm_mode_destroy(dev, mode);
5425+ }
5426+
5427+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
5428+ if (fb->bo->type != drm_bo_type_kernel)
5429+ drm_framebuffer_destroy(fb);
5430+ else
5431+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
5432+ }
5433+
5434+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
5435+ drm_crtc_destroy(crtc);
5436+ }
5437+
5438+}
5439+EXPORT_SYMBOL(drm_mode_config_cleanup);
5440+
5441+/**
5442+ * drm_crtc_set_config - set a new config from userspace
5443+ * @crtc: CRTC to setup
5444+ * @crtc_info: user provided configuration
5445+ * @new_mode: new mode to set
5446+ * @output_set: set of outputs for the new config
5447+ * @fb: new framebuffer
5448+ *
5449+ * LOCKING:
5450+ * Caller must hold mode config lock.
5451+ *
5452+ * Setup a new configuration, provided by the user in @crtc_info, and enable
5453+ * it.
5454+ *
5455+ * RETURNS:
5456+ * Zero. (FIXME)
5457+ */
5458+int drm_crtc_set_config(struct drm_crtc *crtc, struct drm_mode_crtc *crtc_info, struct drm_display_mode *new_mode, struct drm_output **output_set, struct drm_framebuffer *fb)
5459+{
5460+ struct drm_device *dev = crtc->dev;
5461+ struct drm_crtc **save_crtcs, *new_crtc;
5462+ bool save_enabled = crtc->enabled;
5463+ bool changed;
5464+ struct drm_output *output;
5465+ int count = 0, ro;
5466+
5467+ save_crtcs = kzalloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc *), GFP_KERNEL);
5468+ if (!save_crtcs)
5469+ return -ENOMEM;
5470+
5471+ if (crtc->fb != fb)
5472+ changed = true;
5473+
5474+ if (crtc_info->x != crtc->x || crtc_info->y != crtc->y)
5475+ changed = true;
5476+
5477+ if (new_mode && (crtc->mode.mode_id != new_mode->mode_id))
5478+ changed = true;
5479+
5480+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5481+ save_crtcs[count++] = output->crtc;
5482+
5483+ if (output->crtc == crtc)
5484+ new_crtc = NULL;
5485+ else
5486+ new_crtc = output->crtc;
5487+
5488+ for (ro = 0; ro < crtc_info->count_outputs; ro++) {
5489+ if (output_set[ro] == output)
5490+ new_crtc = crtc;
5491+ }
5492+ if (new_crtc != output->crtc) {
5493+ changed = true;
5494+ output->crtc = new_crtc;
5495+ }
5496+ }
5497+
5498+ if (changed) {
5499+ crtc->fb = fb;
5500+ crtc->enabled = (new_mode != NULL);
5501+ if (new_mode != NULL) {
5502+ DRM_DEBUG("attempting to set mode from userspace\n");
5503+ drm_mode_debug_printmodeline(dev, new_mode);
5504+ if (!drm_crtc_set_mode(crtc, new_mode, crtc_info->x,
5505+ crtc_info->y)) {
5506+ crtc->enabled = save_enabled;
5507+ count = 0;
5508+ list_for_each_entry(output, &dev->mode_config.output_list, head)
5509+ output->crtc = save_crtcs[count++];
5510+ kfree(save_crtcs);
5511+ return -EINVAL;
5512+ }
5513+ crtc->desired_x = crtc_info->x;
5514+ crtc->desired_y = crtc_info->y;
5515+ crtc->desired_mode = new_mode;
5516+ }
5517+ drm_disable_unused_functions(dev);
5518+ }
5519+ kfree(save_crtcs);
5520+ return 0;
5521+}
5522+
5523+/**
5524+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
5525+ * @out: drm_mode_modeinfo struct to return to the user
5526+ * @in: drm_display_mode to use
5527+ *
5528+ * LOCKING:
5529+ * None.
5530+ *
5531+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
5532+ * the user.
5533+ */
5534+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, struct drm_display_mode *in)
5535+{
5536+
5537+ out->id = in->mode_id;
5538+ out->clock = in->clock;
5539+ out->hdisplay = in->hdisplay;
5540+ out->hsync_start = in->hsync_start;
5541+ out->hsync_end = in->hsync_end;
5542+ out->htotal = in->htotal;
5543+ out->hskew = in->hskew;
5544+ out->vdisplay = in->vdisplay;
5545+ out->vsync_start = in->vsync_start;
5546+ out->vsync_end = in->vsync_end;
5547+ out->vtotal = in->vtotal;
5548+ out->vscan = in->vscan;
5549+ out->vrefresh = in->vrefresh;
5550+ out->flags = in->flags;
5551+ out->type = in->type;
5552+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
5553+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
5554+}
5555+
5556+/**
5557+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
5558+ * @out: drm_display_mode to return to the user
5559+ * @in: drm_mode_modeinfo to use
5560+ *
5561+ * LOCKING:
5562+ * None.
5563+ *
5564+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
5565+ * the caller.
5566+ */
5567+void drm_crtc_convert_umode(struct drm_display_mode *out, struct drm_mode_modeinfo *in)
5568+{
5569+ out->clock = in->clock;
5570+ out->hdisplay = in->hdisplay;
5571+ out->hsync_start = in->hsync_start;
5572+ out->hsync_end = in->hsync_end;
5573+ out->htotal = in->htotal;
5574+ out->hskew = in->hskew;
5575+ out->vdisplay = in->vdisplay;
5576+ out->vsync_start = in->vsync_start;
5577+ out->vsync_end = in->vsync_end;
5578+ out->vtotal = in->vtotal;
5579+ out->vscan = in->vscan;
5580+ out->vrefresh = in->vrefresh;
5581+ out->flags = in->flags;
5582+ out->type = in->type;
5583+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
5584+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
5585+}
5586+
5587+/**
5588+ * drm_mode_getresources - get graphics configuration
5589+ * @inode: inode from the ioctl
5590+ * @filp: file * from the ioctl
5591+ * @cmd: cmd from ioctl
5592+ * @arg: arg from ioctl
5593+ *
5594+ * LOCKING:
5595+ * Takes mode config lock.
5596+ *
5597+ * Construct a set of configuration description structures and return
5598+ * them to the user, including CRTC, output and framebuffer configuration.
5599+ *
5600+ * Called by the user via ioctl.
5601+ *
5602+ * RETURNS:
5603+ * Zero on success, errno on failure.
5604+ */
5605+int drm_mode_getresources(struct drm_device *dev,
5606+ void *data, struct drm_file *file_priv)
5607+{
5608+ struct drm_mode_card_res *card_res = data;
5609+ struct list_head *lh;
5610+ struct drm_framebuffer *fb;
5611+ struct drm_output *output;
5612+ struct drm_crtc *crtc;
5613+ struct drm_mode_modeinfo u_mode;
5614+ struct drm_display_mode *mode;
5615+ int ret = 0;
5616+ int mode_count= 0;
5617+ int output_count = 0;
5618+ int crtc_count = 0;
5619+ int fb_count = 0;
5620+ int copied = 0;
5621+
5622+ memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
5623+
5624+ mutex_lock(&dev->mode_config.mutex);
5625+
5626+ list_for_each(lh, &dev->mode_config.fb_list)
5627+ fb_count++;
5628+
5629+ list_for_each(lh, &dev->mode_config.crtc_list)
5630+ crtc_count++;
5631+
5632+ list_for_each_entry(output, &dev->mode_config.output_list,
5633+ head) {
5634+ output_count++;
5635+ list_for_each(lh, &output->modes)
5636+ mode_count++;
5637+ }
5638+ list_for_each(lh, &dev->mode_config.usermode_list)
5639+ mode_count++;
5640+
5641+ if (card_res->count_modes == 0) {
5642+ DRM_DEBUG("probing modes %dx%d\n", dev->mode_config.max_width, dev->mode_config.max_height);
5643+ drm_crtc_probe_output_modes(dev, dev->mode_config.max_width, dev->mode_config.max_height);
5644+ mode_count = 0;
5645+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5646+ list_for_each(lh, &output->modes)
5647+ mode_count++;
5648+ }
5649+ list_for_each(lh, &dev->mode_config.usermode_list)
5650+ mode_count++;
5651+ }
5652+
5653+ /* handle this in 4 parts */
5654+ /* FBs */
5655+ if (card_res->count_fbs >= fb_count) {
5656+ copied = 0;
5657+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
5658+ if (put_user(fb->id, card_res->fb_id + copied))
5659+ return -EFAULT;
5660+ copied++;
5661+ }
5662+ }
5663+ card_res->count_fbs = fb_count;
5664+
5665+ /* CRTCs */
5666+ if (card_res->count_crtcs >= crtc_count) {
5667+ copied = 0;
5668+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head){
5669+ DRM_DEBUG("CRTC ID is %d\n", crtc->id);
5670+ if (put_user(crtc->id, card_res->crtc_id + copied))
5671+ return -EFAULT;
5672+ copied++;
5673+ }
5674+ }
5675+ card_res->count_crtcs = crtc_count;
5676+
5677+
5678+ /* Outputs */
5679+ if (card_res->count_outputs >= output_count) {
5680+ copied = 0;
5681+ list_for_each_entry(output, &dev->mode_config.output_list,
5682+ head) {
5683+ DRM_DEBUG("OUTPUT ID is %d\n", output->id);
5684+ if (put_user(output->id, card_res->output_id + copied))
5685+ return -EFAULT;
5686+ copied++;
5687+ }
5688+ }
5689+ card_res->count_outputs = output_count;
5690+
5691+ /* Modes */
5692+ if (card_res->count_modes >= mode_count) {
5693+ copied = 0;
5694+ list_for_each_entry(output, &dev->mode_config.output_list,
5695+ head) {
5696+ list_for_each_entry(mode, &output->modes, head) {
5697+ drm_crtc_convert_to_umode(&u_mode, mode);
5698+ if (copy_to_user(card_res->modes + copied,
5699+ &u_mode, sizeof(u_mode)))
5700+ return -EFAULT;
5701+ copied++;
5702+ }
5703+ }
5704+ /* add in user modes */
5705+ list_for_each_entry(mode, &dev->mode_config.usermode_list, head) {
5706+ drm_crtc_convert_to_umode(&u_mode, mode);
5707+ if (copy_to_user(card_res->modes + copied, &u_mode,
5708+ sizeof(u_mode)))
5709+ return -EFAULT;
5710+ copied++;
5711+ }
5712+ }
5713+ card_res->count_modes = mode_count;
5714+
5715+ DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
5716+ card_res->count_outputs,
5717+ card_res->count_modes);
5718+
5719+ mutex_unlock(&dev->mode_config.mutex);
5720+ return ret;
5721+}
5722+
5723+/**
5724+ * drm_mode_getcrtc - get CRTC configuration
5725+ * @inode: inode from the ioctl
5726+ * @filp: file * from the ioctl
5727+ * @cmd: cmd from ioctl
5728+ * @arg: arg from ioctl
5729+ *
5730+ * LOCKING:
5731+ * Caller? (FIXME)
5732+ *
5733+ * Construct a CRTC configuration structure to return to the user.
5734+ *
5735+ * Called by the user via ioctl.
5736+ *
5737+ * RETURNS:
5738+ * Zero on success, errno on failure.
5739+ */
5740+int drm_mode_getcrtc(struct drm_device *dev,
5741+ void *data, struct drm_file *file_priv)
5742+{
5743+ struct drm_mode_crtc *crtc_resp = data;
5744+ struct drm_crtc *crtc;
5745+ struct drm_output *output;
5746+ int ocount;
5747+ int ret = 0;
5748+
5749+ mutex_lock(&dev->mode_config.mutex);
5750+ crtc = idr_find(&dev->mode_config.crtc_idr, crtc_resp->crtc_id);
5751+ if (!crtc || (crtc->id != crtc_resp->crtc_id)) {
5752+ ret = -EINVAL;
5753+ goto out;
5754+ }
5755+
5756+ crtc_resp->x = crtc->x;
5757+ crtc_resp->y = crtc->y;
5758+
5759+ if (crtc->fb)
5760+ crtc_resp->fb_id = crtc->fb->id;
5761+ else
5762+ crtc_resp->fb_id = 0;
5763+
5764+ crtc_resp->outputs = 0;
5765+ if (crtc->enabled) {
5766+
5767+ crtc_resp->mode = crtc->mode.mode_id;
5768+ ocount = 0;
5769+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
5770+ if (output->crtc == crtc)
5771+ crtc_resp->outputs |= 1 << (ocount++);
5772+ }
5773+ } else {
5774+ crtc_resp->mode = 0;
5775+ }
5776+
5777+out:
5778+ mutex_unlock(&dev->mode_config.mutex);
5779+ return ret;
5780+}
5781+
5782+/**
5783+ * drm_mode_getoutput - get output configuration
5784+ * @inode: inode from the ioctl
5785+ * @filp: file * from the ioctl
5786+ * @cmd: cmd from ioctl
5787+ * @arg: arg from ioctl
5788+ *
5789+ * LOCKING:
5790+ * Caller? (FIXME)
5791+ *
5792+ * Construct an output configuration structure to return to the user.
5793+ *
5794+ * Called by the user via ioctl.
5795+ *
5796+ * RETURNS:
5797+ * Zero on success, errno on failure.
5798+ */
5799+int drm_mode_getoutput(struct drm_device *dev,
5800+ void *data, struct drm_file *file_priv)
5801+{
5802+ struct drm_mode_get_output *out_resp = data;
5803+ struct drm_output *output;
5804+ struct drm_display_mode *mode;
5805+ int mode_count = 0;
5806+ int props_count = 0;
5807+ int ret = 0;
5808+ int copied = 0;
5809+ int i;
5810+
5811+ DRM_DEBUG("output id %d:\n", out_resp->output);
5812+
5813+ mutex_lock(&dev->mode_config.mutex);
5814+ output= idr_find(&dev->mode_config.crtc_idr, out_resp->output);
5815+ if (!output || (output->id != out_resp->output)) {
5816+ ret = -EINVAL;
5817+ goto out;
5818+ }
5819+
5820+ list_for_each_entry(mode, &output->modes, head)
5821+ mode_count++;
5822+
5823+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++)
5824+ if (output->user_mode_ids[i] != 0)
5825+ mode_count++;
5826+
5827+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
5828+ if (output->property_ids[i] != 0) {
5829+ props_count++;
5830+ }
5831+ }
5832+
5833+ strncpy(out_resp->name, output->name, DRM_OUTPUT_NAME_LEN);
5834+ out_resp->name[DRM_OUTPUT_NAME_LEN-1] = 0;
5835+
5836+ out_resp->mm_width = output->mm_width;
5837+ out_resp->mm_height = output->mm_height;
5838+ out_resp->subpixel = output->subpixel_order;
5839+ out_resp->connection = output->status;
5840+ if (output->crtc)
5841+ out_resp->crtc = output->crtc->id;
5842+ else
5843+ out_resp->crtc = 0;
5844+
5845+ out_resp->crtcs = output->possible_crtcs;
5846+ out_resp->clones = output->possible_clones;
5847+
5848+ if ((out_resp->count_modes >= mode_count) && mode_count) {
5849+ copied = 0;
5850+ list_for_each_entry(mode, &output->modes, head) {
5851+ out_resp->modes[copied++] = mode->mode_id;
5852+ }
5853+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
5854+ if (output->user_mode_ids[i] != 0) {
5855+ if (put_user(output->user_mode_ids[i], out_resp->modes + copied))
5856+ return -EFAULT;
5857+ copied++;
5858+ }
5859+ }
5860+ }
5861+ out_resp->count_modes = mode_count;
5862+
5863+ if ((out_resp->count_props >= props_count) && props_count) {
5864+ copied = 0;
5865+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
5866+ if (output->property_ids[i] != 0) {
5867+ if (put_user(output->property_ids[i], out_resp->props + copied)) {
5868+ ret = -EFAULT;
5869+ goto out;
5870+ }
5871+
5872+ if (put_user(output->property_values[i], out_resp->prop_values + copied)) {
5873+ ret = -EFAULT;
5874+ goto out;
5875+ }
5876+ copied++;
5877+ }
5878+ }
5879+ }
5880+ out_resp->count_props = props_count;
5881+
5882+out:
5883+ mutex_unlock(&dev->mode_config.mutex);
5884+ return ret;
5885+}
5886+
5887+/**
5888+ * drm_mode_setcrtc - set CRTC configuration
5889+ * @inode: inode from the ioctl
5890+ * @filp: file * from the ioctl
5891+ * @cmd: cmd from ioctl
5892+ * @arg: arg from ioctl
5893+ *
5894+ * LOCKING:
5895+ * Caller? (FIXME)
5896+ *
5897+ * Build a new CRTC configuration based on user request.
5898+ *
5899+ * Called by the user via ioctl.
5900+ *
5901+ * RETURNS:
5902+ * Zero on success, errno on failure.
5903+ */
5904+int drm_mode_setcrtc(struct drm_device *dev,
5905+ void *data, struct drm_file *file_priv)
5906+{
5907+ struct drm_mode_crtc *crtc_req = data;
5908+ struct drm_crtc *crtc;
5909+ struct drm_output **output_set = NULL, *output;
5910+ struct drm_display_mode *mode;
5911+ struct drm_framebuffer *fb = NULL;
5912+ int ret = 0;
5913+ int i;
5914+
5915+ mutex_lock(&dev->mode_config.mutex);
5916+ crtc = idr_find(&dev->mode_config.crtc_idr, crtc_req->crtc_id);
5917+ if (!crtc || (crtc->id != crtc_req->crtc_id)) {
5918+ DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
5919+ ret = -EINVAL;
5920+ goto out;
5921+ }
5922+
5923+ if (crtc_req->mode) {
5924+ /* if we have a mode we need a framebuffer */
5925+ if (crtc_req->fb_id) {
5926+ fb = idr_find(&dev->mode_config.crtc_idr, crtc_req->fb_id);
5927+ if (!fb || (fb->id != crtc_req->fb_id)) {
5928+ DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id);
5929+ ret = -EINVAL;
5930+ goto out;
5931+ }
5932+ }
5933+ mode = idr_find(&dev->mode_config.crtc_idr, crtc_req->mode);
5934+ if (!mode || (mode->mode_id != crtc_req->mode)) {
5935+ struct drm_output *output;
5936+
5937+ list_for_each_entry(output,
5938+ &dev->mode_config.output_list,
5939+ head) {
5940+ list_for_each_entry(mode, &output->modes,
5941+ head) {
5942+ drm_mode_debug_printmodeline(dev,
5943+ mode);
5944+ }
5945+ }
5946+
5947+ DRM_DEBUG("Unknown mode id %d, %p\n", crtc_req->mode, mode);
5948+ ret = -EINVAL;
5949+ goto out;
5950+ }
5951+ } else
5952+ mode = NULL;
5953+
5954+ if (crtc_req->count_outputs == 0 && mode) {
5955+ DRM_DEBUG("Count outputs is 0 but mode set\n");
5956+ ret = -EINVAL;
5957+ goto out;
5958+ }
5959+
5960+ if (crtc_req->count_outputs > 0 && !mode && !fb) {
5961+ DRM_DEBUG("Count outputs is %d but no mode or fb set\n", crtc_req->count_outputs);
5962+ ret = -EINVAL;
5963+ goto out;
5964+ }
5965+
5966+ if (crtc_req->count_outputs > 0) {
5967+ u32 out_id;
5968+ output_set = kmalloc(crtc_req->count_outputs *
5969+ sizeof(struct drm_output *), GFP_KERNEL);
5970+ if (!output_set) {
5971+ ret = -ENOMEM;
5972+ goto out;
5973+ }
5974+
5975+ for (i = 0; i < crtc_req->count_outputs; i++) {
5976+ if (get_user(out_id, &crtc_req->set_outputs[i])) {
5977+ ret = -EFAULT;
5978+ goto out;
5979+ }
5980+
5981+ output = idr_find(&dev->mode_config.crtc_idr, out_id);
5982+ if (!output || (out_id != output->id)) {
5983+ DRM_DEBUG("Output id %d unknown\n", out_id);
5984+ ret = -EINVAL;
5985+ goto out;
5986+ }
5987+
5988+ output_set[i] = output;
5989+ }
5990+ }
5991+
5992+ ret = drm_crtc_set_config(crtc, crtc_req, mode, output_set, fb);
5993+
5994+out:
5995+ mutex_unlock(&dev->mode_config.mutex);
5996+ return ret;
5997+}
5998+
5999+/**
6000+ * drm_mode_addfb - add an FB to the graphics configuration
6001+ * @inode: inode from the ioctl
6002+ * @filp: file * from the ioctl
6003+ * @cmd: cmd from ioctl
6004+ * @arg: arg from ioctl
6005+ *
6006+ * LOCKING:
6007+ * Takes mode config lock.
6008+ *
6009+ * Add a new FB to the specified CRTC, given a user request.
6010+ *
6011+ * Called by the user via ioctl.
6012+ *
6013+ * RETURNS:
6014+ * Zero on success, errno on failure.
6015+ */
6016+int drm_mode_addfb(struct drm_device *dev,
6017+ void *data, struct drm_file *file_priv)
6018+{
6019+ struct drm_mode_fb_cmd *r = data;
6020+ struct drm_mode_config *config = &dev->mode_config;
6021+ struct drm_framebuffer *fb;
6022+ struct drm_buffer_object *bo;
6023+ struct drm_crtc *crtc;
6024+ int ret = 0;
6025+
6026+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
6027+ DRM_ERROR("mode new framebuffer width not within limits\n");
6028+ return -EINVAL;
6029+ }
6030+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
6031+ DRM_ERROR("mode new framebuffer height not within limits\n");
6032+ return -EINVAL;
6033+ }
6034+
6035+ mutex_lock(&dev->mode_config.mutex);
6036+ /* TODO check limits are okay */
6037+ ret = drm_get_buffer_object(dev, &bo, r->handle);
6038+ if (ret || !bo) {
6039+ ret = -EINVAL;
6040+ goto out;
6041+ }
6042+
6043+ /* TODO check buffer is sufficiently large */
6044+ /* TODO setup destructor callback */
6045+
6046+ fb = drm_framebuffer_create(dev);
6047+ if (!fb) {
6048+ ret = -EINVAL;
6049+ goto out;
6050+ }
6051+
6052+ fb->width = r->width;
6053+ fb->height = r->height;
6054+ fb->pitch = r->pitch;
6055+ fb->bits_per_pixel = r->bpp;
6056+ fb->depth = r->depth;
6057+ fb->offset = bo->offset;
6058+ fb->bo = bo;
6059+
6060+ r->buffer_id = fb->id;
6061+
6062+ list_add(&fb->filp_head, &file_priv->fbs);
6063+
6064+ /* FIXME: bind the fb to the right crtc */
6065+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6066+ crtc->fb = fb;
6067+ dev->driver->fb_probe(dev, crtc);
6068+ }
6069+
6070+out:
6071+ mutex_unlock(&dev->mode_config.mutex);
6072+ return ret;
6073+}
6074+
6075+/**
6076+ * drm_mode_rmfb - remove an FB from the configuration
6077+ * @inode: inode from the ioctl
6078+ * @filp: file * from the ioctl
6079+ * @cmd: cmd from ioctl
6080+ * @arg: arg from ioctl
6081+ *
6082+ * LOCKING:
6083+ * Takes mode config lock.
6084+ *
6085+ * Remove the FB specified by the user.
6086+ *
6087+ * Called by the user via ioctl.
6088+ *
6089+ * RETURNS:
6090+ * Zero on success, errno on failure.
6091+ */
6092+int drm_mode_rmfb(struct drm_device *dev,
6093+ void *data, struct drm_file *file_priv)
6094+{
6095+ struct drm_framebuffer *fb = 0;
6096+ uint32_t *id = data;
6097+ int ret = 0;
6098+
6099+ mutex_lock(&dev->mode_config.mutex);
6100+ fb = idr_find(&dev->mode_config.crtc_idr, *id);
6101+ /* TODO check that we really get a framebuffer back. */
6102+ if (!fb || (*id != fb->id)) {
6103+ DRM_ERROR("mode invalid framebuffer id\n");
6104+ ret = -EINVAL;
6105+ goto out;
6106+ }
6107+
6108+ /* TODO check if we own the buffer */
6109+ /* TODO release all crtc connected to the framebuffer */
6110+ /* bind the fb to the crtc for now */
6111+ /* TODO unhook the destructor from the buffer object */
6112+
6113+ if (fb->bo->type != drm_bo_type_kernel)
6114+ drm_framebuffer_destroy(fb);
6115+ else
6116+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
6117+
6118+out:
6119+ mutex_unlock(&dev->mode_config.mutex);
6120+ return ret;
6121+}
6122+
6123+/**
6124+ * drm_mode_getfb - get FB info
6125+ * @inode: inode from the ioctl
6126+ * @filp: file * from the ioctl
6127+ * @cmd: cmd from ioctl
6128+ * @arg: arg from ioctl
6129+ *
6130+ * LOCKING:
6131+ * Caller? (FIXME)
6132+ *
6133+ * Lookup the FB given its ID and return info about it.
6134+ *
6135+ * Called by the user via ioctl.
6136+ *
6137+ * RETURNS:
6138+ * Zero on success, errno on failure.
6139+ */
6140+int drm_mode_getfb(struct drm_device *dev,
6141+ void *data, struct drm_file *file_priv)
6142+{
6143+ struct drm_mode_fb_cmd *r = data;
6144+ struct drm_framebuffer *fb;
6145+ int ret = 0;
6146+
6147+ mutex_lock(&dev->mode_config.mutex);
6148+ fb = idr_find(&dev->mode_config.crtc_idr, r->buffer_id);
6149+ if (!fb || (r->buffer_id != fb->id)) {
6150+ DRM_ERROR("invalid framebuffer id\n");
6151+ ret = -EINVAL;
6152+ goto out;
6153+ }
6154+
6155+ r->height = fb->height;
6156+ r->width = fb->width;
6157+ r->depth = fb->depth;
6158+ r->bpp = fb->bits_per_pixel;
6159+ r->handle = fb->bo->base.hash.key;
6160+ r->pitch = fb->pitch;
6161+
6162+out:
6163+ mutex_unlock(&dev->mode_config.mutex);
6164+ return ret;
6165+}
6166+
6167+/**
6168+ * drm_fb_release - remove and free the FBs on this file
6169+ * @filp: file * from the ioctl
6170+ *
6171+ * LOCKING:
6172+ * Takes mode config lock.
6173+ *
6174+ * Destroy all the FBs associated with @filp.
6175+ *
6176+ * Called by the user via ioctl.
6177+ *
6178+ * RETURNS:
6179+ * Zero on success, errno on failure.
6180+ */
6181+void drm_fb_release(struct file *filp)
6182+{
6183+ struct drm_file *priv = filp->private_data;
6184+ struct drm_device *dev = priv->minor->dev;
6185+ struct drm_framebuffer *fb, *tfb;
6186+
6187+ mutex_lock(&dev->mode_config.mutex);
6188+ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
6189+ list_del(&fb->filp_head);
6190+ if (fb->bo->type != drm_bo_type_kernel)
6191+ drm_framebuffer_destroy(fb);
6192+ else
6193+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
6194+ }
6195+ mutex_unlock(&dev->mode_config.mutex);
6196+}
6197+
6198+/*
6199+ *
6200+ */
6201+void drm_mode_addmode(struct drm_device *dev, struct drm_display_mode *user_mode)
6202+{
6203+ user_mode->type |= DRM_MODE_TYPE_USERDEF;
6204+
6205+ user_mode->output_count = 0;
6206+ list_add(&user_mode->head, &dev->mode_config.usermode_list);
6207+}
6208+EXPORT_SYMBOL(drm_mode_addmode);
6209+
6210+int drm_mode_rmmode(struct drm_device *dev, struct drm_display_mode *mode)
6211+{
6212+ struct drm_display_mode *t;
6213+ int ret = -EINVAL;
6214+ list_for_each_entry(t, &dev->mode_config.usermode_list, head) {
6215+ if (t == mode) {
6216+ list_del(&mode->head);
6217+ drm_mode_destroy(dev, mode);
6218+ ret = 0;
6219+ break;
6220+ }
6221+ }
6222+ return ret;
6223+}
6224+EXPORT_SYMBOL(drm_mode_rmmode);
6225+
6226+static int drm_mode_attachmode(struct drm_device *dev,
6227+ struct drm_output *output,
6228+ struct drm_display_mode *mode)
6229+{
6230+ int ret = 0;
6231+ int i;
6232+
6233+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
6234+ if (output->user_mode_ids[i] == 0) {
6235+ output->user_mode_ids[i] = mode->mode_id;
6236+ mode->output_count++;
6237+ break;
6238+ }
6239+ }
6240+
6241+ if (i == DRM_OUTPUT_MAX_UMODES)
6242+ ret = -ENOSPC;
6243+
6244+ return ret;
6245+}
6246+
6247+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
6248+ struct drm_display_mode *mode)
6249+{
6250+ struct drm_output *output;
6251+
6252+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
6253+ if (output->crtc == crtc)
6254+ drm_mode_attachmode(dev, output, mode);
6255+ }
6256+ return 0;
6257+}
6258+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
6259+
6260+static int drm_mode_detachmode(struct drm_device *dev,
6261+ struct drm_output *output,
6262+ struct drm_display_mode *mode)
6263+{
6264+ int found = 0;
6265+ int ret = 0, i;
6266+
6267+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
6268+ if (output->user_mode_ids[i] == mode->mode_id) {
6269+ output->user_mode_ids[i] = 0;
6270+ mode->output_count--;
6271+ found = 1;
6272+ }
6273+ }
6274+
6275+ if (!found)
6276+ ret = -EINVAL;
6277+
6278+ return ret;
6279+}
6280+
6281+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
6282+{
6283+ struct drm_output *output;
6284+
6285+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
6286+ drm_mode_detachmode(dev, output, mode);
6287+ }
6288+ return 0;
6289+}
6290+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
6291+
6292+/**
6293+ * drm_fb_addmode - adds a user defined mode
6294+ * @inode: inode from the ioctl
6295+ * @filp: file * from the ioctl
6296+ * @cmd: cmd from ioctl
6297+ * @arg: arg from ioctl
6298+ *
6299+ * Adds a user specified mode to the kernel.
6300+ *
6301+ * Called by the user via ioctl.
6302+ *
6303+ * RETURNS:
6304+ * writes new mode id into arg.
6305+ * Zero on success, errno on failure.
6306+ */
6307+int drm_mode_addmode_ioctl(struct drm_device *dev,
6308+ void *data, struct drm_file *file_priv)
6309+{
6310+ struct drm_mode_modeinfo *new_mode = data;
6311+ struct drm_display_mode *user_mode;
6312+ int ret = 0;
6313+
6314+ mutex_lock(&dev->mode_config.mutex);
6315+ user_mode = drm_mode_create(dev);
6316+ if (!user_mode) {
6317+ ret = -ENOMEM;
6318+ goto out;
6319+ }
6320+
6321+ drm_crtc_convert_umode(user_mode, new_mode);
6322+
6323+ drm_mode_addmode(dev, user_mode);
6324+ new_mode->id = user_mode->mode_id;
6325+
6326+out:
6327+ mutex_unlock(&dev->mode_config.mutex);
6328+ return ret;
6329+}
6330+
6331+/**
6332+ * drm_fb_rmmode - removes a user defined mode
6333+ * @inode: inode from the ioctl
6334+ * @filp: file * from the ioctl
6335+ * @cmd: cmd from ioctl
6336+ * @arg: arg from ioctl
6337+ *
6338+ * Remove the user defined mode specified by the user.
6339+ *
6340+ * Called by the user via ioctl
6341+ *
6342+ * RETURNS:
6343+ * Zero on success, errno on failure.
6344+ */
6345+int drm_mode_rmmode_ioctl(struct drm_device *dev,
6346+ void *data, struct drm_file *file_priv)
6347+{
6348+ uint32_t *id = data;
6349+ struct drm_display_mode *mode;
6350+ int ret = -EINVAL;
6351+
6352+ mutex_lock(&dev->mode_config.mutex);
6353+ mode = idr_find(&dev->mode_config.crtc_idr, *id);
6354+ if (!mode || (*id != mode->mode_id)) {
6355+ goto out;
6356+ }
6357+
6358+ if (!(mode->type & DRM_MODE_TYPE_USERDEF)) {
6359+ goto out;
6360+ }
6361+
6362+ if (mode->output_count) {
6363+ goto out;
6364+ }
6365+
6366+ ret = drm_mode_rmmode(dev, mode);
6367+
6368+out:
6369+ mutex_unlock(&dev->mode_config.mutex);
6370+ return ret;
6371+}
6372+
6373+/**
6374+ * drm_fb_attachmode - Attach a user mode to an output
6375+ * @inode: inode from the ioctl
6376+ * @filp: file * from the ioctl
6377+ * @cmd: cmd from ioctl
6378+ * @arg: arg from ioctl
6379+ *
6380+ * This attaches a user specified mode to an output.
6381+ * Called by the user via ioctl.
6382+ *
6383+ * RETURNS:
6384+ * Zero on success, errno on failure.
6385+ */
6386+int drm_mode_attachmode_ioctl(struct drm_device *dev,
6387+ void *data, struct drm_file *file_priv)
6388+{
6389+ struct drm_mode_mode_cmd *mode_cmd = data;
6390+ struct drm_output *output;
6391+ struct drm_display_mode *mode;
6392+ int ret = 0;
6393+
6394+ mutex_lock(&dev->mode_config.mutex);
6395+
6396+ mode = idr_find(&dev->mode_config.crtc_idr, mode_cmd->mode_id);
6397+ if (!mode || (mode->mode_id != mode_cmd->mode_id)) {
6398+ ret = -EINVAL;
6399+ goto out;
6400+ }
6401+
6402+ output = idr_find(&dev->mode_config.crtc_idr, mode_cmd->output_id);
6403+ if (!output || (output->id != mode_cmd->output_id)) {
6404+ ret = -EINVAL;
6405+ goto out;
6406+ }
6407+
6408+ ret = drm_mode_attachmode(dev, output, mode);
6409+out:
6410+ mutex_unlock(&dev->mode_config.mutex);
6411+ return ret;
6412+}
6413+
6414+
6415+/**
6416+ * drm_fb_detachmode - Detach a user specified mode from an output
6417+ * @inode: inode from the ioctl
6418+ * @filp: file * from the ioctl
6419+ * @cmd: cmd from ioctl
6420+ * @arg: arg from ioctl
6421+ *
6422+ * Called by the user via ioctl.
6423+ *
6424+ * RETURNS:
6425+ * Zero on success, errno on failure.
6426+ */
6427+int drm_mode_detachmode_ioctl(struct drm_device *dev,
6428+ void *data, struct drm_file *file_priv)
6429+{
6430+ struct drm_mode_mode_cmd *mode_cmd = data;
6431+ struct drm_output *output;
6432+ struct drm_display_mode *mode;
6433+ int ret = 0;
6434+
6435+ mutex_lock(&dev->mode_config.mutex);
6436+
6437+ mode = idr_find(&dev->mode_config.crtc_idr, mode_cmd->mode_id);
6438+ if (!mode || (mode->mode_id != mode_cmd->mode_id)) {
6439+ ret = -EINVAL;
6440+ goto out;
6441+ }
6442+
6443+ output = idr_find(&dev->mode_config.crtc_idr, mode_cmd->output_id);
6444+ if (!output || (output->id != mode_cmd->output_id)) {
6445+ ret = -EINVAL;
6446+ goto out;
6447+ }
6448+
6449+
6450+ ret = drm_mode_detachmode(dev, output, mode);
6451+out:
6452+ mutex_unlock(&dev->mode_config.mutex);
6453+ return ret;
6454+}
6455+
6456+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
6457+ const char *name, int num_values)
6458+{
6459+ struct drm_property *property = NULL;
6460+
6461+ property = kzalloc(sizeof(struct drm_output), GFP_KERNEL);
6462+ if (!property)
6463+ return NULL;
6464+
6465+ property->values = kzalloc(sizeof(uint32_t)*num_values, GFP_KERNEL);
6466+ if (!property->values)
6467+ goto fail;
6468+
6469+ property->id = drm_idr_get(dev, property);
6470+ property->flags = flags;
6471+ property->num_values = num_values;
6472+ INIT_LIST_HEAD(&property->enum_list);
6473+
6474+ if (name)
6475+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
6476+
6477+ list_add_tail(&property->head, &dev->mode_config.property_list);
6478+ return property;
6479+fail:
6480+ kfree(property);
6481+ return NULL;
6482+}
6483+EXPORT_SYMBOL(drm_property_create);
6484+
6485+int drm_property_add_enum(struct drm_property *property, int index,
6486+ uint32_t value, const char *name)
6487+{
6488+ struct drm_property_enum *prop_enum;
6489+
6490+ if (!(property->flags & DRM_MODE_PROP_ENUM))
6491+ return -EINVAL;
6492+
6493+ if (!list_empty(&property->enum_list)) {
6494+ list_for_each_entry(prop_enum, &property->enum_list, head) {
6495+ if (prop_enum->value == value) {
6496+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
6497+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
6498+ return 0;
6499+ }
6500+ }
6501+ }
6502+
6503+ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
6504+ if (!prop_enum)
6505+ return -ENOMEM;
6506+
6507+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
6508+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
6509+ prop_enum->value = value;
6510+
6511+ property->values[index] = value;
6512+ list_add_tail(&prop_enum->head, &property->enum_list);
6513+ return 0;
6514+}
6515+EXPORT_SYMBOL(drm_property_add_enum);
6516+
6517+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
6518+{
6519+ struct drm_property_enum *prop_enum, *pt;
6520+
6521+ list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
6522+ list_del(&prop_enum->head);
6523+ kfree(prop_enum);
6524+ }
6525+
6526+ kfree(property->values);
6527+ drm_idr_put(dev, property->id);
6528+ list_del(&property->head);
6529+ kfree(property);
6530+}
6531+EXPORT_SYMBOL(drm_property_destroy);
6532+
6533+
6534+int drm_output_attach_property(struct drm_output *output,
6535+ struct drm_property *property, int init_val)
6536+{
6537+ int i;
6538+
6539+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
6540+ if (output->property_ids[i] == 0) {
6541+ output->property_ids[i] = property->id;
6542+ output->property_values[i] = init_val;
6543+ break;
6544+ }
6545+ }
6546+
6547+ if (i == DRM_OUTPUT_MAX_PROPERTY)
6548+ return -EINVAL;
6549+ return 0;
6550+}
6551+EXPORT_SYMBOL(drm_output_attach_property);
6552+
6553+int drm_mode_getproperty_ioctl(struct drm_device *dev,
6554+ void *data, struct drm_file *file_priv)
6555+{
6556+ struct drm_mode_get_property *out_resp = data;
6557+ struct drm_property *property;
6558+ int enum_count = 0;
6559+ int value_count = 0;
6560+ int ret = 0, i;
6561+ int copied;
6562+ struct drm_property_enum *prop_enum;
6563+
6564+ mutex_lock(&dev->mode_config.mutex);
6565+ property = idr_find(&dev->mode_config.crtc_idr, out_resp->prop_id);
6566+ if (!property || (property->id != out_resp->prop_id)) {
6567+ ret = -EINVAL;
6568+ goto done;
6569+ }
6570+
6571+
6572+ list_for_each_entry(prop_enum, &property->enum_list, head)
6573+ enum_count++;
6574+
6575+ value_count = property->num_values;
6576+
6577+ strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
6578+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
6579+ out_resp->flags = property->flags;
6580+
6581+ if ((out_resp->count_values >= value_count) && value_count) {
6582+ for (i = 0; i < value_count; i++) {
6583+ if (put_user(property->values[i], out_resp->values + i)) {
6584+ ret = -EFAULT;
6585+ goto done;
6586+ }
6587+ }
6588+ }
6589+ out_resp->count_values = value_count;
6590+
6591+ if ((out_resp->count_enums >= enum_count) && enum_count) {
6592+ copied = 0;
6593+ list_for_each_entry(prop_enum, &property->enum_list, head) {
6594+ if (put_user(prop_enum->value, &out_resp->enums[copied].value)) {
6595+ ret = -EFAULT;
6596+ goto done;
6597+ }
6598+
6599+ if (copy_to_user(&out_resp->enums[copied].name,
6600+ prop_enum->name, DRM_PROP_NAME_LEN)) {
6601+ ret = -EFAULT;
6602+ goto done;
6603+ }
6604+ copied++;
6605+ }
6606+ }
6607+ out_resp->count_enums = enum_count;
6608+
6609+done:
6610+ mutex_unlock(&dev->mode_config.mutex);
6611+ return ret;
6612+}
6613Index: linux-2.6.27/drivers/gpu/drm/drm_drv.c
6614===================================================================
6615--- linux-2.6.27.orig/drivers/gpu/drm/drm_drv.c 2009-01-14 11:54:35.000000000 +0000
6616+++ linux-2.6.27/drivers/gpu/drm/drm_drv.c 2009-01-14 11:58:01.000000000 +0000
6617@@ -49,6 +49,9 @@
6618 #include "drmP.h"
6619 #include "drm_core.h"
6620
6621+static void drm_cleanup(struct drm_device * dev);
6622+int drm_fb_loaded = 0;
6623+
6624 static int drm_version(struct drm_device *dev, void *data,
6625 struct drm_file *file_priv);
6626
6627@@ -113,16 +116,48 @@
6628
6629 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6630 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6631-
6632 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
6633-
6634- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
6635-
6636 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6637-
6638- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
6639- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
6640- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
6641+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY),
6642+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY),
6643+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETOUTPUT, drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY),
6644+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY),
6645+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY),
6646+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY),
6647+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY),
6648+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDMODE, drm_mode_addmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
6649+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMMODE, drm_mode_rmmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
6650+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
6651+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
6652+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_ROOT_ONLY),
6653+
6654+ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
6655+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6656+ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
6657+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6658+ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
6659+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6660+ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
6661+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
6662+
6663+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
6664+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
6665+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
6666+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
6667+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
6668+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
6669+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
6670+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
6671+
6672+ DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
6673+ DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
6674+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
6675+ DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
6676+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
6677+ DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
6678+ DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
6679+ DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
6680+ DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
6681 };
6682
6683 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
6684@@ -164,7 +199,12 @@
6685 drm_drawable_free_all(dev);
6686 del_timer(&dev->timer);
6687
6688- /* Clear pid list */
6689+ if (dev->unique) {
6690+ drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
6691+ dev->unique = NULL;
6692+ dev->unique_len = 0;
6693+ }
6694+
6695 if (dev->magicfree.next) {
6696 list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
6697 list_del(&pt->head);
6698@@ -236,12 +276,24 @@
6699 dev->lock.file_priv = NULL;
6700 wake_up_interruptible(&dev->lock.lock_queue);
6701 }
6702+ dev->dev_mapping = NULL;
6703 mutex_unlock(&dev->struct_mutex);
6704
6705 DRM_DEBUG("lastclose completed\n");
6706 return 0;
6707 }
6708
6709+void drm_cleanup_pci(struct pci_dev *pdev)
6710+{
6711+ struct drm_device *dev = pci_get_drvdata(pdev);
6712+
6713+ pci_set_drvdata(pdev, NULL);
6714+ pci_release_regions(pdev);
6715+ if (dev)
6716+ drm_cleanup(dev);
6717+}
6718+EXPORT_SYMBOL(drm_cleanup_pci);
6719+
6720 /**
6721 * Module initialization. Called via init_module at module load time, or via
6722 * linux/init/main.c (this is not currently supported).
6723@@ -255,26 +307,68 @@
6724 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
6725 * after the initialization for driver customization.
6726 */
6727-int drm_init(struct drm_driver *driver)
6728+int drm_init(struct drm_driver *driver,
6729+ struct pci_device_id *pciidlist)
6730 {
6731- struct pci_dev *pdev = NULL;
6732+ struct pci_dev *pdev;
6733 struct pci_device_id *pid;
6734- int i;
6735+ int rc, i;
6736
6737 DRM_DEBUG("\n");
6738
6739- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
6740- pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
6741+ for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
6742+ pid = &pciidlist[i];
6743
6744 pdev = NULL;
6745 /* pass back in pdev to account for multiple identical cards */
6746 while ((pdev =
6747 pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
6748- pid->subdevice, pdev)) != NULL) {
6749- /* stealth mode requires a manual probe */
6750- pci_dev_get(pdev);
6751- drm_get_dev(pdev, pid, driver);
6752+ pid->subdevice, pdev))) {
6753+ /* Are there device class requirements? */
6754+ if ((pid->class != 0)
6755+ && ((pdev->class & pid->class_mask) != pid->class)) {
6756+ continue;
6757+ }
6758+ /* is there already a driver loaded, or (short circuit saves work) */
6759+ /* does something like VesaFB have control of the memory region? */
6760+ if (pci_dev_driver(pdev)
6761+ || pci_request_regions(pdev, "DRM scan")) {
6762+ /* go into stealth mode */
6763+ drm_fb_loaded = 1;
6764+ pci_dev_put(pdev);
6765+ break;
6766+ }
6767+ /* no fbdev or vesadev, put things back and wait for normal probe */
6768+ pci_release_regions(pdev);
6769+ }
6770+ }
6771+
6772+ if (!drm_fb_loaded)
6773+ return pci_register_driver(&driver->pci_driver);
6774+ else {
6775+ for (i = 0; pciidlist[i].vendor != 0; i++) {
6776+ pid = &pciidlist[i];
6777+
6778+ pdev = NULL;
6779+ /* pass back in pdev to account for multiple identical cards */
6780+ while ((pdev =
6781+ pci_get_subsys(pid->vendor, pid->device,
6782+ pid->subvendor, pid->subdevice,
6783+ pdev))) {
6784+ /* Are there device class requirements? */
6785+ if ((pid->class != 0)
6786+ && ((pdev->class & pid->class_mask) != pid->class)) {
6787+ continue;
6788+ }
6789+ /* stealth mode requires a manual probe */
6790+ pci_dev_get(pdev);
6791+ if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
6792+ pci_dev_put(pdev);
6793+ return rc;
6794+ }
6795+ }
6796 }
6797+ DRM_INFO("Used old pci detect: framebuffer loaded\n");
6798 }
6799 return 0;
6800 }
6801@@ -298,6 +392,7 @@
6802 }
6803
6804 drm_lastclose(dev);
6805+ drm_ctxbitmap_cleanup(dev);
6806
6807 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
6808 dev->agp && dev->agp->agp_mtrr >= 0) {
6809@@ -308,6 +403,9 @@
6810 DRM_DEBUG("mtrr_del=%d\n", retval);
6811 }
6812
6813+ drm_bo_driver_finish(dev);
6814+ drm_fence_manager_takedown(dev);
6815+
6816 if (drm_core_has_AGP(dev) && dev->agp) {
6817 drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
6818 dev->agp = NULL;
6819@@ -317,7 +415,12 @@
6820 dev->driver->unload(dev);
6821
6822 drm_ht_remove(&dev->map_hash);
6823- drm_ctxbitmap_cleanup(dev);
6824+ drm_mm_takedown(&dev->offset_manager);
6825+ drm_ht_remove(&dev->object_hash);
6826+
6827+
6828+ if (!drm_fb_loaded)
6829+ pci_disable_device(dev->pdev);
6830
6831 drm_put_minor(&dev->primary);
6832 if (drm_put_dev(dev))
6833Index: linux-2.6.27/drivers/gpu/drm/drm_edid.c
6834===================================================================
6835--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6836+++ linux-2.6.27/drivers/gpu/drm/drm_edid.c 2009-01-14 11:58:01.000000000 +0000
6837@@ -0,0 +1,519 @@
6838+/*
6839+ * Copyright (c) 2007 Intel Corporation
6840+ * Jesse Barnes <jesse.barnes@intel.com>
6841+ *
6842+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
6843+ * FB layer.
6844+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
6845+ */
6846+#include "drmP.h"
6847+#include <linux/i2c-algo-bit.h>
6848+#include "drm_edid.h"
6849+
6850+#include <acpi/acpi_drivers.h>
6851+
6852+/* Valid EDID header has these bytes */
6853+static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6854+
6855+int drm_get_acpi_edid(char *method, char *edid, ssize_t length)
6856+{
6857+ int status;
6858+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
6859+ union acpi_object *obj;
6860+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
6861+ struct acpi_object_list args = { 1, &arg0 };
6862+
6863+ if (length == 128)
6864+ arg0.integer.value = 1;
6865+ else if (length == 256)
6866+ arg0.integer.value = 2;
6867+ else
6868+ return -EINVAL;
6869+
6870+ status = acpi_evaluate_object(NULL, method, &args, &buffer);
6871+ if (ACPI_FAILURE(status))
6872+ return -ENODEV;
6873+
6874+ obj = buffer.pointer;
6875+
6876+ if (obj && obj->type == ACPI_TYPE_BUFFER)
6877+ memcpy(edid, obj->buffer.pointer, obj->buffer.length);
6878+ else {
6879+ printk(KERN_ERR PREFIX "Invalid _DDC data\n");
6880+ status = -EFAULT;
6881+ kfree(obj);
6882+ }
6883+
6884+ return status;
6885+}
6886+EXPORT_SYMBOL(drm_get_acpi_edid);
6887+
6888+/**
6889+ * edid_valid - sanity check EDID data
6890+ * @edid: EDID data
6891+ *
6892+ * Sanity check the EDID block by looking at the header, the version number
6893+ * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
6894+ * valid.
6895+ */
6896+static bool edid_valid(struct edid *edid)
6897+{
6898+ int i;
6899+ u8 csum = 0;
6900+ u8 *raw_edid = (u8 *)edid;
6901+
6902+ if (memcmp(edid->header, edid_header, sizeof(edid_header)))
6903+ goto bad;
6904+ if (edid->version != 1)
6905+ goto bad;
6906+ if (edid->revision <= 0 || edid->revision > 3)
6907+ goto bad;
6908+
6909+ for (i = 0; i < EDID_LENGTH; i++)
6910+ csum += raw_edid[i];
6911+ if (csum)
6912+ goto bad;
6913+
6914+ return 1;
6915+
6916+bad:
6917+ return 0;
6918+}
6919+
6920+/**
6921+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
6922+ * @t: standard timing params
6923+ *
6924+ * Take the standard timing params (in this case width, aspect, and refresh)
6925+ * and convert them into a real mode using CVT.
6926+ *
6927+ * Punts for now, but should eventually use the FB layer's CVT based mode
6928+ * generation code.
6929+ */
6930+struct drm_display_mode *drm_mode_std(struct drm_device *dev,
6931+ struct std_timing *t)
6932+{
6933+// struct fb_videomode mode;
6934+
6935+// fb_find_mode_cvt(&mode, 0, 0);
6936+ /* JJJ: convert to drm_display_mode */
6937+ struct drm_display_mode *mode;
6938+ int hsize = t->hsize * 8 + 248, vsize;
6939+
6940+ mode = drm_mode_create(dev);
6941+ if (!mode)
6942+ return NULL;
6943+
6944+ if (t->aspect_ratio == 0)
6945+ vsize = (hsize * 10) / 16;
6946+ else if (t->aspect_ratio == 1)
6947+ vsize = (hsize * 3) / 4;
6948+ else if (t->aspect_ratio == 2)
6949+ vsize = (hsize * 4) / 5;
6950+ else
6951+ vsize = (hsize * 9) / 16;
6952+
6953+ drm_mode_set_name(mode);
6954+
6955+ return mode;
6956+}
6957+
6958+/**
6959+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
6960+ * @timing: EDID detailed timing info
6961+ * @preferred: is this a preferred mode?
6962+ *
6963+ * An EDID detailed timing block contains enough info for us to create and
6964+ * return a new struct drm_display_mode. The @preferred flag will be set
6965+ * if this is the display's preferred timing, and we'll use it to indicate
6966+ * to the other layers that this mode is desired.
6967+ */
6968+struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
6969+ struct detailed_timing *timing)
6970+{
6971+ struct drm_display_mode *mode;
6972+ struct detailed_pixel_timing *pt = &timing->data.pixel_data;
6973+
6974+ if (pt->stereo) {
6975+ printk(KERN_WARNING "stereo mode not supported\n");
6976+ return NULL;
6977+ }
6978+ if (!pt->separate_sync) {
6979+ printk(KERN_WARNING "integrated sync not supported\n");
6980+ return NULL;
6981+ }
6982+
6983+ mode = drm_mode_create(dev);
6984+ if (!mode)
6985+ return NULL;
6986+
6987+ mode->type = DRM_MODE_TYPE_DRIVER;
6988+ mode->clock = timing->pixel_clock * 10;
6989+
6990+ mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
6991+ mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
6992+ pt->hsync_offset_lo);
6993+ mode->hsync_end = mode->hsync_start +
6994+ ((pt->hsync_pulse_width_hi << 8) |
6995+ pt->hsync_pulse_width_lo);
6996+ mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
6997+
6998+ mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
6999+ mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
7000+ pt->vsync_offset_lo);
7001+ mode->vsync_end = mode->vsync_start +
7002+ ((pt->vsync_pulse_width_hi << 8) |
7003+ pt->vsync_pulse_width_lo);
7004+ mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
7005+
7006+ drm_mode_set_name(mode);
7007+
7008+ if (pt->interlaced)
7009+ mode->flags |= V_INTERLACE;
7010+
7011+ mode->flags |= pt->hsync_positive ? V_PHSYNC : V_NHSYNC;
7012+ mode->flags |= pt->vsync_positive ? V_PVSYNC : V_NVSYNC;
7013+
7014+ return mode;
7015+}
7016+
7017+/*
7018+ * Detailed mode info for the EDID "established modes" data to use.
7019+ */
7020+static struct drm_display_mode edid_est_modes[] = {
7021+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
7022+ 968, 1056, 0, 600, 601, 605, 628, 0,
7023+ V_PHSYNC | V_PVSYNC) }, /* 800x600@60Hz */
7024+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
7025+ 896, 1024, 0, 600, 601, 603, 625, 0,
7026+ V_PHSYNC | V_PVSYNC) }, /* 800x600@56Hz */
7027+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
7028+ 720, 840, 0, 480, 481, 484, 500, 0,
7029+ V_NHSYNC | V_NVSYNC) }, /* 640x480@75Hz */
7030+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
7031+ 704, 832, 0, 480, 489, 491, 520, 0,
7032+ V_NHSYNC | V_NVSYNC) }, /* 640x480@72Hz */
7033+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
7034+ 768, 864, 0, 480, 483, 486, 525, 0,
7035+ V_NHSYNC | V_NVSYNC) }, /* 640x480@67Hz */
7036+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
7037+ 752, 800, 0, 480, 490, 492, 525, 0,
7038+ V_NHSYNC | V_NVSYNC) }, /* 640x480@60Hz */
7039+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
7040+ 846, 900, 0, 400, 421, 423, 449, 0,
7041+ V_NHSYNC | V_NVSYNC) }, /* 720x400@88Hz */
7042+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
7043+ 846, 900, 0, 400, 412, 414, 449, 0,
7044+ V_NHSYNC | V_PVSYNC) }, /* 720x400@70Hz */
7045+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
7046+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
7047+ V_PHSYNC | V_PVSYNC) }, /* 1280x1024@75Hz */
7048+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
7049+ 1136, 1312, 0, 768, 769, 772, 800, 0,
7050+ V_PHSYNC | V_PVSYNC) }, /* 1024x768@75Hz */
7051+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
7052+ 1184, 1328, 0, 768, 771, 777, 806, 0,
7053+ V_NHSYNC | V_NVSYNC) }, /* 1024x768@70Hz */
7054+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
7055+ 1184, 1344, 0, 768, 771, 777, 806, 0,
7056+ V_NHSYNC | V_NVSYNC) }, /* 1024x768@60Hz */
7057+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
7058+ 1208, 1264, 0, 768, 768, 776, 817, 0,
7059+ V_PHSYNC | V_PVSYNC | V_INTERLACE) }, /* 1024x768@43Hz */
7060+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
7061+ 928, 1152, 0, 624, 625, 628, 667, 0,
7062+ V_NHSYNC | V_NVSYNC) }, /* 832x624@75Hz */
7063+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
7064+ 896, 1056, 0, 600, 601, 604, 625, 0,
7065+ V_PHSYNC | V_PVSYNC) }, /* 800x600@75Hz */
7066+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
7067+ 976, 1040, 0, 600, 637, 643, 666, 0,
7068+ V_PHSYNC | V_PVSYNC) }, /* 800x600@72Hz */
7069+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
7070+ 1344, 1600, 0, 864, 865, 868, 900, 0,
7071+ V_PHSYNC | V_PVSYNC) }, /* 1152x864@75Hz */
7072+};
7073+
7074+#define EDID_EST_TIMINGS 16
7075+#define EDID_STD_TIMINGS 8
7076+#define EDID_DETAILED_TIMINGS 4
7077+
7078+/**
7079+ * add_established_modes - get est. modes from EDID and add them
7080+ * @edid: EDID block to scan
7081+ *
7082+ * Each EDID block contains a bitmap of the supported "established modes" list
7083+ * (defined above). Tease them out and add them to the global modes list.
7084+ */
7085+static int add_established_modes(struct drm_output *output, struct edid *edid)
7086+{
7087+ struct drm_device *dev = output->dev;
7088+ unsigned long est_bits = edid->established_timings.t1 |
7089+ (edid->established_timings.t2 << 8) |
7090+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
7091+ int i, modes = 0;
7092+
7093+ for (i = 0; i <= EDID_EST_TIMINGS; i++)
7094+ if (est_bits & (1<<i)) {
7095+ struct drm_display_mode *newmode;
7096+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
7097+ drm_mode_probed_add(output, newmode);
7098+ modes++;
7099+ }
7100+
7101+ return modes;
7102+}
7103+
7104+/**
7105+ * add_standard_modes - get std. modes from EDID and add them
7106+ * @edid: EDID block to scan
7107+ *
7108+ * Standard modes can be calculated using the CVT standard. Grab them from
7109+ * @edid, calculate them, and add them to the list.
7110+ */
7111+static int add_standard_modes(struct drm_output *output, struct edid *edid)
7112+{
7113+ struct drm_device *dev = output->dev;
7114+ int i, modes = 0;
7115+
7116+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
7117+ struct std_timing *t = &edid->standard_timings[i];
7118+ struct drm_display_mode *newmode;
7119+
7120+ /* If std timings bytes are 1, 1 it's empty */
7121+ if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
7122+ continue;
7123+
7124+ newmode = drm_mode_std(dev, &edid->standard_timings[i]);
7125+ drm_mode_probed_add(output, newmode);
7126+ modes++;
7127+ }
7128+
7129+ return modes;
7130+}
7131+
7132+/**
7133+ * add_detailed_modes - get detailed mode info from EDID data
7134+ * @edid: EDID block to scan
7135+ *
7136+ * Some of the detailed timing sections may contain mode information. Grab
7137+ * it and add it to the list.
7138+ */
7139+static int add_detailed_info(struct drm_output *output, struct edid *edid)
7140+{
7141+ struct drm_device *dev = output->dev;
7142+ int i, j, modes = 0;
7143+
7144+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
7145+ struct detailed_timing *timing = &edid->detailed_timings[i];
7146+ struct detailed_non_pixel *data = &timing->data.other_data;
7147+ struct drm_display_mode *newmode;
7148+
7149+ /* EDID up to and including 1.2 may put monitor info here */
7150+ if (edid->version == 1 && edid->revision < 3)
7151+ continue;
7152+
7153+ /* Detailed mode timing */
7154+ if (timing->pixel_clock) {
7155+ newmode = drm_mode_detailed(dev, timing);
7156+ /* First detailed mode is preferred */
7157+ if (i == 0 && edid->preferred_timing)
7158+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
7159+ drm_mode_probed_add(output, newmode);
7160+
7161+ modes++;
7162+ continue;
7163+ }
7164+
7165+ /* Other timing or info */
7166+ switch (data->type) {
7167+ case EDID_DETAIL_MONITOR_SERIAL:
7168+ break;
7169+ case EDID_DETAIL_MONITOR_STRING:
7170+ break;
7171+ case EDID_DETAIL_MONITOR_RANGE:
7172+ /* Get monitor range data */
7173+ break;
7174+ case EDID_DETAIL_MONITOR_NAME:
7175+ break;
7176+ case EDID_DETAIL_MONITOR_CPDATA:
7177+ break;
7178+ case EDID_DETAIL_STD_MODES:
7179+ /* Five modes per detailed section */
7180+ for (j = 0; j < 5; i++) {
7181+ struct std_timing *std;
7182+ struct drm_display_mode *newmode;
7183+
7184+ std = &data->data.timings[j];
7185+ newmode = drm_mode_std(dev, std);
7186+ drm_mode_probed_add(output, newmode);
7187+ modes++;
7188+ }
7189+ break;
7190+ default:
7191+ break;
7192+ }
7193+ }
7194+
7195+ return modes;
7196+}
7197+
7198+#define DDC_ADDR 0x50
7199+
7200+static unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
7201+{
7202+ unsigned char start = 0x0;
7203+ unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
7204+ struct i2c_msg msgs[] = {
7205+ {
7206+ .addr = DDC_ADDR,
7207+ .flags = 0,
7208+ .len = 1,
7209+ .buf = &start,
7210+ }, {
7211+ .addr = DDC_ADDR,
7212+ .flags = I2C_M_RD,
7213+ .len = EDID_LENGTH,
7214+ .buf = buf,
7215+ }
7216+ };
7217+
7218+ if (!buf) {
7219+ DRM_ERROR("unable to allocate memory for EDID block.\n");
7220+ return NULL;
7221+ }
7222+
7223+ if (i2c_transfer(adapter, msgs, 2) == 2)
7224+ return buf;
7225+
7226+ DRM_INFO("unable to read EDID block.\n");
7227+ kfree(buf);
7228+ return NULL;
7229+}
7230+
7231+unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
7232+{
7233+ struct i2c_algo_bit_data *algo_data = adapter->algo_data;
7234+ unsigned char *edid = NULL;
7235+ int i, j;
7236+
7237+ /*
7238+ * Startup the bus:
7239+ * Set clock line high (but give it time to come up)
7240+ * Then set clock & data low
7241+ */
7242+ algo_data->setscl(algo_data->data, 1);
7243+ udelay(550); /* startup delay */
7244+ algo_data->setscl(algo_data->data, 0);
7245+ algo_data->setsda(algo_data->data, 0);
7246+
7247+ for (i = 0; i < 3; i++) {
7248+ /* For some old monitors we need the
7249+ * following process to initialize/stop DDC
7250+ */
7251+ algo_data->setsda(algo_data->data, 0);
7252+ msleep(13);
7253+
7254+ algo_data->setscl(algo_data->data, 1);
7255+ for (j = 0; j < 5; j++) {
7256+ msleep(10);
7257+ if (algo_data->getscl(algo_data->data))
7258+ break;
7259+ }
7260+ if (j == 5)
7261+ continue;
7262+
7263+ algo_data->setsda(algo_data->data, 0);
7264+ msleep(15);
7265+ algo_data->setscl(algo_data->data, 0);
7266+ msleep(15);
7267+ algo_data->setsda(algo_data->data, 1);
7268+ msleep(15);
7269+
7270+ /* Do the real work */
7271+ edid = drm_do_probe_ddc_edid(adapter);
7272+ algo_data->setsda(algo_data->data, 0);
7273+ algo_data->setscl(algo_data->data, 0);
7274+ msleep(15);
7275+
7276+ algo_data->setscl(algo_data->data, 1);
7277+ for (j = 0; j < 10; j++) {
7278+ msleep(10);
7279+ if (algo_data->getscl(algo_data->data))
7280+ break;
7281+ }
7282+
7283+ algo_data->setsda(algo_data->data, 1);
7284+ msleep(15);
7285+ algo_data->setscl(algo_data->data, 0);
7286+ if (edid)
7287+ break;
7288+ }
7289+ /* Release the DDC lines when done or the Apple Cinema HD display
7290+ * will switch off
7291+ */
7292+ algo_data->setsda(algo_data->data, 0);
7293+ algo_data->setscl(algo_data->data, 0);
7294+ algo_data->setscl(algo_data->data, 1);
7295+
7296+ return edid;
7297+}
7298+EXPORT_SYMBOL(drm_ddc_read);
7299+
7300+/**
7301+ * drm_get_edid - get EDID data, if available
7302+ * @output: output we're probing
7303+ * @adapter: i2c adapter to use for DDC
7304+ *
7305+ * Poke the given output's i2c channel to grab EDID data if possible.
7306+ *
7307+ * Return edid data or NULL if we couldn't find any.
7308+ */
7309+struct edid *drm_get_edid(struct drm_output *output,
7310+ struct i2c_adapter *adapter)
7311+{
7312+ struct edid *edid;
7313+
7314+ edid = (struct edid *)drm_ddc_read(adapter);
7315+ if (!edid) {
7316+ dev_warn(&output->dev->pdev->dev, "%s: no EDID data\n",
7317+ output->name);
7318+ return NULL;
7319+ }
7320+ if (!edid_valid(edid)) {
7321+ dev_warn(&output->dev->pdev->dev, "%s: EDID invalid.\n",
7322+ output->name);
7323+ kfree(edid);
7324+ return NULL;
7325+ }
7326+ return edid;
7327+}
7328+EXPORT_SYMBOL(drm_get_edid);
7329+
7330+/**
7331+ * drm_add_edid_modes - add modes from EDID data, if available
7332+ * @output: output we're probing
7333+ * @edid: edid data
7334+ *
7335+ * Add the specified modes to the output's mode list.
7336+ *
7337+ * Return number of modes added or 0 if we couldn't find any.
7338+ */
7339+int drm_add_edid_modes(struct drm_output *output, struct edid *edid)
7340+{
7341+ int num_modes = 0;
7342+
7343+ if (edid == NULL) {
7344+ return 0;
7345+ }
7346+ if (!edid_valid(edid)) {
7347+ dev_warn(&output->dev->pdev->dev, "%s: EDID invalid.\n",
7348+ output->name);
7349+ return 0;
7350+ }
7351+ num_modes += add_established_modes(output, edid);
7352+ num_modes += add_standard_modes(output, edid);
7353+ num_modes += add_detailed_info(output, edid);
7354+ return num_modes;
7355+}
7356+EXPORT_SYMBOL(drm_add_edid_modes);
7357Index: linux-2.6.27/drivers/gpu/drm/drm_fence.c
7358===================================================================
7359--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7360+++ linux-2.6.27/drivers/gpu/drm/drm_fence.c 2009-01-14 11:58:01.000000000 +0000
7361@@ -0,0 +1,829 @@
7362+/**************************************************************************
7363+ *
7364+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
7365+ * All Rights Reserved.
7366+ *
7367+ * Permission is hereby granted, free of charge, to any person obtaining a
7368+ * copy of this software and associated documentation files (the
7369+ * "Software"), to deal in the Software without restriction, including
7370+ * without limitation the rights to use, copy, modify, merge, publish,
7371+ * distribute, sub license, and/or sell copies of the Software, and to
7372+ * permit persons to whom the Software is furnished to do so, subject to
7373+ * the following conditions:
7374+ *
7375+ * The above copyright notice and this permission notice (including the
7376+ * next paragraph) shall be included in all copies or substantial portions
7377+ * of the Software.
7378+ *
7379+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
7380+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
7381+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
7382+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
7383+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
7384+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
7385+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
7386+ *
7387+ **************************************************************************/
7388+/*
7389+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
7390+ */
7391+
7392+#include "drmP.h"
7393+
7394+
7395+/*
7396+ * Convenience function to be called by fence::wait methods that
7397+ * need polling.
7398+ */
7399+
7400+int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
7401+ int interruptible, uint32_t mask,
7402+ unsigned long end_jiffies)
7403+{
7404+ struct drm_device *dev = fence->dev;
7405+ struct drm_fence_manager *fm = &dev->fm;
7406+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
7407+ uint32_t count = 0;
7408+ int ret;
7409+
7410+ DECLARE_WAITQUEUE(entry, current);
7411+ add_wait_queue(&fc->fence_queue, &entry);
7412+
7413+ ret = 0;
7414+
7415+ for (;;) {
7416+ __set_current_state((interruptible) ?
7417+ TASK_INTERRUPTIBLE :
7418+ TASK_UNINTERRUPTIBLE);
7419+ if (drm_fence_object_signaled(fence, mask))
7420+ break;
7421+ if (time_after_eq(jiffies, end_jiffies)) {
7422+ ret = -EBUSY;
7423+ break;
7424+ }
7425+ if (lazy)
7426+ schedule_timeout(1);
7427+ else if ((++count & 0x0F) == 0){
7428+ __set_current_state(TASK_RUNNING);
7429+ schedule();
7430+ __set_current_state((interruptible) ?
7431+ TASK_INTERRUPTIBLE :
7432+ TASK_UNINTERRUPTIBLE);
7433+ }
7434+ if (interruptible && signal_pending(current)) {
7435+ ret = -EAGAIN;
7436+ break;
7437+ }
7438+ }
7439+ __set_current_state(TASK_RUNNING);
7440+ remove_wait_queue(&fc->fence_queue, &entry);
7441+ return ret;
7442+}
7443+EXPORT_SYMBOL(drm_fence_wait_polling);
7444+
7445+/*
7446+ * Typically called by the IRQ handler.
7447+ */
7448+
7449+void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
7450+ uint32_t sequence, uint32_t type, uint32_t error)
7451+{
7452+ int wake = 0;
7453+ uint32_t diff;
7454+ uint32_t relevant_type;
7455+ uint32_t new_type;
7456+ struct drm_fence_manager *fm = &dev->fm;
7457+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
7458+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7459+ struct list_head *head;
7460+ struct drm_fence_object *fence, *next;
7461+ int found = 0;
7462+
7463+ if (list_empty(&fc->ring))
7464+ return;
7465+
7466+ list_for_each_entry(fence, &fc->ring, ring) {
7467+ diff = (sequence - fence->sequence) & driver->sequence_mask;
7468+ if (diff > driver->wrap_diff) {
7469+ found = 1;
7470+ break;
7471+ }
7472+ }
7473+
7474+ fc->waiting_types &= ~type;
7475+ head = (found) ? &fence->ring : &fc->ring;
7476+
7477+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
7478+ if (&fence->ring == &fc->ring)
7479+ break;
7480+
7481+ if (error) {
7482+ fence->error = error;
7483+ fence->signaled_types = fence->type;
7484+ list_del_init(&fence->ring);
7485+ wake = 1;
7486+ break;
7487+ }
7488+
7489+ if (type & DRM_FENCE_TYPE_EXE)
7490+ type |= fence->native_types;
7491+
7492+ relevant_type = type & fence->type;
7493+ new_type = (fence->signaled_types | relevant_type) ^
7494+ fence->signaled_types;
7495+
7496+ if (new_type) {
7497+ fence->signaled_types |= new_type;
7498+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
7499+ fence->base.hash.key, fence->signaled_types);
7500+
7501+ if (driver->needed_flush)
7502+ fc->pending_flush |= driver->needed_flush(fence);
7503+
7504+ if (new_type & fence->waiting_types)
7505+ wake = 1;
7506+ }
7507+
7508+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
7509+
7510+ if (!(fence->type & ~fence->signaled_types)) {
7511+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
7512+ fence->base.hash.key);
7513+ list_del_init(&fence->ring);
7514+ }
7515+ }
7516+
7517+ /*
7518+ * Reinstate lost waiting types.
7519+ */
7520+
7521+ if ((fc->waiting_types & type) != type) {
7522+ head = head->prev;
7523+ list_for_each_entry(fence, head, ring) {
7524+ if (&fence->ring == &fc->ring)
7525+ break;
7526+ diff = (fc->highest_waiting_sequence - fence->sequence) &
7527+ driver->sequence_mask;
7528+ if (diff > driver->wrap_diff)
7529+ break;
7530+
7531+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
7532+ }
7533+ }
7534+
7535+ if (wake)
7536+ wake_up_all(&fc->fence_queue);
7537+}
7538+EXPORT_SYMBOL(drm_fence_handler);
7539+
7540+static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
7541+{
7542+ struct drm_fence_manager *fm = &dev->fm;
7543+ unsigned long flags;
7544+
7545+ write_lock_irqsave(&fm->lock, flags);
7546+ list_del_init(ring);
7547+ write_unlock_irqrestore(&fm->lock, flags);
7548+}
7549+
7550+void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
7551+{
7552+ struct drm_fence_object *tmp_fence = *fence;
7553+ struct drm_device *dev = tmp_fence->dev;
7554+ struct drm_fence_manager *fm = &dev->fm;
7555+
7556+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
7557+ *fence = NULL;
7558+ if (atomic_dec_and_test(&tmp_fence->usage)) {
7559+ drm_fence_unring(dev, &tmp_fence->ring);
7560+ DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
7561+ tmp_fence->base.hash.key);
7562+ atomic_dec(&fm->count);
7563+ BUG_ON(!list_empty(&tmp_fence->base.list));
7564+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
7565+ }
7566+}
7567+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
7568+
7569+void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
7570+{
7571+ struct drm_fence_object *tmp_fence = *fence;
7572+ struct drm_device *dev = tmp_fence->dev;
7573+ struct drm_fence_manager *fm = &dev->fm;
7574+
7575+ *fence = NULL;
7576+ if (atomic_dec_and_test(&tmp_fence->usage)) {
7577+ mutex_lock(&dev->struct_mutex);
7578+ if (atomic_read(&tmp_fence->usage) == 0) {
7579+ drm_fence_unring(dev, &tmp_fence->ring);
7580+ atomic_dec(&fm->count);
7581+ BUG_ON(!list_empty(&tmp_fence->base.list));
7582+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
7583+ }
7584+ mutex_unlock(&dev->struct_mutex);
7585+ }
7586+}
7587+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
7588+
7589+struct drm_fence_object
7590+*drm_fence_reference_locked(struct drm_fence_object *src)
7591+{
7592+ DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
7593+
7594+ atomic_inc(&src->usage);
7595+ return src;
7596+}
7597+
7598+void drm_fence_reference_unlocked(struct drm_fence_object **dst,
7599+ struct drm_fence_object *src)
7600+{
7601+ mutex_lock(&src->dev->struct_mutex);
7602+ *dst = src;
7603+ atomic_inc(&src->usage);
7604+ mutex_unlock(&src->dev->struct_mutex);
7605+}
7606+EXPORT_SYMBOL(drm_fence_reference_unlocked);
7607+
7608+static void drm_fence_object_destroy(struct drm_file *priv,
7609+ struct drm_user_object *base)
7610+{
7611+ struct drm_fence_object *fence =
7612+ drm_user_object_entry(base, struct drm_fence_object, base);
7613+
7614+ drm_fence_usage_deref_locked(&fence);
7615+}
7616+
7617+int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
7618+{
7619+ unsigned long flags;
7620+ int signaled;
7621+ struct drm_device *dev = fence->dev;
7622+ struct drm_fence_manager *fm = &dev->fm;
7623+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7624+
7625+ mask &= fence->type;
7626+ read_lock_irqsave(&fm->lock, flags);
7627+ signaled = (mask & fence->signaled_types) == mask;
7628+ read_unlock_irqrestore(&fm->lock, flags);
7629+ if (!signaled && driver->poll) {
7630+ write_lock_irqsave(&fm->lock, flags);
7631+ driver->poll(dev, fence->fence_class, mask);
7632+ signaled = (mask & fence->signaled_types) == mask;
7633+ write_unlock_irqrestore(&fm->lock, flags);
7634+ }
7635+ return signaled;
7636+}
7637+EXPORT_SYMBOL(drm_fence_object_signaled);
7638+
7639+
7640+int drm_fence_object_flush(struct drm_fence_object *fence,
7641+ uint32_t type)
7642+{
7643+ struct drm_device *dev = fence->dev;
7644+ struct drm_fence_manager *fm = &dev->fm;
7645+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
7646+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7647+ unsigned long irq_flags;
7648+ uint32_t saved_pending_flush;
7649+ uint32_t diff;
7650+ int call_flush;
7651+
7652+ if (type & ~fence->type) {
7653+ DRM_ERROR("Flush trying to extend fence type, "
7654+ "0x%x, 0x%x\n", type, fence->type);
7655+ return -EINVAL;
7656+ }
7657+
7658+ write_lock_irqsave(&fm->lock, irq_flags);
7659+ fence->waiting_types |= type;
7660+ fc->waiting_types |= fence->waiting_types;
7661+ diff = (fence->sequence - fc->highest_waiting_sequence) &
7662+ driver->sequence_mask;
7663+
7664+ if (diff < driver->wrap_diff)
7665+ fc->highest_waiting_sequence = fence->sequence;
7666+
7667+ /*
7668+ * fence->waiting_types has changed. Determine whether
7669+ * we need to initiate some kind of flush as a result of this.
7670+ */
7671+
7672+ saved_pending_flush = fc->pending_flush;
7673+ if (driver->needed_flush)
7674+ fc->pending_flush |= driver->needed_flush(fence);
7675+
7676+ if (driver->poll)
7677+ driver->poll(dev, fence->fence_class, fence->waiting_types);
7678+
7679+ call_flush = fc->pending_flush;
7680+ write_unlock_irqrestore(&fm->lock, irq_flags);
7681+
7682+ if (call_flush && driver->flush)
7683+ driver->flush(dev, fence->fence_class);
7684+
7685+ return 0;
7686+}
7687+EXPORT_SYMBOL(drm_fence_object_flush);
7688+
7689+/*
7690+ * Make sure old fence objects are signaled before their fence sequences are
7691+ * wrapped around and reused.
7692+ */
7693+
7694+void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
7695+ uint32_t sequence)
7696+{
7697+ struct drm_fence_manager *fm = &dev->fm;
7698+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
7699+ struct drm_fence_object *fence;
7700+ unsigned long irq_flags;
7701+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7702+ int call_flush;
7703+
7704+ uint32_t diff;
7705+
7706+ write_lock_irqsave(&fm->lock, irq_flags);
7707+
7708+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
7709+ diff = (sequence - fence->sequence) & driver->sequence_mask;
7710+ if (diff <= driver->flush_diff)
7711+ break;
7712+
7713+ fence->waiting_types = fence->type;
7714+ fc->waiting_types |= fence->type;
7715+
7716+ if (driver->needed_flush)
7717+ fc->pending_flush |= driver->needed_flush(fence);
7718+ }
7719+
7720+ if (driver->poll)
7721+ driver->poll(dev, fence_class, fc->waiting_types);
7722+
7723+ call_flush = fc->pending_flush;
7724+ write_unlock_irqrestore(&fm->lock, irq_flags);
7725+
7726+ if (call_flush && driver->flush)
7727+ driver->flush(dev, fence->fence_class);
7728+
7729+ /*
7730+	 * FIXME: Should we implement a wait here for really old fences?
7731+ */
7732+
7733+}
7734+EXPORT_SYMBOL(drm_fence_flush_old);
7735+
7736+int drm_fence_object_wait(struct drm_fence_object *fence,
7737+ int lazy, int ignore_signals, uint32_t mask)
7738+{
7739+ struct drm_device *dev = fence->dev;
7740+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7741+ struct drm_fence_manager *fm = &dev->fm;
7742+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
7743+ int ret = 0;
7744+ unsigned long _end = 3 * DRM_HZ;
7745+
7746+ if (mask & ~fence->type) {
7747+ DRM_ERROR("Wait trying to extend fence type"
7748+ " 0x%08x 0x%08x\n", mask, fence->type);
7749+ BUG();
7750+ return -EINVAL;
7751+ }
7752+
7753+ if (driver->wait)
7754+ return driver->wait(fence, lazy, !ignore_signals, mask);
7755+
7756+
7757+ drm_fence_object_flush(fence, mask);
7758+ if (driver->has_irq(dev, fence->fence_class, mask)) {
7759+ if (!ignore_signals)
7760+ ret = wait_event_interruptible_timeout
7761+ (fc->fence_queue,
7762+ drm_fence_object_signaled(fence, mask),
7763+ 3 * DRM_HZ);
7764+ else
7765+ ret = wait_event_timeout
7766+ (fc->fence_queue,
7767+ drm_fence_object_signaled(fence, mask),
7768+ 3 * DRM_HZ);
7769+
7770+ if (unlikely(ret == -ERESTARTSYS))
7771+ return -EAGAIN;
7772+
7773+ if (unlikely(ret == 0))
7774+ return -EBUSY;
7775+
7776+ return 0;
7777+ }
7778+
7779+ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
7780+ _end);
7781+}
7782+EXPORT_SYMBOL(drm_fence_object_wait);
7783+
7784+
7785+
7786+int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
7787+ uint32_t fence_class, uint32_t type)
7788+{
7789+ struct drm_device *dev = fence->dev;
7790+ struct drm_fence_manager *fm = &dev->fm;
7791+ struct drm_fence_driver *driver = dev->driver->fence_driver;
7792+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
7793+ unsigned long flags;
7794+ uint32_t sequence;
7795+ uint32_t native_types;
7796+ int ret;
7797+
7798+ drm_fence_unring(dev, &fence->ring);
7799+ ret = driver->emit(dev, fence_class, fence_flags, &sequence,
7800+ &native_types);
7801+ if (ret)
7802+ return ret;
7803+
7804+ write_lock_irqsave(&fm->lock, flags);
7805+ fence->fence_class = fence_class;
7806+ fence->type = type;
7807+ fence->waiting_types = 0;
7808+ fence->signaled_types = 0;
7809+ fence->error = 0;
7810+ fence->sequence = sequence;
7811+ fence->native_types = native_types;
7812+ if (list_empty(&fc->ring))
7813+ fc->highest_waiting_sequence = sequence - 1;
7814+ list_add_tail(&fence->ring, &fc->ring);
7815+ fc->latest_queued_sequence = sequence;
7816+ write_unlock_irqrestore(&fm->lock, flags);
7817+ return 0;
7818+}
7819+EXPORT_SYMBOL(drm_fence_object_emit);
7820+
7821+static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
7822+ uint32_t type,
7823+ uint32_t fence_flags,
7824+ struct drm_fence_object *fence)
7825+{
7826+ int ret = 0;
7827+ unsigned long flags;
7828+ struct drm_fence_manager *fm = &dev->fm;
7829+
7830+ mutex_lock(&dev->struct_mutex);
7831+ atomic_set(&fence->usage, 1);
7832+ mutex_unlock(&dev->struct_mutex);
7833+
7834+ write_lock_irqsave(&fm->lock, flags);
7835+ INIT_LIST_HEAD(&fence->ring);
7836+
7837+ /*
7838+ * Avoid hitting BUG() for kernel-only fence objects.
7839+ */
7840+
7841+ INIT_LIST_HEAD(&fence->base.list);
7842+ fence->fence_class = fence_class;
7843+ fence->type = type;
7844+ fence->signaled_types = 0;
7845+ fence->waiting_types = 0;
7846+ fence->sequence = 0;
7847+ fence->error = 0;
7848+ fence->dev = dev;
7849+ write_unlock_irqrestore(&fm->lock, flags);
7850+ if (fence_flags & DRM_FENCE_FLAG_EMIT) {
7851+ ret = drm_fence_object_emit(fence, fence_flags,
7852+ fence->fence_class, type);
7853+ }
7854+ return ret;
7855+}
7856+
7857+int drm_fence_add_user_object(struct drm_file *priv,
7858+ struct drm_fence_object *fence, int shareable)
7859+{
7860+ struct drm_device *dev = priv->minor->dev;
7861+ int ret;
7862+
7863+ mutex_lock(&dev->struct_mutex);
7864+ ret = drm_add_user_object(priv, &fence->base, shareable);
7865+ if (ret)
7866+ goto out;
7867+ atomic_inc(&fence->usage);
7868+ fence->base.type = drm_fence_type;
7869+ fence->base.remove = &drm_fence_object_destroy;
7870+ DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
7871+out:
7872+ mutex_unlock(&dev->struct_mutex);
7873+ return ret;
7874+}
7875+EXPORT_SYMBOL(drm_fence_add_user_object);
7876+
7877+int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
7878+ uint32_t type, unsigned flags,
7879+ struct drm_fence_object **c_fence)
7880+{
7881+ struct drm_fence_object *fence;
7882+ int ret;
7883+ struct drm_fence_manager *fm = &dev->fm;
7884+
7885+ fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
7886+ if (!fence) {
7887+ DRM_INFO("Out of memory creating fence object.\n");
7888+ return -ENOMEM;
7889+ }
7890+ ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
7891+ if (ret) {
7892+ drm_fence_usage_deref_unlocked(&fence);
7893+ return ret;
7894+ }
7895+ *c_fence = fence;
7896+ atomic_inc(&fm->count);
7897+
7898+ return 0;
7899+}
7900+EXPORT_SYMBOL(drm_fence_object_create);
7901+
7902+void drm_fence_manager_init(struct drm_device *dev)
7903+{
7904+ struct drm_fence_manager *fm = &dev->fm;
7905+ struct drm_fence_class_manager *fence_class;
7906+ struct drm_fence_driver *fed = dev->driver->fence_driver;
7907+ int i;
7908+ unsigned long flags;
7909+
7910+ rwlock_init(&fm->lock);
7911+ write_lock_irqsave(&fm->lock, flags);
7912+ fm->initialized = 0;
7913+ if (!fed)
7914+ goto out_unlock;
7915+
7916+ fm->initialized = 1;
7917+ fm->num_classes = fed->num_classes;
7918+ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
7919+
7920+ for (i = 0; i < fm->num_classes; ++i) {
7921+ fence_class = &fm->fence_class[i];
7922+
7923+ memset(fence_class, 0, sizeof(*fence_class));
7924+ INIT_LIST_HEAD(&fence_class->ring);
7925+ DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
7926+ }
7927+
7928+ atomic_set(&fm->count, 0);
7929+ out_unlock:
7930+ write_unlock_irqrestore(&fm->lock, flags);
7931+}
7932+
7933+void drm_fence_fill_arg(struct drm_fence_object *fence,
7934+ struct drm_fence_arg *arg)
7935+{
7936+ struct drm_device *dev = fence->dev;
7937+ struct drm_fence_manager *fm = &dev->fm;
7938+ unsigned long irq_flags;
7939+
7940+ read_lock_irqsave(&fm->lock, irq_flags);
7941+ arg->handle = fence->base.hash.key;
7942+ arg->fence_class = fence->fence_class;
7943+ arg->type = fence->type;
7944+ arg->signaled = fence->signaled_types;
7945+ arg->error = fence->error;
7946+ arg->sequence = fence->sequence;
7947+ read_unlock_irqrestore(&fm->lock, irq_flags);
7948+}
7949+EXPORT_SYMBOL(drm_fence_fill_arg);
7950+
7951+void drm_fence_manager_takedown(struct drm_device *dev)
7952+{
7953+}
7954+
7955+struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
7956+ uint32_t handle)
7957+{
7958+ struct drm_device *dev = priv->minor->dev;
7959+ struct drm_user_object *uo;
7960+ struct drm_fence_object *fence;
7961+
7962+ mutex_lock(&dev->struct_mutex);
7963+ uo = drm_lookup_user_object(priv, handle);
7964+ if (!uo || (uo->type != drm_fence_type)) {
7965+ mutex_unlock(&dev->struct_mutex);
7966+ return NULL;
7967+ }
7968+ fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
7969+ mutex_unlock(&dev->struct_mutex);
7970+ return fence;
7971+}
7972+
7973+int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
7974+{
7975+ int ret;
7976+ struct drm_fence_manager *fm = &dev->fm;
7977+ struct drm_fence_arg *arg = data;
7978+ struct drm_fence_object *fence;
7979+ ret = 0;
7980+
7981+ if (!fm->initialized) {
7982+ DRM_ERROR("The DRM driver does not support fencing.\n");
7983+ return -EINVAL;
7984+ }
7985+
7986+ if (arg->flags & DRM_FENCE_FLAG_EMIT)
7987+ LOCK_TEST_WITH_RETURN(dev, file_priv);
7988+ ret = drm_fence_object_create(dev, arg->fence_class,
7989+ arg->type, arg->flags, &fence);
7990+ if (ret)
7991+ return ret;
7992+ ret = drm_fence_add_user_object(file_priv, fence,
7993+ arg->flags &
7994+ DRM_FENCE_FLAG_SHAREABLE);
7995+ if (ret) {
7996+ drm_fence_usage_deref_unlocked(&fence);
7997+ return ret;
7998+ }
7999+
8000+ /*
8001+ * usage > 0. No need to lock dev->struct_mutex;
8002+ */
8003+
8004+ arg->handle = fence->base.hash.key;
8005+
8006+ drm_fence_fill_arg(fence, arg);
8007+ drm_fence_usage_deref_unlocked(&fence);
8008+
8009+ return ret;
8010+}
8011+
8012+int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8013+{
8014+ int ret;
8015+ struct drm_fence_manager *fm = &dev->fm;
8016+ struct drm_fence_arg *arg = data;
8017+ struct drm_fence_object *fence;
8018+ struct drm_user_object *uo;
8019+ ret = 0;
8020+
8021+ if (!fm->initialized) {
8022+ DRM_ERROR("The DRM driver does not support fencing.\n");
8023+ return -EINVAL;
8024+ }
8025+
8026+ ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
8027+ if (ret)
8028+ return ret;
8029+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8030+ drm_fence_fill_arg(fence, arg);
8031+ drm_fence_usage_deref_unlocked(&fence);
8032+
8033+ return ret;
8034+}
8035+
8036+
8037+int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8038+{
8039+ int ret;
8040+ struct drm_fence_manager *fm = &dev->fm;
8041+ struct drm_fence_arg *arg = data;
8042+ ret = 0;
8043+
8044+ if (!fm->initialized) {
8045+ DRM_ERROR("The DRM driver does not support fencing.\n");
8046+ return -EINVAL;
8047+ }
8048+
8049+ return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
8050+}
8051+
8052+int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8053+{
8054+ int ret;
8055+ struct drm_fence_manager *fm = &dev->fm;
8056+ struct drm_fence_arg *arg = data;
8057+ struct drm_fence_object *fence;
8058+ ret = 0;
8059+
8060+ if (!fm->initialized) {
8061+ DRM_ERROR("The DRM driver does not support fencing.\n");
8062+ return -EINVAL;
8063+ }
8064+
8065+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8066+ if (!fence)
8067+ return -EINVAL;
8068+
8069+ drm_fence_fill_arg(fence, arg);
8070+ drm_fence_usage_deref_unlocked(&fence);
8071+
8072+ return ret;
8073+}
8074+
8075+int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8076+{
8077+ int ret;
8078+ struct drm_fence_manager *fm = &dev->fm;
8079+ struct drm_fence_arg *arg = data;
8080+ struct drm_fence_object *fence;
8081+ ret = 0;
8082+
8083+ if (!fm->initialized) {
8084+ DRM_ERROR("The DRM driver does not support fencing.\n");
8085+ return -EINVAL;
8086+ }
8087+
8088+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8089+ if (!fence)
8090+ return -EINVAL;
8091+ ret = drm_fence_object_flush(fence, arg->type);
8092+
8093+ drm_fence_fill_arg(fence, arg);
8094+ drm_fence_usage_deref_unlocked(&fence);
8095+
8096+ return ret;
8097+}
8098+
8099+
8100+int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8101+{
8102+ int ret;
8103+ struct drm_fence_manager *fm = &dev->fm;
8104+ struct drm_fence_arg *arg = data;
8105+ struct drm_fence_object *fence;
8106+ ret = 0;
8107+
8108+ if (!fm->initialized) {
8109+ DRM_ERROR("The DRM driver does not support fencing.\n");
8110+ return -EINVAL;
8111+ }
8112+
8113+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8114+ if (!fence)
8115+ return -EINVAL;
8116+ ret = drm_fence_object_wait(fence,
8117+ arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
8118+ 0, arg->type);
8119+
8120+ drm_fence_fill_arg(fence, arg);
8121+ drm_fence_usage_deref_unlocked(&fence);
8122+
8123+ return ret;
8124+}
8125+
8126+
8127+int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8128+{
8129+ int ret;
8130+ struct drm_fence_manager *fm = &dev->fm;
8131+ struct drm_fence_arg *arg = data;
8132+ struct drm_fence_object *fence;
8133+ ret = 0;
8134+
8135+ if (!fm->initialized) {
8136+ DRM_ERROR("The DRM driver does not support fencing.\n");
8137+ return -EINVAL;
8138+ }
8139+
8140+ LOCK_TEST_WITH_RETURN(dev, file_priv);
8141+ fence = drm_lookup_fence_object(file_priv, arg->handle);
8142+ if (!fence)
8143+ return -EINVAL;
8144+ ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
8145+ arg->type);
8146+
8147+ drm_fence_fill_arg(fence, arg);
8148+ drm_fence_usage_deref_unlocked(&fence);
8149+
8150+ return ret;
8151+}
8152+
8153+int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
8154+{
8155+ int ret;
8156+ struct drm_fence_manager *fm = &dev->fm;
8157+ struct drm_fence_arg *arg = data;
8158+ struct drm_fence_object *fence;
8159+ ret = 0;
8160+
8161+ if (!fm->initialized) {
8162+ DRM_ERROR("The DRM driver does not support fencing.\n");
8163+ return -EINVAL;
8164+ }
8165+
8166+ if (!dev->bm.initialized) {
8167+ DRM_ERROR("Buffer object manager is not initialized\n");
8168+ return -EINVAL;
8169+ }
8170+ LOCK_TEST_WITH_RETURN(dev, file_priv);
8171+ ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
8172+ NULL, &fence);
8173+ if (ret)
8174+ return ret;
8175+
8176+ if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
8177+ ret = drm_fence_add_user_object(file_priv, fence,
8178+ arg->flags &
8179+ DRM_FENCE_FLAG_SHAREABLE);
8180+ if (ret)
8181+ return ret;
8182+ }
8183+
8184+ arg->handle = fence->base.hash.key;
8185+
8186+ drm_fence_fill_arg(fence, arg);
8187+ drm_fence_usage_deref_unlocked(&fence);
8188+
8189+ return ret;
8190+}
8191Index: linux-2.6.27/drivers/gpu/drm/drm_fops.c
8192===================================================================
8193--- linux-2.6.27.orig/drivers/gpu/drm/drm_fops.c 2009-01-14 11:54:35.000000000 +0000
8194+++ linux-2.6.27/drivers/gpu/drm/drm_fops.c 2009-01-14 11:58:01.000000000 +0000
8195@@ -231,6 +231,7 @@
8196 int minor_id = iminor(inode);
8197 struct drm_file *priv;
8198 int ret;
8199+ int i, j;
8200
8201 if (filp->f_flags & O_EXCL)
8202 return -EBUSY; /* No exclusive opens */
8203@@ -255,9 +256,21 @@
8204 priv->lock_count = 0;
8205
8206 INIT_LIST_HEAD(&priv->lhead);
8207+ INIT_LIST_HEAD(&priv->refd_objects);
8208+ INIT_LIST_HEAD(&priv->fbs);
8209
8210- if (dev->driver->driver_features & DRIVER_GEM)
8211- drm_gem_open(dev, priv);
8212+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
8213+ ret = drm_ht_create(&priv->refd_object_hash[i],
8214+ DRM_FILE_HASH_ORDER);
8215+ if (ret)
8216+ break;
8217+ }
8218+
8219+ if (ret) {
8220+ for (j = 0; j < i; ++j)
8221+ drm_ht_remove(&priv->refd_object_hash[j]);
8222+ goto out_free;
8223+ }
8224
8225 if (dev->driver->open) {
8226 ret = dev->driver->open(dev, priv);
8227@@ -314,6 +327,33 @@
8228 }
8229 EXPORT_SYMBOL(drm_fasync);
8230
8231+static void drm_object_release(struct file *filp)
8232+{
8233+ struct drm_file *priv = filp->private_data;
8234+ struct list_head *head;
8235+ struct drm_ref_object *ref_object;
8236+ int i;
8237+
8238+ /*
8239+ * Free leftover ref objects created by me. Note that we cannot use
8240+ * list_for_each() here, as the struct_mutex may be temporarily
8241+ * released by the remove_() functions, and thus the lists may be
8242+ * altered.
8243+ * Also, a drm_remove_ref_object() will not remove it
8244+ * from the list unless its refcount is 1.
8245+ */
8246+
8247+ head = &priv->refd_objects;
8248+ while (head->next != head) {
8249+ ref_object = list_entry(head->next, struct drm_ref_object, list);
8250+ drm_remove_ref_object(priv, ref_object);
8251+ head = &priv->refd_objects;
8252+ }
8253+
8254+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
8255+ drm_ht_remove(&priv->refd_object_hash[i]);
8256+}
8257+
8258 /**
8259 * Release file.
8260 *
8261@@ -403,9 +443,6 @@
8262 dev->driver->reclaim_buffers(dev, file_priv);
8263 }
8264
8265- if (dev->driver->driver_features & DRIVER_GEM)
8266- drm_gem_release(dev, file_priv);
8267-
8268 drm_fasync(-1, filp, 0);
8269
8270 mutex_lock(&dev->ctxlist_mutex);
8271@@ -430,6 +467,8 @@
8272 mutex_unlock(&dev->ctxlist_mutex);
8273
8274 mutex_lock(&dev->struct_mutex);
8275+ drm_fb_release(filp);
8276+ drm_object_release(filp);
8277 if (file_priv->remove_auth_on_close == 1) {
8278 struct drm_file *temp;
8279
8280Index: linux-2.6.27/drivers/gpu/drm/drm_hashtab.c
8281===================================================================
8282--- linux-2.6.27.orig/drivers/gpu/drm/drm_hashtab.c 2009-01-14 11:54:35.000000000 +0000
8283+++ linux-2.6.27/drivers/gpu/drm/drm_hashtab.c 2009-01-14 11:58:01.000000000 +0000
8284@@ -29,7 +29,7 @@
8285 * Simple open hash tab implementation.
8286 *
8287 * Authors:
8288- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
8289+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
8290 */
8291
8292 #include "drmP.h"
8293Index: linux-2.6.27/drivers/gpu/drm/drm_irq.c
8294===================================================================
8295--- linux-2.6.27.orig/drivers/gpu/drm/drm_irq.c 2009-01-14 11:54:35.000000000 +0000
8296+++ linux-2.6.27/drivers/gpu/drm/drm_irq.c 2009-01-14 11:58:01.000000000 +0000
8297@@ -70,6 +70,7 @@
8298
8299 return 0;
8300 }
8301+#if 0
8302
8303 static void vblank_disable_fn(unsigned long arg)
8304 {
8305@@ -184,6 +185,8 @@
8306 }
8307 EXPORT_SYMBOL(drm_vblank_init);
8308
8309+#endif
8310+
8311 /**
8312 * Install IRQ handler.
8313 *
8314@@ -221,6 +224,17 @@
8315
8316 DRM_DEBUG("irq=%d\n", dev->pdev->irq);
8317
8318+ if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
8319+ init_waitqueue_head(&dev->vbl_queue);
8320+
8321+ spin_lock_init(&dev->vbl_lock);
8322+
8323+ INIT_LIST_HEAD(&dev->vbl_sigs);
8324+ INIT_LIST_HEAD(&dev->vbl_sigs2);
8325+
8326+ dev->vbl_pending = 0;
8327+ }
8328+
8329 /* Before installing handler */
8330 dev->driver->irq_preinstall(dev);
8331
8332@@ -281,8 +295,6 @@
8333
8334 free_irq(dev->pdev->irq, dev);
8335
8336- drm_vblank_cleanup(dev);
8337-
8338 dev->locked_tasklet_func = NULL;
8339
8340 return 0;
8341@@ -326,174 +338,6 @@
8342 }
8343
8344 /**
8345- * drm_vblank_count - retrieve "cooked" vblank counter value
8346- * @dev: DRM device
8347- * @crtc: which counter to retrieve
8348- *
8349- * Fetches the "cooked" vblank count value that represents the number of
8350- * vblank events since the system was booted, including lost events due to
8351- * modesetting activity.
8352- */
8353-u32 drm_vblank_count(struct drm_device *dev, int crtc)
8354-{
8355- return atomic_read(&dev->_vblank_count[crtc]);
8356-}
8357-EXPORT_SYMBOL(drm_vblank_count);
8358-
8359-/**
8360- * drm_update_vblank_count - update the master vblank counter
8361- * @dev: DRM device
8362- * @crtc: counter to update
8363- *
8364- * Call back into the driver to update the appropriate vblank counter
8365- * (specified by @crtc). Deal with wraparound, if it occurred, and
8366- * update the last read value so we can deal with wraparound on the next
8367- * call if necessary.
8368- *
8369- * Only necessary when going from off->on, to account for frames we
8370- * didn't get an interrupt for.
8371- *
8372- * Note: caller must hold dev->vbl_lock since this reads & writes
8373- * device vblank fields.
8374- */
8375-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
8376-{
8377- u32 cur_vblank, diff;
8378-
8379- /*
8380- * Interrupts were disabled prior to this call, so deal with counter
8381- * wrap if needed.
8382- * NOTE! It's possible we lost a full dev->max_vblank_count events
8383- * here if the register is small or we had vblank interrupts off for
8384- * a long time.
8385- */
8386- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
8387- diff = cur_vblank - dev->last_vblank[crtc];
8388- if (cur_vblank < dev->last_vblank[crtc]) {
8389- diff += dev->max_vblank_count;
8390-
8391- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
8392- crtc, dev->last_vblank[crtc], cur_vblank, diff);
8393- }
8394-
8395- DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
8396- crtc, diff);
8397-
8398- atomic_add(diff, &dev->_vblank_count[crtc]);
8399-}
8400-
8401-/**
8402- * drm_vblank_get - get a reference count on vblank events
8403- * @dev: DRM device
8404- * @crtc: which CRTC to own
8405- *
8406- * Acquire a reference count on vblank events to avoid having them disabled
8407- * while in use.
8408- *
8409- * RETURNS
8410- * Zero on success, nonzero on failure.
8411- */
8412-int drm_vblank_get(struct drm_device *dev, int crtc)
8413-{
8414- unsigned long irqflags;
8415- int ret = 0;
8416-
8417- spin_lock_irqsave(&dev->vbl_lock, irqflags);
8418- /* Going from 0->1 means we have to enable interrupts again */
8419- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
8420- !dev->vblank_enabled[crtc]) {
8421- ret = dev->driver->enable_vblank(dev, crtc);
8422- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
8423- if (ret)
8424- atomic_dec(&dev->vblank_refcount[crtc]);
8425- else {
8426- dev->vblank_enabled[crtc] = 1;
8427- drm_update_vblank_count(dev, crtc);
8428- }
8429- }
8430- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8431-
8432- return ret;
8433-}
8434-EXPORT_SYMBOL(drm_vblank_get);
8435-
8436-/**
8437- * drm_vblank_put - give up ownership of vblank events
8438- * @dev: DRM device
8439- * @crtc: which counter to give up
8440- *
8441- * Release ownership of a given vblank counter, turning off interrupts
8442- * if possible.
8443- */
8444-void drm_vblank_put(struct drm_device *dev, int crtc)
8445-{
8446- /* Last user schedules interrupt disable */
8447- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
8448- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
8449-}
8450-EXPORT_SYMBOL(drm_vblank_put);
8451-
8452-/**
8453- * drm_modeset_ctl - handle vblank event counter changes across mode switch
8454- * @DRM_IOCTL_ARGS: standard ioctl arguments
8455- *
8456- * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
8457- * ioctls around modesetting so that any lost vblank events are accounted for.
8458- *
8459- * Generally the counter will reset across mode sets. If interrupts are
8460- * enabled around this call, we don't have to do anything since the counter
8461- * will have already been incremented.
8462- */
8463-int drm_modeset_ctl(struct drm_device *dev, void *data,
8464- struct drm_file *file_priv)
8465-{
8466- struct drm_modeset_ctl *modeset = data;
8467- unsigned long irqflags;
8468- int crtc, ret = 0;
8469-
8470- /* If drm_vblank_init() hasn't been called yet, just no-op */
8471- if (!dev->num_crtcs)
8472- goto out;
8473-
8474- crtc = modeset->crtc;
8475- if (crtc >= dev->num_crtcs) {
8476- ret = -EINVAL;
8477- goto out;
8478- }
8479-
8480- /*
8481- * To avoid all the problems that might happen if interrupts
8482- * were enabled/disabled around or between these calls, we just
8483- * have the kernel take a reference on the CRTC (just once though
8484- * to avoid corrupting the count if multiple, mismatch calls occur),
8485- * so that interrupts remain enabled in the interim.
8486- */
8487- switch (modeset->cmd) {
8488- case _DRM_PRE_MODESET:
8489- if (!dev->vblank_inmodeset[crtc]) {
8490- dev->vblank_inmodeset[crtc] = 1;
8491- drm_vblank_get(dev, crtc);
8492- }
8493- break;
8494- case _DRM_POST_MODESET:
8495- if (dev->vblank_inmodeset[crtc]) {
8496- spin_lock_irqsave(&dev->vbl_lock, irqflags);
8497- dev->vblank_disable_allowed = 1;
8498- dev->vblank_inmodeset[crtc] = 0;
8499- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8500- drm_vblank_put(dev, crtc);
8501- }
8502- break;
8503- default:
8504- ret = -EINVAL;
8505- break;
8506- }
8507-
8508-out:
8509- return ret;
8510-}
8511-
8512-/**
8513 * Wait for VBLANK.
8514 *
8515 * \param inode device inode.
8516@@ -512,14 +356,14 @@
8517 *
8518 * If a signal is not requested, then calls vblank_wait().
8519 */
8520-int drm_wait_vblank(struct drm_device *dev, void *data,
8521- struct drm_file *file_priv)
8522+int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
8523 {
8524 union drm_wait_vblank *vblwait = data;
8525+ struct timeval now;
8526 int ret = 0;
8527- unsigned int flags, seq, crtc;
8528+ unsigned int flags, seq;
8529
8530- if ((!dev->pdev->irq) || (!dev->irq_enabled))
8531+ if ((!dev->irq) || (!dev->irq_enabled))
8532 return -EINVAL;
8533
8534 if (vblwait->request.type &
8535@@ -531,17 +375,13 @@
8536 }
8537
8538 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
8539- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
8540
8541- if (crtc >= dev->num_crtcs)
8542+ if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
8543+ DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
8544 return -EINVAL;
8545
8546- ret = drm_vblank_get(dev, crtc);
8547- if (ret) {
8548- DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
8549- return ret;
8550- }
8551- seq = drm_vblank_count(dev, crtc);
8552+ seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
8553+ : &dev->vbl_received);
8554
8555 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
8556 case _DRM_VBLANK_RELATIVE:
8557@@ -550,8 +390,7 @@
8558 case _DRM_VBLANK_ABSOLUTE:
8559 break;
8560 default:
8561- ret = -EINVAL;
8562- goto done;
8563+ return -EINVAL;
8564 }
8565
8566 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
8567@@ -561,7 +400,8 @@
8568
8569 if (flags & _DRM_VBLANK_SIGNAL) {
8570 unsigned long irqflags;
8571- struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
8572+ struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
8573+ ? &dev->vbl_sigs2 : &dev->vbl_sigs;
8574 struct drm_vbl_sig *vbl_sig;
8575
8576 spin_lock_irqsave(&dev->vbl_lock, irqflags);
8577@@ -582,29 +422,22 @@
8578 }
8579 }
8580
8581- if (atomic_read(&dev->vbl_signal_pending) >= 100) {
8582+ if (dev->vbl_pending >= 100) {
8583 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8584- ret = -EBUSY;
8585- goto done;
8586+ return -EBUSY;
8587 }
8588
8589- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8590+ dev->vbl_pending++;
8591
8592- vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
8593- DRM_MEM_DRIVER);
8594- if (!vbl_sig) {
8595- ret = -ENOMEM;
8596- goto done;
8597- }
8598+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
8599
8600- ret = drm_vblank_get(dev, crtc);
8601- if (ret) {
8602- drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
8603- DRM_MEM_DRIVER);
8604- return ret;
8605+ if (!
8606+ (vbl_sig =
8607+ drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
8608+ return -ENOMEM;
8609 }
8610
8611- atomic_inc(&dev->vbl_signal_pending);
8612+ memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
8613
8614 vbl_sig->sequence = vblwait->request.sequence;
8615 vbl_sig->info.si_signo = vblwait->request.signal;
8616@@ -618,29 +451,20 @@
8617
8618 vblwait->reply.sequence = seq;
8619 } else {
8620- DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
8621- vblwait->request.sequence, crtc);
8622- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
8623- ((drm_vblank_count(dev, crtc)
8624- - vblwait->request.sequence) <= (1 << 23)));
8625-
8626- if (ret != -EINTR) {
8627- struct timeval now;
8628-
8629- do_gettimeofday(&now);
8630-
8631- vblwait->reply.tval_sec = now.tv_sec;
8632- vblwait->reply.tval_usec = now.tv_usec;
8633- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
8634- DRM_DEBUG("returning %d to client\n",
8635- vblwait->reply.sequence);
8636- } else {
8637- DRM_DEBUG("vblank wait interrupted by signal\n");
8638- }
8639+ if (flags & _DRM_VBLANK_SECONDARY) {
8640+ if (dev->driver->vblank_wait2)
8641+ ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
8642+ } else if (dev->driver->vblank_wait)
8643+ ret =
8644+ dev->driver->vblank_wait(dev,
8645+ &vblwait->request.sequence);
8646+
8647+ do_gettimeofday(&now);
8648+ vblwait->reply.tval_sec = now.tv_sec;
8649+ vblwait->reply.tval_usec = now.tv_usec;
8650 }
8651
8652-done:
8653- drm_vblank_put(dev, crtc);
8654+ done:
8655 return ret;
8656 }
8657
8658@@ -648,57 +472,43 @@
8659 * Send the VBLANK signals.
8660 *
8661 * \param dev DRM device.
8662- * \param crtc CRTC where the vblank event occurred
8663 *
8664 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
8665 *
8666 * If a signal is not requested, then calls vblank_wait().
8667 */
8668-static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
8669+void drm_vbl_send_signals(struct drm_device * dev)
8670 {
8671- struct drm_vbl_sig *vbl_sig, *tmp;
8672- struct list_head *vbl_sigs;
8673- unsigned int vbl_seq;
8674 unsigned long flags;
8675+ int i;
8676
8677 spin_lock_irqsave(&dev->vbl_lock, flags);
8678
8679- vbl_sigs = &dev->vbl_sigs[crtc];
8680- vbl_seq = drm_vblank_count(dev, crtc);
8681+ for (i = 0; i < 2; i++) {
8682+ struct drm_vbl_sig *vbl_sig, *tmp;
8683+ struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
8684+ unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
8685+ &dev->vbl_received);
8686+
8687+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
8688+ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
8689+ vbl_sig->info.si_code = vbl_seq;
8690+ send_sig_info(vbl_sig->info.si_signo,
8691+ &vbl_sig->info, vbl_sig->task);
8692+
8693+ list_del(&vbl_sig->head);
8694
8695- list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
8696- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
8697- vbl_sig->info.si_code = vbl_seq;
8698- send_sig_info(vbl_sig->info.si_signo,
8699- &vbl_sig->info, vbl_sig->task);
8700-
8701- list_del(&vbl_sig->head);
8702-
8703- drm_free(vbl_sig, sizeof(*vbl_sig),
8704- DRM_MEM_DRIVER);
8705- atomic_dec(&dev->vbl_signal_pending);
8706- drm_vblank_put(dev, crtc);
8707- }
8708+ drm_free(vbl_sig, sizeof(*vbl_sig),
8709+ DRM_MEM_DRIVER);
8710+
8711+ dev->vbl_pending--;
8712+ }
8713+ }
8714 }
8715
8716 spin_unlock_irqrestore(&dev->vbl_lock, flags);
8717 }
8718-
8719-/**
8720- * drm_handle_vblank - handle a vblank event
8721- * @dev: DRM device
8722- * @crtc: where this event occurred
8723- *
8724- * Drivers should call this routine in their vblank interrupt handlers to
8725- * update the vblank counter and send any signals that may be pending.
8726- */
8727-void drm_handle_vblank(struct drm_device *dev, int crtc)
8728-{
8729- atomic_inc(&dev->_vblank_count[crtc]);
8730- DRM_WAKEUP(&dev->vbl_queue[crtc]);
8731- drm_vbl_send_signals(dev, crtc);
8732-}
8733-EXPORT_SYMBOL(drm_handle_vblank);
8734+EXPORT_SYMBOL(drm_vbl_send_signals);
8735
8736 /**
8737 * Tasklet wrapper function.
8738Index: linux-2.6.27/drivers/gpu/drm/drm_mm.c
8739===================================================================
8740--- linux-2.6.27.orig/drivers/gpu/drm/drm_mm.c 2009-01-14 11:54:35.000000000 +0000
8741+++ linux-2.6.27/drivers/gpu/drm/drm_mm.c 2009-01-14 11:58:01.000000000 +0000
8742@@ -38,7 +38,7 @@
8743 * Aligned allocations can also see improvement.
8744 *
8745 * Authors:
8746- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
8747+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
8748 */
8749
8750 #include "drmP.h"
8751Index: linux-2.6.27/drivers/gpu/drm/drm_modes.c
8752===================================================================
8753--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8754+++ linux-2.6.27/drivers/gpu/drm/drm_modes.c 2009-01-14 11:58:01.000000000 +0000
8755@@ -0,0 +1,560 @@
8756+/*
8757+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
8758+ *
8759+ * Permission is hereby granted, free of charge, to any person obtaining a
8760+ * copy of this software and associated documentation files (the "Software"),
8761+ * to deal in the Software without restriction, including without limitation
8762+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8763+ * and/or sell copies of the Software, and to permit persons to whom the
8764+ * Software is furnished to do so, subject to the following conditions:
8765+ *
8766+ * The above copyright notice and this permission notice shall be included in
8767+ * all copies or substantial portions of the Software.
8768+ *
8769+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
8770+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
8771+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
8772+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
8773+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
8774+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
8775+ * OTHER DEALINGS IN THE SOFTWARE.
8776+ *
8777+ * Except as contained in this notice, the name of the copyright holder(s)
8778+ * and author(s) shall not be used in advertising or otherwise to promote
8779+ * the sale, use or other dealings in this Software without prior written
8780+ * authorization from the copyright holder(s) and author(s).
8781+ */
8782+/*
8783+ * Copyright © 2007 Dave Airlie
8784+ */
8785+
8786+#include <linux/list.h>
8787+#include "drmP.h"
8788+#include "drm.h"
8789+#include "drm_crtc.h"
8790+
8791+/**
8792+ * drm_mode_debug_printmodeline - debug print a mode
8793+ * @dev: DRM device
8794+ * @mode: mode to print
8795+ *
8796+ * LOCKING:
8797+ * None.
8798+ *
8799+ * Describe @mode using DRM_DEBUG.
8800+ */
8801+void drm_mode_debug_printmodeline(struct drm_device *dev,
8802+ struct drm_display_mode *mode)
8803+{
8804+ DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x\n",
8805+ mode->mode_id, mode->name, mode->vrefresh, mode->clock,
8806+ mode->hdisplay, mode->hsync_start,
8807+ mode->hsync_end, mode->htotal,
8808+ mode->vdisplay, mode->vsync_start,
8809+ mode->vsync_end, mode->vtotal, mode->type);
8810+}
8811+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
8812+
8813+/**
8814+ * drm_mode_set_name - set the name on a mode
8815+ * @mode: name will be set in this mode
8816+ *
8817+ * LOCKING:
8818+ * None.
8819+ *
8820+ * Set the name of @mode to a standard format.
8821+ */
8822+void drm_mode_set_name(struct drm_display_mode *mode)
8823+{
8824+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
8825+ mode->vdisplay);
8826+}
8827+EXPORT_SYMBOL(drm_mode_set_name);
8828+
8829+/**
8830+ * drm_mode_list_concat - move modes from one list to another
8831+ * @head: source list
8832+ * @new: dst list
8833+ *
8834+ * LOCKING:
8835+ * Caller must ensure both lists are locked.
8836+ *
8837+ * Move all the modes from @head to @new.
8838+ */
8839+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
8840+{
8841+
8842+ struct list_head *entry, *tmp;
8843+
8844+ list_for_each_safe(entry, tmp, head) {
8845+ list_move_tail(entry, new);
8846+ }
8847+}
8848+
8849+/**
8850+ * drm_mode_width - get the width of a mode
8851+ * @mode: mode
8852+ *
8853+ * LOCKING:
8854+ * None.
8855+ *
8856+ * Return @mode's width (hdisplay) value.
8857+ *
8858+ * FIXME: is this needed?
8859+ *
8860+ * RETURNS:
8861+ * @mode->hdisplay
8862+ */
8863+int drm_mode_width(struct drm_display_mode *mode)
8864+{
8865+ return mode->hdisplay;
8866+
8867+}
8868+EXPORT_SYMBOL(drm_mode_width);
8869+
8870+/**
8871+ * drm_mode_height - get the height of a mode
8872+ * @mode: mode
8873+ *
8874+ * LOCKING:
8875+ * None.
8876+ *
8877+ * Return @mode's height (vdisplay) value.
8878+ *
8879+ * FIXME: is this needed?
8880+ *
8881+ * RETURNS:
8882+ * @mode->vdisplay
8883+ */
8884+int drm_mode_height(struct drm_display_mode *mode)
8885+{
8886+ return mode->vdisplay;
8887+}
8888+EXPORT_SYMBOL(drm_mode_height);
8889+
8890+/**
8891+ * drm_mode_vrefresh - get the vrefresh of a mode
8892+ * @mode: mode
8893+ *
8894+ * LOCKING:
8895+ * None.
8896+ *
8897+ * Return @mode's vrefresh rate or calculate it if necessary.
8898+ *
8899+ * FIXME: why is this needed? shouldn't vrefresh be set already?
8900+ *
8901+ * RETURNS:
8902+ * Vertical refresh rate of @mode x 1000. For precision reasons.
8903+ */
8904+int drm_mode_vrefresh(struct drm_display_mode *mode)
8905+{
8906+ int refresh = 0;
8907+ unsigned int calc_val;
8908+
8909+ if (mode->vrefresh > 0)
8910+ refresh = mode->vrefresh;
8911+ else if (mode->htotal > 0 && mode->vtotal > 0) {
8912+ /* work out vrefresh the value will be x1000 */
8913+ calc_val = (mode->clock * 1000);
8914+
8915+ calc_val /= mode->htotal;
8916+ calc_val *= 1000;
8917+ calc_val /= mode->vtotal;
8918+
8919+ refresh = calc_val;
8920+ if (mode->flags & V_INTERLACE)
8921+ refresh *= 2;
8922+ if (mode->flags & V_DBLSCAN)
8923+ refresh /= 2;
8924+ if (mode->vscan > 1)
8925+ refresh /= mode->vscan;
8926+ }
8927+ return refresh;
8928+}
8929+EXPORT_SYMBOL(drm_mode_vrefresh);
8930+
8931+/**
8932+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
8933+ * @p: mode
8934+ * @adjust_flags: unused? (FIXME)
8935+ *
8936+ * LOCKING:
8937+ * None.
8938+ *
8939+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
8940+ */
8941+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
8942+{
8943+ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
8944+ return;
8945+
8946+ p->crtc_hdisplay = p->hdisplay;
8947+ p->crtc_hsync_start = p->hsync_start;
8948+ p->crtc_hsync_end = p->hsync_end;
8949+ p->crtc_htotal = p->htotal;
8950+ p->crtc_hskew = p->hskew;
8951+ p->crtc_vdisplay = p->vdisplay;
8952+ p->crtc_vsync_start = p->vsync_start;
8953+ p->crtc_vsync_end = p->vsync_end;
8954+ p->crtc_vtotal = p->vtotal;
8955+
8956+ if (p->flags & V_INTERLACE) {
8957+ if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
8958+ p->crtc_vdisplay /= 2;
8959+ p->crtc_vsync_start /= 2;
8960+ p->crtc_vsync_end /= 2;
8961+ p->crtc_vtotal /= 2;
8962+ }
8963+
8964+ p->crtc_vtotal |= 1;
8965+ }
8966+
8967+ if (p->flags & V_DBLSCAN) {
8968+ p->crtc_vdisplay *= 2;
8969+ p->crtc_vsync_start *= 2;
8970+ p->crtc_vsync_end *= 2;
8971+ p->crtc_vtotal *= 2;
8972+ }
8973+
8974+ if (p->vscan > 1) {
8975+ p->crtc_vdisplay *= p->vscan;
8976+ p->crtc_vsync_start *= p->vscan;
8977+ p->crtc_vsync_end *= p->vscan;
8978+ p->crtc_vtotal *= p->vscan;
8979+ }
8980+
8981+ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
8982+ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
8983+ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
8984+ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
8985+
8986+ p->crtc_hadjusted = false;
8987+ p->crtc_vadjusted = false;
8988+}
8989+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
8990+
8991+
8992+/**
8993+ * drm_mode_duplicate - allocate and duplicate an existing mode
8994+ * @m: mode to duplicate
8995+ *
8996+ * LOCKING:
8997+ * None.
8998+ *
8999+ * Just allocate a new mode, copy the existing mode into it, and return
9000+ * a pointer to it. Used to create new instances of established modes.
9001+ */
9002+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
9003+ struct drm_display_mode *mode)
9004+{
9005+ struct drm_display_mode *nmode;
9006+ int new_id;
9007+
9008+ nmode = drm_mode_create(dev);
9009+ if (!nmode)
9010+ return NULL;
9011+
9012+ new_id = nmode->mode_id;
9013+ *nmode = *mode;
9014+ nmode->mode_id = new_id;
9015+ INIT_LIST_HEAD(&nmode->head);
9016+ return nmode;
9017+}
9018+EXPORT_SYMBOL(drm_mode_duplicate);
9019+
9020+/**
9021+ * drm_mode_equal - test modes for equality
9022+ * @mode1: first mode
9023+ * @mode2: second mode
9024+ *
9025+ * LOCKING:
9026+ * None.
9027+ *
9028+ * Check to see if @mode1 and @mode2 are equivalent.
9029+ *
9030+ * RETURNS:
9031+ * True if the modes are equal, false otherwise.
9032+ */
9033+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
9034+{
9035+ if (mode1->clock == mode2->clock &&
9036+ mode1->hdisplay == mode2->hdisplay &&
9037+ mode1->hsync_start == mode2->hsync_start &&
9038+ mode1->hsync_end == mode2->hsync_end &&
9039+ mode1->htotal == mode2->htotal &&
9040+ mode1->hskew == mode2->hskew &&
9041+ mode1->vdisplay == mode2->vdisplay &&
9042+ mode1->vsync_start == mode2->vsync_start &&
9043+ mode1->vsync_end == mode2->vsync_end &&
9044+ mode1->vtotal == mode2->vtotal &&
9045+ mode1->vscan == mode2->vscan &&
9046+ mode1->flags == mode2->flags)
9047+ return true;
9048+
9049+ return false;
9050+}
9051+EXPORT_SYMBOL(drm_mode_equal);
9052+
9053+/**
9054+ * drm_mode_validate_size - make sure modes adhere to size constraints
9055+ * @dev: DRM device
9056+ * @mode_list: list of modes to check
9057+ * @maxX: maximum width
9058+ * @maxY: maximum height
9059+ * @maxPitch: max pitch
9060+ *
9061+ * LOCKING:
9062+ * Caller must hold a lock protecting @mode_list.
9063+ *
9064+ * The DRM device (@dev) has size and pitch limits. Here we validate the
9065+ * modes we probed for @dev against those limits and set their status as
9066+ * necessary.
9067+ */
9068+void drm_mode_validate_size(struct drm_device *dev,
9069+ struct list_head *mode_list,
9070+ int maxX, int maxY, int maxPitch)
9071+{
9072+ struct drm_display_mode *mode;
9073+
9074+ list_for_each_entry(mode, mode_list, head) {
9075+ if (maxPitch > 0 && mode->hdisplay > maxPitch)
9076+ mode->status = MODE_BAD_WIDTH;
9077+
9078+ if (maxX > 0 && mode->hdisplay > maxX)
9079+ mode->status = MODE_VIRTUAL_X;
9080+
9081+ if (maxY > 0 && mode->vdisplay > maxY)
9082+ mode->status = MODE_VIRTUAL_Y;
9083+ }
9084+}
9085+EXPORT_SYMBOL(drm_mode_validate_size);
9086+
9087+/**
9088+ * drm_mode_validate_clocks - validate modes against clock limits
9089+ * @dev: DRM device
9090+ * @mode_list: list of modes to check
9091+ * @min: minimum clock rate array
9092+ * @max: maximum clock rate array
9093+ * @n_ranges: number of clock ranges (size of arrays)
9094+ *
9095+ * LOCKING:
9096+ * Caller must hold a lock protecting @mode_list.
9097+ *
9098+ * Some code may need to check a mode list against the clock limits of the
9099+ * device in question. This function walks the mode list, testing to make
9100+ * sure each mode falls within a given range (defined by @min and @max
9101+ * arrays) and sets @mode->status as needed.
9102+ */
9103+void drm_mode_validate_clocks(struct drm_device *dev,
9104+ struct list_head *mode_list,
9105+ int *min, int *max, int n_ranges)
9106+{
9107+ struct drm_display_mode *mode;
9108+ int i;
9109+
9110+ list_for_each_entry(mode, mode_list, head) {
9111+ bool good = false;
9112+ for (i = 0; i < n_ranges; i++) {
9113+ if (mode->clock >= min[i] && mode->clock <= max[i]) {
9114+ good = true;
9115+ break;
9116+ }
9117+ }
9118+ if (!good)
9119+ mode->status = MODE_CLOCK_RANGE;
9120+ }
9121+}
9122+EXPORT_SYMBOL(drm_mode_validate_clocks);
9123+
9124+/**
9125+ * drm_mode_prune_invalid - remove invalid modes from mode list
9126+ * @dev: DRM device
9127+ * @mode_list: list of modes to check
9128+ * @verbose: be verbose about it
9129+ *
9130+ * LOCKING:
9131+ * Caller must hold a lock protecting @mode_list.
9132+ *
9133+ * Once mode list generation is complete, a caller can use this routine to
9134+ * remove invalid modes from a mode list. If any of the modes have a
9135+ * status other than %MODE_OK, they are removed from @mode_list and freed.
9136+ */
9137+void drm_mode_prune_invalid(struct drm_device *dev,
9138+ struct list_head *mode_list, bool verbose)
9139+{
9140+ struct drm_display_mode *mode, *t;
9141+
9142+ list_for_each_entry_safe(mode, t, mode_list, head) {
9143+ if (mode->status != MODE_OK) {
9144+ list_del(&mode->head);
9145+ if (verbose) {
9146+ drm_mode_debug_printmodeline(dev, mode);
9147+ DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
9148+ }
9149+ kfree(mode);
9150+ }
9151+ }
9152+}
9153+
9154+/**
9155+ * drm_mode_compare - compare modes for favorability
9156+ * @lh_a: list_head for first mode
9157+ * @lh_b: list_head for second mode
9158+ *
9159+ * LOCKING:
9160+ * None.
9161+ *
9162+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
9163+ * which is better.
9164+ *
9165+ * RETURNS:
9166+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
9167+ * positive if @lh_b is better than @lh_a.
9168+ */
9169+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
9170+{
9171+ struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
9172+ struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
9173+ int diff;
9174+
9175+ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
9176+ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
9177+ if (diff)
9178+ return diff;
9179+ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
9180+ if (diff)
9181+ return diff;
9182+ diff = b->clock - a->clock;
9183+ return diff;
9184+}
9185+
9186+/* FIXME: what we don't have a list sort function? */
9187+/* list sort from Mark J Roberts (mjr@znex.org) */
9188+void list_sort(struct list_head *head, int (*cmp)(struct list_head *a, struct list_head *b))
9189+{
9190+ struct list_head *p, *q, *e, *list, *tail, *oldhead;
9191+ int insize, nmerges, psize, qsize, i;
9192+
9193+ list = head->next;
9194+ list_del(head);
9195+ insize = 1;
9196+ for (;;) {
9197+ p = oldhead = list;
9198+ list = tail = NULL;
9199+ nmerges = 0;
9200+
9201+ while (p) {
9202+ nmerges++;
9203+ q = p;
9204+ psize = 0;
9205+ for (i = 0; i < insize; i++) {
9206+ psize++;
9207+ q = q->next == oldhead ? NULL : q->next;
9208+ if (!q)
9209+ break;
9210+ }
9211+
9212+ qsize = insize;
9213+ while (psize > 0 || (qsize > 0 && q)) {
9214+ if (!psize) {
9215+ e = q;
9216+ q = q->next;
9217+ qsize--;
9218+ if (q == oldhead)
9219+ q = NULL;
9220+ } else if (!qsize || !q) {
9221+ e = p;
9222+ p = p->next;
9223+ psize--;
9224+ if (p == oldhead)
9225+ p = NULL;
9226+ } else if (cmp(p, q) <= 0) {
9227+ e = p;
9228+ p = p->next;
9229+ psize--;
9230+ if (p == oldhead)
9231+ p = NULL;
9232+ } else {
9233+ e = q;
9234+ q = q->next;
9235+ qsize--;
9236+ if (q == oldhead)
9237+ q = NULL;
9238+ }
9239+ if (tail)
9240+ tail->next = e;
9241+ else
9242+ list = e;
9243+ e->prev = tail;
9244+ tail = e;
9245+ }
9246+ p = q;
9247+ }
9248+
9249+ tail->next = list;
9250+ list->prev = tail;
9251+
9252+ if (nmerges <= 1)
9253+ break;
9254+
9255+ insize *= 2;
9256+ }
9257+
9258+ head->next = list;
9259+ head->prev = list->prev;
9260+ list->prev->next = head;
9261+ list->prev = head;
9262+}
9263+
9264+/**
9265+ * drm_mode_sort - sort mode list
9266+ * @mode_list: list to sort
9267+ *
9268+ * LOCKING:
9269+ * Caller must hold a lock protecting @mode_list.
9270+ *
9271+ * Sort @mode_list by favorability, putting good modes first.
9272+ */
9273+void drm_mode_sort(struct list_head *mode_list)
9274+{
9275+ list_sort(mode_list, drm_mode_compare);
9276+}
9277+
9278+
9279+/**
9280+ * drm_mode_output_list_update - update the mode list for the output
9281+ * @output: the output to update
9282+ *
9283+ * LOCKING:
9284+ * Caller must hold a lock protecting @mode_list.
9285+ *
9286+ * This moves the modes from the @output probed_modes list
9287+ * to the actual mode list. It compares the probed mode against the current
9288+ * list and only adds different modes. All modes unverified after this point
9289+ * will be removed by the prune invalid modes.
9290+ */
9291+void drm_mode_output_list_update(struct drm_output *output)
9292+{
9293+ struct drm_display_mode *mode;
9294+ struct drm_display_mode *pmode, *pt;
9295+ int found_it;
9296+ list_for_each_entry_safe(pmode, pt, &output->probed_modes,
9297+ head) {
9298+ found_it = 0;
9299+ /* go through current modes checking for the new probed mode */
9300+ list_for_each_entry(mode, &output->modes, head) {
9301+ if (drm_mode_equal(pmode, mode)) {
9302+ found_it = 1;
9303+ /* if equal delete the probed mode */
9304+ mode->status = pmode->status;
9305+ list_del(&pmode->head);
9306+ kfree(pmode);
9307+ break;
9308+ }
9309+ }
9310+
9311+ if (!found_it) {
9312+ list_move_tail(&pmode->head, &output->modes);
9313+ }
9314+ }
9315+}
9316Index: linux-2.6.27/drivers/gpu/drm/drm_object.c
9317===================================================================
9318--- /dev/null 1970-01-01 00:00:00.000000000 +0000
9319+++ linux-2.6.27/drivers/gpu/drm/drm_object.c 2009-01-14 11:58:01.000000000 +0000
9320@@ -0,0 +1,294 @@
9321+/**************************************************************************
9322+ *
9323+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
9324+ * All Rights Reserved.
9325+ *
9326+ * Permission is hereby granted, free of charge, to any person obtaining a
9327+ * copy of this software and associated documentation files (the
9328+ * "Software"), to deal in the Software without restriction, including
9329+ * without limitation the rights to use, copy, modify, merge, publish,
9330+ * distribute, sub license, and/or sell copies of the Software, and to
9331+ * permit persons to whom the Software is furnished to do so, subject to
9332+ * the following conditions:
9333+ *
9334+ * The above copyright notice and this permission notice (including the
9335+ * next paragraph) shall be included in all copies or substantial portions
9336+ * of the Software.
9337+ *
9338+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
9339+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9340+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
9341+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
9342+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
9343+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
9344+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
9345+ *
9346+ **************************************************************************/
9347+/*
9348+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
9349+ */
9350+
9351+#include "drmP.h"
9352+
9353+int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
9354+ int shareable)
9355+{
9356+ struct drm_device *dev = priv->minor->dev;
9357+ int ret;
9358+
9359+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
9360+
9361+ /* The refcount will be bumped to 1 when we add the ref object below. */
9362+ atomic_set(&item->refcount, 0);
9363+ item->shareable = shareable;
9364+ item->owner = priv;
9365+
9366+ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
9367+ (unsigned long)item, 32, 0, 0);
9368+ if (ret)
9369+ return ret;
9370+
9371+ ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
9372+ if (ret)
9373+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
9374+
9375+ return ret;
9376+}
9377+EXPORT_SYMBOL(drm_add_user_object);
9378+
9379+struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
9380+{
9381+ struct drm_device *dev = priv->minor->dev;
9382+ struct drm_hash_item *hash;
9383+ int ret;
9384+ struct drm_user_object *item;
9385+
9386+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
9387+
9388+ ret = drm_ht_find_item(&dev->object_hash, key, &hash);
9389+ if (ret)
9390+ return NULL;
9391+
9392+ item = drm_hash_entry(hash, struct drm_user_object, hash);
9393+
9394+ if (priv != item->owner) {
9395+ struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
9396+ ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
9397+ if (ret) {
9398+ DRM_ERROR("Object not registered for usage\n");
9399+ return NULL;
9400+ }
9401+ }
9402+ return item;
9403+}
9404+EXPORT_SYMBOL(drm_lookup_user_object);
9405+
9406+static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
9407+{
9408+ struct drm_device *dev = priv->minor->dev;
9409+ int ret;
9410+
9411+ if (atomic_dec_and_test(&item->refcount)) {
9412+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
9413+ BUG_ON(ret);
9414+ item->remove(priv, item);
9415+ }
9416+}
9417+
9418+static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
9419+ enum drm_ref_type action)
9420+{
9421+ int ret = 0;
9422+
9423+ switch (action) {
9424+ case _DRM_REF_USE:
9425+ atomic_inc(&ro->refcount);
9426+ break;
9427+ default:
9428+ if (!ro->ref_struct_locked) {
9429+ break;
9430+ } else {
9431+ ro->ref_struct_locked(priv, ro, action);
9432+ }
9433+ }
9434+ return ret;
9435+}
9436+
9437+int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
9438+ enum drm_ref_type ref_action)
9439+{
9440+ int ret = 0;
9441+ struct drm_ref_object *item;
9442+ struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
9443+
9444+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
9445+ if (!referenced_object->shareable && priv != referenced_object->owner) {
9446+ DRM_ERROR("Not allowed to reference this object\n");
9447+ return -EINVAL;
9448+ }
9449+
9450+ /*
9451+ * If this is not a usage reference, Check that usage has been registered
9452+ * first. Otherwise strange things may happen on destruction.
9453+ */
9454+
9455+ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
9456+ item =
9457+ drm_lookup_ref_object(priv, referenced_object,
9458+ _DRM_REF_USE);
9459+ if (!item) {
9460+ DRM_ERROR
9461+ ("Object not registered for usage by this client\n");
9462+ return -EINVAL;
9463+ }
9464+ }
9465+
9466+ if (NULL !=
9467+ (item =
9468+ drm_lookup_ref_object(priv, referenced_object, ref_action))) {
9469+ atomic_inc(&item->refcount);
9470+ return drm_object_ref_action(priv, referenced_object,
9471+ ref_action);
9472+ }
9473+
9474+ item = drm_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
9475+ if (item == NULL) {
9476+ DRM_ERROR("Could not allocate reference object\n");
9477+ return -ENOMEM;
9478+ }
9479+
9480+ atomic_set(&item->refcount, 1);
9481+ item->hash.key = (unsigned long)referenced_object;
9482+ ret = drm_ht_insert_item(ht, &item->hash);
9483+ item->unref_action = ref_action;
9484+
9485+ if (ret)
9486+ goto out;
9487+
9488+ list_add(&item->list, &priv->refd_objects);
9489+ ret = drm_object_ref_action(priv, referenced_object, ref_action);
9490+out:
9491+ return ret;
9492+}
9493+
9494+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
9495+ struct drm_user_object *referenced_object,
9496+ enum drm_ref_type ref_action)
9497+{
9498+ struct drm_hash_item *hash;
9499+ int ret;
9500+
9501+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
9502+ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
9503+ (unsigned long)referenced_object, &hash);
9504+ if (ret)
9505+ return NULL;
9506+
9507+ return drm_hash_entry(hash, struct drm_ref_object, hash);
9508+}
9509+EXPORT_SYMBOL(drm_lookup_ref_object);
9510+
9511+static void drm_remove_other_references(struct drm_file *priv,
9512+ struct drm_user_object *ro)
9513+{
9514+ int i;
9515+ struct drm_open_hash *ht;
9516+ struct drm_hash_item *hash;
9517+ struct drm_ref_object *item;
9518+
9519+ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
9520+ ht = &priv->refd_object_hash[i];
9521+ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
9522+ item = drm_hash_entry(hash, struct drm_ref_object, hash);
9523+ drm_remove_ref_object(priv, item);
9524+ }
9525+ }
9526+}
9527+
9528+void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
9529+{
9530+ int ret;
9531+ struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
9532+ struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
9533+ enum drm_ref_type unref_action;
9534+
9535+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
9536+ unref_action = item->unref_action;
9537+ if (atomic_dec_and_test(&item->refcount)) {
9538+ ret = drm_ht_remove_item(ht, &item->hash);
9539+ BUG_ON(ret);
9540+ list_del_init(&item->list);
9541+ if (unref_action == _DRM_REF_USE)
9542+ drm_remove_other_references(priv, user_object);
9543+ drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
9544+ }
9545+
9546+ switch (unref_action) {
9547+ case _DRM_REF_USE:
9548+ drm_deref_user_object(priv, user_object);
9549+ break;
9550+ default:
9551+ BUG_ON(!user_object->unref);
9552+ user_object->unref(priv, user_object, unref_action);
9553+ break;
9554+ }
9555+
9556+}
9557+EXPORT_SYMBOL(drm_remove_ref_object);
9558+
9559+int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
9560+ enum drm_object_type type, struct drm_user_object **object)
9561+{
9562+ struct drm_device *dev = priv->minor->dev;
9563+ struct drm_user_object *uo;
9564+ struct drm_hash_item *hash;
9565+ int ret;
9566+
9567+ mutex_lock(&dev->struct_mutex);
9568+ ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
9569+ if (ret) {
9570+ DRM_ERROR("Could not find user object to reference.\n");
9571+ goto out_err;
9572+ }
9573+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
9574+ if (uo->type != type) {
9575+ ret = -EINVAL;
9576+ goto out_err;
9577+ }
9578+ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
9579+ if (ret)
9580+ goto out_err;
9581+ mutex_unlock(&dev->struct_mutex);
9582+ *object = uo;
9583+ return 0;
9584+out_err:
9585+ mutex_unlock(&dev->struct_mutex);
9586+ return ret;
9587+}
9588+
9589+int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
9590+ enum drm_object_type type)
9591+{
9592+ struct drm_device *dev = priv->minor->dev;
9593+ struct drm_user_object *uo;
9594+ struct drm_ref_object *ro;
9595+ int ret;
9596+
9597+ mutex_lock(&dev->struct_mutex);
9598+ uo = drm_lookup_user_object(priv, user_token);
9599+ if (!uo || (uo->type != type)) {
9600+ ret = -EINVAL;
9601+ goto out_err;
9602+ }
9603+ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
9604+ if (!ro) {
9605+ ret = -EINVAL;
9606+ goto out_err;
9607+ }
9608+ drm_remove_ref_object(priv, ro);
9609+ mutex_unlock(&dev->struct_mutex);
9610+ return 0;
9611+out_err:
9612+ mutex_unlock(&dev->struct_mutex);
9613+ return ret;
9614+}
9615Index: linux-2.6.27/drivers/gpu/drm/drm_regman.c
9616===================================================================
9617--- /dev/null 1970-01-01 00:00:00.000000000 +0000
9618+++ linux-2.6.27/drivers/gpu/drm/drm_regman.c 2009-01-14 11:58:01.000000000 +0000
9619@@ -0,0 +1,200 @@
9620+/**************************************************************************
9621+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
9622+ * All Rights Reserved.
9623+ *
9624+ * Permission is hereby granted, free of charge, to any person obtaining a
9625+ * copy of this software and associated documentation files (the
9626+ * "Software"), to deal in the Software without restriction, including
9627+ * without limitation the rights to use, copy, modify, merge, publish,
9628+ * distribute, sub license, and/or sell copies of the Software, and to
9629+ * permit persons to whom the Software is furnished to do so, subject to
9630+ * the following conditions:
9631+ *
9632+ * The above copyright notice and this permission notice (including the
9633+ * next paragraph) shall be included in all copies or substantial portions
9634+ * of the Software.
9635+ *
9636+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
9637+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9638+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
9639+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
9640+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
9641+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
9642+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
9643+ *
9644+ **************************************************************************/
9645+/*
9646+ * An allocate-fence manager implementation intended for sets of base-registers
9647+ * or tiling-registers.
9648+ */
9649+
9650+#include "drmP.h"
9651+
9652+/*
9653+ * Allocate a compatible register and put it on the unfenced list.
9654+ */
9655+
9656+int drm_regs_alloc(struct drm_reg_manager *manager,
9657+ const void *data,
9658+ uint32_t fence_class,
9659+ uint32_t fence_type,
9660+ int interruptible, int no_wait, struct drm_reg **reg)
9661+{
9662+ struct drm_reg *entry, *next_entry;
9663+ int ret;
9664+
9665+ *reg = NULL;
9666+
9667+ /*
9668+ * Search the unfenced list.
9669+ */
9670+
9671+ list_for_each_entry(entry, &manager->unfenced, head) {
9672+ if (manager->reg_reusable(entry, data)) {
9673+ entry->new_fence_type |= fence_type;
9674+ goto out;
9675+ }
9676+ }
9677+
9678+ /*
9679+ * Search the lru list.
9680+ */
9681+
9682+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
9683+ struct drm_fence_object *fence = entry->fence;
9684+ if (fence->fence_class == fence_class &&
9685+ (entry->fence_type & fence_type) == entry->fence_type &&
9686+ manager->reg_reusable(entry, data)) {
9687+ list_del(&entry->head);
9688+ entry->new_fence_type = fence_type;
9689+ list_add_tail(&entry->head, &manager->unfenced);
9690+ goto out;
9691+ }
9692+ }
9693+
9694+ /*
9695+ * Search the free list.
9696+ */
9697+
9698+ list_for_each_entry(entry, &manager->free, head) {
9699+ list_del(&entry->head);
9700+ entry->new_fence_type = fence_type;
9701+ list_add_tail(&entry->head, &manager->unfenced);
9702+ goto out;
9703+ }
9704+
9705+ if (no_wait)
9706+ return -EBUSY;
9707+
9708+ /*
9709+ * Go back to the lru list and try to expire fences.
9710+ */
9711+
9712+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
9713+ BUG_ON(!entry->fence);
9714+ ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
9715+ entry->fence_type);
9716+ if (ret)
9717+ return ret;
9718+
9719+ drm_fence_usage_deref_unlocked(&entry->fence);
9720+ list_del(&entry->head);
9721+ entry->new_fence_type = fence_type;
9722+ list_add_tail(&entry->head, &manager->unfenced);
9723+ goto out;
9724+ }
9725+
9726+ /*
9727+ * Oops. All registers are used up :(.
9728+ */
9729+
9730+ return -EBUSY;
9731+out:
9732+ *reg = entry;
9733+ return 0;
9734+}
9735+EXPORT_SYMBOL(drm_regs_alloc);
9736+
9737+void drm_regs_fence(struct drm_reg_manager *manager,
9738+ struct drm_fence_object *fence)
9739+{
9740+ struct drm_reg *entry;
9741+ struct drm_reg *next_entry;
9742+
9743+ if (!fence) {
9744+
9745+ /*
9746+ * Old fence (if any) is still valid.
9747+ * Put back on free and lru lists.
9748+ */
9749+
9750+ list_for_each_entry_safe_reverse(entry, next_entry,
9751+ &manager->unfenced, head) {
9752+ list_del(&entry->head);
9753+ list_add(&entry->head, (entry->fence) ?
9754+ &manager->lru : &manager->free);
9755+ }
9756+ } else {
9757+
9758+ /*
9759+ * Fence with a new fence and put on lru list.
9760+ */
9761+
9762+ list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
9763+ head) {
9764+ list_del(&entry->head);
9765+ if (entry->fence)
9766+ drm_fence_usage_deref_unlocked(&entry->fence);
9767+ drm_fence_reference_unlocked(&entry->fence, fence);
9768+
9769+ entry->fence_type = entry->new_fence_type;
9770+ BUG_ON((entry->fence_type & fence->type) !=
9771+ entry->fence_type);
9772+
9773+ list_add_tail(&entry->head, &manager->lru);
9774+ }
9775+ }
9776+}
9777+EXPORT_SYMBOL(drm_regs_fence);
9778+
9779+void drm_regs_free(struct drm_reg_manager *manager)
9780+{
9781+ struct drm_reg *entry;
9782+ struct drm_reg *next_entry;
9783+
9784+ drm_regs_fence(manager, NULL);
9785+
9786+ list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
9787+ list_del(&entry->head);
9788+ manager->reg_destroy(entry);
9789+ }
9790+
9791+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
9792+
9793+ (void)drm_fence_object_wait(entry->fence, 1, 1,
9794+ entry->fence_type);
9795+ list_del(&entry->head);
9796+ drm_fence_usage_deref_unlocked(&entry->fence);
9797+ manager->reg_destroy(entry);
9798+ }
9799+}
9800+EXPORT_SYMBOL(drm_regs_free);
9801+
9802+void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
9803+{
9804+ reg->fence = NULL;
9805+ list_add_tail(&reg->head, &manager->free);
9806+}
9807+EXPORT_SYMBOL(drm_regs_add);
9808+
9809+void drm_regs_init(struct drm_reg_manager *manager,
9810+ int (*reg_reusable) (const struct drm_reg *, const void *),
9811+ void (*reg_destroy) (struct drm_reg *))
9812+{
9813+ INIT_LIST_HEAD(&manager->free);
9814+ INIT_LIST_HEAD(&manager->lru);
9815+ INIT_LIST_HEAD(&manager->unfenced);
9816+ manager->reg_reusable = reg_reusable;
9817+ manager->reg_destroy = reg_destroy;
9818+}
9819+EXPORT_SYMBOL(drm_regs_init);
9820Index: linux-2.6.27/drivers/gpu/drm/drm_sman.c
9821===================================================================
9822--- linux-2.6.27.orig/drivers/gpu/drm/drm_sman.c 2009-01-14 11:54:35.000000000 +0000
9823+++ linux-2.6.27/drivers/gpu/drm/drm_sman.c 2009-01-14 11:58:01.000000000 +0000
9824@@ -33,7 +33,7 @@
9825 * struct or a context identifier.
9826 *
9827 * Authors:
9828- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
9829+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
9830 */
9831
9832 #include "drm_sman.h"
9833Index: linux-2.6.27/drivers/gpu/drm/drm_stub.c
9834===================================================================
9835--- linux-2.6.27.orig/drivers/gpu/drm/drm_stub.c 2009-01-14 11:54:35.000000000 +0000
9836+++ linux-2.6.27/drivers/gpu/drm/drm_stub.c 2009-01-14 11:58:01.000000000 +0000
9837@@ -97,6 +97,7 @@
9838 init_timer(&dev->timer);
9839 mutex_init(&dev->struct_mutex);
9840 mutex_init(&dev->ctxlist_mutex);
9841+ mutex_init(&dev->bm.evict_mutex);
9842
9843 idr_init(&dev->drw_idr);
9844
9845@@ -113,6 +114,18 @@
9846 return -ENOMEM;
9847 }
9848
9849+ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
9850+ DRM_FILE_PAGE_OFFSET_SIZE)) {
9851+ drm_ht_remove(&dev->map_hash);
9852+ return -ENOMEM;
9853+ }
9854+
9855+ if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
9856+ drm_ht_remove(&dev->map_hash);
9857+ drm_mm_takedown(&dev->offset_manager);
9858+ return -ENOMEM;
9859+ }
9860+
9861 /* the DRM has 6 basic counters */
9862 dev->counters = 6;
9863 dev->types[0] = _DRM_STAT_LOCK;
9864@@ -152,15 +165,7 @@
9865 goto error_out_unreg;
9866 }
9867
9868- if (driver->driver_features & DRIVER_GEM) {
9869- retcode = drm_gem_init(dev);
9870- if (retcode) {
9871- DRM_ERROR("Cannot initialize graphics execution "
9872- "manager (GEM)\n");
9873- goto error_out_unreg;
9874- }
9875- }
9876-
9877+ drm_fence_manager_init(dev);
9878 return 0;
9879
9880 error_out_unreg:
9881@@ -284,6 +289,8 @@
9882 drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
9883 return ret;
9884 }
9885+EXPORT_SYMBOL(drm_get_dev);
9886+
9887
9888 /**
9889 * Put a device minor number.
9890Index: linux-2.6.27/drivers/gpu/drm/drm_ttm.c
9891===================================================================
9892--- /dev/null 1970-01-01 00:00:00.000000000 +0000
9893+++ linux-2.6.27/drivers/gpu/drm/drm_ttm.c 2009-01-14 11:58:01.000000000 +0000
9894@@ -0,0 +1,430 @@
9895+/**************************************************************************
9896+ *
9897+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
9898+ * All Rights Reserved.
9899+ *
9900+ * Permission is hereby granted, free of charge, to any person obtaining a
9901+ * copy of this software and associated documentation files (the
9902+ * "Software"), to deal in the Software without restriction, including
9903+ * without limitation the rights to use, copy, modify, merge, publish,
9904+ * distribute, sub license, and/or sell copies of the Software, and to
9905+ * permit persons to whom the Software is furnished to do so, subject to
9906+ * the following conditions:
9907+ *
9908+ * The above copyright notice and this permission notice (including the
9909+ * next paragraph) shall be included in all copies or substantial portions
9910+ * of the Software.
9911+ *
9912+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
9913+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
9914+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
9915+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
9916+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
9917+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
9918+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
9919+ *
9920+ **************************************************************************/
9921+/*
9922+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
9923+ */
9924+
9925+#include "drmP.h"
9926+#include <asm/agp.h>
9927+
9928+static void drm_ttm_ipi_handler(void *null)
9929+{
9930+ flush_agp_cache();
9931+}
9932+
9933+void drm_ttm_cache_flush(void)
9934+{
9935+ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
9936+ DRM_ERROR("Timed out waiting for drm cache flush.\n");
9937+}
9938+EXPORT_SYMBOL(drm_ttm_cache_flush);
9939+
9940+/*
9941+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
9942+ */
9943+
9944+static void ttm_alloc_pages(struct drm_ttm *ttm)
9945+{
9946+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
9947+ ttm->pages = NULL;
9948+
9949+ if (size <= PAGE_SIZE)
9950+ ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
9951+
9952+ if (!ttm->pages) {
9953+ ttm->pages = vmalloc_user(size);
9954+ if (ttm->pages)
9955+ ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
9956+ }
9957+}
9958+
9959+static void ttm_free_pages(struct drm_ttm *ttm)
9960+{
9961+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
9962+
9963+ if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
9964+ vfree(ttm->pages);
9965+ ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
9966+ } else {
9967+ drm_free(ttm->pages, size, DRM_MEM_TTM);
9968+ }
9969+ ttm->pages = NULL;
9970+}
9971+
9972+static struct page *drm_ttm_alloc_page(void)
9973+{
9974+ struct page *page;
9975+
9976+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
9977+ if (!page)
9978+ return NULL;
9979+ return page;
9980+}
9981+
9982+/*
9983+ * Change caching policy for the linear kernel map
9984+ * for range of pages in a ttm.
9985+ */
9986+
9987+static int drm_set_caching(struct drm_ttm *ttm, int noncached)
9988+{
9989+ int i;
9990+ struct page **cur_page;
9991+ int do_tlbflush = 0;
9992+
9993+ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
9994+ return 0;
9995+
9996+ if (noncached)
9997+ drm_ttm_cache_flush();
9998+
9999+ for (i = 0; i < ttm->num_pages; ++i) {
10000+ cur_page = ttm->pages + i;
10001+ if (*cur_page) {
10002+ if (!PageHighMem(*cur_page)) {
10003+ if (noncached) {
10004+ map_page_into_agp(*cur_page);
10005+ } else {
10006+ unmap_page_from_agp(*cur_page);
10007+ }
10008+ do_tlbflush = 1;
10009+ }
10010+ }
10011+ }
10012+ //if (do_tlbflush)
10013+ // flush_agp_mappings();
10014+
10015+ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
10016+
10017+ return 0;
10018+}
10019+
10020+
10021+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
10022+{
10023+ int write;
10024+ int dirty;
10025+ struct page *page;
10026+ int i;
10027+
10028+ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
10029+ write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
10030+ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
10031+
10032+ for (i = 0; i < ttm->num_pages; ++i) {
10033+ page = ttm->pages[i];
10034+ if (page == NULL)
10035+ continue;
10036+
10037+ if (page == ttm->dummy_read_page) {
10038+ BUG_ON(write);
10039+ continue;
10040+ }
10041+
10042+ if (write && dirty && !PageReserved(page))
10043+ set_page_dirty_lock(page);
10044+
10045+ ttm->pages[i] = NULL;
10046+ put_page(page);
10047+ }
10048+}
10049+
10050+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
10051+{
10052+ int i;
10053+ struct drm_buffer_manager *bm = &ttm->dev->bm;
10054+ struct page **cur_page;
10055+
10056+ for (i = 0; i < ttm->num_pages; ++i) {
10057+ cur_page = ttm->pages + i;
10058+ if (*cur_page) {
10059+ if (page_count(*cur_page) != 1)
10060+ DRM_ERROR("Erroneous page count. Leaking pages.\n");
10061+ if (page_mapped(*cur_page))
10062+ DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
10063+ __free_page(*cur_page);
10064+ --bm->cur_pages;
10065+ }
10066+ }
10067+}
10068+
10069+/*
10070+ * Free all resources associated with a ttm.
10071+ */
10072+
10073+int drm_destroy_ttm(struct drm_ttm *ttm)
10074+{
10075+ struct drm_ttm_backend *be;
10076+
10077+ if (!ttm)
10078+ return 0;
10079+
10080+ be = ttm->be;
10081+ if (be) {
10082+ be->func->destroy(be);
10083+ ttm->be = NULL;
10084+ }
10085+
10086+ if (ttm->pages) {
10087+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
10088+ drm_set_caching(ttm, 0);
10089+
10090+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
10091+ drm_ttm_free_user_pages(ttm);
10092+ else
10093+ drm_ttm_free_alloced_pages(ttm);
10094+
10095+ ttm_free_pages(ttm);
10096+ }
10097+
10098+ return 0;
10099+}
10100+
10101+struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
10102+{
10103+ struct page *p;
10104+ struct drm_buffer_manager *bm = &ttm->dev->bm;
10105+
10106+ p = ttm->pages[index];
10107+ if (!p) {
10108+ p = drm_ttm_alloc_page();
10109+ if (!p)
10110+ return NULL;
10111+ ttm->pages[index] = p;
10112+ ++bm->cur_pages;
10113+ }
10114+ return p;
10115+}
10116+EXPORT_SYMBOL(drm_ttm_get_page);
10117+
10118+int drm_ttm_set_user(struct drm_ttm *ttm,
10119+ struct task_struct *tsk,
10120+ int write,
10121+ unsigned long start,
10122+ unsigned long num_pages,
10123+ struct page *dummy_read_page)
10124+{
10125+ struct mm_struct *mm = tsk->mm;
10126+ int ret;
10127+ int i;
10128+
10129+ BUG_ON(num_pages != ttm->num_pages);
10130+
10131+ ttm->dummy_read_page = dummy_read_page;
10132+ ttm->page_flags |= DRM_TTM_PAGE_USER |
10133+ ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
10134+
10135+
10136+ down_read(&mm->mmap_sem);
10137+ ret = get_user_pages(tsk, mm, start, num_pages,
10138+ write, 0, ttm->pages, NULL);
10139+ up_read(&mm->mmap_sem);
10140+
10141+ if (ret != num_pages && write) {
10142+ drm_ttm_free_user_pages(ttm);
10143+ return -ENOMEM;
10144+ }
10145+
10146+ for (i = 0; i < num_pages; ++i) {
10147+ if (ttm->pages[i] == NULL)
10148+ ttm->pages[i] = ttm->dummy_read_page;
10149+ }
10150+
10151+ return 0;
10152+}
10153+
10154+int drm_ttm_populate(struct drm_ttm *ttm)
10155+{
10156+ struct page *page;
10157+ unsigned long i;
10158+ struct drm_ttm_backend *be;
10159+
10160+ if (ttm->state != ttm_unpopulated)
10161+ return 0;
10162+
10163+ be = ttm->be;
10164+ for (i = 0; i < ttm->num_pages; ++i) {
10165+ page = drm_ttm_get_page(ttm, i);
10166+ if (!page)
10167+ return -ENOMEM;
10168+ }
10169+ be->func->populate(be, ttm->num_pages, ttm->pages);
10170+ ttm->state = ttm_unbound;
10171+ return 0;
10172+}
10173+
10174+static inline size_t drm_size_align(size_t size)
10175+{
10176+ size_t tmpSize = 4;
10177+ if (size > PAGE_SIZE)
10178+ return PAGE_ALIGN(size);
10179+ while (tmpSize < size)
10180+ tmpSize <<= 1;
10181+
10182+ return (size_t) tmpSize;
10183+}
10184+
10185+/*
10186+ * Calculate the estimated pinned memory usage of a ttm.
10187+ */
10188+
10189+unsigned long drm_ttm_size(struct drm_device *dev,
10190+ unsigned long num_pages,
10191+ int user_bo)
10192+{
10193+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
10194+ unsigned long tmp;
10195+
10196+ tmp = drm_size_align(sizeof(struct drm_ttm)) +
10197+ drm_size_align(num_pages * sizeof(struct page *)) +
10198+ ((user_bo) ? 0 : drm_size_align(num_pages * PAGE_SIZE));
10199+
10200+ if (bo_driver->backend_size)
10201+ tmp += bo_driver->backend_size(dev, num_pages);
10202+ else
10203+ tmp += drm_size_align(num_pages * sizeof(struct page *)) +
10204+ 3*drm_size_align(sizeof(struct drm_ttm_backend));
10205+ return tmp;
10206+}
10207+
10208+
10209+/*
10210+ * Initialize a ttm.
10211+ */
10212+
10213+struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
10214+{
10215+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
10216+ struct drm_ttm *ttm;
10217+
10218+ if (!bo_driver)
10219+ return NULL;
10220+
10221+ ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
10222+ if (!ttm)
10223+ return NULL;
10224+
10225+ ttm->dev = dev;
10226+ atomic_set(&ttm->vma_count, 0);
10227+
10228+ ttm->destroy = 0;
10229+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
10230+
10231+ ttm->page_flags = 0;
10232+
10233+ /*
10234+ * Account also for AGP module memory usage.
10235+ */
10236+
10237+ ttm_alloc_pages(ttm);
10238+ if (!ttm->pages) {
10239+ drm_destroy_ttm(ttm);
10240+ DRM_ERROR("Failed allocating page table\n");
10241+ return NULL;
10242+ }
10243+ ttm->be = bo_driver->create_ttm_backend_entry(dev);
10244+ if (!ttm->be) {
10245+ drm_destroy_ttm(ttm);
10246+ DRM_ERROR("Failed creating ttm backend entry\n");
10247+ return NULL;
10248+ }
10249+ ttm->state = ttm_unpopulated;
10250+ return ttm;
10251+}
10252+
10253+/*
10254+ * Unbind a ttm region from the aperture.
10255+ */
10256+
10257+void drm_ttm_evict(struct drm_ttm *ttm)
10258+{
10259+ struct drm_ttm_backend *be = ttm->be;
10260+ int ret;
10261+
10262+ if (ttm->state == ttm_bound) {
10263+ ret = be->func->unbind(be);
10264+ BUG_ON(ret);
10265+ }
10266+
10267+ ttm->state = ttm_evicted;
10268+}
10269+
10270+void drm_ttm_fixup_caching(struct drm_ttm *ttm)
10271+{
10272+
10273+ if (ttm->state == ttm_evicted) {
10274+ struct drm_ttm_backend *be = ttm->be;
10275+ if (be->func->needs_ub_cache_adjust(be))
10276+ drm_set_caching(ttm, 0);
10277+ ttm->state = ttm_unbound;
10278+ }
10279+}
10280+
10281+void drm_ttm_unbind(struct drm_ttm *ttm)
10282+{
10283+ if (ttm->state == ttm_bound)
10284+ drm_ttm_evict(ttm);
10285+
10286+ drm_ttm_fixup_caching(ttm);
10287+}
10288+
10289+int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
10290+{
10291+ struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
10292+ int ret = 0;
10293+ struct drm_ttm_backend *be;
10294+
10295+ if (!ttm)
10296+ return -EINVAL;
10297+ if (ttm->state == ttm_bound)
10298+ return 0;
10299+
10300+ be = ttm->be;
10301+
10302+ ret = drm_ttm_populate(ttm);
10303+ if (ret)
10304+ return ret;
10305+
10306+ if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
10307+ drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
10308+ else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
10309+ bo_driver->ttm_cache_flush)
10310+ bo_driver->ttm_cache_flush(ttm);
10311+
10312+ ret = be->func->bind(be, bo_mem);
10313+ if (ret) {
10314+ ttm->state = ttm_evicted;
10315+ DRM_ERROR("Couldn't bind backend.\n");
10316+ return ret;
10317+ }
10318+
10319+ ttm->state = ttm_bound;
10320+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
10321+ ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
10322+ return 0;
10323+}
10324+EXPORT_SYMBOL(drm_bind_ttm);
10325Index: linux-2.6.27/drivers/gpu/drm/drm_vm.c
10326===================================================================
10327--- linux-2.6.27.orig/drivers/gpu/drm/drm_vm.c 2009-01-14 11:54:35.000000000 +0000
10328+++ linux-2.6.27/drivers/gpu/drm/drm_vm.c 2009-01-14 11:58:01.000000000 +0000
10329@@ -40,6 +40,10 @@
10330
10331 static void drm_vm_open(struct vm_area_struct *vma);
10332 static void drm_vm_close(struct vm_area_struct *vma);
10333+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
10334+ struct file *filp,
10335+ drm_local_map_t *map);
10336+
10337
10338 static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
10339 {
10340@@ -267,6 +271,9 @@
10341 dmah.size = map->size;
10342 __drm_pci_free(dev, &dmah);
10343 break;
10344+ case _DRM_TTM:
10345+ BUG_ON(1);
10346+ break;
10347 }
10348 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
10349 }
10350@@ -647,6 +654,8 @@
10351 vma->vm_flags |= VM_RESERVED;
10352 vma->vm_page_prot = drm_dma_prot(map->type, vma);
10353 break;
10354+ case _DRM_TTM:
10355+ return drm_bo_mmap_locked(vma, filp, map);
10356 default:
10357 return -EINVAL; /* This should never happen. */
10358 }
10359@@ -671,3 +680,213 @@
10360 return ret;
10361 }
10362 EXPORT_SYMBOL(drm_mmap);
10363+
10364+/**
10365+ * buffer object vm functions.
10366+ */
10367+
10368+/**
10369+ * \c Pagefault method for buffer objects.
10370+ *
10371+ * \param vma Virtual memory area.
10372+ * \param address File offset.
10373+ * \return Error or refault. The pfn is manually inserted.
10374+ *
10375+ * It's important that pfns are inserted while holding the bo->mutex lock.
10376+ * otherwise we might race with unmap_mapping_range() which is always
10377+ * called with the bo->mutex lock held.
10378+ *
10379+ * We're modifying the page attribute bits of the vma->vm_page_prot field,
10380+ * without holding the mmap_sem in write mode. Only in read mode.
10381+ * These bits are not used by the mm subsystem code, and we consider them
10382+ * protected by the bo->mutex lock.
10383+ */
10384+
10385+#define DRM_NOPFN_EXTRA 15 /* Fault 16 pages at a time in */
10386+
10387+int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10388+{
10389+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
10390+ unsigned long page_offset;
10391+ struct page *page = NULL;
10392+ struct drm_ttm *ttm = NULL;
10393+ struct drm_device *dev;
10394+ unsigned long pfn;
10395+ int err;
10396+ unsigned long bus_base;
10397+ unsigned long bus_offset;
10398+ unsigned long bus_size;
10399+ int i;
10400+ unsigned long ret = VM_FAULT_NOPAGE;
10401+ unsigned long address = (unsigned long)vmf->virtual_address;
10402+
10403+ if (address > vma->vm_end)
10404+ return VM_FAULT_SIGBUS;
10405+
10406+ dev = bo->dev;
10407+ err = drm_bo_read_lock(&dev->bm.bm_lock);
10408+ if (err)
10409+ return VM_FAULT_NOPAGE;
10410+
10411+ err = mutex_lock_interruptible(&bo->mutex);
10412+ if (err) {
10413+ drm_bo_read_unlock(&dev->bm.bm_lock);
10414+ return VM_FAULT_NOPAGE;
10415+ }
10416+
10417+ err = drm_bo_wait(bo, 0, 0, 0);
10418+ if (err) {
10419+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
10420+ goto out_unlock;
10421+ }
10422+
10423+ /*
10424+ * If buffer happens to be in a non-mappable location,
10425+ * move it to a mappable.
10426+ */
10427+
10428+ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
10429+ uint32_t new_mask = bo->mem.mask |
10430+ DRM_BO_FLAG_MAPPABLE |
10431+ DRM_BO_FLAG_FORCE_MAPPABLE;
10432+ err = drm_bo_move_buffer(bo, new_mask, 0, 0);
10433+ if (err) {
10434+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
10435+ goto out_unlock;
10436+ }
10437+ }
10438+
10439+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
10440+ &bus_size);
10441+
10442+ if (err) {
10443+ ret = VM_FAULT_SIGBUS;
10444+ goto out_unlock;
10445+ }
10446+
10447+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
10448+
10449+ if (bus_size) {
10450+ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
10451+
10452+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
10453+ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
10454+ } else {
10455+ ttm = bo->ttm;
10456+
10457+ drm_ttm_fixup_caching(ttm);
10458+ page = drm_ttm_get_page(ttm, page_offset);
10459+ if (!page) {
10460+ ret = VM_FAULT_OOM;
10461+ goto out_unlock;
10462+ }
10463+ pfn = page_to_pfn(page);
10464+ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
10465+ vm_get_page_prot(vma->vm_flags) :
10466+ drm_io_prot(_DRM_TTM, vma);
10467+ }
10468+
10469+ err = vm_insert_pfn(vma, address, pfn);
10470+ if (err) {
10471+ ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
10472+ goto out_unlock;
10473+ }
10474+
10475+ for (i=0; i<DRM_NOPFN_EXTRA; ++i) {
10476+
10477+ if (++page_offset == bo->mem.num_pages)
10478+ break;
10479+ address = vma->vm_start + (page_offset << PAGE_SHIFT);
10480+ if (address >= vma->vm_end)
10481+ break;
10482+ if (bus_size) {
10483+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT)
10484+ + page_offset;
10485+ } else {
10486+ page = drm_ttm_get_page(ttm, page_offset);
10487+ if (!page)
10488+ break;
10489+ pfn = page_to_pfn(page);
10490+ }
10491+ if (vm_insert_pfn(vma, address, pfn))
10492+ break;
10493+ }
10494+out_unlock:
10495+ mutex_unlock(&bo->mutex);
10496+ drm_bo_read_unlock(&dev->bm.bm_lock);
10497+ return ret;
10498+}
10499+EXPORT_SYMBOL(drm_bo_vm_fault);
10500+
10501+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
10502+{
10503+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
10504+
10505+ drm_vm_open_locked(vma);
10506+ atomic_inc(&bo->usage);
10507+}
10508+
10509+/**
10510+ * \c vma open method for buffer objects.
10511+ *
10512+ * \param vma virtual memory area.
10513+ */
10514+
10515+static void drm_bo_vm_open(struct vm_area_struct *vma)
10516+{
10517+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
10518+ struct drm_device *dev = bo->dev;
10519+
10520+ mutex_lock(&dev->struct_mutex);
10521+ drm_bo_vm_open_locked(vma);
10522+ mutex_unlock(&dev->struct_mutex);
10523+}
10524+
10525+/**
10526+ * \c vma close method for buffer objects.
10527+ *
10528+ * \param vma virtual memory area.
10529+ */
10530+
10531+static void drm_bo_vm_close(struct vm_area_struct *vma)
10532+{
10533+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
10534+ struct drm_device *dev = bo->dev;
10535+
10536+ drm_vm_close(vma);
10537+ if (bo) {
10538+ mutex_lock(&dev->struct_mutex);
10539+ drm_bo_usage_deref_locked((struct drm_buffer_object **)
10540+ &vma->vm_private_data);
10541+ mutex_unlock(&dev->struct_mutex);
10542+ }
10543+ return;
10544+}
10545+
10546+static struct vm_operations_struct drm_bo_vm_ops = {
10547+ .fault = drm_bo_vm_fault,
10548+ .open = drm_bo_vm_open,
10549+ .close = drm_bo_vm_close,
10550+};
10551+
10552+/**
10553+ * mmap buffer object memory.
10554+ *
10555+ * \param vma virtual memory area.
10556+ * \param file_priv DRM file private.
10557+ * \param map The buffer object drm map.
10558+ * \return zero on success or a negative number on failure.
10559+ */
10560+
10561+int drm_bo_mmap_locked(struct vm_area_struct *vma,
10562+ struct file *filp,
10563+ drm_local_map_t *map)
10564+{
10565+ vma->vm_ops = &drm_bo_vm_ops;
10566+ vma->vm_private_data = map->handle;
10567+ vma->vm_file = filp;
10568+ vma->vm_flags |= VM_RESERVED | VM_IO;
10569+ vma->vm_flags |= VM_PFNMAP;
10570+ drm_bo_vm_open_locked(vma);
10571+ return 0;
10572+}
10573Index: linux-2.6.27/drivers/gpu/drm/psb/Makefile
10574===================================================================
10575--- /dev/null 1970-01-01 00:00:00.000000000 +0000
10576+++ linux-2.6.27/drivers/gpu/drm/psb/Makefile 2009-01-14 11:58:01.000000000 +0000
10577@@ -0,0 +1,13 @@
10578+#
10579+# Makefile for the drm device driver. This driver provides support for the
10580+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
10581+
10582+ccflags-y := -Iinclude/drm
10583+
10584+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o psb_buffer.o \
10585+ psb_gtt.o psb_setup.o psb_i2c.o psb_fb.o psb_msvdx.o \
10586+ psb_msvdxinit.o psb_regman.o psb_reset.o psb_scene.o \
10587+ psb_schedule.o psb_xhw.o
10588+
10589+
10590+obj-$(CONFIG_DRM_PSB) += psb.o
10591Index: linux-2.6.27/drivers/gpu/drm/psb/i915_drv.h
10592===================================================================
10593--- /dev/null 1970-01-01 00:00:00.000000000 +0000
10594+++ linux-2.6.27/drivers/gpu/drm/psb/i915_drv.h 2009-01-14 11:58:01.000000000 +0000
10595@@ -0,0 +1,795 @@
10596+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
10597+ */
10598+/*
10599+ *
10600+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
10601+ * All Rights Reserved.
10602+ *
10603+ * Permission is hereby granted, free of charge, to any person obtaining a
10604+ * copy of this software and associated documentation files (the
10605+ * "Software"), to deal in the Software without restriction, including
10606+ * without limitation the rights to use, copy, modify, merge, publish,
10607+ * distribute, sub license, and/or sell copies of the Software, and to
10608+ * permit persons to whom the Software is furnished to do so, subject to
10609+ * the following conditions:
10610+ *
10611+ * The above copyright notice and this permission notice (including the
10612+ * next paragraph) shall be included in all copies or substantial portions
10613+ * of the Software.
10614+ *
10615+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10616+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10617+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
10618+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
10619+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
10620+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
10621+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10622+ *
10623+ */
10624+
10625+#ifndef _I915_DRV_H_
10626+#define _I915_DRV_H_
10627+
10628+#include "i915_reg.h"
10629+
10630+/* General customization:
10631+ */
10632+
10633+#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
10634+
10635+#define DRIVER_NAME "i915"
10636+#define DRIVER_DESC "Intel Graphics"
10637+#define DRIVER_DATE "20070209"
10638+
10639+#if defined(__linux__)
10640+#define I915_HAVE_FENCE
10641+#define I915_HAVE_BUFFER
10642+#endif
10643+
10644+/* Interface history:
10645+ *
10646+ * 1.1: Original.
10647+ * 1.2: Add Power Management
10648+ * 1.3: Add vblank support
10649+ * 1.4: Fix cmdbuffer path, add heap destroy
10650+ * 1.5: Add vblank pipe configuration
10651+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
10652+ * - Support vertical blank on secondary display pipe
10653+ * 1.8: New ioctl for ARB_Occlusion_Query
10654+ * 1.9: Usable page flipping and triple buffering
10655+ * 1.10: Plane/pipe disentangling
10656+ * 1.11: TTM superioctl
10657+ */
10658+#define DRIVER_MAJOR 1
10659+#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
10660+#define DRIVER_MINOR 11
10661+#else
10662+#define DRIVER_MINOR 6
10663+#endif
10664+#define DRIVER_PATCHLEVEL 0
10665+
10666+#define DRM_DRIVER_PRIVATE_T struct drm_i915_private
10667+
10668+#ifdef I915_HAVE_BUFFER
10669+#define I915_MAX_VALIDATE_BUFFERS 4096
10670+#endif
10671+
10672+struct drm_i915_ring_buffer {
10673+ int tail_mask;
10674+ unsigned long Start;
10675+ unsigned long End;
10676+ unsigned long Size;
10677+ u8 *virtual_start;
10678+ int head;
10679+ int tail;
10680+ int space;
10681+ drm_local_map_t map;
10682+};
10683+
10684+struct mem_block {
10685+ struct mem_block *next;
10686+ struct mem_block *prev;
10687+ int start;
10688+ int size;
10689+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
10690+};
10691+
10692+struct drm_i915_vbl_swap {
10693+ struct list_head head;
10694+ drm_drawable_t drw_id;
10695+ unsigned int plane;
10696+ unsigned int sequence;
10697+ int flip;
10698+};
10699+
10700+struct drm_i915_private {
10701+ struct drm_buffer_object *ring_buffer;
10702+ drm_local_map_t *sarea;
10703+ drm_local_map_t *mmio_map;
10704+
10705+ unsigned long mmiobase;
10706+ unsigned long mmiolen;
10707+
10708+ struct drm_i915_sarea *sarea_priv;
10709+ struct drm_i915_ring_buffer ring;
10710+
10711+ struct drm_dma_handle *status_page_dmah;
10712+ void *hw_status_page;
10713+ dma_addr_t dma_status_page;
10714+ uint32_t counter;
10715+ unsigned int status_gfx_addr;
10716+ drm_local_map_t hws_map;
10717+
10718+ unsigned int cpp;
10719+ int use_mi_batchbuffer_start;
10720+
10721+ wait_queue_head_t irq_queue;
10722+ atomic_t irq_received;
10723+ atomic_t irq_emitted;
10724+
10725+ int tex_lru_log_granularity;
10726+ int allow_batchbuffer;
10727+ struct mem_block *agp_heap;
10728+ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
10729+ int vblank_pipe;
10730+ DRM_SPINTYPE user_irq_lock;
10731+ int user_irq_refcount;
10732+ int fence_irq_on;
10733+ uint32_t irq_enable_reg;
10734+ int irq_enabled;
10735+
10736+#ifdef I915_HAVE_FENCE
10737+ uint32_t flush_sequence;
10738+ uint32_t flush_flags;
10739+ uint32_t flush_pending;
10740+ uint32_t saved_flush_status;
10741+ uint32_t reported_sequence;
10742+ int reported_sequence_valid;
10743+#endif
10744+#ifdef I915_HAVE_BUFFER
10745+ void *agp_iomap;
10746+ unsigned int max_validate_buffers;
10747+ struct mutex cmdbuf_mutex;
10748+#endif
10749+
10750+ DRM_SPINTYPE swaps_lock;
10751+ struct drm_i915_vbl_swap vbl_swaps;
10752+ unsigned int swaps_pending;
10753+
10754+ /* LVDS info */
10755+ int backlight_duty_cycle; /* restore backlight to this value */
10756+ bool panel_wants_dither;
10757+ struct drm_display_mode *panel_fixed_mode;
10758+
10759+ /* Register state */
10760+ u8 saveLBB;
10761+ u32 saveDSPACNTR;
10762+ u32 saveDSPBCNTR;
10763+ u32 savePIPEACONF;
10764+ u32 savePIPEBCONF;
10765+ u32 savePIPEASRC;
10766+ u32 savePIPEBSRC;
10767+ u32 saveFPA0;
10768+ u32 saveFPA1;
10769+ u32 saveDPLL_A;
10770+ u32 saveDPLL_A_MD;
10771+ u32 saveHTOTAL_A;
10772+ u32 saveHBLANK_A;
10773+ u32 saveHSYNC_A;
10774+ u32 saveVTOTAL_A;
10775+ u32 saveVBLANK_A;
10776+ u32 saveVSYNC_A;
10777+ u32 saveBCLRPAT_A;
10778+ u32 saveDSPASTRIDE;
10779+ u32 saveDSPASIZE;
10780+ u32 saveDSPAPOS;
10781+ u32 saveDSPABASE;
10782+ u32 saveDSPASURF;
10783+ u32 saveDSPATILEOFF;
10784+ u32 savePFIT_PGM_RATIOS;
10785+ u32 saveBLC_PWM_CTL;
10786+ u32 saveBLC_PWM_CTL2;
10787+ u32 saveFPB0;
10788+ u32 saveFPB1;
10789+ u32 saveDPLL_B;
10790+ u32 saveDPLL_B_MD;
10791+ u32 saveHTOTAL_B;
10792+ u32 saveHBLANK_B;
10793+ u32 saveHSYNC_B;
10794+ u32 saveVTOTAL_B;
10795+ u32 saveVBLANK_B;
10796+ u32 saveVSYNC_B;
10797+ u32 saveBCLRPAT_B;
10798+ u32 saveDSPBSTRIDE;
10799+ u32 saveDSPBSIZE;
10800+ u32 saveDSPBPOS;
10801+ u32 saveDSPBBASE;
10802+ u32 saveDSPBSURF;
10803+ u32 saveDSPBTILEOFF;
10804+ u32 saveVCLK_DIVISOR_VGA0;
10805+ u32 saveVCLK_DIVISOR_VGA1;
10806+ u32 saveVCLK_POST_DIV;
10807+ u32 saveVGACNTRL;
10808+ u32 saveADPA;
10809+ u32 saveLVDS;
10810+ u32 saveLVDSPP_ON;
10811+ u32 saveLVDSPP_OFF;
10812+ u32 saveDVOA;
10813+ u32 saveDVOB;
10814+ u32 saveDVOC;
10815+ u32 savePP_ON;
10816+ u32 savePP_OFF;
10817+ u32 savePP_CONTROL;
10818+ u32 savePP_CYCLE;
10819+ u32 savePFIT_CONTROL;
10820+ u32 save_palette_a[256];
10821+ u32 save_palette_b[256];
10822+ u32 saveFBC_CFB_BASE;
10823+ u32 saveFBC_LL_BASE;
10824+ u32 saveFBC_CONTROL;
10825+ u32 saveFBC_CONTROL2;
10826+ u32 saveSWF0[16];
10827+ u32 saveSWF1[16];
10828+ u32 saveSWF2[3];
10829+ u8 saveMSR;
10830+ u8 saveSR[8];
10831+ u8 saveGR[24];
10832+ u8 saveAR_INDEX;
10833+ u8 saveAR[20];
10834+ u8 saveDACMASK;
10835+ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
10836+ u8 saveCR[36];
10837+};
10838+
10839+enum intel_chip_family {
10840+ CHIP_I8XX = 0x01,
10841+ CHIP_I9XX = 0x02,
10842+ CHIP_I915 = 0x04,
10843+ CHIP_I965 = 0x08,
10844+ CHIP_POULSBO = 0x10,
10845+};
10846+
10847+extern struct drm_ioctl_desc i915_ioctls[];
10848+extern int i915_max_ioctl;
10849+
10850+ /* i915_dma.c */
10851+extern void i915_kernel_lost_context(struct drm_device * dev);
10852+extern int i915_driver_load(struct drm_device *, unsigned long flags);
10853+extern int i915_driver_unload(struct drm_device *dev);
10854+extern void i915_driver_lastclose(struct drm_device * dev);
10855+extern void i915_driver_preclose(struct drm_device *dev,
10856+ struct drm_file *file_priv);
10857+extern int i915_driver_device_is_agp(struct drm_device * dev);
10858+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
10859+ unsigned long arg);
10860+extern void i915_emit_breadcrumb(struct drm_device *dev);
10861+extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
10862+extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
10863+extern int i915_driver_firstopen(struct drm_device *dev);
10864+extern int i915_do_cleanup_pageflip(struct drm_device *dev);
10865+extern int i915_dma_cleanup(struct drm_device *dev);
10866+
10867+/* i915_irq.c */
10868+extern int i915_irq_emit(struct drm_device *dev, void *data,
10869+ struct drm_file *file_priv);
10870+extern int i915_irq_wait(struct drm_device *dev, void *data,
10871+ struct drm_file *file_priv);
10872+
10873+extern void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe);
10874+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
10875+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
10876+extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
10877+extern void i915_driver_irq_preinstall(struct drm_device * dev);
10878+extern void i915_driver_irq_postinstall(struct drm_device * dev);
10879+extern void i915_driver_irq_uninstall(struct drm_device * dev);
10880+extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
10881+ struct drm_file *file_priv);
10882+extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
10883+ struct drm_file *file_priv);
10884+extern int i915_emit_irq(struct drm_device * dev);
10885+extern void i915_user_irq_on(struct drm_i915_private *dev_priv);
10886+extern void i915_user_irq_off(struct drm_i915_private *dev_priv);
10887+extern void i915_enable_interrupt (struct drm_device *dev);
10888+extern int i915_vblank_swap(struct drm_device *dev, void *data,
10889+ struct drm_file *file_priv);
10890+
10891+/* i915_mem.c */
10892+extern int i915_mem_alloc(struct drm_device *dev, void *data,
10893+ struct drm_file *file_priv);
10894+extern int i915_mem_free(struct drm_device *dev, void *data,
10895+ struct drm_file *file_priv);
10896+extern int i915_mem_init_heap(struct drm_device *dev, void *data,
10897+ struct drm_file *file_priv);
10898+extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
10899+ struct drm_file *file_priv);
10900+extern void i915_mem_takedown(struct mem_block **heap);
10901+extern void i915_mem_release(struct drm_device * dev,
10902+ struct drm_file *file_priv,
10903+ struct mem_block *heap);
10904+#ifdef I915_HAVE_FENCE
10905+/* i915_fence.c */
10906+extern void i915_fence_handler(struct drm_device *dev);
10907+extern void i915_invalidate_reported_sequence(struct drm_device *dev);
10908+
10909+#endif
10910+
10911+#ifdef I915_HAVE_BUFFER
10912+/* i915_buffer.c */
10913+extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
10914+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
10915+ uint32_t *type);
10916+extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
10917+extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
10918+ struct drm_mem_type_manager *man);
10919+extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
10920+extern int i915_move(struct drm_buffer_object *bo, int evict,
10921+ int no_wait, struct drm_bo_mem_reg *new_mem);
10922+void i915_flush_ttm(struct drm_ttm *ttm);
10923+#endif
10924+
10925+#ifdef __linux__
10926+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
10927+extern void intel_init_chipset_flush_compat(struct drm_device *dev);
10928+extern void intel_fini_chipset_flush_compat(struct drm_device *dev);
10929+#endif
10930+#endif
10931+
10932+
10933+/* modesetting */
10934+extern void intel_modeset_init(struct drm_device *dev);
10935+extern void intel_modeset_cleanup(struct drm_device *dev);
10936+
10937+
10938+#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
10939+#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
10940+#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
10941+#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
10942+
10943+#define I915_VERBOSE 0
10944+
10945+#define RING_LOCALS unsigned int outring, ringmask, outcount; \
10946+ volatile char *virt;
10947+
10948+#define BEGIN_LP_RING(n) do { \
10949+ if (I915_VERBOSE) \
10950+ DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
10951+ (n), __FUNCTION__); \
10952+ if (dev_priv->ring.space < (n)*4) \
10953+ i915_wait_ring(dev, (n)*4, __FUNCTION__); \
10954+ outcount = 0; \
10955+ outring = dev_priv->ring.tail; \
10956+ ringmask = dev_priv->ring.tail_mask; \
10957+ virt = dev_priv->ring.virtual_start; \
10958+} while (0)
10959+
10960+#define OUT_RING(n) do { \
10961+ if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
10962+ *(volatile unsigned int *)(virt + outring) = (n); \
10963+ outcount++; \
10964+ outring += 4; \
10965+ outring &= ringmask; \
10966+} while (0)
10967+
10968+#define ADVANCE_LP_RING() do { \
10969+ if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
10970+ dev_priv->ring.tail = outring; \
10971+ dev_priv->ring.space -= outcount * 4; \
10972+ I915_WRITE(LP_RING + RING_TAIL, outring); \
10973+} while(0)
10974+
10975+#define MI_NOOP (0x00 << 23)
10976+
10977+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
10978+
10979+/*
10980+ * The Bridge device's PCI config space has information about the
10981+ * fb aperture size and the amount of pre-reserved memory.
10982+ */
10983+#define INTEL_GMCH_CTRL 0x52
10984+#define INTEL_GMCH_ENABLED 0x4
10985+#define INTEL_GMCH_MEM_MASK 0x1
10986+#define INTEL_GMCH_MEM_64M 0x1
10987+#define INTEL_GMCH_MEM_128M 0
10988+
10989+#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
10990+#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
10991+#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
10992+#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
10993+#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
10994+#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
10995+#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
10996+
10997+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
10998+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
10999+
11000+/* Extended config space */
11001+#define LBB 0xf4
11002+
11003+/* VGA stuff */
11004+
11005+#define VGA_ST01_MDA 0x3ba
11006+#define VGA_ST01_CGA 0x3da
11007+
11008+#define VGA_MSR_WRITE 0x3c2
11009+#define VGA_MSR_READ 0x3cc
11010+#define VGA_MSR_MEM_EN (1<<1)
11011+#define VGA_MSR_CGA_MODE (1<<0)
11012+
11013+#define VGA_SR_INDEX 0x3c4
11014+#define VGA_SR_DATA 0x3c5
11015+
11016+#define VGA_AR_INDEX 0x3c0
11017+#define VGA_AR_VID_EN (1<<5)
11018+#define VGA_AR_DATA_WRITE 0x3c0
11019+#define VGA_AR_DATA_READ 0x3c1
11020+
11021+#define VGA_GR_INDEX 0x3ce
11022+#define VGA_GR_DATA 0x3cf
11023+/* GR05 */
11024+#define VGA_GR_MEM_READ_MODE_SHIFT 3
11025+#define VGA_GR_MEM_READ_MODE_PLANE 1
11026+/* GR06 */
11027+#define VGA_GR_MEM_MODE_MASK 0xc
11028+#define VGA_GR_MEM_MODE_SHIFT 2
11029+#define VGA_GR_MEM_A0000_AFFFF 0
11030+#define VGA_GR_MEM_A0000_BFFFF 1
11031+#define VGA_GR_MEM_B0000_B7FFF 2
11032+#define VGA_GR_MEM_B0000_BFFFF 3
11033+
11034+#define VGA_DACMASK 0x3c6
11035+#define VGA_DACRX 0x3c7
11036+#define VGA_DACWX 0x3c8
11037+#define VGA_DACDATA 0x3c9
11038+
11039+#define VGA_CR_INDEX_MDA 0x3b4
11040+#define VGA_CR_DATA_MDA 0x3b5
11041+#define VGA_CR_INDEX_CGA 0x3d4
11042+#define VGA_CR_DATA_CGA 0x3d5
11043+
11044+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
11045+#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
11046+#define CMD_REPORT_HEAD (7<<23)
11047+#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
11048+#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
11049+
11050+#define CMD_MI_FLUSH (0x04 << 23)
11051+#define MI_NO_WRITE_FLUSH (1 << 2)
11052+#define MI_READ_FLUSH (1 << 0)
11053+#define MI_EXE_FLUSH (1 << 1)
11054+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
11055+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
11056+
11057+/* Packet to load a register value from the ring/batch command stream:
11058+ */
11059+#define CMD_MI_LOAD_REGISTER_IMM ((0x22 << 23)|0x1)
11060+
11061+#define BB1_START_ADDR_MASK (~0x7)
11062+#define BB1_PROTECTED (1<<0)
11063+#define BB1_UNPROTECTED (0<<0)
11064+#define BB2_END_ADDR_MASK (~0x7)
11065+
11066+#define I915REG_HWS_PGA 0x02080
11067+
11068+/* Framebuffer compression */
11069+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
11070+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
11071+#define FBC_CONTROL 0x03208
11072+#define FBC_CTL_EN (1<<31)
11073+#define FBC_CTL_PERIODIC (1<<30)
11074+#define FBC_CTL_INTERVAL_SHIFT (16)
11075+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
11076+#define FBC_CTL_STRIDE_SHIFT (5)
11077+#define FBC_CTL_FENCENO (1<<0)
11078+#define FBC_COMMAND 0x0320c
11079+#define FBC_CMD_COMPRESS (1<<0)
11080+#define FBC_STATUS 0x03210
11081+#define FBC_STAT_COMPRESSING (1<<31)
11082+#define FBC_STAT_COMPRESSED (1<<30)
11083+#define FBC_STAT_MODIFIED (1<<29)
11084+#define FBC_STAT_CURRENT_LINE (1<<0)
11085+#define FBC_CONTROL2 0x03214
11086+#define FBC_CTL_FENCE_DBL (0<<4)
11087+#define FBC_CTL_IDLE_IMM (0<<2)
11088+#define FBC_CTL_IDLE_FULL (1<<2)
11089+#define FBC_CTL_IDLE_LINE (2<<2)
11090+#define FBC_CTL_IDLE_DEBUG (3<<2)
11091+#define FBC_CTL_CPU_FENCE (1<<1)
11092+#define FBC_CTL_PLANEA (0<<0)
11093+#define FBC_CTL_PLANEB (1<<0)
11094+#define FBC_FENCE_OFF 0x0321b
11095+
11096+#define FBC_LL_SIZE (1536)
11097+#define FBC_LL_PAD (32)
11098+
11099+/* Interrupt bits:
11100+ */
11101+#define USER_INT_FLAG (1<<1)
11102+#define VSYNC_PIPEB_FLAG (1<<5)
11103+#define VSYNC_PIPEA_FLAG (1<<7)
11104+#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
11105+
11106+#define I915REG_HWSTAM 0x02098
11107+#define I915REG_INT_IDENTITY_R 0x020a4
11108+#define I915REG_INT_MASK_R 0x020a8
11109+#define I915REG_INT_ENABLE_R 0x020a0
11110+#define I915REG_INSTPM 0x020c0
11111+
11112+#define I915REG_PIPEASTAT 0x70024
11113+#define I915REG_PIPEBSTAT 0x71024
11114+
11115+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
11116+#define I915_VBLANK_CLEAR (1UL<<1)
11117+
11118+#define GPIOA 0x5010
11119+#define GPIOB 0x5014
11120+#define GPIOC 0x5018
11121+#define GPIOD 0x501c
11122+#define GPIOE 0x5020
11123+#define GPIOF 0x5024
11124+#define GPIOG 0x5028
11125+#define GPIOH 0x502c
11126+# define GPIO_CLOCK_DIR_MASK (1 << 0)
11127+# define GPIO_CLOCK_DIR_IN (0 << 1)
11128+# define GPIO_CLOCK_DIR_OUT (1 << 1)
11129+# define GPIO_CLOCK_VAL_MASK (1 << 2)
11130+# define GPIO_CLOCK_VAL_OUT (1 << 3)
11131+# define GPIO_CLOCK_VAL_IN (1 << 4)
11132+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
11133+# define GPIO_DATA_DIR_MASK (1 << 8)
11134+# define GPIO_DATA_DIR_IN (0 << 9)
11135+# define GPIO_DATA_DIR_OUT (1 << 9)
11136+# define GPIO_DATA_VAL_MASK (1 << 10)
11137+# define GPIO_DATA_VAL_OUT (1 << 11)
11138+# define GPIO_DATA_VAL_IN (1 << 12)
11139+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
11140+
11141+/* p317, 319
11142+ */
11143+#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
11144+#define VCLK2_VCO_N 0x600a
11145+#define VCLK2_VCO_DIV_SEL 0x6012
11146+
11147+#define VCLK_DIVISOR_VGA0 0x6000
11148+#define VCLK_DIVISOR_VGA1 0x6004
11149+#define VCLK_POST_DIV 0x6010
11150+/** Selects a post divisor of 4 instead of 2. */
11151+# define VGA1_PD_P2_DIV_4 (1 << 15)
11152+/** Overrides the p2 post divisor field */
11153+# define VGA1_PD_P1_DIV_2 (1 << 13)
11154+# define VGA1_PD_P1_SHIFT 8
11155+/** P1 value is 2 greater than this field */
11156+# define VGA1_PD_P1_MASK (0x1f << 8)
11157+/** Selects a post divisor of 4 instead of 2. */
11158+# define VGA0_PD_P2_DIV_4 (1 << 7)
11159+/** Overrides the p2 post divisor field */
11160+# define VGA0_PD_P1_DIV_2 (1 << 5)
11161+# define VGA0_PD_P1_SHIFT 0
11162+/** P1 value is 2 greater than this field */
11163+# define VGA0_PD_P1_MASK (0x1f << 0)
11164+
11165+#define POST_DIV_SELECT 0x70
11166+#define POST_DIV_1 0x00
11167+#define POST_DIV_2 0x10
11168+#define POST_DIV_4 0x20
11169+#define POST_DIV_8 0x30
11170+#define POST_DIV_16 0x40
11171+#define POST_DIV_32 0x50
11172+#define VCO_LOOP_DIV_BY_4M 0x00
11173+#define VCO_LOOP_DIV_BY_16M 0x04
11174+
11175+#define SRX_INDEX 0x3c4
11176+#define SRX_DATA 0x3c5
11177+#define SR01 1
11178+#define SR01_SCREEN_OFF (1<<5)
11179+
11180+#define PPCR 0x61204
11181+#define PPCR_ON (1<<0)
11182+
11183+#define DVOA 0x61120
11184+#define DVOA_ON (1<<31)
11185+#define DVOB 0x61140
11186+#define DVOB_ON (1<<31)
11187+#define DVOC 0x61160
11188+#define DVOC_ON (1<<31)
11189+#define LVDS 0x61180
11190+#define LVDS_ON (1<<31)
11191+
11192+#define ADPA 0x61100
11193+#define ADPA_DPMS_MASK (~(3<<10))
11194+#define ADPA_DPMS_ON (0<<10)
11195+#define ADPA_DPMS_SUSPEND (1<<10)
11196+#define ADPA_DPMS_STANDBY (2<<10)
11197+#define ADPA_DPMS_OFF (3<<10)
11198+
11199+#define NOPID 0x2094
11200+#define LP_RING 0x2030
11201+#define HP_RING 0x2040
11202+/* The binner has its own ring buffer:
11203+ */
11204+#define HWB_RING 0x2400
11205+
11206+#define RING_TAIL 0x00
11207+#define TAIL_ADDR 0x001FFFF8
11208+#define RING_HEAD 0x04
11209+#define HEAD_WRAP_COUNT 0xFFE00000
11210+#define HEAD_WRAP_ONE 0x00200000
11211+#define HEAD_ADDR 0x001FFFFC
11212+#define RING_START 0x08
11213+#define START_ADDR 0x0xFFFFF000
11214+#define RING_LEN 0x0C
11215+#define RING_NR_PAGES 0x001FF000
11216+#define RING_REPORT_MASK 0x00000006
11217+#define RING_REPORT_64K 0x00000002
11218+#define RING_REPORT_128K 0x00000004
11219+#define RING_NO_REPORT 0x00000000
11220+#define RING_VALID_MASK 0x00000001
11221+#define RING_VALID 0x00000001
11222+#define RING_INVALID 0x00000000
11223+
11224+/* Instruction parser error reg:
11225+ */
11226+#define IPEIR 0x2088
11227+
11228+/* Scratch pad debug 0 reg:
11229+ */
11230+#define SCPD0 0x209c
11231+
11232+/* Error status reg:
11233+ */
11234+#define ESR 0x20b8
11235+
11236+/* Secondary DMA fetch address debug reg:
11237+ */
11238+#define DMA_FADD_S 0x20d4
11239+
11240+/* Cache mode 0 reg.
11241+ * - Manipulating render cache behaviour is central
11242+ * to the concept of zone rendering, tuning this reg can help avoid
11243+ * unnecessary render cache reads and even writes (for z/stencil)
11244+ * at beginning and end of scene.
11245+ *
11246+ * - To change a bit, write to this reg with a mask bit set and the
11247+ * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
11248+ */
11249+#define Cache_Mode_0 0x2120
11250+#define CM0_MASK_SHIFT 16
11251+#define CM0_IZ_OPT_DISABLE (1<<6)
11252+#define CM0_ZR_OPT_DISABLE (1<<5)
11253+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
11254+#define CM0_COLOR_EVICT_DISABLE (1<<3)
11255+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
11256+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
11257+
11258+
11259+/* Graphics flush control. A CPU write flushes the GWB of all writes.
11260+ * The data is discarded.
11261+ */
11262+#define GFX_FLSH_CNTL 0x2170
11263+
11264+/* Binner control. Defines the location of the bin pointer list:
11265+ */
11266+#define BINCTL 0x2420
11267+#define BC_MASK (1 << 9)
11268+
11269+/* Binned scene info.
11270+ */
11271+#define BINSCENE 0x2428
11272+#define BS_OP_LOAD (1 << 8)
11273+#define BS_MASK (1 << 22)
11274+
11275+/* Bin command parser debug reg:
11276+ */
11277+#define BCPD 0x2480
11278+
11279+/* Bin memory control debug reg:
11280+ */
11281+#define BMCD 0x2484
11282+
11283+/* Bin data cache debug reg:
11284+ */
11285+#define BDCD 0x2488
11286+
11287+/* Binner pointer cache debug reg:
11288+ */
11289+#define BPCD 0x248c
11290+
11291+/* Binner scratch pad debug reg:
11292+ */
11293+#define BINSKPD 0x24f0
11294+
11295+/* HWB scratch pad debug reg:
11296+ */
11297+#define HWBSKPD 0x24f4
11298+
11299+/* Binner memory pool reg:
11300+ */
11301+#define BMP_BUFFER 0x2430
11302+#define BMP_PAGE_SIZE_4K (0 << 10)
11303+#define BMP_BUFFER_SIZE_SHIFT 1
11304+#define BMP_ENABLE (1 << 0)
11305+
11306+/* Get/put memory from the binner memory pool:
11307+ */
11308+#define BMP_GET 0x2438
11309+#define BMP_PUT 0x2440
11310+#define BMP_OFFSET_SHIFT 5
11311+
11312+/* 3D state packets:
11313+ */
11314+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
11315+
11316+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
11317+#define SC_UPDATE_SCISSOR (0x1<<1)
11318+#define SC_ENABLE_MASK (0x1<<0)
11319+#define SC_ENABLE (0x1<<0)
11320+
11321+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
11322+
11323+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
11324+#define SCI_YMIN_MASK (0xffff<<16)
11325+#define SCI_XMIN_MASK (0xffff<<0)
11326+#define SCI_YMAX_MASK (0xffff<<16)
11327+#define SCI_XMAX_MASK (0xffff<<0)
11328+
11329+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
11330+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
11331+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
11332+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
11333+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
11334+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
11335+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
11336+
11337+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
11338+
11339+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
11340+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
11341+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
11342+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
11343+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
11344+#define BLT_DEPTH_8 (0<<24)
11345+#define BLT_DEPTH_16_565 (1<<24)
11346+#define BLT_DEPTH_16_1555 (2<<24)
11347+#define BLT_DEPTH_32 (3<<24)
11348+#define BLT_ROP_GXCOPY (0xcc<<16)
11349+
11350+#define MI_BATCH_BUFFER ((0x30<<23)|1)
11351+#define MI_BATCH_BUFFER_START (0x31<<23)
11352+#define MI_BATCH_BUFFER_END (0xA<<23)
11353+#define MI_BATCH_NON_SECURE (1)
11354+
11355+#define MI_BATCH_NON_SECURE_I965 (1<<8)
11356+
11357+#define MI_WAIT_FOR_EVENT ((0x3<<23))
11358+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
11359+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
11360+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
11361+
11362+#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
11363+
11364+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
11365+#define ASYNC_FLIP (1<<22)
11366+#define DISPLAY_PLANE_A (0<<20)
11367+#define DISPLAY_PLANE_B (1<<20)
11368+
11369+/* Display regs */
11370+#define DSPACNTR 0x70180
11371+#define DSPBCNTR 0x71180
11372+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
11373+
11374+/* Define the region of interest for the binner:
11375+ */
11376+#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
11377+
11378+#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
11379+
11380+#define BREADCRUMB_BITS 31
11381+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
11382+
11383+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
11384+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
11385+
11386+#define PRIMARY_RINGBUFFER_SIZE (128*1024)
11387+
11388+#define BLC_PWM_CTL2 0x61250
11389+
11390+#endif
11391Index: linux-2.6.27/drivers/gpu/drm/psb/i915_reg.h
11392===================================================================
11393--- /dev/null 1970-01-01 00:00:00.000000000 +0000
11394+++ linux-2.6.27/drivers/gpu/drm/psb/i915_reg.h 2009-01-14 11:58:01.000000000 +0000
11395@@ -0,0 +1,487 @@
11396+#define BLC_PWM_CTL 0x61254
11397+#define BLC_PWM_CTL2 0x61250
11398+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
11399+/**
11400+ * This is the most significant 15 bits of the number of backlight cycles in a
11401+ * complete cycle of the modulated backlight control.
11402+ *
11403+ * The actual value is this field multiplied by two.
11404+ */
11405+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
11406+#define BLM_LEGACY_MODE (1 << 16)
11407+/**
11408+ * This is the number of cycles out of the backlight modulation cycle for which
11409+ * the backlight is on.
11410+ *
11411+ * This field must be no greater than the number of cycles in the complete
11412+ * backlight modulation cycle.
11413+ */
11414+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
11415+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
11416+
11417+#define I915_GCFGC 0xf0
11418+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
11419+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
11420+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
11421+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
11422+
11423+#define I855_HPLLCC 0xc0
11424+#define I855_CLOCK_CONTROL_MASK (3 << 0)
11425+#define I855_CLOCK_133_200 (0 << 0)
11426+#define I855_CLOCK_100_200 (1 << 0)
11427+#define I855_CLOCK_100_133 (2 << 0)
11428+#define I855_CLOCK_166_250 (3 << 0)
11429+
11430+/* I830 CRTC registers */
11431+#define HTOTAL_A 0x60000
11432+#define HBLANK_A 0x60004
11433+#define HSYNC_A 0x60008
11434+#define VTOTAL_A 0x6000c
11435+#define VBLANK_A 0x60010
11436+#define VSYNC_A 0x60014
11437+#define PIPEASRC 0x6001c
11438+#define BCLRPAT_A 0x60020
11439+#define VSYNCSHIFT_A 0x60028
11440+
11441+#define HTOTAL_B 0x61000
11442+#define HBLANK_B 0x61004
11443+#define HSYNC_B 0x61008
11444+#define VTOTAL_B 0x6100c
11445+#define VBLANK_B 0x61010
11446+#define VSYNC_B 0x61014
11447+#define PIPEBSRC 0x6101c
11448+#define BCLRPAT_B 0x61020
11449+#define VSYNCSHIFT_B 0x61028
11450+
11451+#define PP_STATUS 0x61200
11452+# define PP_ON (1 << 31)
11453+/**
11454+ * Indicates that all dependencies of the panel are on:
11455+ *
11456+ * - PLL enabled
11457+ * - pipe enabled
11458+ * - LVDS/DVOB/DVOC on
11459+ */
11460+# define PP_READY (1 << 30)
11461+# define PP_SEQUENCE_NONE (0 << 28)
11462+# define PP_SEQUENCE_ON (1 << 28)
11463+# define PP_SEQUENCE_OFF (2 << 28)
11464+# define PP_SEQUENCE_MASK 0x30000000
11465+#define PP_CONTROL 0x61204
11466+# define POWER_TARGET_ON (1 << 0)
11467+
11468+#define LVDSPP_ON 0x61208
11469+#define LVDSPP_OFF 0x6120c
11470+#define PP_CYCLE 0x61210
11471+
11472+#define PFIT_CONTROL 0x61230
11473+# define PFIT_ENABLE (1 << 31)
11474+# define PFIT_PIPE_MASK (3 << 29)
11475+# define PFIT_PIPE_SHIFT 29
11476+# define VERT_INTERP_DISABLE (0 << 10)
11477+# define VERT_INTERP_BILINEAR (1 << 10)
11478+# define VERT_INTERP_MASK (3 << 10)
11479+# define VERT_AUTO_SCALE (1 << 9)
11480+# define HORIZ_INTERP_DISABLE (0 << 6)
11481+# define HORIZ_INTERP_BILINEAR (1 << 6)
11482+# define HORIZ_INTERP_MASK (3 << 6)
11483+# define HORIZ_AUTO_SCALE (1 << 5)
11484+# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
11485+
11486+#define PFIT_PGM_RATIOS 0x61234
11487+# define PFIT_VERT_SCALE_MASK 0xfff00000
11488+# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
11489+
11490+#define PFIT_AUTO_RATIOS 0x61238
11491+
11492+
11493+#define DPLL_A 0x06014
11494+#define DPLL_B 0x06018
11495+# define DPLL_VCO_ENABLE (1 << 31)
11496+# define DPLL_DVO_HIGH_SPEED (1 << 30)
11497+# define DPLL_SYNCLOCK_ENABLE (1 << 29)
11498+# define DPLL_VGA_MODE_DIS (1 << 28)
11499+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
11500+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
11501+# define DPLL_MODE_MASK (3 << 26)
11502+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
11503+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
11504+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
11505+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
11506+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
11507+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
11508+/**
11509+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
11510+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
11511+ */
11512+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
11513+/**
11514+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
11515+ * this field (only one bit may be set).
11516+ */
11517+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
11518+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
11519+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
11520+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
11521+# define PLL_REF_INPUT_DREFCLK (0 << 13)
11522+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
11523+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
11524+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
11525+# define PLL_REF_INPUT_MASK (3 << 13)
11526+# define PLL_LOAD_PULSE_PHASE_SHIFT 9
11527+/*
11528+ * Parallel to Serial Load Pulse phase selection.
11529+ * Selects the phase for the 10X DPLL clock for the PCIe
11530+ * digital display port. The range is 4 to 13; 10 or more
11531+ * is just a flip delay. The default is 6
11532+ */
11533+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
11534+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
11535+
11536+/**
11537+ * SDVO multiplier for 945G/GM. Not used on 965.
11538+ *
11539+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
11540+ */
11541+# define SDVO_MULTIPLIER_MASK 0x000000ff
11542+# define SDVO_MULTIPLIER_SHIFT_HIRES 4
11543+# define SDVO_MULTIPLIER_SHIFT_VGA 0
11544+
11545+/** @defgroup DPLL_MD
11546+ * @{
11547+ */
11548+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
11549+#define DPLL_A_MD 0x0601c
11550+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
11551+#define DPLL_B_MD 0x06020
11552+/**
11553+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
11554+ *
11555+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
11556+ */
11557+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
11558+# define DPLL_MD_UDI_DIVIDER_SHIFT 24
11559+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
11560+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
11561+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
11562+/**
11563+ * SDVO/UDI pixel multiplier.
11564+ *
11565+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
11566+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
11567+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
11568+ * dummy bytes in the datastream at an increased clock rate, with both sides of
11569+ * the link knowing how many bytes are fill.
11570+ *
11571+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
11572+ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
11573+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
11574+ * through an SDVO command.
11575+ *
11576+ * This register field has values of multiplication factor minus 1, with
11577+ * a maximum multiplier of 5 for SDVO.
11578+ */
11579+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
11580+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
11581+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
11582+ * This best be set to the default value (3) or the CRT won't work. No,
11583+ * I don't entirely understand what this does...
11584+ */
11585+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
11586+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
11587+/** @} */
11588+
11589+#define DPLL_TEST 0x606c
11590+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
11591+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
11592+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
11593+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
11594+# define DPLLB_TEST_N_BYPASS (1 << 19)
11595+# define DPLLB_TEST_M_BYPASS (1 << 18)
11596+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
11597+# define DPLLA_TEST_N_BYPASS (1 << 3)
11598+# define DPLLA_TEST_M_BYPASS (1 << 2)
11599+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
11600+
11601+#define ADPA 0x61100
11602+#define ADPA_DAC_ENABLE (1<<31)
11603+#define ADPA_DAC_DISABLE 0
11604+#define ADPA_PIPE_SELECT_MASK (1<<30)
11605+#define ADPA_PIPE_A_SELECT 0
11606+#define ADPA_PIPE_B_SELECT (1<<30)
11607+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
11608+#define ADPA_SETS_HVPOLARITY 0
11609+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
11610+#define ADPA_VSYNC_CNTL_ENABLE 0
11611+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
11612+#define ADPA_HSYNC_CNTL_ENABLE 0
11613+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
11614+#define ADPA_VSYNC_ACTIVE_LOW 0
11615+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
11616+#define ADPA_HSYNC_ACTIVE_LOW 0
11617+
11618+#define FPA0 0x06040
11619+#define FPA1 0x06044
11620+#define FPB0 0x06048
11621+#define FPB1 0x0604c
11622+# define FP_N_DIV_MASK 0x003f0000
11623+# define FP_N_DIV_SHIFT 16
11624+# define FP_M1_DIV_MASK 0x00003f00
11625+# define FP_M1_DIV_SHIFT 8
11626+# define FP_M2_DIV_MASK 0x0000003f
11627+# define FP_M2_DIV_SHIFT 0
11628+
11629+
11630+#define PORT_HOTPLUG_EN 0x61110
11631+# define SDVOB_HOTPLUG_INT_EN (1 << 26)
11632+# define SDVOC_HOTPLUG_INT_EN (1 << 25)
11633+# define TV_HOTPLUG_INT_EN (1 << 18)
11634+# define CRT_HOTPLUG_INT_EN (1 << 9)
11635+# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
11636+
11637+#define PORT_HOTPLUG_STAT 0x61114
11638+# define CRT_HOTPLUG_INT_STATUS (1 << 11)
11639+# define TV_HOTPLUG_INT_STATUS (1 << 10)
11640+# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
11641+# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
11642+# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
11643+# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
11644+# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
11645+# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
11646+
11647+#define SDVOB 0x61140
11648+#define SDVOC 0x61160
11649+#define SDVO_ENABLE (1 << 31)
11650+#define SDVO_PIPE_B_SELECT (1 << 30)
11651+#define SDVO_STALL_SELECT (1 << 29)
11652+#define SDVO_INTERRUPT_ENABLE (1 << 26)
11653+/**
11654+ * 915G/GM SDVO pixel multiplier.
11655+ *
11656+ * Programmed value is multiplier - 1, up to 5x.
11657+ *
11658+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
11659+ */
11660+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
11661+#define SDVO_PORT_MULTIPLY_SHIFT 23
11662+#define SDVO_PHASE_SELECT_MASK (15 << 19)
11663+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
11664+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
11665+#define SDVOC_GANG_MODE (1 << 16)
11666+#define SDVO_BORDER_ENABLE (1 << 7)
11667+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
11668+#define SDVO_DETECTED (1 << 2)
11669+/* Bits to be preserved when writing */
11670+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
11671+#define SDVOC_PRESERVE_MASK (1 << 17)
11672+
11673+/** @defgroup LVDS
11674+ * @{
11675+ */
11676+/**
11677+ * This register controls the LVDS output enable, pipe selection, and data
11678+ * format selection.
11679+ *
11680+ * All of the clock/data pairs are force powered down by power sequencing.
11681+ */
11682+#define LVDS 0x61180
11683+/**
11684+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
11685+ * the DPLL semantics change when the LVDS is assigned to that pipe.
11686+ */
11687+# define LVDS_PORT_EN (1 << 31)
11688+/** Selects pipe B for LVDS data. Must be set on pre-965. */
11689+# define LVDS_PIPEB_SELECT (1 << 30)
11690+
11691+/**
11692+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
11693+ * pixel.
11694+ */
11695+# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
11696+# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
11697+# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
11698+/**
11699+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
11700+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
11701+ * on.
11702+ */
11703+# define LVDS_A3_POWER_MASK (3 << 6)
11704+# define LVDS_A3_POWER_DOWN (0 << 6)
11705+# define LVDS_A3_POWER_UP (3 << 6)
11706+/**
11707+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
11708+ * is set.
11709+ */
11710+# define LVDS_CLKB_POWER_MASK (3 << 4)
11711+# define LVDS_CLKB_POWER_DOWN (0 << 4)
11712+# define LVDS_CLKB_POWER_UP (3 << 4)
11713+
11714+/**
11715+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
11716+ * setting for whether we are in dual-channel mode. The B3 pair will
11717+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
11718+ */
11719+# define LVDS_B0B3_POWER_MASK (3 << 2)
11720+# define LVDS_B0B3_POWER_DOWN (0 << 2)
11721+# define LVDS_B0B3_POWER_UP (3 << 2)
11722+
11723+#define PIPEACONF 0x70008
11724+#define PIPEACONF_ENABLE (1<<31)
11725+#define PIPEACONF_DISABLE 0
11726+#define PIPEACONF_DOUBLE_WIDE (1<<30)
11727+#define I965_PIPECONF_ACTIVE (1<<30)
11728+#define PIPEACONF_SINGLE_WIDE 0
11729+#define PIPEACONF_PIPE_UNLOCKED 0
11730+#define PIPEACONF_PIPE_LOCKED (1<<25)
11731+#define PIPEACONF_PALETTE 0
11732+#define PIPEACONF_GAMMA (1<<24)
11733+#define PIPECONF_FORCE_BORDER (1<<25)
11734+#define PIPECONF_PROGRESSIVE (0 << 21)
11735+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
11736+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
11737+
11738+#define PIPEBCONF 0x71008
11739+#define PIPEBCONF_ENABLE (1<<31)
11740+#define PIPEBCONF_DISABLE 0
11741+#define PIPEBCONF_DOUBLE_WIDE (1<<30)
11742+#define PIPEBCONF_DISABLE 0
11743+#define PIPEBCONF_GAMMA (1<<24)
11744+#define PIPEBCONF_PALETTE 0
11745+
11746+#define PIPEBGCMAXRED 0x71010
11747+#define PIPEBGCMAXGREEN 0x71014
11748+#define PIPEBGCMAXBLUE 0x71018
11749+#define PIPEBSTAT 0x71024
11750+#define PIPEBFRAMEHIGH 0x71040
11751+#define PIPEBFRAMEPIXEL 0x71044
11752+
11753+#define DSPACNTR 0x70180
11754+#define DSPBCNTR 0x71180
11755+#define DISPLAY_PLANE_ENABLE (1<<31)
11756+#define DISPLAY_PLANE_DISABLE 0
11757+#define DISPPLANE_GAMMA_ENABLE (1<<30)
11758+#define DISPPLANE_GAMMA_DISABLE 0
11759+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
11760+#define DISPPLANE_8BPP (0x2<<26)
11761+#define DISPPLANE_15_16BPP (0x4<<26)
11762+#define DISPPLANE_16BPP (0x5<<26)
11763+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
11764+#define DISPPLANE_32BPP (0x7<<26)
11765+#define DISPPLANE_STEREO_ENABLE (1<<25)
11766+#define DISPPLANE_STEREO_DISABLE 0
11767+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
11768+#define DISPPLANE_SEL_PIPE_A 0
11769+#define DISPPLANE_SEL_PIPE_B (1<<24)
11770+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
11771+#define DISPPLANE_SRC_KEY_DISABLE 0
11772+#define DISPPLANE_LINE_DOUBLE (1<<20)
11773+#define DISPPLANE_NO_LINE_DOUBLE 0
11774+#define DISPPLANE_STEREO_POLARITY_FIRST 0
11775+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
11776+/* plane B only */
11777+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
11778+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
11779+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
11780+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
11781+
11782+#define DSPABASE 0x70184
11783+#define DSPASTRIDE 0x70188
11784+
11785+#define DSPBBASE 0x71184
11786+#define DSPBADDR DSPBBASE
11787+#define DSPBSTRIDE 0x71188
11788+
11789+#define DSPAKEYVAL 0x70194
11790+#define DSPAKEYMASK 0x70198
11791+
11792+#define DSPAPOS 0x7018C /* reserved */
11793+#define DSPASIZE 0x70190
11794+#define DSPBPOS 0x7118C
11795+#define DSPBSIZE 0x71190
11796+
11797+#define DSPASURF 0x7019C
11798+#define DSPATILEOFF 0x701A4
11799+
11800+#define DSPBSURF 0x7119C
11801+#define DSPBTILEOFF 0x711A4
11802+
11803+#define VGACNTRL 0x71400
11804+# define VGA_DISP_DISABLE (1 << 31)
11805+# define VGA_2X_MODE (1 << 30)
11806+# define VGA_PIPE_B_SELECT (1 << 29)
11807+
11808+/*
11809+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
11810+ * of video memory available to the BIOS in SWF1.
11811+ */
11812+
11813+#define SWF0 0x71410
11814+#define SWF1 0x71414
11815+#define SWF2 0x71418
11816+#define SWF3 0x7141c
11817+#define SWF4 0x71420
11818+#define SWF5 0x71424
11819+#define SWF6 0x71428
11820+
11821+/*
11822+ * 855 scratch registers.
11823+ */
11824+#define SWF00 0x70410
11825+#define SWF01 0x70414
11826+#define SWF02 0x70418
11827+#define SWF03 0x7041c
11828+#define SWF04 0x70420
11829+#define SWF05 0x70424
11830+#define SWF06 0x70428
11831+
11832+#define SWF10 SWF0
11833+#define SWF11 SWF1
11834+#define SWF12 SWF2
11835+#define SWF13 SWF3
11836+#define SWF14 SWF4
11837+#define SWF15 SWF5
11838+#define SWF16 SWF6
11839+
11840+#define SWF30 0x72414
11841+#define SWF31 0x72418
11842+#define SWF32 0x7241c
11843+
11844+
11845+/*
11846+ * Palette registers
11847+ */
11848+#define PALETTE_A 0x0a000
11849+#define PALETTE_B 0x0a800
11850+
11851+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
11852+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
11853+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
11854+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
11855+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
11856+
11857+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/
11858+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
11859+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
11860+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
11861+
11862+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
11863+ (dev)->pci_device == 0x2982 || \
11864+ (dev)->pci_device == 0x2992 || \
11865+ (dev)->pci_device == 0x29A2 || \
11866+ (dev)->pci_device == 0x2A02 || \
11867+ (dev)->pci_device == 0x2A12)
11868+
11869+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
11870+
11871+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
11872+ (dev)->pci_device == 0x29B2 || \
11873+ (dev)->pci_device == 0x29D2)
11874+
11875+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
11876+ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev))
11877+
11878+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
11879+ IS_I945GM(dev) || IS_I965GM(dev) || IS_POULSBO(dev))
11880+
11881+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
11882+ ((dev)->pci_device == 0x8109))
11883Index: linux-2.6.27/drivers/gpu/drm/psb/intel_crt.c
11884===================================================================
11885--- /dev/null 1970-01-01 00:00:00.000000000 +0000
11886+++ linux-2.6.27/drivers/gpu/drm/psb/intel_crt.c 2009-01-14 11:58:01.000000000 +0000
11887@@ -0,0 +1,242 @@
11888+/*
11889+ * Copyright © 2006-2007 Intel Corporation
11890+ *
11891+ * Permission is hereby granted, free of charge, to any person obtaining a
11892+ * copy of this software and associated documentation files (the "Software"),
11893+ * to deal in the Software without restriction, including without limitation
11894+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11895+ * and/or sell copies of the Software, and to permit persons to whom the
11896+ * Software is furnished to do so, subject to the following conditions:
11897+ *
11898+ * The above copyright notice and this permission notice (including the next
11899+ * paragraph) shall be included in all copies or substantial portions of the
11900+ * Software.
11901+ *
11902+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
11903+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11904+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
11905+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
11906+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
11907+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
11908+ * DEALINGS IN THE SOFTWARE.
11909+ *
11910+ * Authors:
11911+ * Eric Anholt <eric@anholt.net>
11912+ */
11913+
11914+#include <linux/i2c.h>
11915+
11916+static void intel_crt_dpms(struct drm_output *output, int mode)
11917+{
11918+ struct drm_device *dev = output->dev;
11919+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11920+ u32 temp;
11921+
11922+ temp = I915_READ(ADPA);
11923+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
11924+ temp &= ~ADPA_DAC_ENABLE;
11925+
11926+ switch(mode) {
11927+ case DPMSModeOn:
11928+ temp |= ADPA_DAC_ENABLE;
11929+ break;
11930+ case DPMSModeStandby:
11931+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
11932+ break;
11933+ case DPMSModeSuspend:
11934+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
11935+ break;
11936+ case DPMSModeOff:
11937+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
11938+ break;
11939+ }
11940+
11941+ I915_WRITE(ADPA, temp);
11942+}
11943+
11944+static void intel_crt_save(struct drm_output *output)
11945+{
11946+
11947+}
11948+
11949+static void intel_crt_restore(struct drm_output *output)
11950+{
11951+
11952+}
11953+
11954+static int intel_crt_mode_valid(struct drm_output *output,
11955+ struct drm_display_mode *mode)
11956+{
11957+ if (mode->flags & V_DBLSCAN)
11958+ return MODE_NO_DBLESCAN;
11959+
11960+ if (mode->clock > 400000 || mode->clock < 25000)
11961+ return MODE_CLOCK_RANGE;
11962+
11963+ return MODE_OK;
11964+}
11965+
11966+static bool intel_crt_mode_fixup(struct drm_output *output,
11967+ struct drm_display_mode *mode,
11968+ struct drm_display_mode *adjusted_mode)
11969+{
11970+ return true;
11971+}
11972+
11973+static void intel_crt_mode_set(struct drm_output *output,
11974+ struct drm_display_mode *mode,
11975+ struct drm_display_mode *adjusted_mode)
11976+{
11977+ struct drm_device *dev = output->dev;
11978+ struct drm_crtc *crtc = output->crtc;
11979+ struct intel_crtc *intel_crtc = crtc->driver_private;
11980+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
11981+ int dpll_md_reg;
11982+ u32 adpa, dpll_md;
11983+
11984+ if (intel_crtc->pipe == 0)
11985+ dpll_md_reg = DPLL_A_MD;
11986+ else
11987+ dpll_md_reg = DPLL_B_MD;
11988+
11989+ /*
11990+ * Disable separate mode multiplier used when cloning SDVO to CRT
11991+ * XXX this needs to be adjusted when we really are cloning
11992+ */
11993+ if (IS_I965G(dev)) {
11994+ dpll_md = I915_READ(dpll_md_reg);
11995+ I915_WRITE(dpll_md_reg,
11996+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
11997+ }
11998+
11999+ adpa = 0;
12000+ if (adjusted_mode->flags & V_PHSYNC)
12001+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
12002+ if (adjusted_mode->flags & V_PVSYNC)
12003+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
12004+
12005+ if (intel_crtc->pipe == 0)
12006+ adpa |= ADPA_PIPE_A_SELECT;
12007+ else
12008+ adpa |= ADPA_PIPE_B_SELECT;
12009+
12010+ I915_WRITE(ADPA, adpa);
12011+}
12012+
12013+/**
12014+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
12015+ *
12016+ * Only for I945G/GM.
12017+ *
12018+ * \return TRUE if CRT is connected.
12019+ * \return FALSE if CRT is disconnected.
12020+ */
12021+static bool intel_crt_detect_hotplug(struct drm_output *output)
12022+{
12023+ struct drm_device *dev = output->dev;
12024+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12025+ u32 temp;
12026+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
12027+
12028+ temp = I915_READ(PORT_HOTPLUG_EN);
12029+
12030+ I915_WRITE(PORT_HOTPLUG_EN,
12031+ temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
12032+
12033+ do {
12034+ if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
12035+ break;
12036+ msleep(1);
12037+ } while (time_after(timeout, jiffies));
12038+
12039+ if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
12040+ CRT_HOTPLUG_MONITOR_COLOR)
12041+ return true;
12042+
12043+ return false;
12044+}
12045+
12046+static bool intel_crt_detect_ddc(struct drm_output *output)
12047+{
12048+ struct intel_output *intel_output = output->driver_private;
12049+
12050+ /* CRT should always be at 0, but check anyway */
12051+ if (intel_output->type != INTEL_OUTPUT_ANALOG)
12052+ return false;
12053+
12054+ return intel_ddc_probe(output);
12055+}
12056+
12057+static enum drm_output_status intel_crt_detect(struct drm_output *output)
12058+{
12059+ struct drm_device *dev = output->dev;
12060+
12061+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) {
12062+ if (intel_crt_detect_hotplug(output))
12063+ return output_status_connected;
12064+ else
12065+ return output_status_disconnected;
12066+ }
12067+
12068+ if (intel_crt_detect_ddc(output))
12069+ return output_status_connected;
12070+
12071+ /* TODO use load detect */
12072+ return output_status_unknown;
12073+}
12074+
12075+static void intel_crt_destroy(struct drm_output *output)
12076+{
12077+ struct intel_output *intel_output = output->driver_private;
12078+
12079+ intel_i2c_destroy(intel_output->ddc_bus);
12080+ kfree(output->driver_private);
12081+}
12082+
12083+static int intel_crt_get_modes(struct drm_output *output)
12084+{
12085+ return intel_ddc_get_modes(output);
12086+}
12087+
12088+/*
12089+ * Routines for controlling stuff on the analog port
12090+ */
12091+static const struct drm_output_funcs intel_crt_output_funcs = {
12092+ .dpms = intel_crt_dpms,
12093+ .save = intel_crt_save,
12094+ .restore = intel_crt_restore,
12095+ .mode_valid = intel_crt_mode_valid,
12096+ .mode_fixup = intel_crt_mode_fixup,
12097+ .prepare = intel_output_prepare,
12098+ .mode_set = intel_crt_mode_set,
12099+ .commit = intel_output_commit,
12100+ .detect = intel_crt_detect,
12101+ .get_modes = intel_crt_get_modes,
12102+ .cleanup = intel_crt_destroy,
12103+};
12104+
12105+void intel_crt_init(struct drm_device *dev)
12106+{
12107+ struct drm_output *output;
12108+ struct intel_output *intel_output;
12109+
12110+ output = drm_output_create(dev, &intel_crt_output_funcs, "VGA");
12111+
12112+ intel_output = kmalloc(sizeof(struct intel_output), GFP_KERNEL);
12113+ if (!intel_output) {
12114+ drm_output_destroy(output);
12115+ return;
12116+ }
12117+ /* Set up the DDC bus. */
12118+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
12119+ if (!intel_output->ddc_bus) {
12120+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
12121+ "failed.\n");
12122+ return;
12123+ }
12124+
12125+ intel_output->type = INTEL_OUTPUT_ANALOG;
12126+ output->driver_private = intel_output;
12127+ output->interlace_allowed = 0;
12128+ output->doublescan_allowed = 0;
12129+}
12130Index: linux-2.6.27/drivers/gpu/drm/psb/intel_display.c
12131===================================================================
12132--- /dev/null 1970-01-01 00:00:00.000000000 +0000
12133+++ linux-2.6.27/drivers/gpu/drm/psb/intel_display.c 2009-01-14 11:58:01.000000000 +0000
12134@@ -0,0 +1,1472 @@
12135+/*
12136+ * Copyright © 2006-2007 Intel Corporation
12137+ *
12138+ * Permission is hereby granted, free of charge, to any person obtaining a
12139+ * copy of this software and associated documentation files (the "Software"),
12140+ * to deal in the Software without restriction, including without limitation
12141+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12142+ * and/or sell copies of the Software, and to permit persons to whom the
12143+ * Software is furnished to do so, subject to the following conditions:
12144+ *
12145+ * The above copyright notice and this permission notice (including the next
12146+ * paragraph) shall be included in all copies or substantial portions of the
12147+ * Software.
12148+ *
12149+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12150+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12151+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
12152+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
12153+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
12154+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
12155+ * DEALINGS IN THE SOFTWARE.
12156+ *
12157+ * Authors:
12158+ * Eric Anholt <eric@anholt.net>
12159+ */
12160+
12161+#include <linux/i2c.h>
12162+
12163+bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
12164+
12165+typedef struct {
12166+ /* given values */
12167+ int n;
12168+ int m1, m2;
12169+ int p1, p2;
12170+ /* derived values */
12171+ int dot;
12172+ int vco;
12173+ int m;
12174+ int p;
12175+} intel_clock_t;
12176+
12177+typedef struct {
12178+ int min, max;
12179+} intel_range_t;
12180+
12181+typedef struct {
12182+ int dot_limit;
12183+ int p2_slow, p2_fast;
12184+} intel_p2_t;
12185+
12186+#define INTEL_P2_NUM 2
12187+
12188+typedef struct {
12189+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
12190+ intel_p2_t p2;
12191+} intel_limit_t;
12192+
12193+#define I8XX_DOT_MIN 25000
12194+#define I8XX_DOT_MAX 350000
12195+#define I8XX_VCO_MIN 930000
12196+#define I8XX_VCO_MAX 1400000
12197+#define I8XX_N_MIN 3
12198+#define I8XX_N_MAX 16
12199+#define I8XX_M_MIN 96
12200+#define I8XX_M_MAX 140
12201+#define I8XX_M1_MIN 18
12202+#define I8XX_M1_MAX 26
12203+#define I8XX_M2_MIN 6
12204+#define I8XX_M2_MAX 16
12205+#define I8XX_P_MIN 4
12206+#define I8XX_P_MAX 128
12207+#define I8XX_P1_MIN 2
12208+#define I8XX_P1_MAX 33
12209+#define I8XX_P1_LVDS_MIN 1
12210+#define I8XX_P1_LVDS_MAX 6
12211+#define I8XX_P2_SLOW 4
12212+#define I8XX_P2_FAST 2
12213+#define I8XX_P2_LVDS_SLOW 14
12214+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
12215+#define I8XX_P2_SLOW_LIMIT 165000
12216+
12217+#define I9XX_DOT_MIN 20000
12218+#define I9XX_DOT_MAX 400000
12219+#define I9XX_VCO_MIN 1400000
12220+#define I9XX_VCO_MAX 2800000
12221+#define I9XX_N_MIN 3
12222+#define I9XX_N_MAX 8
12223+#define I9XX_M_MIN 70
12224+#define I9XX_M_MAX 120
12225+#define I9XX_M1_MIN 10
12226+#define I9XX_M1_MAX 20
12227+#define I9XX_M2_MIN 5
12228+#define I9XX_M2_MAX 9
12229+#define I9XX_P_SDVO_DAC_MIN 5
12230+#define I9XX_P_SDVO_DAC_MAX 80
12231+#define I9XX_P_LVDS_MIN 7
12232+#define I9XX_P_LVDS_MAX 98
12233+#define I9XX_P1_MIN 1
12234+#define I9XX_P1_MAX 8
12235+#define I9XX_P2_SDVO_DAC_SLOW 10
12236+#define I9XX_P2_SDVO_DAC_FAST 5
12237+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
12238+#define I9XX_P2_LVDS_SLOW 14
12239+#define I9XX_P2_LVDS_FAST 7
12240+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
12241+
12242+#define INTEL_LIMIT_I8XX_DVO_DAC 0
12243+#define INTEL_LIMIT_I8XX_LVDS 1
12244+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
12245+#define INTEL_LIMIT_I9XX_LVDS 3
12246+
12247+static const intel_limit_t intel_limits[] = {
12248+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
12249+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
12250+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
12251+ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
12252+ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
12253+ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
12254+ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
12255+ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
12256+ .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
12257+ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
12258+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
12259+ },
12260+ { /* INTEL_LIMIT_I8XX_LVDS */
12261+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
12262+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
12263+ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
12264+ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
12265+ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
12266+ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
12267+ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
12268+ .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
12269+ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
12270+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
12271+ },
12272+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
12273+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
12274+ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
12275+ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
12276+ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
12277+ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
12278+ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
12279+ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
12280+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
12281+ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
12282+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
12283+ },
12284+ { /* INTEL_LIMIT_I9XX_LVDS */
12285+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
12286+ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
12287+ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
12288+ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
12289+ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
12290+ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
12291+ .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX },
12292+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
12293+ /* The single-channel range is 25-112Mhz, and dual-channel
12294+ * is 80-224Mhz. Prefer single channel as much as possible.
12295+ */
12296+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
12297+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
12298+ },
12299+};
12300+
12301+static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
12302+{
12303+ struct drm_device *dev = crtc->dev;
12304+ const intel_limit_t *limit;
12305+
12306+ if (IS_I9XX(dev)) {
12307+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
12308+ limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
12309+ else
12310+ limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
12311+ } else {
12312+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
12313+ limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
12314+ else
12315+ limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
12316+ }
12317+ return limit;
12318+}
12319+
12320+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
12321+
12322+static void i8xx_clock(int refclk, intel_clock_t *clock)
12323+{
12324+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
12325+ clock->p = clock->p1 * clock->p2;
12326+ clock->vco = refclk * clock->m / (clock->n + 2);
12327+ clock->dot = clock->vco / clock->p;
12328+}
12329+
12330+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
12331+
12332+static void i9xx_clock(int refclk, intel_clock_t *clock)
12333+{
12334+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
12335+ clock->p = clock->p1 * clock->p2;
12336+ clock->vco = refclk * clock->m / (clock->n + 2);
12337+ clock->dot = clock->vco / clock->p;
12338+}
12339+
12340+static void intel_clock(struct drm_device *dev, int refclk,
12341+ intel_clock_t *clock)
12342+{
12343+ if (IS_I9XX(dev))
12344+ return i9xx_clock (refclk, clock);
12345+ else
12346+ return i8xx_clock (refclk, clock);
12347+}
12348+
12349+/**
12350+ * Returns whether any output on the specified pipe is of the specified type
12351+ */
12352+bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
12353+{
12354+ struct drm_device *dev = crtc->dev;
12355+ struct drm_mode_config *mode_config = &dev->mode_config;
12356+ struct drm_output *l_entry;
12357+
12358+ list_for_each_entry(l_entry, &mode_config->output_list, head) {
12359+ if (l_entry->crtc == crtc) {
12360+ struct intel_output *intel_output = l_entry->driver_private;
12361+ if (intel_output->type == type)
12362+ return true;
12363+ }
12364+ }
12365+ return false;
12366+}
12367+
12368+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
12369+/**
12370+ * Returns whether the given set of divisors are valid for a given refclk with
12371+ * the given outputs.
12372+ */
12373+
12374+static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
12375+{
12376+ const intel_limit_t *limit = intel_limit (crtc);
12377+
12378+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
12379+ INTELPllInvalid ("p1 out of range\n");
12380+ if (clock->p < limit->p.min || limit->p.max < clock->p)
12381+ INTELPllInvalid ("p out of range\n");
12382+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
12383+ INTELPllInvalid ("m2 out of range\n");
12384+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
12385+ INTELPllInvalid ("m1 out of range\n");
12386+ if (clock->m1 <= clock->m2)
12387+ INTELPllInvalid ("m1 <= m2\n");
12388+ if (clock->m < limit->m.min || limit->m.max < clock->m)
12389+ INTELPllInvalid ("m out of range\n");
12390+ if (clock->n < limit->n.min || limit->n.max < clock->n)
12391+ INTELPllInvalid ("n out of range\n");
12392+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
12393+ INTELPllInvalid ("vco out of range\n");
12394+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
12395+ * output, etc., rather than just a single range.
12396+ */
12397+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
12398+ INTELPllInvalid ("dot out of range\n");
12399+
12400+ return true;
12401+}
12402+
12403+/**
12404+ * Returns a set of divisors for the desired target clock with the given
12405+ * refclk, or FALSE. The returned values represent the clock equation:
12406+ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
12407+ */
12408+static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
12409+ int refclk, intel_clock_t *best_clock)
12410+{
12411+ struct drm_device *dev = crtc->dev;
12412+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12413+ intel_clock_t clock;
12414+ const intel_limit_t *limit = intel_limit(crtc);
12415+ int err = target;
12416+
12417+ if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
12418+ (I915_READ(LVDS) & LVDS_PORT_EN) != 0) {
12419+ /*
12420+ * For LVDS, if the panel is on, just rely on its current
12421+ * settings for dual-channel. We haven't figured out how to
12422+ * reliably set up different single/dual channel state, if we
12423+ * even can.
12424+ */
12425+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
12426+ LVDS_CLKB_POWER_UP)
12427+ clock.p2 = limit->p2.p2_fast;
12428+ else
12429+ clock.p2 = limit->p2.p2_slow;
12430+ } else {
12431+ if (target < limit->p2.dot_limit)
12432+ clock.p2 = limit->p2.p2_slow;
12433+ else
12434+ clock.p2 = limit->p2.p2_fast;
12435+ }
12436+
12437+ memset (best_clock, 0, sizeof (*best_clock));
12438+
12439+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
12440+ for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
12441+ clock.m2 <= limit->m2.max; clock.m2++) {
12442+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
12443+ clock.n++) {
12444+ for (clock.p1 = limit->p1.min;
12445+ clock.p1 <= limit->p1.max; clock.p1++) {
12446+ int this_err;
12447+
12448+ intel_clock(dev, refclk, &clock);
12449+
12450+ if (!intel_PLL_is_valid(crtc, &clock))
12451+ continue;
12452+
12453+ this_err = abs(clock.dot - target);
12454+ if (this_err < err) {
12455+ *best_clock = clock;
12456+ err = this_err;
12457+ }
12458+ }
12459+ }
12460+ }
12461+ }
12462+
12463+ return (err != target);
12464+}
12465+
12466+#if 0
12467+void
12468+intel_set_vblank(struct drm_device *dev)
12469+{
12470+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12471+ struct drm_crtc *crtc;
12472+ struct intel_crtc *intel_crtc;
12473+ int vbl_pipe = 0;
12474+
12475+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
12476+ intel_crtc = crtc->driver_private;
12477+
12478+ if (crtc->enabled)
12479+ vbl_pipe |= (1<<intel_crtc->pipe);
12480+ }
12481+
12482+ dev_priv->vblank_pipe = vbl_pipe;
12483+ i915_enable_interrupt(dev);
12484+}
12485+#endif
12486+
12487+void
12488+intel_wait_for_vblank(struct drm_device *dev)
12489+{
12490+ /* Wait for 20ms, i.e. one cycle at 50hz. */
12491+ udelay(20000);
12492+}
12493+
12494+void
12495+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
12496+{
12497+ struct drm_device *dev = crtc->dev;
12498+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12499+ struct intel_crtc *intel_crtc = crtc->driver_private;
12500+ int pipe = intel_crtc->pipe;
12501+ unsigned long Start, Offset;
12502+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
12503+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
12504+
12505+ Start = crtc->fb->offset;
12506+ Offset = y * crtc->fb->pitch + x;
12507+
12508+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
12509+ if (IS_I965G(dev)) {
12510+ I915_WRITE(dspbase, Offset);
12511+ I915_READ(dspbase);
12512+ I915_WRITE(dspsurf, Start);
12513+ I915_READ(dspsurf);
12514+ } else {
12515+ I915_WRITE(dspbase, Start + Offset);
12516+ I915_READ(dspbase);
12517+ }
12518+
12519+
12520+ if (!dev_priv->sarea_priv)
12521+ return;
12522+
12523+ switch (pipe) {
12524+ case 0:
12525+ dev_priv->sarea_priv->planeA_x = x;
12526+ dev_priv->sarea_priv->planeA_y = y;
12527+ break;
12528+ case 1:
12529+ dev_priv->sarea_priv->planeB_x = x;
12530+ dev_priv->sarea_priv->planeB_y = y;
12531+ break;
12532+ default:
12533+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
12534+ break;
12535+ }
12536+}
12537+
12538+/**
12539+ * Sets the power management mode of the pipe and plane.
12540+ *
12541+ * This code should probably grow support for turning the cursor off and back
12542+ * on appropriately at the same time as we're turning the pipe off/on.
12543+ */
12544+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
12545+{
12546+ struct drm_device *dev = crtc->dev;
12547+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12548+ struct intel_crtc *intel_crtc = crtc->driver_private;
12549+ int pipe = intel_crtc->pipe;
12550+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
12551+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
12552+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
12553+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
12554+ u32 temp, temp2;
12555+ bool enabled;
12556+
12557+ /* XXX: When our outputs are all unaware of DPMS modes other than off
12558+ * and on, we should map those modes to DPMSModeOff in the CRTC.
12559+ */
12560+ switch (mode) {
12561+ case DPMSModeOn:
12562+ case DPMSModeStandby:
12563+ case DPMSModeSuspend:
12564+ /* Enable the DPLL */
12565+ temp = I915_READ(dpll_reg);
12566+ if ((temp & DPLL_VCO_ENABLE) == 0) {
12567+ I915_WRITE(dpll_reg, temp);
12568+ I915_READ(dpll_reg);
12569+ /* Wait for the clocks to stabilize. */
12570+ udelay(150);
12571+ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
12572+ I915_READ(dpll_reg);
12573+ /* Wait for the clocks to stabilize. */
12574+ udelay(150);
12575+ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
12576+ I915_READ(dpll_reg);
12577+ /* Wait for the clocks to stabilize. */
12578+ udelay(150);
12579+ }
12580+
12581+ /* Enable the pipe */
12582+ temp = I915_READ(pipeconf_reg);
12583+ if ((temp & PIPEACONF_ENABLE) == 0)
12584+ I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
12585+
12586+ /* Enable the plane */
12587+ temp = I915_READ(dspcntr_reg);
12588+ if (mode != DPMSModeOn)
12589+ temp2 = temp & ~DISPLAY_PLANE_ENABLE;
12590+ else
12591+ temp2 = temp | DISPLAY_PLANE_ENABLE;
12592+
12593+ if (temp != temp2) {
12594+ I915_WRITE(dspcntr_reg, temp2);
12595+ /* Flush the plane changes */
12596+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
12597+ }
12598+
12599+ intel_crtc_load_lut(crtc);
12600+
12601+ /* Give the overlay scaler a chance to enable if it's on this pipe */
12602+ //intel_crtc_dpms_video(crtc, TRUE); TODO
12603+ break;
12604+ case DPMSModeOff:
12605+ /* Give the overlay scaler a chance to disable if it's on this pipe */
12606+ //intel_crtc_dpms_video(crtc, FALSE); TODO
12607+
12608+ /* Disable display plane */
12609+ temp = I915_READ(dspcntr_reg);
12610+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
12611+ I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
12612+ /* Flush the plane changes */
12613+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
12614+ I915_READ(dspbase_reg);
12615+ }
12616+
12617+ if (!IS_I9XX(dev)) {
12618+ /* Wait for vblank for the disable to take effect */
12619+ intel_wait_for_vblank(dev);
12620+ }
12621+
12622+ /* Next, disable display pipes */
12623+ temp = I915_READ(pipeconf_reg);
12624+ if ((temp & PIPEACONF_ENABLE) != 0) {
12625+ I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
12626+ I915_READ(pipeconf_reg);
12627+ }
12628+
12629+ /* Wait for vblank for the disable to take effect. */
12630+ intel_wait_for_vblank(dev);
12631+
12632+ temp = I915_READ(dpll_reg);
12633+ if ((temp & DPLL_VCO_ENABLE) != 0) {
12634+ I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
12635+ I915_READ(dpll_reg);
12636+ }
12637+
12638+ /* Wait for the clocks to turn off. */
12639+ udelay(150);
12640+ break;
12641+ }
12642+
12643+
12644+ if (!dev_priv->sarea_priv)
12645+ return;
12646+
12647+ enabled = crtc->enabled && mode != DPMSModeOff;
12648+
12649+ switch (pipe) {
12650+ case 0:
12651+ dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
12652+ dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
12653+ break;
12654+ case 1:
12655+ dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
12656+ dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
12657+ break;
12658+ default:
12659+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
12660+ break;
12661+ }
12662+}
12663+
12664+static bool intel_crtc_lock(struct drm_crtc *crtc)
12665+{
12666+ /* Sync the engine before mode switch */
12667+// i830WaitSync(crtc->scrn);
12668+
12669+#if 0 // TODO def XF86DRI
12670+ return I830DRILock(crtc->scrn);
12671+#else
12672+ return FALSE;
12673+#endif
12674+}
12675+
12676+static void intel_crtc_unlock (struct drm_crtc *crtc)
12677+{
12678+#if 0 // TODO def XF86DRI
12679+ I830DRIUnlock (crtc->scrn);
12680+#endif
12681+}
12682+
12683+static void intel_crtc_prepare (struct drm_crtc *crtc)
12684+{
12685+ crtc->funcs->dpms(crtc, DPMSModeOff);
12686+}
12687+
12688+static void intel_crtc_commit (struct drm_crtc *crtc)
12689+{
12690+ crtc->funcs->dpms(crtc, DPMSModeOn);
12691+}
12692+
12693+void intel_output_prepare (struct drm_output *output)
12694+{
12695+ /* lvds has its own version of prepare see intel_lvds_prepare */
12696+ output->funcs->dpms(output, DPMSModeOff);
12697+}
12698+
12699+void intel_output_commit (struct drm_output *output)
12700+{
12701+ /* lvds has its own version of commit see intel_lvds_commit */
12702+ output->funcs->dpms(output, DPMSModeOn);
12703+}
12704+
12705+static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
12706+ struct drm_display_mode *mode,
12707+ struct drm_display_mode *adjusted_mode)
12708+{
12709+ return true;
12710+}
12711+
12712+
12713+/** Returns the core display clock speed for i830 - i945 */
12714+int intel_get_core_clock_speed(struct drm_device *dev)
12715+{
12716+
12717+ /* Core clock values taken from the published datasheets.
12718+ * The 830 may go up to 166 Mhz, which we should check.
12719+ */
12720+ if (IS_I945G(dev))
12721+ return 400000;
12722+ else if (IS_I915G(dev))
12723+ return 333000;
12724+ else if (IS_I945GM(dev) || IS_POULSBO(dev) || IS_845G(dev))
12725+ return 200000;
12726+ else if (IS_I915GM(dev)) {
12727+ u16 gcfgc = 0;
12728+
12729+ pci_read_config_word(dev->pdev, I915_GCFGC, &gcfgc);
12730+
12731+ if (gcfgc & I915_LOW_FREQUENCY_ENABLE)
12732+ return 133000;
12733+ else {
12734+ switch (gcfgc & I915_DISPLAY_CLOCK_MASK) {
12735+ case I915_DISPLAY_CLOCK_333_MHZ:
12736+ return 333000;
12737+ default:
12738+ case I915_DISPLAY_CLOCK_190_200_MHZ:
12739+ return 190000;
12740+ }
12741+ }
12742+ } else if (IS_I865G(dev))
12743+ return 266000;
12744+ else if (IS_I855(dev)) {
12745+#if 0
12746+ PCITAG bridge = pciTag(0, 0, 0); /* This is always the host bridge */
12747+ u16 hpllcc = pciReadWord(bridge, I855_HPLLCC);
12748+
12749+#endif
12750+ u16 hpllcc = 0;
12751+ /* Assume that the hardware is in the high speed state. This
12752+ * should be the default.
12753+ */
12754+ switch (hpllcc & I855_CLOCK_CONTROL_MASK) {
12755+ case I855_CLOCK_133_200:
12756+ case I855_CLOCK_100_200:
12757+ return 200000;
12758+ case I855_CLOCK_166_250:
12759+ return 250000;
12760+ case I855_CLOCK_100_133:
12761+ return 133000;
12762+ }
12763+ } else /* 852, 830 */
12764+ return 133000;
12765+
12766+ return 0; /* Silence gcc warning */
12767+}
12768+
12769+
12770+/**
12771+ * Return the pipe currently connected to the panel fitter,
12772+ * or -1 if the panel fitter is not present or not in use
12773+ */
12774+int intel_panel_fitter_pipe (struct drm_device *dev)
12775+{
12776+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12777+ u32 pfit_control;
12778+
12779+ /* i830 doesn't have a panel fitter */
12780+ if (IS_I830(dev))
12781+ return -1;
12782+
12783+ pfit_control = I915_READ(PFIT_CONTROL);
12784+
12785+ /* See if the panel fitter is in use */
12786+ if ((pfit_control & PFIT_ENABLE) == 0)
12787+ return -1;
12788+
12789+ /* 965 can place panel fitter on either pipe */
12790+ if (IS_I965G(dev))
12791+ return (pfit_control >> 29) & 0x3;
12792+
12793+ /* older chips can only use pipe 1 */
12794+ return 1;
12795+}
12796+
12797+#define WA_NO_FB_GARBAGE_DISPLAY
12798+#ifdef WA_NO_FB_GARBAGE_DISPLAY
12799+static u32 fp_reg_value[2];
12800+static u32 dpll_reg_value[2];
12801+static u32 dpll_md_reg_value[2];
12802+static u32 dspcntr_reg_value[2];
12803+static u32 pipeconf_reg_value[2];
12804+static u32 htot_reg_value[2];
12805+static u32 hblank_reg_value[2];
12806+static u32 hsync_reg_value[2];
12807+static u32 vtot_reg_value[2];
12808+static u32 vblank_reg_value[2];
12809+static u32 vsync_reg_value[2];
12810+static u32 dspsize_reg_value[2];
12811+static u32 dspstride_reg_value[2];
12812+static u32 dsppos_reg_value[2];
12813+static u32 pipesrc_reg_value[2];
12814+
12815+static u32 dspbase_value[2];
12816+
12817+static u32 lvds_reg_value[2];
12818+static u32 vgacntrl_reg_value[2];
12819+static u32 pfit_control_reg_value[2];
12820+
12821+void intel_crtc_mode_restore(struct drm_crtc *crtc)
12822+{
12823+ struct drm_device *dev = crtc->dev;
12824+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12825+ struct intel_crtc *intel_crtc = crtc->driver_private;
12826+ int pipe = intel_crtc->pipe;
12827+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
12828+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
12829+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
12830+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
12831+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
12832+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
12833+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
12834+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
12835+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
12836+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
12837+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
12838+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
12839+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
12840+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
12841+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
12842+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
12843+
12844+ bool ok, is_sdvo = false, is_dvo = false;
12845+ bool is_crt = false, is_lvds = false, is_tv = false;
12846+ struct drm_mode_config *mode_config = &dev->mode_config;
12847+ struct drm_output *output;
12848+
12849+ list_for_each_entry(output, &mode_config->output_list, head) {
12850+ struct intel_output *intel_output = output->driver_private;
12851+
12852+ if (output->crtc != crtc)
12853+ continue;
12854+
12855+ switch (intel_output->type) {
12856+ case INTEL_OUTPUT_LVDS:
12857+ is_lvds = TRUE;
12858+ break;
12859+ case INTEL_OUTPUT_SDVO:
12860+ is_sdvo = TRUE;
12861+ break;
12862+ case INTEL_OUTPUT_DVO:
12863+ is_dvo = TRUE;
12864+ break;
12865+ case INTEL_OUTPUT_TVOUT:
12866+ is_tv = TRUE;
12867+ break;
12868+ case INTEL_OUTPUT_ANALOG:
12869+ is_crt = TRUE;
12870+ break;
12871+ }
12872+ if(is_lvds && ((lvds_reg_value[pipe] & LVDS_PORT_EN) == 0))
12873+ {
12874+ printk("%s: is_lvds but not the boot display, so return\n",
12875+ __FUNCTION__);
12876+ return;
12877+ }
12878+ output->funcs->prepare(output);
12879+ }
12880+
12881+ intel_crtc_prepare(crtc);
12882+ /* Disable the panel fitter if it was on our pipe */
12883+ if (intel_panel_fitter_pipe(dev) == pipe)
12884+ I915_WRITE(PFIT_CONTROL, 0);
12885+
12886+ if (dpll_reg_value[pipe] & DPLL_VCO_ENABLE) {
12887+ I915_WRITE(fp_reg, fp_reg_value[pipe]);
12888+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]& ~DPLL_VCO_ENABLE);
12889+ I915_READ(dpll_reg);
12890+ udelay(150);
12891+ }
12892+
12893+ /*
12894+ if(is_lvds)
12895+ I915_WRITE(LVDS, lvds_reg_value[pipe]);
12896+ */
12897+ if (is_lvds) {
12898+ I915_WRITE(LVDS, lvds_reg_value[pipe]);
12899+ I915_READ(LVDS);
12900+ }
12901+
12902+ I915_WRITE(fp_reg, fp_reg_value[pipe]);
12903+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]);
12904+ I915_READ(dpll_reg);
12905+ udelay(150);
12906+ //I915_WRITE(dpll_md_reg, dpll_md_reg_value[pipe]);
12907+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]);
12908+ I915_READ(dpll_reg);
12909+ udelay(150);
12910+ I915_WRITE(htot_reg, htot_reg_value[pipe]);
12911+ I915_WRITE(hblank_reg, hblank_reg_value[pipe]);
12912+ I915_WRITE(hsync_reg, hsync_reg_value[pipe]);
12913+ I915_WRITE(vtot_reg, vtot_reg_value[pipe]);
12914+ I915_WRITE(vblank_reg, vblank_reg_value[pipe]);
12915+ I915_WRITE(vsync_reg, vsync_reg_value[pipe]);
12916+ I915_WRITE(dspstride_reg, dspstride_reg_value[pipe]);
12917+ I915_WRITE(dspsize_reg, dspsize_reg_value[pipe]);
12918+ I915_WRITE(dsppos_reg, dsppos_reg_value[pipe]);
12919+ I915_WRITE(pipesrc_reg, pipesrc_reg_value[pipe]);
12920+ I915_WRITE(pipeconf_reg, pipeconf_reg_value[pipe]);
12921+ I915_READ(pipeconf_reg);
12922+ intel_wait_for_vblank(dev);
12923+ I915_WRITE(dspcntr_reg, dspcntr_reg_value[pipe]);
12924+ I915_WRITE(dspbase, dspbase_value[pipe]);
12925+ I915_READ(dspbase);
12926+ I915_WRITE(VGACNTRL, vgacntrl_reg_value[pipe]);
12927+ intel_wait_for_vblank(dev);
12928+ I915_WRITE(PFIT_CONTROL, pfit_control_reg_value[pipe]);
12929+
12930+ intel_crtc_commit(crtc);
12931+ list_for_each_entry(output, &mode_config->output_list, head) {
12932+ if (output->crtc != crtc)
12933+ continue;
12934+
12935+ output->funcs->commit(output);
12936+ //output->funcs->dpms(output, DPMSModeOff);
12937+ //printk("turn off the display first\n");
12938+ }
12939+ return;
12940+}
12941+
12942+void intel_crtc_mode_save(struct drm_crtc *crtc)
12943+{
12944+ struct drm_device *dev = crtc->dev;
12945+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
12946+ struct intel_crtc *intel_crtc = crtc->driver_private;
12947+ int pipe = intel_crtc->pipe;
12948+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
12949+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
12950+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
12951+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
12952+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
12953+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
12954+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
12955+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
12956+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
12957+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
12958+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
12959+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
12960+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
12961+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
12962+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
12963+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
12964+ bool ok, is_sdvo = false, is_dvo = false;
12965+ bool is_crt = false, is_lvds = false, is_tv = false;
12966+ struct drm_mode_config *mode_config = &dev->mode_config;
12967+ struct drm_output *output;
12968+
12969+ list_for_each_entry(output, &mode_config->output_list, head) {
12970+ struct intel_output *intel_output = output->driver_private;
12971+
12972+ if (output->crtc != crtc)
12973+ continue;
12974+
12975+ switch (intel_output->type) {
12976+ case INTEL_OUTPUT_LVDS:
12977+ is_lvds = TRUE;
12978+ break;
12979+ case INTEL_OUTPUT_SDVO:
12980+ is_sdvo = TRUE;
12981+ break;
12982+ case INTEL_OUTPUT_DVO:
12983+ is_dvo = TRUE;
12984+ break;
12985+ case INTEL_OUTPUT_TVOUT:
12986+ is_tv = TRUE;
12987+ break;
12988+ case INTEL_OUTPUT_ANALOG:
12989+ is_crt = TRUE;
12990+ break;
12991+ }
12992+ }
12993+
12994+ fp_reg_value[pipe] = I915_READ(fp_reg);
12995+ dpll_reg_value[pipe] = I915_READ(dpll_reg);
12996+ dpll_md_reg_value[pipe] = I915_READ(dpll_md_reg);
12997+ dspcntr_reg_value[pipe] = I915_READ(dspcntr_reg);
12998+ pipeconf_reg_value[pipe] = I915_READ(pipeconf_reg);
12999+ htot_reg_value[pipe] = I915_READ(htot_reg);
13000+ hblank_reg_value[pipe] = I915_READ(hblank_reg);
13001+ hsync_reg_value[pipe] = I915_READ(hsync_reg);
13002+ vtot_reg_value[pipe] = I915_READ(vtot_reg);
13003+ vblank_reg_value[pipe] = I915_READ(vblank_reg);
13004+ vsync_reg_value[pipe] = I915_READ(vsync_reg);
13005+ dspsize_reg_value[pipe] = I915_READ(dspsize_reg);
13006+ dspstride_reg_value[pipe] = I915_READ(dspstride_reg);
13007+ dsppos_reg_value[pipe] = I915_READ(dsppos_reg);
13008+ pipesrc_reg_value[pipe] = I915_READ(pipesrc_reg);
13009+ dspbase_value[pipe] = I915_READ(dspbase);
13010+ if(is_lvds)
13011+ lvds_reg_value[pipe] = I915_READ(LVDS);
13012+ vgacntrl_reg_value[pipe] = I915_READ(VGACNTRL);
13013+ pfit_control_reg_value[pipe] = I915_READ(PFIT_CONTROL);
13014+}
13015+#endif
13016+
13017+static void intel_crtc_mode_set(struct drm_crtc *crtc,
13018+ struct drm_display_mode *mode,
13019+ struct drm_display_mode *adjusted_mode,
13020+ int x, int y)
13021+{
13022+ struct drm_device *dev = crtc->dev;
13023+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13024+ struct intel_crtc *intel_crtc = crtc->driver_private;
13025+ int pipe = intel_crtc->pipe;
13026+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
13027+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
13028+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
13029+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
13030+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
13031+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
13032+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
13033+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
13034+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
13035+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
13036+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
13037+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
13038+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
13039+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
13040+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
13041+ int refclk;
13042+ intel_clock_t clock;
13043+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
13044+ bool ok, is_sdvo = false, is_dvo = false;
13045+ bool is_crt = false, is_lvds = false, is_tv = false;
13046+ struct drm_mode_config *mode_config = &dev->mode_config;
13047+ struct drm_output *output;
13048+
13049+ if (!crtc->fb) {
13050+ DRM_ERROR("Can't set mode without attached fb\n");
13051+ return;
13052+ }
13053+
13054+ list_for_each_entry(output, &mode_config->output_list, head) {
13055+ struct intel_output *intel_output = output->driver_private;
13056+
13057+ if (output->crtc != crtc)
13058+ continue;
13059+
13060+ switch (intel_output->type) {
13061+ case INTEL_OUTPUT_LVDS:
13062+ is_lvds = TRUE;
13063+ break;
13064+ case INTEL_OUTPUT_SDVO:
13065+ is_sdvo = TRUE;
13066+ break;
13067+ case INTEL_OUTPUT_DVO:
13068+ is_dvo = TRUE;
13069+ break;
13070+ case INTEL_OUTPUT_TVOUT:
13071+ is_tv = TRUE;
13072+ break;
13073+ case INTEL_OUTPUT_ANALOG:
13074+ is_crt = TRUE;
13075+ break;
13076+ }
13077+ }
13078+
13079+ if (IS_I9XX(dev)) {
13080+ refclk = 96000;
13081+ } else {
13082+ refclk = 48000;
13083+ }
13084+
13085+ ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
13086+ if (!ok) {
13087+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
13088+ return;
13089+ }
13090+
13091+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
13092+
13093+ dpll = DPLL_VGA_MODE_DIS;
13094+ if (IS_I9XX(dev)) {
13095+ if (is_lvds) {
13096+ dpll |= DPLLB_MODE_LVDS;
13097+ if (IS_POULSBO(dev))
13098+ dpll |= DPLL_DVO_HIGH_SPEED;
13099+ } else
13100+ dpll |= DPLLB_MODE_DAC_SERIAL;
13101+ if (is_sdvo) {
13102+ dpll |= DPLL_DVO_HIGH_SPEED;
13103+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) {
13104+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
13105+ dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
13106+ }
13107+ }
13108+
13109+ /* compute bitmask from p1 value */
13110+ dpll |= (1 << (clock.p1 - 1)) << 16;
13111+ switch (clock.p2) {
13112+ case 5:
13113+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
13114+ break;
13115+ case 7:
13116+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
13117+ break;
13118+ case 10:
13119+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
13120+ break;
13121+ case 14:
13122+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
13123+ break;
13124+ }
13125+ if (IS_I965G(dev))
13126+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
13127+ } else {
13128+ if (is_lvds) {
13129+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
13130+ } else {
13131+ if (clock.p1 == 2)
13132+ dpll |= PLL_P1_DIVIDE_BY_TWO;
13133+ else
13134+ dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
13135+ if (clock.p2 == 4)
13136+ dpll |= PLL_P2_DIVIDE_BY_4;
13137+ }
13138+ }
13139+
13140+ if (is_tv) {
13141+ /* XXX: just matching BIOS for now */
13142+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
13143+ dpll |= 3;
13144+ }
13145+#if 0
13146+ else if (is_lvds)
13147+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
13148+#endif
13149+ else
13150+ dpll |= PLL_REF_INPUT_DREFCLK;
13151+
13152+ /* setup pipeconf */
13153+ pipeconf = I915_READ(pipeconf_reg);
13154+
13155+ /* Set up the display plane register */
13156+ dspcntr = DISPPLANE_GAMMA_ENABLE;
13157+
13158+ switch (crtc->fb->bits_per_pixel) {
13159+ case 8:
13160+ dspcntr |= DISPPLANE_8BPP;
13161+ break;
13162+ case 16:
13163+ if (crtc->fb->depth == 15)
13164+ dspcntr |= DISPPLANE_15_16BPP;
13165+ else
13166+ dspcntr |= DISPPLANE_16BPP;
13167+ break;
13168+ case 32:
13169+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
13170+ break;
13171+ default:
13172+ DRM_ERROR("Unknown color depth\n");
13173+ return;
13174+ }
13175+
13176+
13177+ if (pipe == 0)
13178+ dspcntr |= DISPPLANE_SEL_PIPE_A;
13179+ else
13180+ dspcntr |= DISPPLANE_SEL_PIPE_B;
13181+
13182+ if (pipe == 0 && !IS_I965G(dev)) {
13183+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
13184+ * core speed.
13185+ *
13186+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
13187+ * pipe == 0 check?
13188+ */
13189+ if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
13190+ pipeconf |= PIPEACONF_DOUBLE_WIDE;
13191+ else
13192+ pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
13193+ }
13194+
13195+ dspcntr |= DISPLAY_PLANE_ENABLE;
13196+ pipeconf |= PIPEACONF_ENABLE;
13197+ dpll |= DPLL_VCO_ENABLE;
13198+
13199+
13200+ /* Disable the panel fitter if it was on our pipe */
13201+ if (intel_panel_fitter_pipe(dev) == pipe)
13202+ I915_WRITE(PFIT_CONTROL, 0);
13203+
13204+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
13205+ drm_mode_debug_printmodeline(dev, mode);
13206+
13207+ /*psbPrintPll("chosen", &clock);*/
13208+ DRM_DEBUG("clock regs: 0x%08x, 0x%08x,dspntr is 0x%8x, pipeconf is 0x%8x\n", (int)dpll,
13209+ (int)fp,(int)dspcntr,(int)pipeconf);
13210+#if 0
13211+ if (!xf86ModesEqual(mode, adjusted_mode)) {
13212+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
13213+ "Adjusted mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
13214+ xf86PrintModeline(pScrn->scrnIndex, mode);
13215+ }
13216+ i830PrintPll("chosen", &clock);
13217+#endif
13218+
13219+ if (dpll & DPLL_VCO_ENABLE) {
13220+ I915_WRITE(fp_reg, fp);
13221+ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
13222+ I915_READ(dpll_reg);
13223+ udelay(150);
13224+ }
13225+
13226+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
13227+ * This is an exception to the general rule that mode_set doesn't turn
13228+ * things on.
13229+ */
13230+ if (is_lvds) {
13231+ u32 lvds = I915_READ(LVDS);
13232+
13233+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
13234+ /* Set the B0-B3 data pairs corresponding to whether we're going to
13235+ * set the DPLLs for dual-channel mode or not.
13236+ */
13237+ if (clock.p2 == 7)
13238+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
13239+ else
13240+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
13241+
13242+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
13243+ * appropriately here, but we need to look more thoroughly into how
13244+ * panels behave in the two modes.
13245+ */
13246+
13247+ I915_WRITE(LVDS, lvds);
13248+ I915_READ(LVDS);
13249+ }
13250+
13251+ I915_WRITE(fp_reg, fp);
13252+ I915_WRITE(dpll_reg, dpll);
13253+ I915_READ(dpll_reg);
13254+ /* Wait for the clocks to stabilize. */
13255+ udelay(150);
13256+
13257+ if (IS_I965G(dev)) {
13258+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
13259+ I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
13260+ ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
13261+ } else {
13262+ /* write it again -- the BIOS does, after all */
13263+ I915_WRITE(dpll_reg, dpll);
13264+ }
13265+ I915_READ(dpll_reg);
13266+ /* Wait for the clocks to stabilize. */
13267+ udelay(150);
13268+
13269+ I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
13270+ ((adjusted_mode->crtc_htotal - 1) << 16));
13271+ I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
13272+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
13273+ I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
13274+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
13275+ I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
13276+ ((adjusted_mode->crtc_vtotal - 1) << 16));
13277+ I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
13278+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
13279+ I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
13280+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
13281+ I915_WRITE(dspstride_reg, crtc->fb->pitch);
13282+ /* pipesrc and dspsize control the size that is scaled from, which should
13283+ * always be the user's requested size.
13284+ */
13285+ I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
13286+ I915_WRITE(dsppos_reg, 0);
13287+ I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
13288+ I915_WRITE(pipeconf_reg, pipeconf);
13289+ I915_READ(pipeconf_reg);
13290+
13291+ intel_wait_for_vblank(dev);
13292+
13293+ I915_WRITE(dspcntr_reg, dspcntr);
13294+
13295+ /* Flush the plane changes */
13296+ intel_pipe_set_base(crtc, x, y);
13297+
13298+#if 0
13299+ intel_set_vblank(dev);
13300+#endif
13301+
13302+ /* Disable the VGA plane that we never use */
13303+ I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
13304+
13305+ intel_wait_for_vblank(dev);
13306+}
13307+
13308+/** Loads the palette/gamma unit for the CRTC with the prepared values */
13309+void intel_crtc_load_lut(struct drm_crtc *crtc)
13310+{
13311+ struct drm_device *dev = crtc->dev;
13312+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13313+ struct intel_crtc *intel_crtc = crtc->driver_private;
13314+ int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
13315+ int i;
13316+
13317+ /* The clocks have to be on to load the palette. */
13318+ if (!crtc->enabled)
13319+ return;
13320+
13321+ for (i = 0; i < 256; i++) {
13322+ I915_WRITE(palreg + 4 * i,
13323+ (intel_crtc->lut_r[i] << 16) |
13324+ (intel_crtc->lut_g[i] << 8) |
13325+ intel_crtc->lut_b[i]);
13326+ }
13327+}
13328+
13329+/** Sets the color ramps on behalf of RandR */
13330+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
13331+ u16 blue, int regno)
13332+{
13333+ struct intel_crtc *intel_crtc = crtc->driver_private;
13334+
13335+ intel_crtc->lut_r[regno] = red >> 8;
13336+ intel_crtc->lut_g[regno] = green >> 8;
13337+ intel_crtc->lut_b[regno] = blue >> 8;
13338+}
13339+
13340+/* Returns the clock of the currently programmed mode of the given pipe. */
13341+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
13342+{
13343+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13344+ struct intel_crtc *intel_crtc = crtc->driver_private;
13345+ int pipe = intel_crtc->pipe;
13346+ u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
13347+ u32 fp;
13348+ intel_clock_t clock;
13349+
13350+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
13351+ fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
13352+ else
13353+ fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
13354+
13355+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
13356+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
13357+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
13358+ if (IS_I9XX(dev)) {
13359+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
13360+ DPLL_FPA01_P1_POST_DIV_SHIFT);
13361+
13362+ switch (dpll & DPLL_MODE_MASK) {
13363+ case DPLLB_MODE_DAC_SERIAL:
13364+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
13365+ 5 : 10;
13366+ break;
13367+ case DPLLB_MODE_LVDS:
13368+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
13369+ 7 : 14;
13370+ break;
13371+ default:
13372+ DRM_DEBUG("Unknown DPLL mode %08x in programmed "
13373+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
13374+ return 0;
13375+ }
13376+
13377+ /* XXX: Handle the 100Mhz refclk */
13378+ i9xx_clock(96000, &clock);
13379+ } else {
13380+ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
13381+
13382+ if (is_lvds) {
13383+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
13384+ DPLL_FPA01_P1_POST_DIV_SHIFT);
13385+ clock.p2 = 14;
13386+
13387+ if ((dpll & PLL_REF_INPUT_MASK) ==
13388+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
13389+ /* XXX: might not be 66MHz */
13390+ i8xx_clock(66000, &clock);
13391+ } else
13392+ i8xx_clock(48000, &clock);
13393+ } else {
13394+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
13395+ clock.p1 = 2;
13396+ else {
13397+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
13398+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
13399+ }
13400+ if (dpll & PLL_P2_DIVIDE_BY_4)
13401+ clock.p2 = 4;
13402+ else
13403+ clock.p2 = 2;
13404+
13405+ i8xx_clock(48000, &clock);
13406+ }
13407+ }
13408+
13409+ /* XXX: It would be nice to validate the clocks, but we can't reuse
13410+ * i830PllIsValid() because it relies on the xf86_config output
13411+ * configuration being accurate, which it isn't necessarily.
13412+ */
13413+
13414+ return clock.dot;
13415+}
13416+
13417+/** Returns the currently programmed mode of the given pipe. */
13418+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
13419+ struct drm_crtc *crtc)
13420+{
13421+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13422+ struct intel_crtc *intel_crtc = crtc->driver_private;
13423+ int pipe = intel_crtc->pipe;
13424+ struct drm_display_mode *mode;
13425+ int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
13426+ int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
13427+ int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
13428+ int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
13429+
13430+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
13431+ if (!mode)
13432+ return NULL;
13433+
13434+ mode->clock = intel_crtc_clock_get(dev, crtc);
13435+ mode->hdisplay = (htot & 0xffff) + 1;
13436+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
13437+ mode->hsync_start = (hsync & 0xffff) + 1;
13438+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
13439+ mode->vdisplay = (vtot & 0xffff) + 1;
13440+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
13441+ mode->vsync_start = (vsync & 0xffff) + 1;
13442+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
13443+
13444+ drm_mode_set_name(mode);
13445+ drm_mode_set_crtcinfo(mode, 0);
13446+
13447+ return mode;
13448+}
13449+
13450+static const struct drm_crtc_funcs intel_crtc_funcs = {
13451+ .dpms = intel_crtc_dpms,
13452+ .lock = intel_crtc_lock,
13453+ .unlock = intel_crtc_unlock,
13454+ .mode_fixup = intel_crtc_mode_fixup,
13455+ .mode_set = intel_crtc_mode_set,
13456+ .gamma_set = intel_crtc_gamma_set,
13457+ .prepare = intel_crtc_prepare,
13458+ .commit = intel_crtc_commit,
13459+};
13460+
13461+
13462+void intel_crtc_init(struct drm_device *dev, int pipe)
13463+{
13464+ struct drm_crtc *crtc;
13465+ struct intel_crtc *intel_crtc;
13466+ int i;
13467+
13468+ crtc = drm_crtc_create(dev, &intel_crtc_funcs);
13469+ if (crtc == NULL)
13470+ return;
13471+
13472+ intel_crtc = kzalloc(sizeof(struct intel_crtc), GFP_KERNEL);
13473+ if (intel_crtc == NULL) {
13474+ kfree(crtc);
13475+ return;
13476+ }
13477+
13478+ intel_crtc->pipe = pipe;
13479+ for (i = 0; i < 256; i++) {
13480+ intel_crtc->lut_r[i] = i;
13481+ intel_crtc->lut_g[i] = i;
13482+ intel_crtc->lut_b[i] = i;
13483+ }
13484+
13485+ crtc->driver_private = intel_crtc;
13486+}
13487+
13488+struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
13489+{
13490+ struct drm_crtc *crtc = NULL;
13491+
13492+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
13493+ struct intel_crtc *intel_crtc = crtc->driver_private;
13494+ if (intel_crtc->pipe == pipe)
13495+ break;
13496+ }
13497+ return crtc;
13498+}
13499+
13500+int intel_output_clones(struct drm_device *dev, int type_mask)
13501+{
13502+ int index_mask = 0;
13503+ struct drm_output *output;
13504+ int entry = 0;
13505+
13506+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
13507+ struct intel_output *intel_output = output->driver_private;
13508+ if (type_mask & (1 << intel_output->type))
13509+ index_mask |= (1 << entry);
13510+ entry++;
13511+ }
13512+ return index_mask;
13513+}
13514+
13515+
13516+static void intel_setup_outputs(struct drm_device *dev)
13517+{
13518+ struct drm_output *output;
13519+
13520+ if (!IS_POULSBO(dev))
13521+ intel_crt_init(dev);
13522+
13523+ /* Set up integrated LVDS */
13524+ if (IS_MOBILE(dev) && !IS_I830(dev))
13525+ intel_lvds_init(dev);
13526+
13527+ if (IS_I9XX(dev)) {
13528+ intel_sdvo_init(dev, SDVOB);
13529+ intel_sdvo_init(dev, SDVOC);
13530+ }
13531+
13532+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
13533+ struct intel_output *intel_output = output->driver_private;
13534+ int crtc_mask = 0, clone_mask = 0;
13535+
13536+ /* valid crtcs */
13537+ switch(intel_output->type) {
13538+ case INTEL_OUTPUT_DVO:
13539+ case INTEL_OUTPUT_SDVO:
13540+ crtc_mask = ((1 << 0)|
13541+ (1 << 1));
13542+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
13543+ (1 << INTEL_OUTPUT_DVO) |
13544+ (1 << INTEL_OUTPUT_SDVO));
13545+ break;
13546+ case INTEL_OUTPUT_ANALOG:
13547+ crtc_mask = ((1 << 0)|
13548+ (1 << 1));
13549+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
13550+ (1 << INTEL_OUTPUT_DVO) |
13551+ (1 << INTEL_OUTPUT_SDVO));
13552+ break;
13553+ case INTEL_OUTPUT_LVDS:
13554+ crtc_mask = (1 << 1);
13555+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
13556+ break;
13557+ case INTEL_OUTPUT_TVOUT:
13558+ crtc_mask = ((1 << 0) |
13559+ (1 << 1));
13560+ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
13561+ break;
13562+ }
13563+ output->possible_crtcs = crtc_mask;
13564+ output->possible_clones = intel_output_clones(dev, clone_mask);
13565+ }
13566+}
13567+
13568+void intel_modeset_init(struct drm_device *dev)
13569+{
13570+ int num_pipe;
13571+ int i;
13572+
13573+ drm_mode_config_init(dev);
13574+
13575+ dev->mode_config.min_width = 0;
13576+ dev->mode_config.min_height = 0;
13577+
13578+ dev->mode_config.max_width = 4096;
13579+ dev->mode_config.max_height = 4096;
13580+
13581+ /* set memory base */
13582+ if (IS_I9XX(dev))
13583+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
13584+ else
13585+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
13586+
13587+ if (IS_MOBILE(dev) || IS_I9XX(dev))
13588+ num_pipe = 2;
13589+ else
13590+ num_pipe = 1;
13591+ DRM_DEBUG("%d display pipe%s available.\n",
13592+ num_pipe, num_pipe > 1 ? "s" : "");
13593+
13594+ for (i = 0; i < num_pipe; i++) {
13595+ intel_crtc_init(dev, i);
13596+ }
13597+
13598+ intel_setup_outputs(dev);
13599+
13600+ //drm_initial_config(dev, false);
13601+}
13602+
13603+void intel_modeset_cleanup(struct drm_device *dev)
13604+{
13605+ drm_mode_config_cleanup(dev);
13606+}
13607Index: linux-2.6.27/drivers/gpu/drm/psb/intel_drv.h
13608===================================================================
13609--- /dev/null 1970-01-01 00:00:00.000000000 +0000
13610+++ linux-2.6.27/drivers/gpu/drm/psb/intel_drv.h 2009-01-14 11:58:01.000000000 +0000
13611@@ -0,0 +1,91 @@
13612+/*
13613+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
13614+ * Copyright (c) 2007 Intel Corporation
13615+ * Jesse Barnes <jesse.barnes@intel.com>
13616+ */
13617+#ifndef __INTEL_DRV_H__
13618+#define __INTEL_DRV_H__
13619+
13620+#include <linux/i2c.h>
13621+#include <linux/i2c-id.h>
13622+#include <linux/i2c-algo-bit.h>
13623+#include "drm_crtc.h"
13624+
13625+/*
13626+ * Display related stuff
13627+ */
13628+
13629+/* store information about an Ixxx DVO */
13630+/* The i830->i865 use multiple DVOs with multiple i2cs */
13631+/* the i915, i945 have a single sDVO i2c bus - which is different */
13632+#define MAX_OUTPUTS 6
13633+
13634+#define INTEL_I2C_BUS_DVO 1
13635+#define INTEL_I2C_BUS_SDVO 2
13636+
13637+/* these are outputs from the chip - integrated only
13638+ external chips are via DVO or SDVO output */
13639+#define INTEL_OUTPUT_UNUSED 0
13640+#define INTEL_OUTPUT_ANALOG 1
13641+#define INTEL_OUTPUT_DVO 2
13642+#define INTEL_OUTPUT_SDVO 3
13643+#define INTEL_OUTPUT_LVDS 4
13644+#define INTEL_OUTPUT_TVOUT 5
13645+
13646+#define INTEL_DVO_CHIP_NONE 0
13647+#define INTEL_DVO_CHIP_LVDS 1
13648+#define INTEL_DVO_CHIP_TMDS 2
13649+#define INTEL_DVO_CHIP_TVOUT 4
13650+
13651+struct intel_i2c_chan {
13652+ struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
13653+ u32 reg; /* GPIO reg */
13654+ struct i2c_adapter adapter;
13655+ struct i2c_algo_bit_data algo;
13656+ u8 slave_addr;
13657+};
13658+
13659+struct intel_output {
13660+ int type;
13661+ struct intel_i2c_chan *i2c_bus; /* for control functions */
13662+ struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
13663+ bool load_detect_tmp;
13664+ void *dev_priv;
13665+};
13666+
13667+struct intel_crtc {
13668+ int pipe;
13669+ u8 lut_r[256], lut_g[256], lut_b[256];
13670+};
13671+
13672+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
13673+ const char *name);
13674+void intel_i2c_destroy(struct intel_i2c_chan *chan);
13675+int intel_ddc_get_modes(struct drm_output *output);
13676+extern bool intel_ddc_probe(struct drm_output *output);
13677+
13678+extern void intel_crt_init(struct drm_device *dev);
13679+extern void intel_sdvo_init(struct drm_device *dev, int output_device);
13680+extern void intel_lvds_init(struct drm_device *dev);
13681+
13682+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
13683+extern void intel_output_prepare (struct drm_output *output);
13684+extern void intel_output_commit (struct drm_output *output);
13685+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
13686+ struct drm_crtc *crtc);
13687+extern void intel_wait_for_vblank(struct drm_device *dev);
13688+extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
13689+
13690+extern int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
13691+extern int intelfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
13692+
13693+extern void intel_modeset_init(struct drm_device *dev);
13694+extern void intel_modeset_cleanup(struct drm_device *dev);
13695+
13696+#define WA_NO_FB_GARBAGE_DISPLAY
13697+#ifdef WA_NO_FB_GARBAGE_DISPLAY
13698+extern void intel_crtc_mode_restore(struct drm_crtc *crtc);
13699+extern void intel_crtc_mode_save(struct drm_crtc *crtc);
13700+#endif
13701+
13702+#endif /* __INTEL_DRV_H__ */
13703Index: linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.c
13704===================================================================
13705--- /dev/null 1970-01-01 00:00:00.000000000 +0000
13706+++ linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.c 2009-01-14 11:58:01.000000000 +0000
13707@@ -0,0 +1,913 @@
13708+/*
13709+ * Copyright © 2006-2007 Intel Corporation
13710+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
13711+ *
13712+ * Permission is hereby granted, free of charge, to any person obtaining a
13713+ * copy of this software and associated documentation files (the "Software"),
13714+ * to deal in the Software without restriction, including without limitation
13715+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13716+ * and/or sell copies of the Software, and to permit persons to whom the
13717+ * Software is furnished to do so, subject to the following conditions:
13718+ *
13719+ * The above copyright notice and this permission notice (including the next
13720+ * paragraph) shall be included in all copies or substantial portions of the
13721+ * Software.
13722+ *
13723+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13724+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13725+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13726+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
13727+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
13728+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
13729+ * DEALINGS IN THE SOFTWARE.
13730+ *
13731+ * Authors:
13732+ * Eric Anholt <eric@anholt.net>
13733+ * Dave Airlie <airlied@linux.ie>
13734+ * Jesse Barnes <jesse.barnes@intel.com>
13735+ */
13736+
13737+#include <linux/i2c.h>
13738+#include <linux/backlight.h>
13739+#include "drm_crtc.h"
13740+#include "drm_edid.h"
13741+#include "intel_lvds.h"
13742+
13743+#include <acpi/acpi_drivers.h>
13744+
13745+int drm_intel_ignore_acpi = 0;
13746+MODULE_PARM_DESC(ignore_acpi, "Ignore ACPI");
13747+module_param_named(ignore_acpi, drm_intel_ignore_acpi, int, 0600);
13748+
13749+uint8_t blc_type;
13750+uint8_t blc_pol;
13751+uint8_t blc_freq;
13752+uint8_t blc_minbrightness;
13753+uint8_t blc_i2caddr;
13754+uint8_t blc_brightnesscmd;
13755+int lvds_backlight; /* restore backlight to this value */
13756+
13757+struct intel_i2c_chan *lvds_i2c_bus;
13758+u32 CoreClock;
13759+u32 PWMControlRegFreq;
13760+
13761+unsigned char * dev_OpRegion = NULL;
13762+unsigned int dev_OpRegionSize;
13763+
13764+#define PCI_PORT5_REG80_FFUSE 0xD0058000
13765+#define PCI_PORT5_REG80_MAXRES_INT_EN 0x0040
13766+#define MAX_HDISPLAY 800
13767+#define MAX_VDISPLAY 480
13768+bool sku_bMaxResEnableInt = false;
13769+
13770+/** Set BLC through I2C*/
13771+static int
13772+LVDSI2CSetBacklight(struct drm_device *dev, unsigned char ch)
13773+{
13774+ u8 out_buf[2];
13775+ struct i2c_msg msgs[] = {
13776+ {
13777+ .addr = lvds_i2c_bus->slave_addr,
13778+ .flags = 0,
13779+ .len = 2,
13780+ .buf = out_buf,
13781+ }
13782+ };
13783+
13784+ DRM_INFO("LVDSI2CSetBacklight: the slave_addr is 0x%x, the backlight value is %d\n", lvds_i2c_bus->slave_addr, ch);
13785+
13786+ out_buf[0] = blc_brightnesscmd;
13787+ out_buf[1] = ch;
13788+
13789+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
13790+ {
13791+ DRM_INFO("LVDSI2CSetBacklight: i2c_transfer done\n");
13792+ return true;
13793+ }
13794+
13795+ DRM_ERROR("msg: i2c_transfer error\n");
13796+ return false;
13797+}
13798+
13799+/**
13800+ * Calculate PWM control register value.
13801+ */
13802+static int
13803+LVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
13804+{
13805+ unsigned long value = 0;
13806+
13807+ DRM_INFO("Enter LVDSCalculatePWMCtrlRegFreq.\n");
13808+ if (blc_freq == 0) {
13809+ DRM_ERROR("LVDSCalculatePWMCtrlRegFreq: Frequency Requested is 0.\n");
13810+ return FALSE;
13811+ }
13812+ value = (CoreClock * MHz);
13813+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
13814+ value = (value * BLC_PWM_PRECISION_FACTOR);
13815+ value = (value / blc_freq);
13816+ value = (value / BLC_PWM_PRECISION_FACTOR);
13817+
13818+ if (value > (unsigned long)BLC_MAX_PWM_REG_FREQ ||
13819+ value < (unsigned long)BLC_MIN_PWM_REG_FREQ) {
13820+ return FALSE;
13821+ } else {
13822+ PWMControlRegFreq = ((u32)value & ~BLC_PWM_LEGACY_MODE_ENABLE);
13823+ return TRUE;
13824+ }
13825+}
13826+
13827+/**
13828+ * Returns the maximum level of the backlight duty cycle field.
13829+ */
13830+static u32
13831+LVDSGetPWMMaxBacklight(struct drm_device *dev)
13832+{
13833+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13834+ u32 max_pwm_blc = 0;
13835+
13836+ max_pwm_blc = ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> \
13837+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
13838+
13839+ if (!(max_pwm_blc & BLC_MAX_PWM_REG_FREQ)) {
13840+ if (LVDSCalculatePWMCtrlRegFreq(dev)) {
13841+ max_pwm_blc = PWMControlRegFreq;
13842+ }
13843+ }
13844+
13845+ DRM_INFO("LVDSGetPWMMaxBacklight: the max_pwm_blc is %d.\n", max_pwm_blc);
13846+ return max_pwm_blc;
13847+}
13848+
13849+
13850+/**
13851+ * Sets the backlight level.
13852+ *
13853+ * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
13854+ */
13855+static void intel_lvds_set_backlight(struct drm_device *dev, int level)
13856+{
13857+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13858+ //u32 blc_pwm_ctl;
13859+
13860+ /*
13861+ blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
13862+ I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
13863+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
13864+ */
13865+ u32 newbacklight = 0;
13866+
13867+ DRM_INFO("intel_lvds_set_backlight: the level is %d\n", level);
13868+
13869+ if(blc_type == BLC_I2C_TYPE){
13870+ newbacklight = BRIGHTNESS_MASK & ((unsigned long)level * \
13871+ BRIGHTNESS_MASK /BRIGHTNESS_MAX_LEVEL);
13872+
13873+ if (blc_pol == BLC_POLARITY_INVERSE) {
13874+ newbacklight = BRIGHTNESS_MASK - newbacklight;
13875+ }
13876+
13877+ LVDSI2CSetBacklight(dev, newbacklight);
13878+
13879+ } else if (blc_type == BLC_PWM_TYPE) {
13880+ u32 max_pwm_blc = LVDSGetPWMMaxBacklight(dev);
13881+
13882+ u32 blc_pwm_duty_cycle;
13883+
13884+ /* Provent LVDS going to total black */
13885+ if ( level < 20) {
13886+ level = 20;
13887+ }
13888+ blc_pwm_duty_cycle = level * max_pwm_blc/BRIGHTNESS_MAX_LEVEL;
13889+
13890+ if (blc_pol == BLC_POLARITY_INVERSE) {
13891+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
13892+ }
13893+
13894+ blc_pwm_duty_cycle &= BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
13895+
13896+ I915_WRITE(BLC_PWM_CTL,
13897+ (max_pwm_blc << BACKLIGHT_PWM_CTL_SHIFT)| (blc_pwm_duty_cycle));
13898+ }
13899+}
13900+
13901+/**
13902+ * Returns the maximum level of the backlight duty cycle field.
13903+ */
13904+static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
13905+{
13906+ return BRIGHTNESS_MAX_LEVEL;
13907+ /*
13908+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13909+
13910+ return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
13911+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
13912+ */
13913+}
13914+
13915+/**
13916+ * Sets the power state for the panel.
13917+ */
13918+static void intel_lvds_set_power(struct drm_device *dev, bool on)
13919+{
13920+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13921+ u32 pp_status;
13922+
13923+ DRM_INFO("intel_lvds_set_power: %d\n", on);
13924+ if (on) {
13925+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
13926+ POWER_TARGET_ON);
13927+ do {
13928+ pp_status = I915_READ(PP_STATUS);
13929+ } while ((pp_status & PP_ON) == 0);
13930+
13931+ intel_lvds_set_backlight(dev, lvds_backlight);
13932+ } else {
13933+ intel_lvds_set_backlight(dev, 0);
13934+
13935+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
13936+ ~POWER_TARGET_ON);
13937+ do {
13938+ pp_status = I915_READ(PP_STATUS);
13939+ } while (pp_status & PP_ON);
13940+ }
13941+}
13942+
13943+static void intel_lvds_dpms(struct drm_output *output, int mode)
13944+{
13945+ struct drm_device *dev = output->dev;
13946+
13947+ DRM_INFO("intel_lvds_dpms: the mode is %d\n", mode);
13948+ if (mode == DPMSModeOn)
13949+ intel_lvds_set_power(dev, true);
13950+ else
13951+ intel_lvds_set_power(dev, false);
13952+
13953+ /* XXX: We never power down the LVDS pairs. */
13954+}
13955+
13956+static void intel_lvds_save(struct drm_output *output)
13957+{
13958+ struct drm_device *dev = output->dev;
13959+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13960+
13961+ dev_priv->savePP_ON = I915_READ(LVDSPP_ON);
13962+ dev_priv->savePP_OFF = I915_READ(LVDSPP_OFF);
13963+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
13964+ dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
13965+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
13966+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
13967+ BACKLIGHT_DUTY_CYCLE_MASK);
13968+
13969+ /*
13970+ * If the light is off at server startup, just make it full brightness
13971+ */
13972+ if (dev_priv->backlight_duty_cycle == 0)
13973+ lvds_backlight=
13974+ intel_lvds_get_max_backlight(dev);
13975+}
13976+
13977+static void intel_lvds_restore(struct drm_output *output)
13978+{
13979+ struct drm_device *dev = output->dev;
13980+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13981+
13982+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
13983+ I915_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
13984+ I915_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
13985+ I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
13986+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
13987+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
13988+ intel_lvds_set_power(dev, true);
13989+ else
13990+ intel_lvds_set_power(dev, false);
13991+}
13992+
13993+static int intel_lvds_mode_valid(struct drm_output *output,
13994+ struct drm_display_mode *mode)
13995+{
13996+ struct drm_device *dev = output->dev;
13997+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
13998+ struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
13999+
14000+ if (fixed_mode) {
14001+ if (mode->hdisplay > fixed_mode->hdisplay)
14002+ return MODE_PANEL;
14003+ if (mode->vdisplay > fixed_mode->vdisplay)
14004+ return MODE_PANEL;
14005+ }
14006+
14007+ if (IS_POULSBO(dev) && sku_bMaxResEnableInt) {
14008+ if (mode->hdisplay > MAX_HDISPLAY)
14009+ return MODE_PANEL;
14010+ if (mode->vdisplay > MAX_VDISPLAY)
14011+ return MODE_PANEL;
14012+ }
14013+
14014+ return MODE_OK;
14015+}
14016+
14017+static bool intel_lvds_mode_fixup(struct drm_output *output,
14018+ struct drm_display_mode *mode,
14019+ struct drm_display_mode *adjusted_mode)
14020+{
14021+ struct drm_device *dev = output->dev;
14022+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14023+ struct intel_crtc *intel_crtc = output->crtc->driver_private;
14024+ struct drm_output *tmp_output;
14025+
14026+ /* Should never happen!! */
14027+ if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
14028+ DRM_ERROR(KERN_ERR "Can't support LVDS on pipe A\n");
14029+ return false;
14030+ }
14031+
14032+ /* Should never happen!! */
14033+ list_for_each_entry(tmp_output, &dev->mode_config.output_list, head) {
14034+ if (tmp_output != output && tmp_output->crtc == output->crtc) {
14035+ DRM_ERROR("Can't enable LVDS and another "
14036+ "output on the same pipe\n");
14037+ return false;
14038+ }
14039+ }
14040+
14041+ /*
14042+ * If we have timings from the BIOS for the panel, put them in
14043+ * to the adjusted mode. The CRTC will be set up for this mode,
14044+ * with the panel scaling set up to source from the H/VDisplay
14045+ * of the original mode.
14046+ */
14047+ if (dev_priv->panel_fixed_mode != NULL) {
14048+ adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
14049+ adjusted_mode->hsync_start =
14050+ dev_priv->panel_fixed_mode->hsync_start;
14051+ adjusted_mode->hsync_end =
14052+ dev_priv->panel_fixed_mode->hsync_end;
14053+ adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
14054+ adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
14055+ adjusted_mode->vsync_start =
14056+ dev_priv->panel_fixed_mode->vsync_start;
14057+ adjusted_mode->vsync_end =
14058+ dev_priv->panel_fixed_mode->vsync_end;
14059+ adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
14060+ adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
14061+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
14062+ }
14063+
14064+ /*
14065+ * XXX: It would be nice to support lower refresh rates on the
14066+ * panels to reduce power consumption, and perhaps match the
14067+ * user's requested refresh rate.
14068+ */
14069+
14070+ return true;
14071+}
14072+
14073+static void intel_lvds_prepare(struct drm_output *output)
14074+{
14075+ struct drm_device *dev = output->dev;
14076+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14077+
14078+ DRM_INFO("intel_lvds_prepare\n");
14079+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
14080+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
14081+ BACKLIGHT_DUTY_CYCLE_MASK);
14082+
14083+ intel_lvds_set_power(dev, false);
14084+}
14085+
14086+static void intel_lvds_commit( struct drm_output *output)
14087+{
14088+ struct drm_device *dev = output->dev;
14089+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14090+
14091+ DRM_INFO("intel_lvds_commit\n");
14092+ if (dev_priv->backlight_duty_cycle == 0)
14093+ //dev_priv->backlight_duty_cycle =
14094+ lvds_backlight =
14095+ intel_lvds_get_max_backlight(dev);
14096+
14097+ intel_lvds_set_power(dev, true);
14098+}
14099+
14100+static void intel_lvds_mode_set(struct drm_output *output,
14101+ struct drm_display_mode *mode,
14102+ struct drm_display_mode *adjusted_mode)
14103+{
14104+ struct drm_device *dev = output->dev;
14105+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14106+ struct intel_crtc *intel_crtc = output->crtc->driver_private;
14107+ u32 pfit_control;
14108+
14109+ /*
14110+ * The LVDS pin pair will already have been turned on in the
14111+ * intel_crtc_mode_set since it has a large impact on the DPLL
14112+ * settings.
14113+ */
14114+
14115+ /*
14116+ * Enable automatic panel scaling so that non-native modes fill the
14117+ * screen. Should be enabled before the pipe is enabled, according to
14118+ * register description and PRM.
14119+ */
14120+ if (mode->hdisplay != adjusted_mode->hdisplay ||
14121+ mode->vdisplay != adjusted_mode->vdisplay)
14122+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
14123+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
14124+ HORIZ_INTERP_BILINEAR);
14125+ else
14126+ pfit_control = 0;
14127+
14128+ if (!IS_I965G(dev)) {
14129+ if (dev_priv->panel_wants_dither)
14130+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
14131+ }
14132+ else
14133+ pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
14134+
14135+ I915_WRITE(PFIT_CONTROL, pfit_control);
14136+}
14137+
14138+/**
14139+ * Detect the LVDS connection.
14140+ *
14141+ * This always returns OUTPUT_STATUS_CONNECTED. This output should only have
14142+ * been set up if the LVDS was actually connected anyway.
14143+ */
14144+static enum drm_output_status intel_lvds_detect(struct drm_output *output)
14145+{
14146+ return output_status_connected;
14147+}
14148+
14149+/**
14150+ * Return the list of DDC modes if available.
14151+ */
14152+static int intel_lvds_get_modes(struct drm_output *output)
14153+{
14154+ struct drm_device *dev = output->dev;
14155+ struct intel_output *intel_output = output->driver_private;
14156+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14157+ struct edid *edid;
14158+
14159+ /* Try reading DDC from the adapter */
14160+ edid = (struct edid *)drm_ddc_read(&intel_output->ddc_bus->adapter);
14161+
14162+ if (!edid) {
14163+ DRM_INFO("%s: no EDID data from device, reading ACPI _DDC data.\n",
14164+ output->name);
14165+ edid = kzalloc(sizeof(struct edid), GFP_KERNEL);
14166+		drm_get_acpi_edid(ACPI_EDID_LCD, (char*)edid, 128); /* FIXME(review): edid from kzalloc above is not NULL-checked before use */
14167+ }
14168+
14169+ if (edid)
14170+ drm_add_edid_modes(output, edid);
14171+
14172+ /* Didn't get an EDID */
14173+ if (!output->monitor_info) {
14174+ struct drm_display_info *dspinfo;
14175+ dspinfo = kzalloc(sizeof(*output->monitor_info), GFP_KERNEL);
14176+ if (!dspinfo)
14177+ goto out;
14178+
14179+ /* Set wide sync ranges so we get all modes
14180+ * handed to valid_mode for checking
14181+ */
14182+ dspinfo->min_vfreq = 0;
14183+ dspinfo->max_vfreq = 200;
14184+ dspinfo->min_hfreq = 0;
14185+ dspinfo->max_hfreq = 200;
14186+ output->monitor_info = dspinfo;
14187+ }
14188+
14189+out:
14190+ if (dev_priv->panel_fixed_mode != NULL) {
14191+ struct drm_display_mode *mode =
14192+ drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
14193+ drm_mode_probed_add(output, mode);
14194+ return 1;
14195+ }
14196+
14197+ return 0;
14198+}
14199+
14200+/* added by alek du to add /sys/class/backlight interface */
14201+static int update_bl_status(struct backlight_device *bd)
14202+{
14203+ int value = bd->props.brightness;
14204+
14205+ struct drm_device *dev = bl_get_data(bd);
14206+
14207+ lvds_backlight = value;
14208+ intel_lvds_set_backlight(dev, value);
14209+ /*value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
14210+ intel_lvds_set_power(dev,value);*/
14211+ return 0;
14212+}
14213+
14214+static int read_brightness(struct backlight_device *bd)
14215+{
14216+ return bd->props.brightness;
14217+}
14218+
14219+static struct backlight_device *psbbl_device = NULL;
14220+static struct backlight_ops psbbl_ops = {
14221+ .get_brightness = read_brightness,
14222+ .update_status = update_bl_status,
14223+};
14224+
14225+/**
14226+ * intel_lvds_destroy - unregister and free LVDS structures
14227+ * @output: output to free
14228+ *
14229+ * Unregister the DDC bus for this output then free the driver private
14230+ * structure.
14231+ */
14232+static void intel_lvds_destroy(struct drm_output *output)
14233+{
14234+ struct intel_output *intel_output = output->driver_private;
14235+
14236+ if (psbbl_device){
14237+ backlight_device_unregister(psbbl_device);
14238+ }
14239+ if(dev_OpRegion != NULL)
14240+ iounmap(dev_OpRegion);
14241+ intel_i2c_destroy(intel_output->ddc_bus);
14242+ intel_i2c_destroy(lvds_i2c_bus);
14243+ kfree(output->driver_private);
14244+}
14245+
14246+static const struct drm_output_funcs intel_lvds_output_funcs = {
14247+ .dpms = intel_lvds_dpms,
14248+ .save = intel_lvds_save,
14249+ .restore = intel_lvds_restore,
14250+ .mode_valid = intel_lvds_mode_valid,
14251+ .mode_fixup = intel_lvds_mode_fixup,
14252+ .prepare = intel_lvds_prepare,
14253+ .mode_set = intel_lvds_mode_set,
14254+ .commit = intel_lvds_commit,
14255+ .detect = intel_lvds_detect,
14256+ .get_modes = intel_lvds_get_modes,
14257+ .cleanup = intel_lvds_destroy
14258+};
14259+
14260+int intel_get_acpi_dod(char *method)
14261+{
14262+ int status;
14263+ int found = 0;
14264+ int i;
14265+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
14266+ union acpi_object *dod = NULL;
14267+ union acpi_object *obj;
14268+
14269+ status = acpi_evaluate_object(NULL, method, NULL, &buffer);
14270+ if (ACPI_FAILURE(status))
14271+ return -ENODEV;
14272+
14273+ dod = buffer.pointer;
14274+ if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
14275+ status = -EFAULT;
14276+ goto out;
14277+ }
14278+
14279+ DRM_DEBUG("Found %d video heads in _DOD\n", dod->package.count);
14280+
14281+ for (i = 0; i < dod->package.count; i++) {
14282+ obj = &dod->package.elements[i];
14283+
14284+ if (obj->type != ACPI_TYPE_INTEGER) {
14285+ DRM_DEBUG("Invalid _DOD data\n");
14286+ } else {
14287+ DRM_DEBUG("dod element[%d] = 0x%x\n", i,
14288+ (int)obj->integer.value);
14289+
14290+ /* look for an LVDS type */
14291+ if (obj->integer.value & 0x00000400)
14292+ found = 1;
14293+ }
14294+ }
14295+ out:
14296+ kfree(buffer.pointer);
14297+ return found;
14298+}
14299+/**
14300+ * intel_lvds_init - setup LVDS outputs on this device
14301+ * @dev: drm device
14302+ *
14303+ * Create the output, register the LVDS DDC bus, and try to figure out what
14304+ * modes we can display on the LVDS panel (if present).
14305+ */
14306+void intel_lvds_init(struct drm_device *dev)
14307+{
14308+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
14309+ struct drm_output *output;
14310+ struct intel_output *intel_output;
14311+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
14312+ struct drm_crtc *crtc;
14313+ u32 lvds;
14314+ int pipe;
14315+
14316+ if (!drm_intel_ignore_acpi && !intel_get_acpi_dod(ACPI_DOD))
14317+ return;
14318+
14319+ output = drm_output_create(dev, &intel_lvds_output_funcs, "LVDS");
14320+ if (!output)
14321+ return;
14322+
14323+ intel_output = kmalloc(sizeof(struct intel_output), GFP_KERNEL);
14324+ if (!intel_output) {
14325+ drm_output_destroy(output);
14326+ return;
14327+ }
14328+
14329+ intel_output->type = INTEL_OUTPUT_LVDS;
14330+ output->driver_private = intel_output;
14331+ output->subpixel_order = SubPixelHorizontalRGB;
14332+ output->interlace_allowed = FALSE;
14333+ output->doublescan_allowed = FALSE;
14334+
14335+ //initialize the I2C bus and BLC data
14336+ lvds_i2c_bus = intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
14337+ if (!lvds_i2c_bus) {
14338+ dev_printk(KERN_ERR, &dev->pdev->dev, "i2c bus registration "
14339+ "failed.\n");
14340+ return;
14341+ }
14342+ lvds_i2c_bus->slave_addr = 0x2c;//0x58;
14343+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
14344+ blc_type = 0;
14345+ blc_pol = 0;
14346+
14347+ if (1) { //get the BLC init data from VBT
14348+ u32 OpRegion_Phys;
14349+ unsigned int OpRegion_Size = 0x100;
14350+ OpRegionPtr OpRegion;
14351+ char *OpRegion_String = "IntelGraphicsMem";
14352+
14353+ struct vbt_header *vbt;
14354+ struct bdb_header *bdb;
14355+ int vbt_off, bdb_off, bdb_block_off, block_size;
14356+ int panel_type = -1;
14357+ unsigned char *bios;
14358+ unsigned char *vbt_buf;
14359+
14360+ pci_read_config_dword(dev->pdev, 0xFC, &OpRegion_Phys);
14361+
14362+ //dev_OpRegion = phys_to_virt(OpRegion_Phys);
14363+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_Size);
14364+ dev_OpRegionSize = OpRegion_Size;
14365+
14366+ OpRegion = (OpRegionPtr) dev_OpRegion;
14367+
14368+ if (!memcmp(OpRegion->sign, OpRegion_String, 16)) {
14369+ unsigned int OpRegion_NewSize;
14370+
14371+ OpRegion_NewSize = OpRegion->size * 1024;
14372+
14373+ dev_OpRegionSize = OpRegion_NewSize;
14374+
14375+ iounmap(dev_OpRegion);
14376+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_NewSize);
14377+ } else {
14378+ iounmap(dev_OpRegion);
14379+ dev_OpRegion = NULL;
14380+ }
14381+
14382+ if((dev_OpRegion != NULL)&&(dev_OpRegionSize >= OFFSET_OPREGION_VBT)) {
14383+ DRM_INFO("intel_lvds_init: OpRegion has the VBT address\n");
14384+ vbt_buf = dev_OpRegion + OFFSET_OPREGION_VBT;
14385+ vbt = (struct vbt_header *)(dev_OpRegion + OFFSET_OPREGION_VBT);
14386+ } else {
14387+ DRM_INFO("intel_lvds_init: No OpRegion, use the bios at fixed address 0xc0000\n");
14388+ bios = phys_to_virt(0xC0000);
14389+ if(*((u16 *)bios) != 0xAA55){
14390+ bios = NULL;
14391+ DRM_ERROR("the bios is incorrect\n");
14392+ goto blc_out;
14393+ }
14394+ vbt_off = bios[0x1a] | (bios[0x1a + 1] << 8);
14395+ DRM_INFO("intel_lvds_init: the vbt off is %x\n", vbt_off);
14396+ vbt_buf = bios + vbt_off;
14397+ vbt = (struct vbt_header *)(bios + vbt_off);
14398+ }
14399+
14400+ bdb_off = vbt->bdb_offset;
14401+ bdb = (struct bdb_header *)(vbt_buf + bdb_off);
14402+
14403+ DRM_INFO("intel_lvds_init: The bdb->signature is %s, the bdb_off is %d\n",bdb->signature, bdb_off);
14404+
14405+ if (memcmp(bdb->signature, "BIOS_DATA_BLOCK ", 16) != 0) {
14406+			DRM_ERROR("the vbt is invalid\n");
14407+ goto blc_out;
14408+ }
14409+
14410+ for (bdb_block_off = bdb->header_size; bdb_block_off < bdb->bdb_size;
14411+ bdb_block_off += block_size) {
14412+ int start = bdb_off + bdb_block_off;
14413+ int id, num_entries;
14414+ struct lvds_bdb_1 *lvds1;
14415+ struct lvds_blc *lvdsblc;
14416+ struct lvds_bdb_blc *bdbblc;
14417+
14418+ id = vbt_buf[start];
14419+ block_size = (vbt_buf[start + 1] | (vbt_buf[start + 2] << 8)) + 3;
14420+ switch (id) {
14421+ case 40:
14422+ lvds1 = (struct lvds_bdb_1 *)(vbt_buf+ start);
14423+ panel_type = lvds1->panel_type;
14424+ //if (lvds1->caps & LVDS_CAP_DITHER)
14425+ // *panelWantsDither = TRUE;
14426+ break;
14427+
14428+ case 43:
14429+ bdbblc = (struct lvds_bdb_blc *)(vbt_buf + start);
14430+ num_entries = bdbblc->table_size? (bdbblc->size - \
14431+ sizeof(bdbblc->table_size))/bdbblc->table_size : 0;
14432+			if (num_entries << 16 && bdbblc->table_size == sizeof(struct lvds_blc)) { /* FIXME(review): '<< 16' looks like a typo (a comparison such as '<= 16' was likely intended); as written this only tests num_entries != 0 */
14433+ lvdsblc = (struct lvds_blc *)(vbt_buf + start + sizeof(struct lvds_bdb_blc));
14434+ lvdsblc += panel_type;
14435+ blc_type = lvdsblc->type;
14436+ blc_pol = lvdsblc->pol;
14437+ blc_freq = lvdsblc->freq;
14438+ blc_minbrightness = lvdsblc->minbrightness;
14439+ blc_i2caddr = lvdsblc->i2caddr;
14440+ blc_brightnesscmd = lvdsblc->brightnesscmd;
14441+ DRM_INFO("intel_lvds_init: BLC Data in BIOS VBT tables: datasize=%d paneltype=%d \
14442+ type=0x%02x pol=0x%02x freq=0x%04x minlevel=0x%02x \
14443+ i2caddr=0x%02x cmd=0x%02x \n",
14444+ 0,
14445+ panel_type,
14446+ lvdsblc->type,
14447+ lvdsblc->pol,
14448+ lvdsblc->freq,
14449+ lvdsblc->minbrightness,
14450+ lvdsblc->i2caddr,
14451+ lvdsblc->brightnesscmd);
14452+ }
14453+ break;
14454+ }
14455+ }
14456+
14457+ }
14458+
14459+ if(1){
14460+ //get the Core Clock for calculating MAX PWM value
14461+ //check whether the MaxResEnableInt is
14462+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
14463+ u32 clock;
14464+ u32 sku_value = 0;
14465+ unsigned int CoreClocks[] = {
14466+ 100,
14467+ 133,
14468+ 150,
14469+ 178,
14470+ 200,
14471+ 266,
14472+ 266,
14473+ 266
14474+ };
14475+ if(pci_root)
14476+ {
14477+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
14478+ pci_read_config_dword(pci_root, 0xD4, &clock);
14479+ CoreClock = CoreClocks[clock & 0x07];
14480+ DRM_INFO("intel_lvds_init: the CoreClock is %d\n", CoreClock);
14481+
14482+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
14483+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
14484+ sku_bMaxResEnableInt = (sku_value & PCI_PORT5_REG80_MAXRES_INT_EN)? true : false;
14485+ DRM_INFO("intel_lvds_init: sku_value is 0x%08x\n", sku_value);
14486+ DRM_INFO("intel_lvds_init: sku_bMaxResEnableInt is %d\n", sku_bMaxResEnableInt);
14487+ }
14488+ }
14489+
14490+ if ((blc_type == BLC_I2C_TYPE) || (blc_type == BLC_PWM_TYPE)){
14491+ /* add /sys/class/backlight interface as standard */
14492+ psbbl_device = backlight_device_register("psblvds", &dev->pdev->dev, dev, &psbbl_ops);
14493+ if (psbbl_device){
14494+ psbbl_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
14495+ psbbl_device->props.brightness = lvds_backlight;
14496+ psbbl_device->props.power = FB_BLANK_UNBLANK;
14497+ backlight_update_status(psbbl_device);
14498+ }
14499+ }
14500+
14501+blc_out:
14502+
14503+ /* Set up the DDC bus. */
14504+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
14505+ if (!intel_output->ddc_bus) {
14506+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
14507+ "failed.\n");
14508+ intel_i2c_destroy(lvds_i2c_bus);
14509+ return;
14510+ }
14511+
14512+ /*
14513+ * Attempt to get the fixed panel mode from DDC. Assume that the
14514+ * preferred mode is the right one.
14515+ */
14516+ intel_lvds_get_modes(output);
14517+
14518+ list_for_each_entry(scan, &output->probed_modes, head) {
14519+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
14520+ dev_priv->panel_fixed_mode =
14521+ drm_mode_duplicate(dev, scan);
14522+ goto out; /* FIXME: check for quirks */
14523+ }
14524+ }
14525+
14526+ /*
14527+ * If we didn't get EDID, try checking if the panel is already turned
14528+ * on. If so, assume that whatever is currently programmed is the
14529+ * correct mode.
14530+ */
14531+ lvds = I915_READ(LVDS);
14532+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
14533+ crtc = intel_get_crtc_from_pipe(dev, pipe);
14534+
14535+ if (crtc && (lvds & LVDS_PORT_EN)) {
14536+ dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
14537+ if (dev_priv->panel_fixed_mode) {
14538+ dev_priv->panel_fixed_mode->type |=
14539+ DRM_MODE_TYPE_PREFERRED;
14540+ goto out; /* FIXME: check for quirks */
14541+ }
14542+ }
14543+
14544+ /* If we still don't have a mode after all that, give up. */
14545+ if (!dev_priv->panel_fixed_mode)
14546+ goto failed;
14547+
14548+ /* FIXME: probe the BIOS for modes and check for LVDS quirks */
14549+#if 0
14550+ /* Get the LVDS fixed mode out of the BIOS. We should support LVDS
14551+ * with the BIOS being unavailable or broken, but lack the
14552+ * configuration options for now.
14553+ */
14554+ bios_mode = intel_bios_get_panel_mode(pScrn);
14555+ if (bios_mode != NULL) {
14556+ if (dev_priv->panel_fixed_mode != NULL) {
14557+ if (dev_priv->debug_modes &&
14558+ !xf86ModesEqual(dev_priv->panel_fixed_mode,
14559+ bios_mode))
14560+ {
14561+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
14562+ "BIOS panel mode data doesn't match probed data, "
14563+ "continuing with probed.\n");
14564+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, "BIOS mode:\n");
14565+ xf86PrintModeline(pScrn->scrnIndex, bios_mode);
14566+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, "probed mode:\n");
14567+ xf86PrintModeline(pScrn->scrnIndex, dev_priv->panel_fixed_mode);
14568+ xfree(bios_mode->name);
14569+ xfree(bios_mode);
14570+ }
14571+ } else {
14572+ dev_priv->panel_fixed_mode = bios_mode;
14573+ }
14574+ } else {
14575+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
14576+ "Couldn't detect panel mode. Disabling panel\n");
14577+ goto disable_exit;
14578+ }
14579+
14580+ /*
14581+ * Blacklist machines with BIOSes that list an LVDS panel without
14582+ * actually having one.
14583+ */
14584+ if (dev_priv->PciInfo->chipType == PCI_CHIP_I945_GM) {
14585+ /* aopen mini pc */
14586+ if (dev_priv->PciInfo->subsysVendor == 0xa0a0)
14587+ goto disable_exit;
14588+
14589+ if ((dev_priv->PciInfo->subsysVendor == 0x8086) &&
14590+ (dev_priv->PciInfo->subsysCard == 0x7270)) {
14591+ /* It's a Mac Mini or Macbook Pro.
14592+ *
14593+ * Apple hardware is out to get us. The macbook pro
14594+ * has a real LVDS panel, but the mac mini does not,
14595+ * and they have the same device IDs. We'll
14596+ * distinguish by panel size, on the assumption
14597+ * that Apple isn't about to make any machines with an
14598+ * 800x600 display.
14599+ */
14600+
14601+ if (dev_priv->panel_fixed_mode != NULL &&
14602+ dev_priv->panel_fixed_mode->HDisplay == 800 &&
14603+ dev_priv->panel_fixed_mode->VDisplay == 600)
14604+ {
14605+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
14606+ "Suspected Mac Mini, ignoring the LVDS\n");
14607+ goto disable_exit;
14608+ }
14609+ }
14610+ }
14611+
14612+#endif
14613+
14614+out:
14615+ return;
14616+
14617+failed:
14618+ DRM_DEBUG("No LVDS modes found, disabling.\n");
14619+ drm_output_destroy(output); /* calls intel_lvds_destroy above */
14620+}
14621Index: linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.h
14622===================================================================
14623--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14624+++ linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.h 2009-01-14 11:58:01.000000000 +0000
14625@@ -0,0 +1,174 @@
14626+/*
14627+ * Copyright © 2006-2007 Intel Corporation
14628+ *
14629+ * Permission is hereby granted, free of charge, to any person obtaining a
14630+ * copy of this software and associated documentation files (the "Software"),
14631+ * to deal in the Software without restriction, including without limitation
14632+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14633+ * and/or sell copies of the Software, and to permit persons to whom the
14634+ * Software is furnished to do so, subject to the following conditions:
14635+ *
14636+ * The above copyright notice and this permission notice (including the next
14637+ * paragraph) shall be included in all copies or substantial portions of the
14638+ * Software.
14639+ *
14640+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14641+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14642+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
14643+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
14644+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
14645+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
14646+ * DEALINGS IN THE SOFTWARE.
14647+ *
14648+ */
14649+
14650+/**
14651+ * @file lvds definitions and structures.
14652+ */
14653+
14654+#define BLC_I2C_TYPE 0x01
14655+#define BLC_PWM_TYPE 0x02
14656+#define BRIGHTNESS_MASK 0xff
14657+#define BRIGHTNESS_MAX_LEVEL 100
14658+#define BLC_POLARITY_NORMAL 0
14659+#define BLC_POLARITY_INVERSE 1
14660+#define BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xfffe)
14661+#define BACKLIGHT_PWM_CTL_SHIFT (16)
14662+#define BLC_MAX_PWM_REG_FREQ 0xfffe
14663+#define BLC_MIN_PWM_REG_FREQ 0x2
14664+#define BLC_PWM_LEGACY_MODE_ENABLE 0x0001
14665+#define BLC_PWM_PRECISION_FACTOR 10//10000000
14666+#define BLC_PWM_FREQ_CALC_CONSTANT 32
14667+#define MHz 1000000
14668+#define OFFSET_OPREGION_VBT 0x400
14669+
14670+typedef struct OpRegion_Header
14671+{
14672+ char sign[16];
14673+ u32 size;
14674+ u32 over;
14675+ char sver[32];
14676+ char vver[16];
14677+ char gver[16];
14678+ u32 mbox;
14679+ char rhd1[164];
14680+} OpRegionRec, *OpRegionPtr;
14681+
14682+struct vbt_header
14683+{
14684+ char signature[20]; /**< Always starts with 'VBT$' */
14685+ u16 version; /**< decimal */
14686+ u16 header_size; /**< in bytes */
14687+ u16 vbt_size; /**< in bytes */
14688+ u8 vbt_checksum;
14689+ u8 reserved0;
14690+ u32 bdb_offset; /**< from beginning of VBT */
14691+ u32 aim1_offset; /**< from beginning of VBT */
14692+ u32 aim2_offset; /**< from beginning of VBT */
14693+ u32 aim3_offset; /**< from beginning of VBT */
14694+ u32 aim4_offset; /**< from beginning of VBT */
14695+} __attribute__ ((packed));
14696+
14697+struct bdb_header
14698+{
14699+ char signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
14700+ u16 version; /**< decimal */
14701+ u16 header_size; /**< in bytes */
14702+ u16 bdb_size; /**< in bytes */
14703+} __attribute__ ((packed));
14704+
14705+#define LVDS_CAP_EDID (1 << 6)
14706+#define LVDS_CAP_DITHER (1 << 5)
14707+#define LVDS_CAP_PFIT_AUTO_RATIO (1 << 4)
14708+#define LVDS_CAP_PFIT_GRAPHICS_MODE (1 << 3)
14709+#define LVDS_CAP_PFIT_TEXT_MODE (1 << 2)
14710+#define LVDS_CAP_PFIT_GRAPHICS (1 << 1)
14711+#define LVDS_CAP_PFIT_TEXT (1 << 0)
14712+struct lvds_bdb_1
14713+{
14714+ u8 id; /**< 40 */
14715+ u16 size;
14716+ u8 panel_type;
14717+ u8 reserved0;
14718+ u16 caps;
14719+} __attribute__ ((packed));
14720+
14721+struct lvds_bdb_2_fp_params
14722+{
14723+ u16 x_res;
14724+ u16 y_res;
14725+ u32 lvds_reg;
14726+ u32 lvds_reg_val;
14727+ u32 pp_on_reg;
14728+ u32 pp_on_reg_val;
14729+ u32 pp_off_reg;
14730+ u32 pp_off_reg_val;
14731+ u32 pp_cycle_reg;
14732+ u32 pp_cycle_reg_val;
14733+ u32 pfit_reg;
14734+ u32 pfit_reg_val;
14735+ u16 terminator;
14736+} __attribute__ ((packed));
14737+
14738+struct lvds_bdb_2_fp_edid_dtd
14739+{
14740+ u16 dclk; /**< In 10khz */
14741+ u8 hactive;
14742+ u8 hblank;
14743+ u8 high_h; /**< 7:4 = hactive 11:8, 3:0 = hblank 11:8 */
14744+ u8 vactive;
14745+ u8 vblank;
14746+ u8 high_v; /**< 7:4 = vactive 11:8, 3:0 = vblank 11:8 */
14747+ u8 hsync_off;
14748+ u8 hsync_pulse_width;
14749+ u8 vsync_off;
14750+ u8 high_hsync_off; /**< 7:6 = hsync off 9:8 */
14751+ u8 h_image;
14752+ u8 v_image;
14753+ u8 max_hv;
14754+ u8 h_border;
14755+ u8 v_border;
14756+ u8 flags;
14757+#define FP_EDID_FLAG_VSYNC_POSITIVE (1 << 2)
14758+#define FP_EDID_FLAG_HSYNC_POSITIVE (1 << 1)
14759+} __attribute__ ((packed));
14760+
14761+struct lvds_bdb_2_entry
14762+{
14763+ u16 fp_params_offset; /**< From beginning of BDB */
14764+ u8 fp_params_size;
14765+ u16 fp_edid_dtd_offset;
14766+ u8 fp_edid_dtd_size;
14767+ u16 fp_edid_pid_offset;
14768+ u8 fp_edid_pid_size;
14769+} __attribute__ ((packed));
14770+
14771+struct lvds_bdb_2
14772+{
14773+ u8 id; /**< 41 */
14774+ u16 size;
14775+ u8 table_size; /* not sure on this one */
14776+ struct lvds_bdb_2_entry panels[16];
14777+} __attribute__ ((packed));
14778+
14779+
14780+struct lvds_bdb_blc
14781+{
14782+ u8 id; /**< 43 */
14783+ u16 size;
14784+ u8 table_size;
14785+} __attribute__ ((packed));
14786+
14787+struct lvds_blc
14788+{
14789+ u8 type:2;
14790+ u8 pol:1;
14791+ u8 gpio:3;
14792+ u8 gmbus:2;
14793+ u16 freq;
14794+ u8 minbrightness;
14795+ u8 i2caddr;
14796+ u8 brightnesscmd;
14797+ /* more... */
14798+} __attribute__ ((packed));
14799+
14800Index: linux-2.6.27/drivers/gpu/drm/psb/intel_modes.c
14801===================================================================
14802--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14803+++ linux-2.6.27/drivers/gpu/drm/psb/intel_modes.c 2009-01-14 11:58:01.000000000 +0000
14804@@ -0,0 +1,60 @@
14805+/*
14806+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
14807+ * Copyright (c) 2007 Intel Corporation
14808+ * Jesse Barnes <jesse.barnes@intel.com>
14809+ */
14810+
14811+#include <linux/i2c.h>
14812+#include <linux/fb.h>
14813+
14814+/**
14815+ * intel_ddc_probe
14816+ *
14817+ */
14818+bool intel_ddc_probe(struct drm_output *output)
14819+{
14820+ struct intel_output *intel_output = output->driver_private;
14821+ u8 out_buf[] = { 0x0, 0x0};
14822+ u8 buf[2];
14823+ int ret;
14824+ struct i2c_msg msgs[] = {
14825+ {
14826+ .addr = 0x50,
14827+ .flags = 0,
14828+ .len = 1,
14829+ .buf = out_buf,
14830+ },
14831+ {
14832+ .addr = 0x50,
14833+ .flags = I2C_M_RD,
14834+ .len = 1,
14835+ .buf = buf,
14836+ }
14837+ };
14838+
14839+ ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
14840+ if (ret == 2)
14841+ return true;
14842+
14843+ return false;
14844+}
14845+
14846+/**
14847+ * intel_ddc_get_modes - get modelist from monitor
14848+ * @output: DRM output device to use
14849+ *
14850+ * Fetch the EDID information from @output using the DDC bus.
14851+ */
14852+int intel_ddc_get_modes(struct drm_output *output)
14853+{
14854+ struct intel_output *intel_output = output->driver_private;
14855+ struct edid *edid;
14856+ int ret = 0;
14857+
14858+ edid = drm_get_edid(output, &intel_output->ddc_bus->adapter);
14859+ if (edid) {
14860+ ret = drm_add_edid_modes(output, edid);
14861+ kfree(edid);
14862+ }
14863+ return ret;
14864+}
14865Index: linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo.c
14866===================================================================
14867--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14868+++ linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo.c 2009-01-14 11:58:01.000000000 +0000
14869@@ -0,0 +1,3973 @@
14870+/*
14871+ * Copyright © 2006-2007 Intel Corporation
14872+ *
14873+ * Permission is hereby granted, free of charge, to any person obtaining a
14874+ * copy of this software and associated documentation files (the "Software"),
14875+ * to deal in the Software without restriction, including without limitation
14876+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14877+ * and/or sell copies of the Software, and to permit persons to whom the
14878+ * Software is furnished to do so, subject to the following conditions:
14879+ *
14880+ * The above copyright notice and this permission notice (including the next
14881+ * paragraph) shall be included in all copies or substantial portions of the
14882+ * Software.
14883+ *
14884+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14885+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14886+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
14887+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
14888+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
14889+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
14890+ * DEALINGS IN THE SOFTWARE.
14891+ *
14892+ * Authors:
14893+ * Eric Anholt <eric@anholt.net>
14894+ */
14895+/*
14896+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
14897+ * Jesse Barnes <jesse.barnes@intel.com>
14898+ */
14899+
14900+#include <linux/i2c.h>
14901+#include <linux/delay.h>
14902+#include "drm_crtc.h"
14903+#include "intel_sdvo_regs.h"
14904+
14905+#define MAX_VAL 1000
14906+#define DPLL_CLOCK_PHASE_9 (1<<9 | 1<<12)
14907+
14908+#define PCI_PORT5_REG80_FFUSE 0xD0058000
14909+#define PCI_PORT5_REG80_SDVO_DISABLE 0x0020
14910+
14911+#define SII_1392_WA
14912+#ifdef SII_1392_WA
14913+int SII_1392=0;
14914+extern int drm_psb_no_fb;
14915+#endif
14916+
14917+typedef struct _EXTVDATA
14918+{
14919+ u32 Value;
14920+ u32 Default;
14921+ u32 Min;
14922+ u32 Max;
14923+ u32 Step; // arbitrary unit (e.g. pixel, percent) returned during VP_COMMAND_GET
14924+} EXTVDATA, *PEXTVDATA;
14925+
14926+typedef struct _sdvo_display_params
14927+{
14928+ EXTVDATA FlickerFilter; /* Flicker Filter : for TV onl */
14929+ EXTVDATA AdaptiveFF; /* Adaptive Flicker Filter : for TV onl */
14930+ EXTVDATA TwoD_FlickerFilter; /* 2D Flicker Filter : for TV onl */
14931+ EXTVDATA Brightness; /* Brightness : for TV & CRT onl */
14932+ EXTVDATA Contrast; /* Contrast : for TV & CRT onl */
14933+ EXTVDATA PositionX; /* Horizontal Position : for all device */
14934+ EXTVDATA PositionY; /* Vertical Position : for all device */
14935+ /*EXTVDATA OverScanX; Horizontal Overscan : for TV onl */
14936+ EXTVDATA DotCrawl; /* Dot crawl value : for TV onl */
14937+ EXTVDATA ChromaFilter; /* Chroma Filter : for TV onl */
14938+ /* EXTVDATA OverScanY; Vertical Overscan : for TV onl */
14939+ EXTVDATA LumaFilter; /* Luma Filter : for TV only */
14940+ EXTVDATA Sharpness; /* Sharpness : for TV & CRT onl */
14941+ EXTVDATA Saturation; /* Saturation : for TV & CRT onl */
14942+ EXTVDATA Hue; /* Hue : for TV & CRT onl */
14943+ EXTVDATA Dither; /* Dither : For LVDS onl */
14944+} sdvo_display_params;
14945+
14946+typedef enum _SDVO_PICTURE_ASPECT_RATIO_T
14947+{
14948+ UAIM_PAR_NO_DATA = 0x00000000,
14949+ UAIM_PAR_4_3 = 0x00000100,
14950+ UAIM_PAR_16_9 = 0x00000200,
14951+ UAIM_PAR_FUTURE = 0x00000300,
14952+ UAIM_PAR_MASK = 0x00000300,
14953+} SDVO_PICTURE_ASPECT_RATIO_T;
14954+
14955+typedef enum _SDVO_FORMAT_ASPECT_RATIO_T
14956+{
14957+ UAIM_FAR_NO_DATA = 0x00000000,
14958+ UAIM_FAR_SAME_AS_PAR = 0x00002000,
14959+ UAIM_FAR_4_BY_3_CENTER = 0x00002400,
14960+ UAIM_FAR_16_BY_9_CENTER = 0x00002800,
14961+ UAIM_FAR_14_BY_9_CENTER = 0x00002C00,
14962+ UAIM_FAR_16_BY_9_LETTERBOX_TOP = 0x00000800,
14963+ UAIM_FAR_14_BY_9_LETTERBOX_TOP = 0x00000C00,
14964+ UAIM_FAR_GT_16_BY_9_LETTERBOX_CENTER = 0x00002000,
14965+ UAIM_FAR_4_BY_3_SNP_14_BY_9_CENTER = 0x00003400, /* With shoot and protect 14:9 cente */
14966+ UAIM_FAR_16_BY_9_SNP_14_BY_9_CENTER = 0x00003800, /* With shoot and protect 14:9 cente */
14967+ UAIM_FAR_16_BY_9_SNP_4_BY_3_CENTER = 0x00003C00, /* With shoot and protect 4:3 cente */
14968+ UAIM_FAR_MASK = 0x00003C00,
14969+} SDVO_FORMAT_ASPECT_RATIO_T;
14970+
14971+// TV image aspect ratio
14972+typedef enum _CP_IMAGE_ASPECT_RATIO
14973+{
14974+ CP_ASPECT_RATIO_FF_4_BY_3 = 0,
14975+ CP_ASPECT_RATIO_14_BY_9_CENTER = 1,
14976+ CP_ASPECT_RATIO_14_BY_9_TOP = 2,
14977+ CP_ASPECT_RATIO_16_BY_9_CENTER = 3,
14978+ CP_ASPECT_RATIO_16_BY_9_TOP = 4,
14979+ CP_ASPECT_RATIO_GT_16_BY_9_CENTER = 5,
14980+ CP_ASPECT_RATIO_FF_4_BY_3_PROT_CENTER = 6,
14981+ CP_ASPECT_RATIO_FF_16_BY_9_ANAMORPHIC = 7,
14982+} CP_IMAGE_ASPECT_RATIO;
14983+
14984+typedef struct _SDVO_ANCILLARY_INFO_T
14985+{
14986+ CP_IMAGE_ASPECT_RATIO AspectRatio;
14987+ u32 RedistCtrlFlag; /* Redistribution control flag (get and set */
14988+} SDVO_ANCILLARY_INFO_T, *PSDVO_ANCILLARY_INFO_T;
14989+
14990+struct intel_sdvo_priv {
14991+ struct intel_i2c_chan *i2c_bus;
14992+ int slaveaddr;
14993+ int output_device;
14994+
14995+ u16 active_outputs;
14996+
14997+ struct intel_sdvo_caps caps;
14998+ int pixel_clock_min, pixel_clock_max;
14999+
15000+ int save_sdvo_mult;
15001+ u16 save_active_outputs;
15002+ struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
15003+ struct intel_sdvo_dtd save_output_dtd[16];
15004+ u32 save_SDVOX;
15005+ /**
15006+ * SDVO TV encoder support
15007+ */
15008+ u32 ActiveDevice; /* CRT, TV, LVDS, TMDS */
15009+ u32 TVStandard; /* PAL, NTSC */
15010+ int TVOutput; /* S-Video, CVBS,YPbPr,RGB */
15011+ int TVMode; /* SDTV/HDTV/SECAM mod */
15012+ u32 TVStdBitmask;
15013+ u32 dwSDVOHDTVBitMask;
15014+ u32 dwSDVOSDTVBitMask;
15015+ u8 byInputWiring;
15016+ bool bGetClk;
15017+ u32 dwMaxDotClk;
15018+ u32 dwMinDotClk;
15019+
15020+ u32 dwMaxInDotClk;
15021+ u32 dwMinInDotClk;
15022+
15023+ u32 dwMaxOutDotClk;
15024+ u32 dwMinOutDotClk;
15025+ u32 dwSupportedEnhancements;
15026+ EXTVDATA OverScanY; /* Vertical Overscan : for TV onl */
15027+ EXTVDATA OverScanX; /* Horizontal Overscan : for TV onl */
15028+ sdvo_display_params dispParams;
15029+ SDVO_ANCILLARY_INFO_T AncillaryInfo;
15030+};
15031+
15032+/* Define TV mode type */
15033+/* The full set are defined in xf86str.h*/
15034+#define M_T_TV 0x80
15035+
15036+typedef struct _tv_mode_t
15037+{
15038+ /* the following data is detailed mode information as it would be passed to the hardware: */
15039+ struct drm_display_mode mode_entry;
15040+ u32 dwSupportedSDTVvss;
15041+ u32 dwSupportedHDTVvss;
15042+ bool m_preferred;
15043+ bool isTVMode;
15044+} tv_mode_t;
15045+
15046+static tv_mode_t tv_modes[] = {
15047+ {
15048+ .mode_entry =
15049+ {DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x2625a00 / 1000, 800, 840, 968, 1056, 0,
15050+ 600, 601,
15051+ 604, 628, 0, V_PHSYNC | V_PVSYNC)},
15052+ .dwSupportedSDTVvss = TVSTANDARD_SDTV_ALL,
15053+ .dwSupportedHDTVvss = TVSTANDARD_HDTV_ALL,
15054+ .m_preferred = TRUE,
15055+ .isTVMode = TRUE,
15056+ },
15057+ {
15058+ .mode_entry =
15059+ {DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x3dfd240 / 1000, 1024, 0x418, 0x49f, 0x540,
15060+ 0, 768,
15061+ 0x303, 0x308, 0x325, 0, V_PHSYNC | V_PVSYNC)},
15062+ .dwSupportedSDTVvss = TVSTANDARD_SDTV_ALL,
15063+ .dwSupportedHDTVvss = TVSTANDARD_HDTV_ALL,
15064+ .m_preferred = FALSE,
15065+ .isTVMode = TRUE,
15066+ },
15067+ {
15068+ .mode_entry =
15069+ {DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x1978ff0 / 1000, 720, 0x2e1, 0x326, 0x380, 0,
15070+ 480,
15071+ 0x1f0, 0x1e1, 0x1f1, 0, V_PHSYNC | V_PVSYNC)},
15072+ .dwSupportedSDTVvss =
15073+ TVSTANDARD_NTSC_M | TVSTANDARD_NTSC_M_J | TVSTANDARD_NTSC_433,
15074+ .dwSupportedHDTVvss = 0x0,
15075+ .m_preferred = FALSE,
15076+ .isTVMode = TRUE,
15077+ },
15078+ {
15079+ /*Modeline "720x576_SDVO" 0.96 720 756 788 864 576 616 618 700 +vsync */
15080+ .mode_entry =
15081+ {DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x1f25a20 / 1000, 720, 756, 788, 864, 0, 576,
15082+ 616,
15083+ 618, 700, 0, V_PHSYNC | V_PVSYNC)},
15084+ .dwSupportedSDTVvss =
15085+ (TVSTANDARD_PAL_B | TVSTANDARD_PAL_D | TVSTANDARD_PAL_H |
15086+ TVSTANDARD_PAL_I | TVSTANDARD_PAL_N | TVSTANDARD_SECAM_B |
15087+ TVSTANDARD_SECAM_D | TVSTANDARD_SECAM_G | TVSTANDARD_SECAM_H |
15088+ TVSTANDARD_SECAM_K | TVSTANDARD_SECAM_K1 | TVSTANDARD_SECAM_L |
15089+ TVSTANDARD_PAL_G | TVSTANDARD_SECAM_L1),
15090+ .dwSupportedHDTVvss = 0x0,
15091+ .m_preferred = FALSE,
15092+ .isTVMode = TRUE,
15093+ },
15094+ {
15095+ .mode_entry =
15096+ {DRM_MODE("1280x720@60",DRM_MODE_TYPE_DRIVER | M_T_TV, 74250000 / 1000, 1280, 1390, 1430, 1650, 0,
15097+ 720,
15098+ 725, 730, 750, 0, V_PHSYNC | V_PVSYNC)},
15099+ .dwSupportedSDTVvss = 0x0,
15100+ .dwSupportedHDTVvss = HDTV_SMPTE_296M_720p60,
15101+ .m_preferred = FALSE,
15102+ .isTVMode = TRUE,
15103+ },
15104+ {
15105+ .mode_entry =
15106+ {DRM_MODE("1280x720@50", DRM_MODE_TYPE_DRIVER | M_T_TV, 74250000 / 1000, 1280, 1720, 1759, 1980, 0,
15107+ 720,
15108+ 725, 730, 750, 0, V_PHSYNC | V_PVSYNC)},
15109+ .dwSupportedSDTVvss = 0x0,
15110+ .dwSupportedHDTVvss = HDTV_SMPTE_296M_720p50,
15111+ .m_preferred = FALSE,
15112+ .isTVMode = TRUE,
15113+ },
15114+ {
15115+ .mode_entry =
15116+ {DRM_MODE("1920x1080@60", DRM_MODE_TYPE_DRIVER | M_T_TV, 148500000 / 1000, 1920, 2008, 2051, 2200, 0,
15117+ 1080,
15118+ 1084, 1088, 1124, 0, V_PHSYNC | V_PVSYNC)},
15119+ .dwSupportedSDTVvss = 0x0,
15120+ .dwSupportedHDTVvss = HDTV_SMPTE_274M_1080i60,
15121+ .m_preferred = FALSE,
15122+ .isTVMode = TRUE,
15123+ },
15124+};
15125+
15126+#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0])
15127+
15128+typedef struct {
15129+ /* given values */
15130+ int n;
15131+ int m1, m2;
15132+ int p1, p2;
15133+ /* derived values */
15134+ int dot;
15135+ int vco;
15136+ int m;
15137+ int p;
15138+} ex_intel_clock_t;
15139+
15140+
15141+/**
15142+ * Writes the SDVOB or SDVOC with the given value, but always writes both
15143+ * SDVOB and SDVOC to work around apparent hardware issues (according to
15144+ * comments in the BIOS).
15145+ */
15146+static void intel_sdvo_write_sdvox(struct drm_output *output, u32 val)
15147+{
15148+ struct drm_device *dev = output->dev;
15149+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
15150+ struct intel_output *intel_output = output->driver_private;
15151+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15152+ u32 bval = val, cval = val;
15153+ int i;
15154+
15155+ if (sdvo_priv->output_device == SDVOB)
15156+ cval = I915_READ(SDVOC);
15157+ else
15158+ bval = I915_READ(SDVOB);
15159+ /*
15160+ * Write the registers twice for luck. Sometimes,
15161+ * writing them only once doesn't appear to 'stick'.
15162+ * The BIOS does this too. Yay, magic
15163+ */
15164+ for (i = 0; i < 2; i++)
15165+ {
15166+ I915_WRITE(SDVOB, bval);
15167+ I915_READ(SDVOB);
15168+ I915_WRITE(SDVOC, cval);
15169+ I915_READ(SDVOC);
15170+ }
15171+}
15172+
15173+static bool intel_sdvo_read_byte(struct drm_output *output, u8 addr,
15174+ u8 *ch)
15175+{
15176+ struct intel_output *intel_output = output->driver_private;
15177+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15178+ u8 out_buf[2];
15179+ u8 buf[2];
15180+ int ret;
15181+
15182+ struct i2c_msg msgs[] = {
15183+ {
15184+ .addr = sdvo_priv->i2c_bus->slave_addr,
15185+ .flags = 0,
15186+ .len = 1,
15187+ .buf = out_buf,
15188+ },
15189+ {
15190+ .addr = sdvo_priv->i2c_bus->slave_addr,
15191+ .flags = I2C_M_RD,
15192+ .len = 1,
15193+ .buf = buf,
15194+ }
15195+ };
15196+
15197+ out_buf[0] = addr;
15198+ out_buf[1] = 0;
15199+
15200+ if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
15201+ {
15202+// DRM_DEBUG("got back from addr %02X = %02x\n", out_buf[0], buf[0]);
15203+ *ch = buf[0];
15204+ return true;
15205+ }
15206+
15207+ DRM_DEBUG("i2c transfer returned %d\n", ret);
15208+ return false;
15209+}
15210+
15211+
15212+#if 0
15213+static bool intel_sdvo_read_byte_quiet(struct drm_output *output, int addr,
15214+ u8 *ch)
15215+{
15216+ return true;
15217+
15218+}
15219+#endif
15220+
15221+static bool intel_sdvo_write_byte(struct drm_output *output, int addr,
15222+ u8 ch)
15223+{
15224+ struct intel_output *intel_output = output->driver_private;
15225+ u8 out_buf[2];
15226+ struct i2c_msg msgs[] = {
15227+ {
15228+ .addr = intel_output->i2c_bus->slave_addr,
15229+ .flags = 0,
15230+ .len = 2,
15231+ .buf = out_buf,
15232+ }
15233+ };
15234+
15235+ out_buf[0] = addr;
15236+ out_buf[1] = ch;
15237+
15238+ if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
15239+ {
15240+ return true;
15241+ }
15242+ return false;
15243+}
15244+
15245+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
15246+/** Mapping of command numbers to names, for debug output */
15247+const static struct _sdvo_cmd_name {
15248+ u8 cmd;
15249+ char *name;
15250+} sdvo_cmd_names[] = {
15251+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
15252+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
15253+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
15254+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
15255+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
15256+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
15257+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
15258+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
15259+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
15260+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
15261+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
15262+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
15263+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
15264+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
15265+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
15266+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
15267+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
15268+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
15269+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
15270+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
15271+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
15272+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
15273+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
15274+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
15275+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
15276+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
15277+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
15278+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
15279+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
15280+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
15281+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
15282+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
15283+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
15284+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
15285+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
15286+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
15287+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
15288+};
15289+
15290+#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
15291+#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
15292+
15293+static void intel_sdvo_write_cmd(struct drm_output *output, u8 cmd,
15294+ void *args, int args_len)
15295+{
15296+ struct intel_output *intel_output = output->driver_private;
15297+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15298+ int i;
15299+
15300+ if (drm_debug) {
15301+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
15302+ for (i = 0; i < args_len; i++)
15303+ printk("%02X ", ((u8 *)args)[i]);
15304+ for (; i < 8; i++)
15305+ printk(" ");
15306+ for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
15307+ if (cmd == sdvo_cmd_names[i].cmd) {
15308+ printk("(%s)", sdvo_cmd_names[i].name);
15309+ break;
15310+ }
15311+ }
15312+ if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0]))
15313+ printk("(%02X)",cmd);
15314+ printk("\n");
15315+ }
15316+
15317+ for (i = 0; i < args_len; i++) {
15318+ intel_sdvo_write_byte(output, SDVO_I2C_ARG_0 - i, ((u8*)args)[i]);
15319+ }
15320+
15321+ intel_sdvo_write_byte(output, SDVO_I2C_OPCODE, cmd);
15322+}
15323+
15324+static const char *cmd_status_names[] = {
15325+ "Power on",
15326+ "Success",
15327+ "Not supported",
15328+ "Invalid arg",
15329+ "Pending",
15330+ "Target not specified",
15331+ "Scaling not supported"
15332+};
15333+
15334+static u8 intel_sdvo_read_response(struct drm_output *output, void *response,
15335+ int response_len)
15336+{
15337+ struct intel_output *intel_output = output->driver_private;
15338+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15339+ int i;
15340+ u8 status;
15341+ u8 retry = 50;
15342+
15343+ while (retry--) {
15344+ /* Read the command response */
15345+ for (i = 0; i < response_len; i++) {
15346+ intel_sdvo_read_byte(output, SDVO_I2C_RETURN_0 + i,
15347+ &((u8 *)response)[i]);
15348+ }
15349+
15350+ /* read the return status */
15351+ intel_sdvo_read_byte(output, SDVO_I2C_CMD_STATUS, &status);
15352+
15353+ if (drm_debug) {
15354+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
15355+ for (i = 0; i < response_len; i++)
15356+ printk("%02X ", ((u8 *)response)[i]);
15357+ for (; i < 8; i++)
15358+ printk(" ");
15359+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
15360+ printk("(%s)", cmd_status_names[status]);
15361+ else
15362+ printk("(??? %d)", status);
15363+ printk("\n");
15364+ }
15365+
15366+ if (status != SDVO_CMD_STATUS_PENDING)
15367+ return status;
15368+
15369+ mdelay(50);
15370+ }
15371+
15372+ return status;
15373+}
15374+
15375+int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
15376+{
15377+ if (mode->clock >= 100000)
15378+ return 1;
15379+ else if (mode->clock >= 50000)
15380+ return 2;
15381+ else
15382+ return 4;
15383+}
15384+
15385+/**
15386+ * Don't check status code from this as it switches the bus back to the
15387+ * SDVO chips which defeats the purpose of doing a bus switch in the first
15388+ * place.
15389+ */
15390+void intel_sdvo_set_control_bus_switch(struct drm_output *output, u8 target)
15391+{
15392+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
15393+}
15394+
15395+static bool intel_sdvo_set_target_input(struct drm_output *output, bool target_0, bool target_1)
15396+{
15397+ struct intel_sdvo_set_target_input_args targets = {0};
15398+ u8 status;
15399+
15400+ if (target_0 && target_1)
15401+ return SDVO_CMD_STATUS_NOTSUPP;
15402+
15403+ if (target_1)
15404+ targets.target_1 = 1;
15405+
15406+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_INPUT, &targets,
15407+ sizeof(targets));
15408+
15409+ status = intel_sdvo_read_response(output, NULL, 0);
15410+
15411+ return (status == SDVO_CMD_STATUS_SUCCESS);
15412+}
15413+
15414+/**
15415+ * Return whether each input is trained.
15416+ *
15417+ * This function is making an assumption about the layout of the response,
15418+ * which should be checked against the docs.
15419+ */
15420+static bool intel_sdvo_get_trained_inputs(struct drm_output *output, bool *input_1, bool *input_2)
15421+{
15422+ struct intel_sdvo_get_trained_inputs_response response;
15423+ u8 status;
15424+
15425+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
15426+ status = intel_sdvo_read_response(output, &response, sizeof(response));
15427+ if (status != SDVO_CMD_STATUS_SUCCESS)
15428+ return false;
15429+
15430+ *input_1 = response.input0_trained;
15431+ *input_2 = response.input1_trained;
15432+ return true;
15433+}
15434+
15435+static bool intel_sdvo_get_active_outputs(struct drm_output *output,
15436+ u16 *outputs)
15437+{
15438+ u8 status;
15439+
15440+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
15441+ status = intel_sdvo_read_response(output, outputs, sizeof(*outputs));
15442+
15443+ return (status == SDVO_CMD_STATUS_SUCCESS);
15444+}
15445+
15446+static bool intel_sdvo_set_active_outputs(struct drm_output *output,
15447+ u16 outputs)
15448+{
15449+ u8 status;
15450+
15451+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
15452+ sizeof(outputs));
15453+ status = intel_sdvo_read_response(output, NULL, 0);
15454+ return (status == SDVO_CMD_STATUS_SUCCESS);
15455+}
15456+
15457+static bool intel_sdvo_set_encoder_power_state(struct drm_output *output,
15458+ int mode)
15459+{
15460+ u8 status, state = SDVO_ENCODER_STATE_ON;
15461+
15462+ switch (mode) {
15463+ case DPMSModeOn:
15464+ state = SDVO_ENCODER_STATE_ON;
15465+ break;
15466+ case DPMSModeStandby:
15467+ state = SDVO_ENCODER_STATE_STANDBY;
15468+ break;
15469+ case DPMSModeSuspend:
15470+ state = SDVO_ENCODER_STATE_SUSPEND;
15471+ break;
15472+ case DPMSModeOff:
15473+ state = SDVO_ENCODER_STATE_OFF;
15474+ break;
15475+ }
15476+
15477+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
15478+ sizeof(state));
15479+ status = intel_sdvo_read_response(output, NULL, 0);
15480+
15481+ return (status == SDVO_CMD_STATUS_SUCCESS);
15482+}
15483+
15484+static bool intel_sdvo_get_input_pixel_clock_range(struct drm_output *output,
15485+ int *clock_min,
15486+ int *clock_max)
15487+{
15488+ struct intel_sdvo_pixel_clock_range clocks;
15489+ u8 status;
15490+
15491+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
15492+ NULL, 0);
15493+
15494+ status = intel_sdvo_read_response(output, &clocks, sizeof(clocks));
15495+
15496+ if (status != SDVO_CMD_STATUS_SUCCESS)
15497+ return false;
15498+
15499+ /* Convert the values from units of 10 kHz to kHz. */
15500+ *clock_min = clocks.min * 10;
15501+ *clock_max = clocks.max * 10;
15502+
15503+ return true;
15504+}
15505+
15506+static bool intel_sdvo_set_target_output(struct drm_output *output,
15507+ u16 outputs)
15508+{
15509+ u8 status;
15510+
15511+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
15512+ sizeof(outputs));
15513+
15514+ status = intel_sdvo_read_response(output, NULL, 0);
15515+ return (status == SDVO_CMD_STATUS_SUCCESS);
15516+}
15517+
15518+static bool intel_sdvo_get_timing(struct drm_output *output, u8 cmd,
15519+ struct intel_sdvo_dtd *dtd)
15520+{
15521+ u8 status;
15522+
15523+ intel_sdvo_write_cmd(output, cmd, NULL, 0);
15524+ status = intel_sdvo_read_response(output, &dtd->part1,
15525+ sizeof(dtd->part1));
15526+ if (status != SDVO_CMD_STATUS_SUCCESS)
15527+ return false;
15528+
15529+ intel_sdvo_write_cmd(output, cmd + 1, NULL, 0);
15530+ status = intel_sdvo_read_response(output, &dtd->part2,
15531+ sizeof(dtd->part2));
15532+ if (status != SDVO_CMD_STATUS_SUCCESS)
15533+ return false;
15534+
15535+ return true;
15536+}
15537+
15538+static bool intel_sdvo_get_input_timing(struct drm_output *output,
15539+ struct intel_sdvo_dtd *dtd)
15540+{
15541+ return intel_sdvo_get_timing(output,
15542+ SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
15543+}
15544+
15545+static bool intel_sdvo_get_output_timing(struct drm_output *output,
15546+ struct intel_sdvo_dtd *dtd)
15547+{
15548+ return intel_sdvo_get_timing(output,
15549+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
15550+}
15551+
15552+static bool intel_sdvo_set_timing(struct drm_output *output, u8 cmd,
15553+ struct intel_sdvo_dtd *dtd)
15554+{
15555+ u8 status;
15556+
15557+ intel_sdvo_write_cmd(output, cmd, &dtd->part1, sizeof(dtd->part1));
15558+ status = intel_sdvo_read_response(output, NULL, 0);
15559+ if (status != SDVO_CMD_STATUS_SUCCESS)
15560+ return false;
15561+
15562+ intel_sdvo_write_cmd(output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
15563+ status = intel_sdvo_read_response(output, NULL, 0);
15564+ if (status != SDVO_CMD_STATUS_SUCCESS)
15565+ return false;
15566+
15567+ return true;
15568+}
15569+
15570+static bool intel_sdvo_set_input_timing(struct drm_output *output,
15571+ struct intel_sdvo_dtd *dtd)
15572+{
15573+ return intel_sdvo_set_timing(output,
15574+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
15575+}
15576+
15577+static bool intel_sdvo_set_output_timing(struct drm_output *output,
15578+ struct intel_sdvo_dtd *dtd)
15579+{
15580+ return intel_sdvo_set_timing(output,
15581+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
15582+}
15583+
15584+#if 0
15585+static bool intel_sdvo_get_preferred_input_timing(struct drm_output *output,
15586+ struct intel_sdvo_dtd *dtd)
15587+{
15588+ struct intel_output *intel_output = output->driver_private;
15589+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15590+ u8 status;
15591+
15592+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
15593+ NULL, 0);
15594+
15595+ status = intel_sdvo_read_response(output, &dtd->part1,
15596+ sizeof(dtd->part1));
15597+ if (status != SDVO_CMD_STATUS_SUCCESS)
15598+ return false;
15599+
15600+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
15601+ NULL, 0);
15602+ status = intel_sdvo_read_response(output, &dtd->part2,
15603+ sizeof(dtd->part2));
15604+ if (status != SDVO_CMD_STATUS_SUCCESS)
15605+ return false;
15606+
15607+ return true;
15608+}
15609+#endif
15610+
15611+static int intel_sdvo_get_clock_rate_mult(struct drm_output *output)
15612+{
15613+ u8 response, status;
15614+
15615+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
15616+ status = intel_sdvo_read_response(output, &response, 1);
15617+
15618+ if (status != SDVO_CMD_STATUS_SUCCESS) {
15619+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
15620+ return SDVO_CLOCK_RATE_MULT_1X;
15621+ } else {
15622+ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
15623+ }
15624+
15625+ return response;
15626+}
15627+
15628+static bool intel_sdvo_set_clock_rate_mult(struct drm_output *output, u8 val)
15629+{
15630+ u8 status;
15631+
15632+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
15633+ status = intel_sdvo_read_response(output, NULL, 0);
15634+ if (status != SDVO_CMD_STATUS_SUCCESS)
15635+ return false;
15636+
15637+ return true;
15638+}
15639+
15640+static bool intel_sdvo_mode_fixup(struct drm_output *output,
15641+ struct drm_display_mode *mode,
15642+ struct drm_display_mode *adjusted_mode)
15643+{
15644+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
15645+ * device will be told of the multiplier during mode_set.
15646+ */
15647+ DRM_DEBUG("xxintel_sdvo_fixup\n");
15648+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
15649+ return true;
15650+}
15651+
15652+#if 0
15653+static void i830_sdvo_map_hdtvstd_bitmask(struct drm_output * output)
15654+{
15655+ struct intel_output *intel_output = output->driver_private;
15656+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15657+
15658+ switch (sdvo_priv->TVStandard) {
15659+ case HDTV_SMPTE_274M_1080i50:
15660+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i50;
15661+ break;
15662+
15663+ case HDTV_SMPTE_274M_1080i59:
15664+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i59;
15665+ break;
15666+
15667+ case HDTV_SMPTE_274M_1080i60:
15668+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i60;
15669+ break;
15670+ case HDTV_SMPTE_274M_1080p60:
15671+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080p60;
15672+ break;
15673+ case HDTV_SMPTE_296M_720p59:
15674+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p59;
15675+ break;
15676+
15677+ case HDTV_SMPTE_296M_720p60:
15678+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p60;
15679+ break;
15680+
15681+ case HDTV_SMPTE_296M_720p50:
15682+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p50;
15683+ break;
15684+
15685+ case HDTV_SMPTE_293M_480p59:
15686+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_293M_480p59;
15687+ break;
15688+
15689+ case HDTV_SMPTE_293M_480p60:
15690+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_EIA_7702A_480p60;
15691+ break;
15692+
15693+ case HDTV_SMPTE_170M_480i59:
15694+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_170M_480i59;
15695+ break;
15696+
15697+ case HDTV_ITURBT601_576i50:
15698+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_ITURBT601_576i50;
15699+ break;
15700+
15701+ case HDTV_ITURBT601_576p50:
15702+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_ITURBT601_576p50;
15703+ break;
15704+ default:
15705+ DRM_DEBUG("ERROR: Unknown TV Standard!!!\n");
15706+ /*Invalid return 0 */
15707+ sdvo_priv->TVStdBitmask = 0;
15708+ }
15709+
15710+}
15711+
15712+static void i830_sdvo_map_sdtvstd_bitmask(struct drm_output * output)
15713+{
15714+ struct intel_output *intel_output = output->driver_private;
15715+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15716+
15717+ switch (sdvo_priv->TVStandard) {
15718+ case TVSTANDARD_NTSC_M:
15719+ sdvo_priv->TVStdBitmask = SDVO_NTSC_M;
15720+ break;
15721+
15722+ case TVSTANDARD_NTSC_M_J:
15723+ sdvo_priv->TVStdBitmask = SDVO_NTSC_M_J;
15724+ break;
15725+
15726+ case TVSTANDARD_NTSC_433:
15727+ sdvo_priv->TVStdBitmask = SDVO_NTSC_433;
15728+ break;
15729+
15730+ case TVSTANDARD_PAL_B:
15731+ sdvo_priv->TVStdBitmask = SDVO_PAL_B;
15732+ break;
15733+
15734+ case TVSTANDARD_PAL_D:
15735+ sdvo_priv->TVStdBitmask = SDVO_PAL_D;
15736+ break;
15737+
15738+ case TVSTANDARD_PAL_G:
15739+ sdvo_priv->TVStdBitmask = SDVO_PAL_G;
15740+ break;
15741+
15742+ case TVSTANDARD_PAL_H:
15743+ sdvo_priv->TVStdBitmask = SDVO_PAL_H;
15744+ break;
15745+
15746+ case TVSTANDARD_PAL_I:
15747+ sdvo_priv->TVStdBitmask = SDVO_PAL_I;
15748+ break;
15749+
15750+ case TVSTANDARD_PAL_M:
15751+ sdvo_priv->TVStdBitmask = SDVO_PAL_M;
15752+ break;
15753+
15754+ case TVSTANDARD_PAL_N:
15755+ sdvo_priv->TVStdBitmask = SDVO_PAL_N;
15756+ break;
15757+
15758+ case TVSTANDARD_PAL_60:
15759+ sdvo_priv->TVStdBitmask = SDVO_PAL_60;
15760+ break;
15761+
15762+ case TVSTANDARD_SECAM_B:
15763+ sdvo_priv->TVStdBitmask = SDVO_SECAM_B;
15764+ break;
15765+
15766+ case TVSTANDARD_SECAM_D:
15767+ sdvo_priv->TVStdBitmask = SDVO_SECAM_D;
15768+ break;
15769+
15770+ case TVSTANDARD_SECAM_G:
15771+ sdvo_priv->TVStdBitmask = SDVO_SECAM_G;
15772+ break;
15773+
15774+ case TVSTANDARD_SECAM_K:
15775+ sdvo_priv->TVStdBitmask = SDVO_SECAM_K;
15776+ break;
15777+
15778+ case TVSTANDARD_SECAM_K1:
15779+ sdvo_priv->TVStdBitmask = SDVO_SECAM_K1;
15780+ break;
15781+
15782+ case TVSTANDARD_SECAM_L:
15783+ sdvo_priv->TVStdBitmask = SDVO_SECAM_L;
15784+ break;
15785+
15786+ case TVSTANDARD_SECAM_L1:
15787+ DRM_DEBUG("TVSTANDARD_SECAM_L1 not supported by encoder\n");
15788+ break;
15789+
15790+ case TVSTANDARD_SECAM_H:
15791+ DRM_DEBUG("TVSTANDARD_SECAM_H not supported by encoder\n");
15792+ break;
15793+
15794+ default:
15795+ DRM_DEBUG("ERROR: Unknown TV Standard\n");
15796+ /*Invalid return 0 */
15797+ sdvo_priv->TVStdBitmask = 0;
15798+ break;
15799+ }
15800+}
15801+#endif
15802+
15803+static bool i830_sdvo_set_tvoutputs_formats(struct drm_output * output)
15804+{
15805+ u8 byArgs[6];
15806+ u8 status;
15807+ struct intel_output *intel_output = output->driver_private;
15808+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15809+
15810+
15811+ /* Make all fields of the args/ret to zero */
15812+ memset(byArgs, 0, sizeof(byArgs));
15813+
15814+ if (sdvo_priv->TVMode & (TVMODE_SDTV)) {
15815+ /* Fill up the argument value */
15816+ byArgs[0] = (u8) (sdvo_priv->TVStdBitmask & 0xFF);
15817+ byArgs[1] = (u8) ((sdvo_priv->TVStdBitmask >> 8) & 0xFF);
15818+ byArgs[2] = (u8) ((sdvo_priv->TVStdBitmask >> 16) & 0xFF);
15819+ } else {
15820+ /* Fill up the argument value */
15821+ byArgs[0] = 0;
15822+ byArgs[1] = 0;
15823+ byArgs[2] = (u8) ((sdvo_priv->TVStdBitmask & 0xFF));
15824+ byArgs[3] = (u8) ((sdvo_priv->TVStdBitmask >> 8) & 0xFF);
15825+ byArgs[4] = (u8) ((sdvo_priv->TVStdBitmask >> 16) & 0xFF);
15826+ byArgs[5] = (u8) ((sdvo_priv->TVStdBitmask >> 24) & 0xFF);
15827+ }
15828+
15829+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMATS, byArgs, 6);
15830+ status = intel_sdvo_read_response(output, NULL, 0);
15831+
15832+ if (status != SDVO_CMD_STATUS_SUCCESS)
15833+ return FALSE;
15834+
15835+ return TRUE;
15836+
15837+}
15838+
15839+static bool i830_sdvo_create_preferred_input_timing(struct drm_output * output,
15840+ struct drm_display_mode * mode)
15841+{
15842+ u8 byArgs[7];
15843+ u8 status;
15844+ u32 dwClk;
15845+ u32 dwHActive, dwVActive;
15846+ bool bIsInterlaced, bIsScaled;
15847+
15848+ /* Make all fields of the args/ret to zero */
15849+ memset(byArgs, 0, sizeof(byArgs));
15850+
15851+ /* Fill up the argument values */
15852+ dwHActive = mode->crtc_hdisplay;
15853+ dwVActive = mode->crtc_vdisplay;
15854+
15855+ dwClk = mode->clock * 1000 / 10000;
15856+ byArgs[0] = (u8) (dwClk & 0xFF);
15857+ byArgs[1] = (u8) ((dwClk >> 8) & 0xFF);
15858+
15859+ /* HActive & VActive should not exceed 12 bits each. So check it */
15860+ if ((dwHActive > 0xFFF) || (dwVActive > 0xFFF))
15861+ return FALSE;
15862+
15863+ byArgs[2] = (u8) (dwHActive & 0xFF);
15864+ byArgs[3] = (u8) ((dwHActive >> 8) & 0xF);
15865+ byArgs[4] = (u8) (dwVActive & 0xFF);
15866+ byArgs[5] = (u8) ((dwVActive >> 8) & 0xF);
15867+
15868+ bIsInterlaced = 1;
15869+ bIsScaled = 0;
15870+
15871+ byArgs[6] = bIsInterlaced ? 1 : 0;
15872+ byArgs[6] |= bIsScaled ? 2 : 0;
15873+
15874+ intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMINGS,
15875+ byArgs, 7);
15876+ status = intel_sdvo_read_response(output, NULL, 0);
15877+
15878+ if (status != SDVO_CMD_STATUS_SUCCESS)
15879+ return FALSE;
15880+
15881+ return TRUE;
15882+
15883+}
15884+
15885+static bool i830_sdvo_get_preferred_input_timing(struct drm_output * output,
15886+ struct intel_sdvo_dtd *output_dtd)
15887+{
15888+ return intel_sdvo_get_timing(output,
15889+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
15890+ output_dtd);
15891+}
15892+
15893+static bool i830_sdvo_set_current_inoutmap(struct drm_output * output, u32 in0outputmask,
15894+ u32 in1outputmask)
15895+{
15896+ u8 byArgs[4];
15897+ u8 status;
15898+
15899+ /* Make all fields of the args/ret to zero */
15900+ memset(byArgs, 0, sizeof(byArgs));
15901+
15902+ /* Fill up the argument values; */
15903+ byArgs[0] = (u8) (in0outputmask & 0xFF);
15904+ byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
15905+ byArgs[2] = (u8) (in1outputmask & 0xFF);
15906+ byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
15907+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
15908+ status = intel_sdvo_read_response(output, NULL, 0);
15909+
15910+ if (status != SDVO_CMD_STATUS_SUCCESS)
15911+ return FALSE;
15912+
15913+ return TRUE;
15914+
15915+}
15916+
15917+void i830_sdvo_set_iomap(struct drm_output * output)
15918+{
15919+ u32 dwCurrentSDVOIn0 = 0;
15920+ u32 dwCurrentSDVOIn1 = 0;
15921+ u32 dwDevMask = 0;
15922+
15923+ struct intel_output *intel_output = output->driver_private;
15924+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15925+
15926+
15927+ /* Please DO NOT change the following code. */
15928+ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
15929+ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
15930+ if (sdvo_priv->byInputWiring & (SDVOB_IN0 | SDVOC_IN0)) {
15931+ switch (sdvo_priv->ActiveDevice) {
15932+ case SDVO_DEVICE_LVDS:
15933+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
15934+ break;
15935+
15936+ case SDVO_DEVICE_TMDS:
15937+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
15938+ break;
15939+
15940+ case SDVO_DEVICE_TV:
15941+ dwDevMask =
15942+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
15943+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
15944+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
15945+ break;
15946+
15947+ case SDVO_DEVICE_CRT:
15948+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
15949+ break;
15950+ }
15951+ dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
15952+ } else if (sdvo_priv->byInputWiring & (SDVOB_IN1 | SDVOC_IN1)) {
15953+ switch (sdvo_priv->ActiveDevice) {
15954+ case SDVO_DEVICE_LVDS:
15955+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
15956+ break;
15957+
15958+ case SDVO_DEVICE_TMDS:
15959+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
15960+ break;
15961+
15962+ case SDVO_DEVICE_TV:
15963+ dwDevMask =
15964+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
15965+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
15966+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
15967+ break;
15968+
15969+ case SDVO_DEVICE_CRT:
15970+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
15971+ break;
15972+ }
15973+ dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
15974+ }
15975+
15976+ i830_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
15977+ dwCurrentSDVOIn1);
15978+}
15979+
15980+static bool i830_sdvo_get_input_output_pixelclock_range(struct drm_output * output,
15981+ bool direction)
15982+{
15983+ u8 byRets[4];
15984+ u8 status;
15985+
15986+ struct intel_output *intel_output = output->driver_private;
15987+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
15988+
15989+ /* Make all fields of the args/ret to zero */
15990+ memset(byRets, 0, sizeof(byRets));
15991+ if (direction) /* output pixel clock */
15992+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE,
15993+ NULL, 0);
15994+ else
15995+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
15996+ NULL, 0);
15997+ status = intel_sdvo_read_response(output, byRets, 4);
15998+
15999+ if (status != SDVO_CMD_STATUS_SUCCESS)
16000+ return FALSE;
16001+
16002+ if (direction) {
16003+ /* Fill up the return values. */
16004+ sdvo_priv->dwMinOutDotClk =
16005+ (u32) byRets[0] | ((u32) byRets[1] << 8);
16006+ sdvo_priv->dwMaxOutDotClk =
16007+ (u32) byRets[2] | ((u32) byRets[3] << 8);
16008+
16009+ /* Multiply 10000 with the clocks obtained */
16010+ sdvo_priv->dwMinOutDotClk = (sdvo_priv->dwMinOutDotClk) * 10000;
16011+ sdvo_priv->dwMaxOutDotClk = (sdvo_priv->dwMaxOutDotClk) * 10000;
16012+
16013+ } else {
16014+ /* Fill up the return values. */
16015+ sdvo_priv->dwMinInDotClk = (u32) byRets[0] | ((u32) byRets[1] << 8);
16016+ sdvo_priv->dwMaxInDotClk = (u32) byRets[2] | ((u32) byRets[3] << 8);
16017+
16018+ /* Multiply 10000 with the clocks obtained */
16019+ sdvo_priv->dwMinInDotClk = (sdvo_priv->dwMinInDotClk) * 10000;
16020+ sdvo_priv->dwMaxInDotClk = (sdvo_priv->dwMaxInDotClk) * 10000;
16021+ }
16022+ DRM_DEBUG("MinDotClk = 0x%x\n", sdvo_priv->dwMinInDotClk);
16023+ DRM_DEBUG("MaxDotClk = 0x%x\n", sdvo_priv->dwMaxInDotClk);
16024+
16025+ return TRUE;
16026+
16027+}
16028+
16029+static bool i830_sdvo_get_supported_tvoutput_formats(struct drm_output * output,
16030+ u32 * pTVStdMask,
16031+ u32 * pHDTVStdMask, u32 *pTVStdFormat)
16032+{
16033+ struct intel_output *intel_output = output->driver_private;
16034+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
16035+
16036+ u8 byRets[6];
16037+ u8 status;
16038+
16039+ /* Make all fields of the args/ret to zero */
16040+ memset(byRets, 0, sizeof(byRets));
16041+
16042+ /* Send the arguments & SDVO opcode to the h/w */
16043+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
16044+
16045+ status = intel_sdvo_read_response(output, byRets, 6);
16046+ if (status != SDVO_CMD_STATUS_SUCCESS)
16047+ return FALSE;
16048+
16049+ /* Fill up the return values; */
16050+ *pTVStdMask = (((u32) byRets[0]) |
16051+ ((u32) byRets[1] << 8) |
16052+ ((u32) (byRets[2] & 0x7) << 16));
16053+
16054+ *pHDTVStdMask = (((u32) byRets[2] & 0xF8) |
16055+ ((u32) byRets[3] << 8) |
16056+ ((u32) byRets[4] << 16) | ((u32) byRets[5] << 24));
16057+
16058+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMATS, NULL, 0);
16059+
16060+ status = intel_sdvo_read_response(output, byRets, 6);
16061+ if (status != SDVO_CMD_STATUS_SUCCESS)
16062+ return FALSE;
16063+
16064+ /* Fill up the return values; */
16065+ if(sdvo_priv->TVMode == TVMODE_SDTV)
16066+ *pTVStdFormat = (((u32) byRets[0]) |
16067+ ((u32) byRets[1] << 8) |
16068+ ((u32) (byRets[2] & 0x7) << 16));
16069+ else
16070+ *pTVStdFormat = (((u32) byRets[2] & 0xF8) |
16071+ ((u32) byRets[3] << 8) |
16072+ ((u32) byRets[4] << 16) | ((u32) byRets[5] << 24));
16073+ DRM_DEBUG("BIOS TV format is %d\n",*pTVStdFormat);
16074+ return TRUE;
16075+
16076+}
16077+
16078+static bool i830_sdvo_get_supported_enhancements(struct drm_output * output,
16079+ u32 * psupported_enhancements)
16080+{
16081+
16082+ u8 status;
16083+ u8 byRets[2];
16084+ struct intel_output *intel_output = output->driver_private;
16085+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
16086+
16087+
16088+ /* Make all fields of the args/ret to zero */
16089+ memset(byRets, 0, sizeof(byRets));
16090+
16091+	/* Send the arguments & SDVO opcode to the h/w */
16092+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, NULL, 0);
16093+
16094+ status = intel_sdvo_read_response(output, byRets, 2);
16095+ if (status != SDVO_CMD_STATUS_SUCCESS)
16096+ return FALSE;
16097+
16098+ sdvo_priv->dwSupportedEnhancements = *psupported_enhancements =
16099+ ((u32) byRets[0] | ((u32) byRets[1] << 8));
16100+ return TRUE;
16101+
16102+}
16103+
16104+static bool i830_sdvo_get_max_horizontal_overscan(struct drm_output * output, u32 * pMaxVal,
16105+ u32 * pDefaultVal)
16106+{
16107+ u8 byRets[4];
16108+ u8 status;
16109+
16110+ /* Make all fields of the args/ret to zero */
16111+ memset(byRets, 0, sizeof(byRets));
16112+
16113+	/* Send the arguments & SDVO opcode to the h/w */
16114+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HORIZONTAL_OVERSCAN, NULL,
16115+ 0);
16116+
16117+ status = intel_sdvo_read_response(output, byRets, 4);
16118+ if (status != SDVO_CMD_STATUS_SUCCESS)
16119+ return FALSE;
16120+ /* Fill up the return values. */
16121+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16122+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16123+ return TRUE;
16124+}
16125+
16126+static bool i830_sdvo_get_max_vertical_overscan(struct drm_output * output, u32 * pMaxVal,
16127+ u32 * pDefaultVal)
16128+{
16129+ u8 byRets[4];
16130+ u8 status;
16131+
16132+ /* Make all fields of the args/ret to zero */
16133+ memset(byRets, 0, sizeof(byRets));
16134+
16135+	/* Send the arguments & SDVO opcode to the h/w */
16136+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_VERTICAL_OVERSCAN, NULL, 0);
16137+
16138+ status = intel_sdvo_read_response(output, byRets, 4);
16139+ if (status != SDVO_CMD_STATUS_SUCCESS)
16140+ return FALSE;
16141+ /* Fill up the return values. */
16142+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16143+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16144+ return TRUE;
16145+}
16146+
16147+static bool i830_sdvo_get_max_horizontal_position(struct drm_output * output, u32 * pMaxVal,
16148+ u32 * pDefaultVal)
16149+{
16150+
16151+ u8 byRets[4];
16152+ u8 status;
16153+
16154+ /* Make all fields of the args/ret to zero */
16155+ memset(byRets, 0, sizeof(byRets));
16156+
16157+	/* Send the arguments & SDVO opcode to the h/w */
16158+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HORIZONTAL_POSITION, NULL,
16159+ 0);
16160+
16161+ status = intel_sdvo_read_response(output, byRets, 4);
16162+ if (status != SDVO_CMD_STATUS_SUCCESS)
16163+ return FALSE;
16164+
16165+ /* Fill up the return values. */
16166+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16167+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16168+
16169+ return TRUE;
16170+}
16171+
16172+static bool i830_sdvo_get_max_vertical_position(struct drm_output * output,
16173+ u32 * pMaxVal, u32 * pDefaultVal)
16174+{
16175+
16176+ u8 byRets[4];
16177+ u8 status;
16178+
16179+ /* Make all fields of the args/ret to zero */
16180+ memset(byRets, 0, sizeof(byRets));
16181+
16182+	/* Send the arguments & SDVO opcode to the h/w */
16183+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_VERTICAL_POSITION, NULL, 0);
16184+
16185+ status = intel_sdvo_read_response(output, byRets, 4);
16186+ if (status != SDVO_CMD_STATUS_SUCCESS)
16187+ return FALSE;
16188+
16189+ /* Fill up the return values. */
16190+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16191+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16192+
16193+ return TRUE;
16194+}
16195+
16196+static bool i830_sdvo_get_max_flickerfilter(struct drm_output * output,
16197+ u32 * pMaxVal, u32 * pDefaultVal)
16198+{
16199+
16200+ u8 byRets[4];
16201+ u8 status;
16202+
16203+ /* Make all fields of the args/ret to zero */
16204+ memset(byRets, 0, sizeof(byRets));
16205+
16206+	/* Send the arguments & SDVO opcode to the h/w */
16207+
16208+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_FLICKER_FILTER, NULL, 0);
16209+
16210+ status = intel_sdvo_read_response(output, byRets, 4);
16211+ if (status != SDVO_CMD_STATUS_SUCCESS)
16212+ return FALSE;
16213+ /* Fill up the return values. */
16214+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16215+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16216+
16217+ return TRUE;
16218+}
16219+
16220+static bool i830_sdvo_get_max_brightness(struct drm_output * output,
16221+ u32 * pMaxVal, u32 * pDefaultVal)
16222+{
16223+
16224+ u8 byRets[4];
16225+ u8 status;
16226+
16227+ /* Make all fields of the args/ret to zero */
16228+ memset(byRets, 0, sizeof(byRets));
16229+
16230+	/* Send the arguments & SDVO opcode to the h/w */
16231+
16232+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
16233+
16234+ status = intel_sdvo_read_response(output, byRets, 4);
16235+ if (status != SDVO_CMD_STATUS_SUCCESS)
16236+ return FALSE;
16237+ /* Fill up the return values. */
16238+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16239+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16240+
16241+ return TRUE;
16242+}
16243+
16244+static bool i830_sdvo_get_max_contrast(struct drm_output * output,
16245+ u32 * pMaxVal, u32 * pDefaultVal)
16246+{
16247+
16248+ u8 byRets[4];
16249+ u8 status;
16250+
16251+ /* Make all fields of the args/ret to zero */
16252+ memset(byRets, 0, sizeof(byRets));
16253+
16254+	/* Send the arguments & SDVO opcode to the h/w */
16255+
16256+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
16257+
16258+ status = intel_sdvo_read_response(output, byRets, 4);
16259+ if (status != SDVO_CMD_STATUS_SUCCESS)
16260+ return FALSE;
16261+ /* Fill up the return values. */
16262+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16263+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16264+
16265+ return TRUE;
16266+}
16267+
16268+static bool i830_sdvo_get_max_sharpness(struct drm_output * output,
16269+ u32 * pMaxVal, u32 * pDefaultVal)
16270+{
16271+
16272+ u8 byRets[4];
16273+ u8 status;
16274+
16275+ /* Make all fields of the args/ret to zero */
16276+ memset(byRets, 0, sizeof(byRets));
16277+
16278+	/* Send the arguments & SDVO opcode to the h/w */
16279+
16280+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_SHARPNESS, NULL, 0);
16281+
16282+ status = intel_sdvo_read_response(output, byRets, 4);
16283+ if (status != SDVO_CMD_STATUS_SUCCESS)
16284+ return FALSE;
16285+
16286+ /* Fill up the return values. */
16287+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16288+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16289+
16290+ return TRUE;
16291+}
16292+
16293+static bool i830_sdvo_get_max_hue(struct drm_output * output,
16294+ u32 * pMaxVal, u32 * pDefaultVal)
16295+{
16296+ u8 byRets[4];
16297+ u8 status;
16298+
16299+ /* Make all fields of the args/ret to zero */
16300+ memset(byRets, 0, sizeof(byRets));
16301+
16302+	/* Send the arguments & SDVO opcode to the h/w */
16303+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HUE, NULL, 0);
16304+
16305+ status = intel_sdvo_read_response(output, byRets, 4);
16306+ if (status != SDVO_CMD_STATUS_SUCCESS)
16307+ return FALSE;
16308+
16309+ /* Fill up the return values. */
16310+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16311+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16312+
16313+ return TRUE;
16314+}
16315+
16316+static bool i830_sdvo_get_max_saturation(struct drm_output * output,
16317+ u32 * pMaxVal, u32 * pDefaultVal)
16318+{
16319+
16320+ u8 byRets[4];
16321+ u8 status;
16322+
16323+ /* Make all fields of the args/ret to zero */
16324+ memset(byRets, 0, sizeof(byRets));
16325+
16326+	/* Send the arguments & SDVO opcode to the h/w */
16327+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
16328+
16329+ status = intel_sdvo_read_response(output, byRets, 4);
16330+ if (status != SDVO_CMD_STATUS_SUCCESS)
16331+ return FALSE;
16332+
16333+ /* Fill up the return values. */
16334+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16335+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16336+
16337+ return TRUE;
16338+}
16339+
16340+static bool i830_sdvo_get_max_adaptive_flickerfilter(struct drm_output * output,
16341+ u32 * pMaxVal,
16342+ u32 * pDefaultVal)
16343+{
16344+ u8 byRets[4];
16345+ u8 status;
16346+
16347+ /* Make all fields of the args/ret to zero */
16348+ memset(byRets, 0, sizeof(byRets));
16349+
16350+	/* Send the arguments & SDVO opcode to the h/w */
16351+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FILTER,
16352+ NULL, 0);
16353+ status = intel_sdvo_read_response(output, byRets, 4);
16354+ if (status != SDVO_CMD_STATUS_SUCCESS)
16355+ return FALSE;
16356+
16357+ /* Fill up the return values. */
16358+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16359+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16360+
16361+ return TRUE;
16362+}
16363+
16364+static bool i830_sdvo_get_max_lumafilter(struct drm_output * output,
16365+ u32 * pMaxVal, u32 * pDefaultVal)
16366+{
16367+
16368+ u8 byRets[4];
16369+ u8 status;
16370+
16371+ /* Make all fields of the args/ret to zero */
16372+ memset(byRets, 0, sizeof(byRets));
16373+
16374+	/* Send the arguments & SDVO opcode to the h/w */
16375+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_TV_LUMA_FILTER, NULL, 0);
16376+ status = intel_sdvo_read_response(output, byRets, 4);
16377+ if (status != SDVO_CMD_STATUS_SUCCESS)
16378+ return FALSE;
16379+
16380+ /* Fill up the return values. */
16381+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16382+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16383+
16384+ return TRUE;
16385+}
16386+
16387+static bool i830_sdvo_get_max_chromafilter(struct drm_output * output,
16388+ u32 * pMaxVal, u32 * pDefaultVal)
16389+{
16390+
16391+ u8 byRets[4];
16392+ u8 status;
16393+
16394+ /* Make all fields of the args/ret to zero */
16395+ memset(byRets, 0, sizeof(byRets));
16396+
16397+	/* Send the arguments & SDVO opcode to the h/w */
16398+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_TV_CHROMA_FILTER, NULL, 0);
16399+ status = intel_sdvo_read_response(output, byRets, 4);
16400+ if (status != SDVO_CMD_STATUS_SUCCESS)
16401+ return FALSE;
16402+
16403+ /* Fill up the return values. */
16404+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16405+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16406+
16407+ return TRUE;
16408+}
16409+
16410+static bool i830_sdvo_get_dotcrawl(struct drm_output * output,
16411+ u32 * pCurrentVal, u32 * pDefaultVal)
16412+{
16413+
16414+ u8 byRets[2];
16415+ u8 status;
16416+
16417+ /* Make all fields of the args/ret to zero */
16418+ memset(byRets, 0, sizeof(byRets));
16419+
16420+	/* Send the arguments & SDVO opcode to the h/w */
16421+
16422+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_DOT_CRAWL, NULL, 0);
16423+ status = intel_sdvo_read_response(output, byRets, 2);
16424+ if (status != SDVO_CMD_STATUS_SUCCESS)
16425+ return FALSE;
16426+
16427+ /* Tibet issue 1603772: Dot crawl do not persist after reboot/Hibernate */
16428+ /* Details : Bit0 is considered as DotCrawl Max value. But according to EDS, Bit0 */
16429+ /* represents the Current DotCrawl value. */
16430+ /* Fix : The current value is updated with Bit0. */
16431+
16432+ /* Fill up the return values. */
16433+ *pCurrentVal = (u32) (byRets[0] & 0x1);
16434+ *pDefaultVal = (u32) ((byRets[0] >> 1) & 0x1);
16435+ return TRUE;
16436+}
16437+
16438+static bool i830_sdvo_get_max_2D_flickerfilter(struct drm_output * output,
16439+ u32 * pMaxVal, u32 * pDefaultVal)
16440+{
16441+
16442+ u8 byRets[4];
16443+ u8 status;
16444+
16445+ /* Make all fields of the args/ret to zero */
16446+ memset(byRets, 0, sizeof(byRets));
16447+
16448+	/* Send the arguments & SDVO opcode to the h/w */
16449+
16450+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_2D_FLICKER_FILTER, NULL, 0);
16451+ status = intel_sdvo_read_response(output, byRets, 4);
16452+ if (status != SDVO_CMD_STATUS_SUCCESS)
16453+ return FALSE;
16454+
16455+ /* Fill up the return values. */
16456+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
16457+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
16458+
16459+ return TRUE;
16460+}
16461+
16462+static bool i830_sdvo_set_horizontal_overscan(struct drm_output * output, u32 dwVal)
16463+{
16464+
16465+ u8 byArgs[2];
16466+ u8 status;
16467+
16468+ /* Make all fields of the args/ret to zero */
16469+ memset(byArgs, 0, sizeof(byArgs));
16470+
16471+	/* Fill up the argument value */
16472+ byArgs[0] = (u8) (dwVal & 0xFF);
16473+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16474+
16475+	/* Send the arguments & SDVO opcode to the h/w */
16476+
16477+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HORIZONTAL_OVERSCAN, byArgs, 2);
16478+ status = intel_sdvo_read_response(output, NULL, 0);
16479+
16480+ if (status != SDVO_CMD_STATUS_SUCCESS)
16481+ return FALSE;
16482+ return TRUE;
16483+}
16484+
16485+static bool i830_sdvo_set_vertical_overscan(struct drm_output * output, u32 dwVal)
16486+{
16487+
16488+ u8 byArgs[2];
16489+ u8 status;
16490+
16491+ /* Make all fields of the args/ret to zero */
16492+ memset(byArgs, 0, sizeof(byArgs));
16493+
16494+	/* Fill up the argument value */
16495+ byArgs[0] = (u8) (dwVal & 0xFF);
16496+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16497+
16498+	/* Send the arguments & SDVO opcode to the h/w */
16499+
16500+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_VERTICAL_OVERSCAN, byArgs, 2);
16501+ status = intel_sdvo_read_response(output, NULL, 0);
16502+
16503+ if (status != SDVO_CMD_STATUS_SUCCESS)
16504+ return FALSE;
16505+ return TRUE;
16506+}
16507+
16508+static bool i830_sdvo_set_horizontal_position(struct drm_output * output, u32 dwVal)
16509+{
16510+
16511+ u8 byArgs[2];
16512+ u8 status;
16513+
16514+ /* Make all fields of the args/ret to zero */
16515+ memset(byArgs, 0, sizeof(byArgs));
16516+
16517+	/* Fill up the argument value */
16518+ byArgs[0] = (u8) (dwVal & 0xFF);
16519+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16520+
16521+	/* Send the arguments & SDVO opcode to the h/w */
16522+
16523+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HORIZONTAL_POSITION, byArgs, 2);
16524+ status = intel_sdvo_read_response(output, NULL, 0);
16525+
16526+ if (status != SDVO_CMD_STATUS_SUCCESS)
16527+ return FALSE;
16528+
16529+ return TRUE;
16530+}
16531+
16532+static bool i830_sdvo_set_vertical_position(struct drm_output * output, u32 dwVal)
16533+{
16534+
16535+ u8 byArgs[2];
16536+ u8 status;
16537+
16538+ /* Make all fields of the args/ret to zero */
16539+ memset(byArgs, 0, sizeof(byArgs));
16540+
16541+	/* Fill up the argument value */
16542+ byArgs[0] = (u8) (dwVal & 0xFF);
16543+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16544+
16545+	/* Send the arguments & SDVO opcode to the h/w */
16546+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_VERTICAL_POSITION, byArgs, 2);
16547+ status = intel_sdvo_read_response(output, NULL, 0);
16548+
16549+ if (status != SDVO_CMD_STATUS_SUCCESS)
16550+ return FALSE;
16551+
16552+ return TRUE;
16553+
16554+}
16555+
16556+static bool i830_sdvo_set_flickerilter(struct drm_output * output, u32 dwVal)
16557+{
16558+
16559+ u8 byArgs[2];
16560+ u8 status;
16561+
16562+ /* Make all fields of the args/ret to zero */
16563+ memset(byArgs, 0, sizeof(byArgs));
16564+
16565+	/* Fill up the argument value */
16566+ byArgs[0] = (u8) (dwVal & 0xFF);
16567+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16568+
16569+	/* Send the arguments & SDVO opcode to the h/w */
16570+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_FLICKER_FILTER, byArgs, 2);
16571+ status = intel_sdvo_read_response(output, NULL, 0);
16572+
16573+ if (status != SDVO_CMD_STATUS_SUCCESS)
16574+ return FALSE;
16575+
16576+ return TRUE;
16577+}
16578+
16579+static bool i830_sdvo_set_brightness(struct drm_output * output, u32 dwVal)
16580+{
16581+
16582+ u8 byArgs[2];
16583+ u8 status;
16584+
16585+ /* Make all fields of the args/ret to zero */
16586+ memset(byArgs, 0, sizeof(byArgs));
16587+
16588+	/* Fill up the argument value */
16589+ byArgs[0] = (u8) (dwVal & 0xFF);
16590+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16591+
16592+	/* Send the arguments & SDVO opcode to the h/w */
16593+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_BRIGHTNESS, byArgs, 2);
16594+ status = intel_sdvo_read_response(output, NULL, 0);
16595+
16596+ if (status != SDVO_CMD_STATUS_SUCCESS)
16597+ return FALSE;
16598+
16599+ return TRUE;
16600+}
16601+
16602+static bool i830_sdvo_set_contrast(struct drm_output * output, u32 dwVal)
16603+{
16604+
16605+ u8 byArgs[2];
16606+ u8 status;
16607+
16608+ /* Make all fields of the args/ret to zero */
16609+ memset(byArgs, 0, sizeof(byArgs));
16610+	/* Fill up the argument value */
16611+ byArgs[0] = (u8) (dwVal & 0xFF);
16612+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16613+
16614+	/* Send the arguments & SDVO opcode to the h/w */
16615+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CONTRAST, byArgs, 2);
16616+ status = intel_sdvo_read_response(output, NULL, 0);
16617+
16618+ if (status != SDVO_CMD_STATUS_SUCCESS)
16619+ return FALSE;
16620+
16621+ return TRUE;
16622+}
16623+
16624+static bool i830_sdvo_set_sharpness(struct drm_output * output, u32 dwVal)
16625+{
16626+
16627+ u8 byArgs[2];
16628+ u8 status;
16629+
16630+ /* Make all fields of the args/ret to zero */
16631+ memset(byArgs, 0, sizeof(byArgs));
16632+
16633+	/* Fill up the argument value */
16634+ byArgs[0] = (u8) (dwVal & 0xFF);
16635+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16636+
16637+	/* Send the arguments & SDVO opcode to the h/w */
16638+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_SHARPNESS, byArgs, 2);
16639+ status = intel_sdvo_read_response(output, NULL, 0);
16640+
16641+ if (status != SDVO_CMD_STATUS_SUCCESS)
16642+ return FALSE;
16643+
16644+ return TRUE;
16645+}
16646+
16647+static bool i830_sdvo_set_hue(struct drm_output * output, u32 dwVal)
16648+{
16649+
16650+ u8 byArgs[2];
16651+ u8 status;
16652+
16653+ /* Make all fields of the args/ret to zero */
16654+ memset(byArgs, 0, sizeof(byArgs));
16655+
16656+	/* Fill up the argument value */
16657+ byArgs[0] = (u8) (dwVal & 0xFF);
16658+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16659+
16660+	/* Send the arguments & SDVO opcode to the h/w */
16661+
16662+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HUE, byArgs, 2);
16663+ status = intel_sdvo_read_response(output, NULL, 0);
16664+
16665+ if (status != SDVO_CMD_STATUS_SUCCESS)
16666+ return FALSE;
16667+
16668+ return TRUE;
16669+}
16670+
16671+static bool i830_sdvo_set_saturation(struct drm_output * output, u32 dwVal)
16672+{
16673+
16674+ u8 byArgs[2];
16675+ u8 status;
16676+
16677+ /* Make all fields of the args/ret to zero */
16678+ memset(byArgs, 0, sizeof(byArgs));
16679+
16680+	/* Fill up the argument value */
16681+ byArgs[0] = (u8) (dwVal & 0xFF);
16682+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16683+
16684+	/* Send the arguments & SDVO opcode to the h/w */
16685+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_SATURATION, byArgs, 2);
16686+ status = intel_sdvo_read_response(output, NULL, 0);
16687+
16688+ if (status != SDVO_CMD_STATUS_SUCCESS)
16689+ return FALSE;
16690+
16691+ return TRUE;
16692+}
16693+
16694+static bool i830_sdvo_set_adaptive_flickerfilter(struct drm_output * output, u32 dwVal)
16695+{
16696+ u8 byArgs[2];
16697+ u8 status;
16698+
16699+ /* Make all fields of the args/ret to zero */
16700+ memset(byArgs, 0, sizeof(byArgs));
16701+
16702+	/* Fill up the argument value */
16703+ byArgs[0] = (u8) (dwVal & 0xFF);
16704+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16705+
16706+	/* Send the arguments & SDVO opcode to the h/w */
16707+
16708+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ADAPTIVE_FLICKER_FILTER, byArgs,
16709+ 2);
16710+ status = intel_sdvo_read_response(output, NULL, 0);
16711+
16712+ if (status != SDVO_CMD_STATUS_SUCCESS)
16713+ return FALSE;
16714+
16715+ return TRUE;
16716+
16717+}
16718+
16719+static bool i830_sdvo_set_lumafilter(struct drm_output * output, u32 dwVal)
16720+{
16721+ u8 byArgs[2];
16722+ u8 status;
16723+
16724+ /* Make all fields of the args/ret to zero */
16725+ memset(byArgs, 0, sizeof(byArgs));
16726+
16727+	/* Fill up the argument value */
16728+ byArgs[0] = (u8) (dwVal & 0xFF);
16729+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16730+
16731+	/* Send the arguments & SDVO opcode to the h/w */
16732+
16733+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_LUMA_FILTER, byArgs, 2);
16734+ status = intel_sdvo_read_response(output, NULL, 0);
16735+
16736+ if (status != SDVO_CMD_STATUS_SUCCESS)
16737+ return FALSE;
16738+
16739+ return TRUE;
16740+}
16741+
16742+static bool i830_sdvo_set_chromafilter(struct drm_output * output, u32 dwVal)
16743+{
16744+
16745+ u8 byArgs[2];
16746+ u8 status;
16747+
16748+ /* Make all fields of the args/ret to zero */
16749+ memset(byArgs, 0, sizeof(byArgs));
16750+
16751+	/* Fill up the argument value */
16752+ byArgs[0] = (u8) (dwVal & 0xFF);
16753+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16754+
16755+	/* Send the arguments & SDVO opcode to the h/w */
16756+
16757+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_CHROMA_FILTER, byArgs, 2);
16758+ status = intel_sdvo_read_response(output, NULL, 0);
16759+
16760+ if (status != SDVO_CMD_STATUS_SUCCESS)
16761+ return FALSE;
16762+
16763+ return TRUE;
16764+}
16765+
16766+static bool i830_sdvo_set_dotcrawl(struct drm_output * output, u32 dwVal)
16767+{
16768+
16769+ u8 byArgs[2];
16770+ u8 status;
16771+
16772+ /* Make all fields of the args/ret to zero */
16773+ memset(byArgs, 0, sizeof(byArgs));
16774+
16775+	/* Fill up the argument value */
16776+ byArgs[0] = (u8) (dwVal & 0xFF);
16777+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16778+
16779+	/* Send the arguments & SDVO opcode to the h/w */
16780+
16781+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_DOT_CRAWL, byArgs, 2);
16782+ status = intel_sdvo_read_response(output, NULL, 0);
16783+ if (status != SDVO_CMD_STATUS_SUCCESS)
16784+ return FALSE;
16785+
16786+ return TRUE;
16787+}
16788+
16789+static bool i830_sdvo_set_2D_flickerfilter(struct drm_output * output, u32 dwVal)
16790+{
16791+
16792+ u8 byArgs[2];
16793+ u8 status;
16794+
16795+ /* Make all fields of the args/ret to zero */
16796+ memset(byArgs, 0, sizeof(byArgs));
16797+
16798+	/* Fill up the argument value */
16799+ byArgs[0] = (u8) (dwVal & 0xFF);
16800+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
16801+
16802+	/* Send the arguments & SDVO opcode to the h/w */
16803+
16804+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_2D_FLICKER_FILTER, byArgs, 2);
16805+ status = intel_sdvo_read_response(output, NULL, 0);
16806+
16807+ if (status != SDVO_CMD_STATUS_SUCCESS)
16808+ return FALSE;
16809+
16810+ return TRUE;
16811+}
16812+
16813+#if 0
16814+static bool i830_sdvo_set_ancillary_video_information(struct drm_output * output)
16815+{
16816+
16817+ u8 status;
16818+ u8 byArgs[4];
16819+ u32 dwAncillaryBits = 0;
16820+ struct intel_output *intel_output = output->driver_private;
16821+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
16822+
16823+
16824+ PSDVO_ANCILLARY_INFO_T pAncillaryInfo = &sdvo_priv->AncillaryInfo;
16825+
16826+ /* Make all fields of the args/ret to zero */
16827+ memset(byArgs, 0, sizeof(byArgs));
16828+
16829+ /* Handle picture aspect ratio (bits 8, 9) and */
16830+ /* active format aspect ratio (bits 10, 13) */
16831+ switch (pAncillaryInfo->AspectRatio) {
16832+ case CP_ASPECT_RATIO_FF_4_BY_3:
16833+ dwAncillaryBits |= UAIM_PAR_4_3;
16834+ dwAncillaryBits |= UAIM_FAR_4_BY_3_CENTER;
16835+ break;
16836+ case CP_ASPECT_RATIO_14_BY_9_CENTER:
16837+ dwAncillaryBits |= UAIM_FAR_14_BY_9_CENTER;
16838+ break;
16839+ case CP_ASPECT_RATIO_14_BY_9_TOP:
16840+ dwAncillaryBits |= UAIM_FAR_14_BY_9_LETTERBOX_TOP;
16841+ break;
16842+ case CP_ASPECT_RATIO_16_BY_9_CENTER:
16843+ dwAncillaryBits |= UAIM_PAR_16_9;
16844+ dwAncillaryBits |= UAIM_FAR_16_BY_9_CENTER;
16845+ break;
16846+ case CP_ASPECT_RATIO_16_BY_9_TOP:
16847+ dwAncillaryBits |= UAIM_PAR_16_9;
16848+ dwAncillaryBits |= UAIM_FAR_16_BY_9_LETTERBOX_TOP;
16849+ break;
16850+ case CP_ASPECT_RATIO_GT_16_BY_9_CENTER:
16851+ dwAncillaryBits |= UAIM_PAR_16_9;
16852+ dwAncillaryBits |= UAIM_FAR_GT_16_BY_9_LETTERBOX_CENTER;
16853+ break;
16854+ case CP_ASPECT_RATIO_FF_4_BY_3_PROT_CENTER:
16855+ dwAncillaryBits |= UAIM_FAR_4_BY_3_SNP_14_BY_9_CENTER;
16856+ break;
16857+ case CP_ASPECT_RATIO_FF_16_BY_9_ANAMORPHIC:
16858+ dwAncillaryBits |= UAIM_PAR_16_9;
16859+ break;
16860+ default:
16861+ DRM_DEBUG("fail to set ancillary video info\n");
16862+ return FALSE;
16863+
16864+ }
16865+
16866+ /* Fill up the argument value */
16867+ byArgs[0] = (u8) ((dwAncillaryBits >> 0) & 0xFF);
16868+ byArgs[1] = (u8) ((dwAncillaryBits >> 8) & 0xFF);
16869+ byArgs[2] = (u8) ((dwAncillaryBits >> 16) & 0xFF);
16870+ byArgs[3] = (u8) ((dwAncillaryBits >> 24) & 0xFF);
16871+
16872+	/* Send the arguments & SDVO opcode to the h/w */
16873+
16874+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ANCILLARY_VIDEO_INFORMATION,
16875+ byArgs, 4);
16876+ status = intel_sdvo_read_response(output, NULL, 0);
16877+
16878+ if (status != SDVO_CMD_STATUS_SUCCESS)
16879+ return FALSE;
16880+
16881+ return TRUE;
16882+
16883+}
16884+#endif
16885+static bool i830_tv_program_display_params(struct drm_output * output)
16886+
16887+{
16888+ u8 status;
16889+ u32 dwMaxVal = 0;
16890+ u32 dwDefaultVal = 0;
16891+ u32 dwCurrentVal = 0;
16892+
16893+ struct intel_output *intel_output = output->driver_private;
16894+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
16895+
16896+
16897+ /* X & Y Positions */
16898+
16899+	/* Horizontal position */
16900+ if (sdvo_priv->dwSupportedEnhancements & SDVO_HORIZONTAL_POSITION) {
16901+ status =
16902+ i830_sdvo_get_max_horizontal_position(output, &dwMaxVal,
16903+ &dwDefaultVal);
16904+
16905+ if (status) {
16906+ /*Tibet issue 1596943: After changing mode from 8x6 to 10x7 open CUI and press Restore Defaults */
16907+ /*Position changes. */
16908+
16909+ /* Tibet:1629992 : can't keep previous TV setting status if re-boot system after TV setting(screen position & size) of CUI */
16910+	/* Fix : compare whether current position is greater than max value and then assign the default value. Earlier the check was */
16911+ /* against the pAim->PositionX.Max value to dwMaxVal. When we boot the PositionX.Max value is 0 and so after every reboot, */
16912+ /* position is set to default. */
16913+
16914+ if (sdvo_priv->dispParams.PositionX.Value > dwMaxVal)
16915+ sdvo_priv->dispParams.PositionX.Value = dwDefaultVal;
16916+
16917+ status =
16918+ i830_sdvo_set_horizontal_position(output,
16919+ sdvo_priv->dispParams.PositionX.
16920+ Value);
16921+
16922+ if (!status)
16923+ return status;
16924+
16925+ sdvo_priv->dispParams.PositionX.Max = dwMaxVal;
16926+ sdvo_priv->dispParams.PositionX.Min = 0;
16927+ sdvo_priv->dispParams.PositionX.Default = dwDefaultVal;
16928+ sdvo_priv->dispParams.PositionX.Step = 1;
16929+ } else {
16930+ return status;
16931+ }
16932+ }
16933+
16934+ /* Vertical position */
16935+ if (sdvo_priv->dwSupportedEnhancements & SDVO_VERTICAL_POSITION) {
16936+ status =
16937+ i830_sdvo_get_max_vertical_position(output, &dwMaxVal,
16938+ &dwDefaultVal);
16939+
16940+ if (status) {
16941+
16942+ /*Tibet issue 1596943: After changing mode from 8x6 to 10x7 open CUI and press Restore Defaults */
16943+ /*Position changes. */
16944+ /*currently if we are out of range get back to default */
16945+
16946+ /* Tibet:1629992 : can't keep previous TV setting status if re-boot system after TV setting(screen position & size) of CUI */
16947+	/* Fix : compare whether current position is greater than max value and then assign the default value. Earlier the check was */
16948+ /* against the pAim->PositionY.Max value to dwMaxVal. When we boot the PositionX.Max value is 0 and so after every reboot, */
16949+ /* position is set to default. */
16950+
16951+ if (sdvo_priv->dispParams.PositionY.Value > dwMaxVal)
16952+ sdvo_priv->dispParams.PositionY.Value = dwDefaultVal;
16953+
16954+ status =
16955+ i830_sdvo_set_vertical_position(output,
16956+ sdvo_priv->dispParams.PositionY.
16957+ Value);
16958+ if (!status)
16959+ return status;
16960+
16961+ sdvo_priv->dispParams.PositionY.Max = dwMaxVal;
16962+ sdvo_priv->dispParams.PositionY.Min = 0;
16963+ sdvo_priv->dispParams.PositionY.Default = dwDefaultVal;
16964+ sdvo_priv->dispParams.PositionY.Step = 1;
16965+ } else {
16966+ return status;
16967+ }
16968+ }
16969+
16970+ /* Flicker Filter */
16971+ if (sdvo_priv->dwSupportedEnhancements & SDVO_FLICKER_FILTER) {
16972+ status =
16973+ i830_sdvo_get_max_flickerfilter(output, &dwMaxVal, &dwDefaultVal);
16974+
16975+ if (status) {
16976+ /*currently if we are out of range get back to default */
16977+ if (sdvo_priv->dispParams.FlickerFilter.Value > dwMaxVal)
16978+ sdvo_priv->dispParams.FlickerFilter.Value = dwDefaultVal;
16979+
16980+ status =
16981+ i830_sdvo_set_flickerilter(output,
16982+ sdvo_priv->dispParams.FlickerFilter.
16983+ Value);
16984+ if (!status)
16985+ return status;
16986+
16987+ sdvo_priv->dispParams.FlickerFilter.Max = dwMaxVal;
16988+ sdvo_priv->dispParams.FlickerFilter.Min = 0;
16989+ sdvo_priv->dispParams.FlickerFilter.Default = dwDefaultVal;
16990+ sdvo_priv->dispParams.FlickerFilter.Step = 1;
16991+ } else {
16992+ return status;
16993+ }
16994+ }
16995+
16996+ /* Brightness */
16997+ if (sdvo_priv->dwSupportedEnhancements & SDVO_BRIGHTNESS) {
16998+
16999+ status =
17000+ i830_sdvo_get_max_brightness(output, &dwMaxVal, &dwDefaultVal);
17001+
17002+ if (status) {
17003+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17004+ /*no need to check it. */
17005+ if (sdvo_priv->dispParams.Brightness.Value > dwMaxVal)
17006+ sdvo_priv->dispParams.Brightness.Value = dwDefaultVal;
17007+
17008+ /* Program the device */
17009+ status =
17010+ i830_sdvo_set_brightness(output,
17011+ sdvo_priv->dispParams.Brightness.Value);
17012+ if (!status)
17013+ return status;
17014+
17015+ sdvo_priv->dispParams.Brightness.Max = dwMaxVal;
17016+ sdvo_priv->dispParams.Brightness.Min = 0;
17017+ sdvo_priv->dispParams.Brightness.Default = dwDefaultVal;
17018+ sdvo_priv->dispParams.Brightness.Step = 1;
17019+ } else {
17020+ return status;
17021+ }
17022+
17023+ }
17024+
17025+ /* Contrast */
17026+ if (sdvo_priv->dwSupportedEnhancements & SDVO_CONTRAST) {
17027+
17028+ status = i830_sdvo_get_max_contrast(output, &dwMaxVal, &dwDefaultVal);
17029+
17030+ if (status) {
17031+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17032+ /*no need to check it. */
17033+ if (sdvo_priv->dispParams.Contrast.Value > dwMaxVal)
17034+ sdvo_priv->dispParams.Contrast.Value = dwDefaultVal;
17035+
17036+ /* Program the device */
17037+ status =
17038+ i830_sdvo_set_contrast(output,
17039+ sdvo_priv->dispParams.Contrast.Value);
17040+ if (!status)
17041+ return status;
17042+
17043+ sdvo_priv->dispParams.Contrast.Max = dwMaxVal;
17044+ sdvo_priv->dispParams.Contrast.Min = 0;
17045+ sdvo_priv->dispParams.Contrast.Default = dwDefaultVal;
17046+
17047+ sdvo_priv->dispParams.Contrast.Step = 1;
17048+
17049+ } else {
17050+ return status;
17051+ }
17052+ }
17053+
17054+ /* Sharpness */
17055+ if (sdvo_priv->dwSupportedEnhancements & SDVO_SHARPNESS) {
17056+
17057+ status =
17058+ i830_sdvo_get_max_sharpness(output, &dwMaxVal, &dwDefaultVal);
17059+
17060+ if (status) {
17061+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17062+ /*no need to check it. */
17063+ if (sdvo_priv->dispParams.Sharpness.Value > dwMaxVal)
17064+ sdvo_priv->dispParams.Sharpness.Value = dwDefaultVal;
17065+
17066+ /* Program the device */
17067+ status =
17068+ i830_sdvo_set_sharpness(output,
17069+ sdvo_priv->dispParams.Sharpness.Value);
17070+ if (!status)
17071+ return status;
17072+ sdvo_priv->dispParams.Sharpness.Max = dwMaxVal;
17073+ sdvo_priv->dispParams.Sharpness.Min = 0;
17074+ sdvo_priv->dispParams.Sharpness.Default = dwDefaultVal;
17075+
17076+ sdvo_priv->dispParams.Sharpness.Step = 1;
17077+ } else {
17078+ return status;
17079+ }
17080+ }
17081+
17082+ /* Hue */
17083+ if (sdvo_priv->dwSupportedEnhancements & SDVO_HUE) {
17084+
17085+ status = i830_sdvo_get_max_hue(output, &dwMaxVal, &dwDefaultVal);
17086+
17087+ if (status) {
17088+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17089+ /*no need to check it. */
17090+ if (sdvo_priv->dispParams.Hue.Value > dwMaxVal)
17091+ sdvo_priv->dispParams.Hue.Value = dwDefaultVal;
17092+
17093+ /* Program the device */
17094+ status = i830_sdvo_set_hue(output, sdvo_priv->dispParams.Hue.Value);
17095+ if (!status)
17096+ return status;
17097+
17098+ sdvo_priv->dispParams.Hue.Max = dwMaxVal;
17099+ sdvo_priv->dispParams.Hue.Min = 0;
17100+ sdvo_priv->dispParams.Hue.Default = dwDefaultVal;
17101+
17102+ sdvo_priv->dispParams.Hue.Step = 1;
17103+
17104+ } else {
17105+ return status;
17106+ }
17107+ }
17108+
17109+ /* Saturation */
17110+ if (sdvo_priv->dwSupportedEnhancements & SDVO_SATURATION) {
17111+ status =
17112+ i830_sdvo_get_max_saturation(output, &dwMaxVal, &dwDefaultVal);
17113+
17114+ if (status) {
17115+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17116+ /*no need to check it. */
17117+ if (sdvo_priv->dispParams.Saturation.Value > dwMaxVal)
17118+ sdvo_priv->dispParams.Saturation.Value = dwDefaultVal;
17119+
17120+ /* Program the device */
17121+ status =
17122+ i830_sdvo_set_saturation(output,
17123+ sdvo_priv->dispParams.Saturation.Value);
17124+ if (!status)
17125+ return status;
17126+
17127+ sdvo_priv->dispParams.Saturation.Max = dwMaxVal;
17128+ sdvo_priv->dispParams.Saturation.Min = 0;
17129+ sdvo_priv->dispParams.Saturation.Default = dwDefaultVal;
17130+ sdvo_priv->dispParams.Saturation.Step = 1;
17131+ } else {
17132+ return status;
17133+ }
17134+
17135+ }
17136+
17137+ /* Adaptive Flicker filter */
17138+ if (sdvo_priv->dwSupportedEnhancements & SDVO_ADAPTIVE_FLICKER_FILTER) {
17139+ status =
17140+ i830_sdvo_get_max_adaptive_flickerfilter(output, &dwMaxVal,
17141+ &dwDefaultVal);
17142+
17143+ if (status) {
17144+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17145+ /*no need to check it. */
17146+ if (sdvo_priv->dispParams.AdaptiveFF.Value > dwMaxVal)
17147+ sdvo_priv->dispParams.AdaptiveFF.Value = dwDefaultVal;
17148+
17149+ status =
17150+ i830_sdvo_set_adaptive_flickerfilter(output,
17151+ sdvo_priv->dispParams.
17152+ AdaptiveFF.Value);
17153+ if (!status)
17154+ return status;
17155+
17156+ sdvo_priv->dispParams.AdaptiveFF.Max = dwMaxVal;
17157+ sdvo_priv->dispParams.AdaptiveFF.Min = 0;
17158+ sdvo_priv->dispParams.AdaptiveFF.Default = dwDefaultVal;
17159+ sdvo_priv->dispParams.AdaptiveFF.Step = 1;
17160+ } else {
17161+ return status;
17162+ }
17163+ }
17164+
17165+ /* 2D Flicker filter */
17166+ if (sdvo_priv->dwSupportedEnhancements & SDVO_2D_FLICKER_FILTER) {
17167+
17168+ status =
17169+ i830_sdvo_get_max_2D_flickerfilter(output, &dwMaxVal,
17170+ &dwDefaultVal);
17171+
17172+ if (status) {
17173+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17174+ /*no need to check it. */
17175+ if (sdvo_priv->dispParams.TwoD_FlickerFilter.Value > dwMaxVal)
17176+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = dwDefaultVal;
17177+
17178+ status =
17179+ i830_sdvo_set_2D_flickerfilter(output,
17180+ sdvo_priv->dispParams.
17181+ TwoD_FlickerFilter.Value);
17182+ if (!status)
17183+ return status;
17184+
17185+ sdvo_priv->dispParams.TwoD_FlickerFilter.Max = dwMaxVal;
17186+ sdvo_priv->dispParams.TwoD_FlickerFilter.Min = 0;
17187+ sdvo_priv->dispParams.TwoD_FlickerFilter.Default = dwDefaultVal;
17188+ sdvo_priv->dispParams.TwoD_FlickerFilter.Step = 1;
17189+ } else {
17190+ return status;
17191+ }
17192+ }
17193+
17194+ /* Luma Filter */
17195+ if (sdvo_priv->dwSupportedEnhancements & SDVO_TV_MAX_LUMA_FILTER) {
17196+ status =
17197+ i830_sdvo_get_max_lumafilter(output, &dwMaxVal, &dwDefaultVal);
17198+
17199+ if (status) {
17200+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17201+ /*no need to check it. */
17202+ if (sdvo_priv->dispParams.LumaFilter.Value > dwMaxVal)
17203+ sdvo_priv->dispParams.LumaFilter.Value = dwDefaultVal;
17204+
17205+ /* Program the device */
17206+ status =
17207+ i830_sdvo_set_lumafilter(output,
17208+ sdvo_priv->dispParams.LumaFilter.Value);
17209+ if (!status)
17210+ return status;
17211+
17212+ sdvo_priv->dispParams.LumaFilter.Max = dwMaxVal;
17213+ sdvo_priv->dispParams.LumaFilter.Min = 0;
17214+ sdvo_priv->dispParams.LumaFilter.Default = dwDefaultVal;
17215+ sdvo_priv->dispParams.LumaFilter.Step = 1;
17216+
17217+ } else {
17218+ return status;
17219+ }
17220+
17221+ }
17222+
17223+ /* Chroma Filter */
17224+ if (sdvo_priv->dwSupportedEnhancements & SDVO_MAX_TV_CHROMA_FILTER) {
17225+
17226+ status =
17227+ i830_sdvo_get_max_chromafilter(output, &dwMaxVal, &dwDefaultVal);
17228+
17229+ if (status) {
17230+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17231+ /*no need to check it. */
17232+ if (sdvo_priv->dispParams.ChromaFilter.Value > dwMaxVal)
17233+ sdvo_priv->dispParams.ChromaFilter.Value = dwDefaultVal;
17234+
17235+ /* Program the device */
17236+ status =
17237+ i830_sdvo_set_chromafilter(output,
17238+ sdvo_priv->dispParams.ChromaFilter.
17239+ Value);
17240+ if (!status)
17241+ return status;
17242+
17243+ sdvo_priv->dispParams.ChromaFilter.Max = dwMaxVal;
17244+ sdvo_priv->dispParams.ChromaFilter.Min = 0;
17245+ sdvo_priv->dispParams.ChromaFilter.Default = dwDefaultVal;
17246+ sdvo_priv->dispParams.ChromaFilter.Step = 1;
17247+ } else {
17248+ return status;
17249+ }
17250+
17251+ }
17252+
17253+ /* Dot Crawl */
17254+ if (sdvo_priv->dwSupportedEnhancements & SDVO_DOT_CRAWL) {
17255+ status = i830_sdvo_get_dotcrawl(output, &dwCurrentVal, &dwDefaultVal);
17256+
17257+ if (status) {
17258+
17259+ dwMaxVal = 1;
17260+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
17261+ /*no need to check it. */
17262+
17263+ /* Tibet issue 1603772: Dot crawl do not persist after reboot/Hibernate */
17264+ /* Details : "Dotcrawl.value" is compared with "dwDefaultVal". Since */
17265+ /* dwDefaultVal is always 0, dotCrawl value is always set to 0. */
17266+ /* Fix : Compare the current dotCrawl value with dwMaxValue. */
17267+
17268+ if (sdvo_priv->dispParams.DotCrawl.Value > dwMaxVal)
17269+
17270+ sdvo_priv->dispParams.DotCrawl.Value = dwMaxVal;
17271+
17272+ status =
17273+ i830_sdvo_set_dotcrawl(output,
17274+ sdvo_priv->dispParams.DotCrawl.Value);
17275+ if (!status)
17276+ return status;
17277+
17278+ sdvo_priv->dispParams.DotCrawl.Max = dwMaxVal;
17279+ sdvo_priv->dispParams.DotCrawl.Min = 0;
17280+ sdvo_priv->dispParams.DotCrawl.Default = dwMaxVal;
17281+ sdvo_priv->dispParams.DotCrawl.Step = 1;
17282+ } else {
17283+ return status;
17284+ }
17285+ }
17286+
17287+ return TRUE;
17288+}
17289+
17290+static bool i830_tv_set_overscan_parameters(struct drm_output * output)
17291+{
17292+ u8 status;
17293+
17294+ u32 dwDefaultVal = 0;
17295+ u32 dwMaxVal = 0;
17296+ u32 dwPercentageValue = 0;
17297+ u32 dwDefOverscanXValue = 0;
17298+ u32 dwDefOverscanYValue = 0;
17299+ u32 dwOverscanValue = 0;
17300+ u32 dwSupportedEnhancements;
17301+ struct intel_output *intel_output = output->driver_private;
17302+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17303+
17304+
17305+ /* Get supported picture enhancements */
17306+ status =
17307+ i830_sdvo_get_supported_enhancements(output,
17308+ &dwSupportedEnhancements);
17309+ if (!status)
17310+ return status;
17311+
17312+ /* Horizontal Overscan */
17313+ if (dwSupportedEnhancements & SDVO_HORIZONTAL_OVERSCAN) {
17314+ status =
17315+ i830_sdvo_get_max_horizontal_overscan(output, &dwMaxVal,
17316+ &dwDefaultVal);
17317+ if (!status)
17318+ return status;
17319+
17320+ /*Calculate the default value in terms of percentage */
17321+ dwDefOverscanXValue = ((dwDefaultVal * 100) / dwMaxVal);
17322+
17323+ /*Calculate the default value in 0-1000 range */
17324+ dwDefOverscanXValue = (dwDefOverscanXValue * 10);
17325+
17326+ /*Overscan is in the range of 0 to 10000 as per MS spec */
17327+ if (sdvo_priv->OverScanX.Value > MAX_VAL)
17328+ sdvo_priv->OverScanX.Value = dwDefOverscanXValue;
17329+
17330+ /*Calculate the percentage(0-100%) of the overscan value */
17331+ dwPercentageValue = (sdvo_priv->OverScanX.Value * 100) / 1000;
17332+
17333+ /* Now map the % value to absolute value to be programed to the encoder */
17334+ dwOverscanValue = (dwMaxVal * dwPercentageValue) / 100;
17335+
17336+ status = i830_sdvo_set_horizontal_overscan(output, dwOverscanValue);
17337+ if (!status)
17338+ return status;
17339+
17340+ sdvo_priv->OverScanX.Max = 1000;
17341+ sdvo_priv->OverScanX.Min = 0;
17342+ sdvo_priv->OverScanX.Default = dwDefOverscanXValue;
17343+ sdvo_priv->OverScanX.Step = 20;
17344+ }
17345+
17346+ /* Horizontal Overscan */
17347+ /* vertical Overscan */
17348+ if (dwSupportedEnhancements & SDVO_VERTICAL_OVERSCAN) {
17349+ status =
17350+ i830_sdvo_get_max_vertical_overscan(output, &dwMaxVal,
17351+ &dwDefaultVal);
17352+ if (!status)
17353+ return status;
17354+
17355+ /*Calculate the default value in terms of percentage */
17356+ dwDefOverscanYValue = ((dwDefaultVal * 100) / dwMaxVal);
17357+
17358+ /*Calculate the default value in 0-1000 range */
17359+ dwDefOverscanYValue = (dwDefOverscanYValue * 10);
17360+
17361+ /*Overscan is in the range of 0 to 10000 as per MS spec */
17362+ if (sdvo_priv->OverScanY.Value > MAX_VAL)
17363+ sdvo_priv->OverScanY.Value = dwDefOverscanYValue;
17364+
17365+ /*Calculate the percentage(0-100%) of the overscan value */
17366+ dwPercentageValue = (sdvo_priv->OverScanY.Value * 100) / 1000;
17367+
17368+ /* Now map the % value to absolute value to be programed to the encoder */
17369+ dwOverscanValue = (dwMaxVal * dwPercentageValue) / 100;
17370+
17371+ status = i830_sdvo_set_vertical_overscan(output, dwOverscanValue);
17372+ if (!status)
17373+ return status;
17374+
17375+ sdvo_priv->OverScanY.Max = 1000;
17376+ sdvo_priv->OverScanY.Min = 0;
17377+ sdvo_priv->OverScanY.Default = dwDefOverscanYValue;
17378+ sdvo_priv->OverScanY.Step = 20;
17379+
17380+ }
17381+ /* vertical Overscan */
17382+ return TRUE;
17383+}
17384+
17385+static bool i830_translate_dtd2timing(struct drm_display_mode * pTimingInfo,
17386+ struct intel_sdvo_dtd *pDTD)
17387+{
17388+
17389+ u32 dwHBLHigh = 0;
17390+ u32 dwVBLHigh = 0;
17391+ u32 dwHSHigh1 = 0;
17392+ u32 dwHSHigh2 = 0;
17393+ u32 dwVSHigh1 = 0;
17394+ u32 dwVSHigh2 = 0;
17395+ u32 dwVPWLow = 0;
17396+ bool status = FALSE;
17397+
17398+ if ((pDTD == NULL) || (pTimingInfo == NULL)) {
17399+ return status;
17400+ }
17401+
17402+ pTimingInfo->clock= pDTD->part1.clock * 10000 / 1000; /*fix me if i am wrong */
17403+
17404+ pTimingInfo->hdisplay = pTimingInfo->crtc_hdisplay =
17405+ (u32) pDTD->part1.
17406+ h_active | ((u32) (pDTD->part1.h_high & 0xF0) << 4);
17407+
17408+ pTimingInfo->vdisplay = pTimingInfo->crtc_vdisplay =
17409+ (u32) pDTD->part1.
17410+ v_active | ((u32) (pDTD->part1.v_high & 0xF0) << 4);
17411+
17412+ pTimingInfo->crtc_hblank_start = pTimingInfo->crtc_hdisplay;
17413+
17414+ /* Horizontal Total = Horizontal Active + Horizontal Blanking */
17415+ dwHBLHigh = (u32) (pDTD->part1.h_high & 0x0F);
17416+ pTimingInfo->htotal = pTimingInfo->crtc_htotal =
17417+ pTimingInfo->crtc_hdisplay + (u32) pDTD->part1.h_blank +
17418+ (dwHBLHigh << 8);
17419+
17420+ pTimingInfo->crtc_hblank_end = pTimingInfo->crtc_htotal - 1;
17421+
17422+ /* Vertical Total = Vertical Active + Vertical Blanking */
17423+ dwVBLHigh = (u32) (pDTD->part1.v_high & 0x0F);
17424+ pTimingInfo->vtotal = pTimingInfo->crtc_vtotal =
17425+ pTimingInfo->crtc_vdisplay + (u32) pDTD->part1.v_blank +
17426+ (dwVBLHigh << 8);
17427+ pTimingInfo->crtc_vblank_start = pTimingInfo->crtc_vdisplay;
17428+ pTimingInfo->crtc_vblank_end = pTimingInfo->crtc_vtotal - 1;
17429+
17430+ /* Horz Sync Start = Horz Blank Start + Horz Sync Offset */
17431+ dwHSHigh1 = (u32) (pDTD->part2.sync_off_width_high & 0xC0);
17432+ pTimingInfo->hsync_start = pTimingInfo->crtc_hsync_start =
17433+ pTimingInfo->crtc_hblank_start + (u32) pDTD->part2.h_sync_off +
17434+ (dwHSHigh1 << 2);
17435+
17436+ /* Horz Sync End = Horz Sync Start + Horz Sync Pulse Width */
17437+ dwHSHigh2 = (u32) (pDTD->part2.sync_off_width_high & 0x30);
17438+ pTimingInfo->hsync_end = pTimingInfo->crtc_hsync_end =
17439+ pTimingInfo->crtc_hsync_start + (u32) pDTD->part2.h_sync_width +
17440+ (dwHSHigh2 << 4) - 1;
17441+
17442+ /* Vert Sync Start = Vert Blank Start + Vert Sync Offset */
17443+ dwVSHigh1 = (u32) (pDTD->part2.sync_off_width_high & 0x0C);
17444+ dwVPWLow = (u32) (pDTD->part2.v_sync_off_width & 0xF0);
17445+
17446+ pTimingInfo->vsync_start = pTimingInfo->crtc_vsync_start =
17447+ pTimingInfo->crtc_vblank_start + (dwVPWLow >> 4) + (dwVSHigh1 << 2);
17448+
17449+ /* Vert Sync End = Vert Sync Start + Vert Sync Pulse Width */
17450+ dwVSHigh2 = (u32) (pDTD->part2.sync_off_width_high & 0x03);
17451+ pTimingInfo->vsync_end = pTimingInfo->crtc_vsync_end =
17452+ pTimingInfo->crtc_vsync_start +
17453+ (u32) (pDTD->part2.v_sync_off_width & 0x0F) + (dwVSHigh2 << 4) - 1;
17454+
17455+ /* Fillup flags */
17456+ status = TRUE;
17457+
17458+ return status;
17459+}
17460+
17461+static void i830_translate_timing2dtd(struct drm_display_mode * mode, struct intel_sdvo_dtd *dtd)
17462+{
17463+ u16 width, height;
17464+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
17465+ u16 h_sync_offset, v_sync_offset;
17466+
17467+ width = mode->crtc_hdisplay;
17468+ height = mode->crtc_vdisplay;
17469+
17470+ /* do some mode translations */
17471+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
17472+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
17473+
17474+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
17475+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
17476+
17477+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
17478+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
17479+
17480+ dtd->part1.clock = mode->clock * 1000 / 10000; /*xiaolin, fixme, do i need to by 1k hz */
17481+ dtd->part1.h_active = width & 0xff;
17482+ dtd->part1.h_blank = h_blank_len & 0xff;
17483+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
17484+ ((h_blank_len >> 8) & 0xf);
17485+ dtd->part1.v_active = height & 0xff;
17486+ dtd->part1.v_blank = v_blank_len & 0xff;
17487+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
17488+ ((v_blank_len >> 8) & 0xf);
17489+
17490+ dtd->part2.h_sync_off = h_sync_offset;
17491+ dtd->part2.h_sync_width = h_sync_len & 0xff;
17492+ dtd->part2.v_sync_off_width = ((v_sync_offset & 0xf) << 4 |
17493+ (v_sync_len & 0xf)) + 1;
17494+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
17495+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
17496+ ((v_sync_len & 0x30) >> 4);
17497+
17498+ dtd->part2.dtd_flags = 0x18;
17499+ if (mode->flags & V_PHSYNC)
17500+ dtd->part2.dtd_flags |= 0x2;
17501+ if (mode->flags & V_PVSYNC)
17502+ dtd->part2.dtd_flags |= 0x4;
17503+
17504+ dtd->part2.sdvo_flags = 0;
17505+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
17506+ dtd->part2.reserved = 0;
17507+
17508+}
17509+
17510+static bool i830_tv_set_target_io(struct drm_output* output)
17511+{
17512+ bool status;
17513+ struct intel_output *intel_output = output->driver_private;
17514+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17515+
17516+ status = intel_sdvo_set_target_input(output, TRUE, FALSE);
17517+ if (status)
17518+ status = intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
17519+
17520+ return status;
17521+}
17522+
17523+static bool i830_tv_get_max_min_dotclock(struct drm_output* output)
17524+{
17525+ u32 dwMaxClkRateMul = 1;
17526+ u32 dwMinClkRateMul = 1;
17527+ u8 status;
17528+
17529+ struct intel_output *intel_output = output->driver_private;
17530+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17531+
17532+
17533+ /* Set Target Input/Outputs */
17534+ status = i830_tv_set_target_io(output);
17535+ if (!status) {
17536+ DRM_DEBUG("SetTargetIO function FAILED!!! \n");
17537+ return status;
17538+ }
17539+
17540+ /* Get the clock rate multiplies supported by the encoder */
17541+ dwMinClkRateMul = 1;
17542+#if 0
17543+ /* why we need do this, some time, tv can't bring up for the wrong setting in the last time */
17544+ dwClkRateMulMask = i830_sdvo_get_clock_rate_mult(output);
17545+
17546+ /* Find the minimum clock rate multiplier supported */
17547+
17548+ if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_1X)
17549+ dwMinClkRateMul = 1;
17550+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_2X)
17551+ dwMinClkRateMul = 2;
17552+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_3X)
17553+ dwMinClkRateMul = 3;
17554+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_4X)
17555+ dwMinClkRateMul = 4;
17556+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_5X)
17557+ dwMinClkRateMul = 5;
17558+ else
17559+ return FALSE;
17560+#endif
17561+ /* Get the min and max input Dot Clock supported by the encoder */
17562+ status = i830_sdvo_get_input_output_pixelclock_range(output, FALSE); /* input */
17563+
17564+ if (!status) {
17565+ DRM_DEBUG("SDVOGetInputPixelClockRange() FAILED!!! \n");
17566+ return status;
17567+ }
17568+
17569+ /* Get the min and max output Dot Clock supported by the encoder */
17570+ status = i830_sdvo_get_input_output_pixelclock_range(output, TRUE); /* output */
17571+
17572+ if (!status) {
17573+ DRM_DEBUG("SDVOGetOutputPixelClockRange() FAILED!!! \n");
17574+ return status;
17575+ }
17576+
17577+ /* Maximum Dot Clock supported should be the minimum of the maximum */
17578+ /* dot clock supported by the encoder & the SDVO bus clock rate */
17579+ sdvo_priv->dwMaxDotClk =
17580+ ((sdvo_priv->dwMaxInDotClk * dwMaxClkRateMul) <
17581+ (sdvo_priv->dwMaxOutDotClk)) ? (sdvo_priv->dwMaxInDotClk *
17582+ dwMaxClkRateMul) : (sdvo_priv->dwMaxOutDotClk);
17583+
17584+ /* Minimum Dot Clock supported should be the maximum of the minimum */
17585+ /* dot clocks supported by the input & output */
17586+ sdvo_priv->dwMinDotClk =
17587+ ((sdvo_priv->dwMinInDotClk * dwMinClkRateMul) >
17588+ (sdvo_priv->dwMinOutDotClk)) ? (sdvo_priv->dwMinInDotClk *
17589+ dwMinClkRateMul) : (sdvo_priv->dwMinOutDotClk);
17590+
17591+ DRM_DEBUG("leave, i830_tv_get_max_min_dotclock() !!! \n");
17592+
17593+ return TRUE;
17594+
17595+}
17596+
17597+bool i830_tv_mode_check_support(struct drm_output* output, struct drm_display_mode* pMode)
17598+{
17599+ u32 dwDotClk = 0;
17600+ bool status;
17601+ struct intel_output *intel_output = output->driver_private;
17602+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17603+
17604+
17605+ dwDotClk = pMode->clock * 1000;
17606+
17607+ /*TODO: Need to fix this from SoftBios side........ */
17608+ if (sdvo_priv->TVMode == TVMODE_HDTV) {
17609+ if (((pMode->hdisplay == 1920) && (pMode->vdisplay== 1080)) ||
17610+ ((pMode->hdisplay== 1864) && (pMode->vdisplay== 1050)) ||
17611+ ((pMode->hdisplay== 1704) && (pMode->vdisplay== 960)) ||
17612+ ((pMode->hdisplay== 640) && (pMode->vdisplay== 448)))
17613+ return true;
17614+ }
17615+
17616+ if (sdvo_priv->bGetClk) {
17617+ status = i830_tv_get_max_min_dotclock(output);
17618+ if (!status) {
17619+ DRM_DEBUG("get max min dotclok failed\n");
17620+ return status;
17621+ }
17622+ sdvo_priv->bGetClk = false;
17623+ }
17624+
17625+ /* Check the Dot clock first. If the requested Dot Clock should fall */
17626+ /* in the supported range for the mode to be supported */
17627+ if ((dwDotClk <= sdvo_priv->dwMinDotClk) || (dwDotClk >= sdvo_priv->dwMaxDotClk)) {
17628+ DRM_DEBUG("dwDotClk value is out of range\n");
17629+ /*TODO: now consider VBT add and Remove mode. */
17630+ /* This mode can't be supported */
17631+ return false;
17632+ }
17633+ DRM_DEBUG("i830_tv_mode_check_support leave\n");
17634+ return true;
17635+
17636+}
17637+
17638+void print_Pll(char *prefix, ex_intel_clock_t * clock)
17639+{
17640+ DRM_DEBUG("%s: dotclock %d vco %d ((m %d, m1 %d, m2 %d), n %d, (p %d, p1 %d, p2 %d))\n",
17641+ prefix, clock->dot, clock->vco, clock->m, clock->m1, clock->m2,
17642+ clock->n, clock->p, clock->p1, clock->p2);
17643+}
17644+
17645+extern int intel_panel_fitter_pipe (struct drm_device *dev);
17646+extern int intel_get_core_clock_speed(struct drm_device *dev);
17647+
17648+void i830_sdvo_tv_settiming(struct drm_crtc *crtc, struct drm_display_mode * mode,
17649+ struct drm_display_mode * adjusted_mode)
17650+{
17651+
17652+ struct drm_device *dev = crtc->dev;
17653+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17654+
17655+ int pipe = 0;
17656+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
17657+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
17658+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
17659+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
17660+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
17661+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
17662+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
17663+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
17664+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
17665+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
17666+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
17667+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
17668+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
17669+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
17670+ ex_intel_clock_t clock;
17671+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
17672+ bool ok, is_sdvo = FALSE;
17673+ int centerX = 0, centerY = 0;
17674+ u32 ulPortMultiplier, ulTemp, ulDotClock;
17675+ int sdvo_pixel_multiply;
17676+ u32 dotclock;
17677+
17678+ /* Set up some convenient bools for what outputs are connected to
17679+ * our pipe, used in DPLL setup.
17680+ */
17681+ if (!crtc->fb) {
17682+ DRM_ERROR("Can't set mode without attached fb\n");
17683+ return;
17684+ }
17685+ is_sdvo = TRUE;
17686+ ok = TRUE;
17687+ ulDotClock = mode->clock * 1000 / 1000; /*xiaolin, fixme, do i need to by 1k hz */
17688+ for (ulPortMultiplier = 1; ulPortMultiplier <= 5; ulPortMultiplier++) {
17689+ ulTemp = ulDotClock * ulPortMultiplier;
17690+ if ((ulTemp >= 100000) && (ulTemp <= 200000)) {
17691+ if ((ulPortMultiplier == 3) || (ulPortMultiplier == 5))
17692+ continue;
17693+ else
17694+ break;
17695+ }
17696+ }
17697+ /* ulPortMultiplier is 2, dotclok is 1babc, fall into the first one case */
17698+ /* add two to each m and n value -- optimizes (slightly) the search algo. */
17699+ dotclock = ulPortMultiplier * (mode->clock * 1000) / 1000;
17700+ DRM_DEBUG("mode->clock is %x, dotclock is %x,!\n", mode->clock,dotclock);
17701+
17702+ if ((dotclock >= 100000) && (dotclock < 140500)) {
17703+ DRM_DEBUG("dotclock is between 10000 and 140500!\n");
17704+ clock.p1 = 0x2;
17705+ clock.p2 = 0x00;
17706+ clock.n = 0x3;
17707+ clock.m1 = 0x10;
17708+ clock.m2 = 0x8;
17709+ } else if ((dotclock >= 140500) && (dotclock <= 200000)) {
17710+
17711+ DRM_DEBUG("dotclock is between 140500 and 200000!\n");
17712+ clock.p1 = 0x1;
17713+ /*CG was using 0x10 from spreadsheet it should be 0 */
17714+ /*pClock_Data->Clk_P2 = 0x10; */
17715+ clock.p2 = 0x00;
17716+ clock.n = 0x6;
17717+ clock.m1 = 0xC;
17718+ clock.m2 = 0x8;
17719+ } else
17720+ ok = FALSE;
17721+
17722+ if (!ok)
17723+ DRM_DEBUG("Couldn't find PLL settings for mode!\n");
17724+
17725+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
17726+
17727+ dpll = DPLL_VGA_MODE_DIS | DPLL_CLOCK_PHASE_9;
17728+
17729+ dpll |= DPLLB_MODE_DAC_SERIAL;
17730+
17731+ sdvo_pixel_multiply = ulPortMultiplier;
17732+ dpll |= DPLL_DVO_HIGH_SPEED;
17733+ dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
17734+
17735+ /* compute bitmask from p1 value */
17736+ dpll |= (clock.p1 << 16);
17737+ dpll |= (clock.p2 << 24);
17738+
17739+ dpll |= PLL_REF_INPUT_TVCLKINBC;
17740+
17741+ /* Set up the display plane register */
17742+ dspcntr = DISPPLANE_GAMMA_ENABLE;
17743+ switch (crtc->fb->bits_per_pixel) {
17744+ case 8:
17745+ dspcntr |= DISPPLANE_8BPP;
17746+ break;
17747+ case 16:
17748+ if (crtc->fb->depth == 15)
17749+ dspcntr |= DISPPLANE_15_16BPP;
17750+ else
17751+ dspcntr |= DISPPLANE_16BPP;
17752+ break;
17753+ case 32:
17754+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
17755+ break;
17756+ default:
17757+ DRM_DEBUG("unknown display bpp\n");
17758+ }
17759+
17760+ if (pipe == 0)
17761+ dspcntr |= DISPPLANE_SEL_PIPE_A;
17762+ else
17763+ dspcntr |= DISPPLANE_SEL_PIPE_B;
17764+
17765+ pipeconf = I915_READ(pipeconf_reg);
17766+ if (pipe == 0) {
17767+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
17768+ * core speed.
17769+ *
17770+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
17771+ * pipe == 0 check?
17772+ */
17773+ if (mode->clock * 1000 > (intel_get_core_clock_speed(dev)) * 9 / 10) /*xiaolin, fixme, do i need to by 1k hz */
17774+ { pipeconf |= PIPEACONF_DOUBLE_WIDE; DRM_DEBUG("PIPEACONF_DOUBLE_WIDE\n");}
17775+ else
17776+ { pipeconf &= ~PIPEACONF_DOUBLE_WIDE; DRM_DEBUG("non PIPEACONF_DOUBLE_WIDE\n");}
17777+ }
17778+
17779+ dspcntr |= DISPLAY_PLANE_ENABLE;
17780+ pipeconf |= PIPEACONF_ENABLE;
17781+ dpll |= DPLL_VCO_ENABLE;
17782+
17783+ /* Disable the panel fitter if it was on our pipe */
17784+ if (intel_panel_fitter_pipe(dev) == pipe)
17785+ I915_WRITE(PFIT_CONTROL, 0);
17786+
17787+ print_Pll("chosen", &clock);
17788+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
17789+ drm_mode_debug_printmodeline(dev, mode);
17790+ DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d\n",
17791+ mode->mode_id, mode->name, mode->crtc_htotal, mode->crtc_hdisplay,
17792+ mode->crtc_hblank_end, mode->crtc_hblank_start,
17793+ mode->crtc_vtotal, mode->crtc_vdisplay,
17794+ mode->crtc_vblank_end, mode->crtc_vblank_start);
17795+ DRM_DEBUG("clock regs: 0x%08x, 0x%08x,dspntr is 0x%8x, pipeconf is 0x%8x\n", (int)dpll,
17796+ (int)fp,(int)dspcntr,(int)pipeconf);
17797+
17798+ if (dpll & DPLL_VCO_ENABLE) {
17799+ I915_WRITE(fp_reg, fp);
17800+ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
17801+ (void)I915_READ(dpll_reg);
17802+ udelay(150);
17803+ }
17804+ I915_WRITE(fp_reg, fp);
17805+ I915_WRITE(dpll_reg, dpll);
17806+ (void)I915_READ(dpll_reg);
17807+ /* Wait for the clocks to stabilize. */
17808+ udelay(150);
17809+
17810+ /* write it again -- the BIOS does, after all */
17811+ I915_WRITE(dpll_reg, dpll);
17812+ I915_READ(dpll_reg);
17813+ /* Wait for the clocks to stabilize. */
17814+ udelay(150);
17815+
17816+ I915_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
17817+ ((mode->crtc_htotal - 1) << 16));
17818+ I915_WRITE(hblank_reg, (mode->crtc_hblank_start - 1) |
17819+ ((mode->crtc_hblank_end - 1) << 16));
17820+ I915_WRITE(hsync_reg, (mode->crtc_hsync_start - 1) |
17821+ ((mode->crtc_hsync_end - 1) << 16));
17822+ I915_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
17823+ ((mode->crtc_vtotal - 1) << 16));
17824+ I915_WRITE(vblank_reg, (mode->crtc_vblank_start - 1) |
17825+ ((mode->crtc_vblank_end - 1) << 16));
17826+ I915_WRITE(vsync_reg, (mode->crtc_vsync_start - 1) |
17827+ ((mode->crtc_vsync_end - 1) << 16));
17828+ I915_WRITE(dspstride_reg, crtc->fb->pitch);
17829+
17830+ if (0) {
17831+
17832+ centerX = (adjusted_mode->crtc_hdisplay - mode->hdisplay) / 2;
17833+ centerY = (adjusted_mode->crtc_vdisplay - mode->vdisplay) / 2;
17834+ I915_WRITE(dspsize_reg,
17835+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
17836+
17837+ I915_WRITE(dsppos_reg, centerY << 16 | centerX);
17838+ I915_WRITE(pipesrc_reg,
17839+ ((adjusted_mode->crtc_hdisplay -
17840+ 1) << 16) | (adjusted_mode->crtc_vdisplay - 1));
17841+ } else {
17842+ /* pipesrc and dspsize control the size that is scaled from, which should
17843+ * always be the user's requested size.
17844+ */
17845+ I915_WRITE(dspsize_reg,
17846+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
17847+ I915_WRITE(dsppos_reg, 0);
17848+ I915_WRITE(pipesrc_reg,
17849+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
17850+
17851+ }
17852+ I915_WRITE(pipeconf_reg, pipeconf);
17853+ I915_READ(pipeconf_reg);
17854+
17855+ intel_wait_for_vblank(dev);
17856+
17857+ I915_WRITE(dspcntr_reg, dspcntr);
17858+ /* Flush the plane changes */
17859+ //intel_pipe_set_base(crtc, 0, 0);
17860+ /* Disable the VGA plane that we never use */
17861+ //I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
17862+ //intel_wait_for_vblank(dev);
17863+
17864+}
17865+
17866+static void intel_sdvo_mode_set(struct drm_output *output,
17867+ struct drm_display_mode *mode,
17868+ struct drm_display_mode *adjusted_mode)
17869+{
17870+ struct drm_device *dev = output->dev;
17871+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
17872+ struct drm_crtc *crtc = output->crtc;
17873+ struct intel_crtc *intel_crtc = crtc->driver_private;
17874+ struct intel_output *intel_output = output->driver_private;
17875+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
17876+
17877+ u32 sdvox;
17878+ struct intel_sdvo_dtd output_dtd;
17879+ int sdvo_pixel_multiply;
17880+ bool success;
17881+ struct drm_display_mode * save_mode;
17882+ DRM_DEBUG("xxintel_sdvo_mode_set\n");
17883+
17884+ if (!mode)
17885+ return;
17886+
17887+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
17888+ if (!i830_tv_mode_check_support(output, mode)) {
17889+ DRM_DEBUG("mode setting failed, use the forced mode\n");
17890+ mode = &tv_modes[0].mode_entry;
17891+ drm_mode_set_crtcinfo(mode, 0);
17892+ }
17893+ }
17894+ save_mode = mode;
17895+#if 0
17896+ width = mode->crtc_hdisplay;
17897+ height = mode->crtc_vdisplay;
17898+
17899+ /* do some mode translations */
17900+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
17901+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
17902+
17903+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
17904+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
17905+
17906+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
17907+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
17908+
17909+ output_dtd.part1.clock = mode->clock / 10;
17910+ output_dtd.part1.h_active = width & 0xff;
17911+ output_dtd.part1.h_blank = h_blank_len & 0xff;
17912+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
17913+ ((h_blank_len >> 8) & 0xf);
17914+ output_dtd.part1.v_active = height & 0xff;
17915+ output_dtd.part1.v_blank = v_blank_len & 0xff;
17916+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
17917+ ((v_blank_len >> 8) & 0xf);
17918+
17919+ output_dtd.part2.h_sync_off = h_sync_offset;
17920+ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
17921+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
17922+ (v_sync_len & 0xf);
17923+ output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
17924+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
17925+ ((v_sync_len & 0x30) >> 4);
17926+
17927+ output_dtd.part2.dtd_flags = 0x18;
17928+ if (mode->flags & V_PHSYNC)
17929+ output_dtd.part2.dtd_flags |= 0x2;
17930+ if (mode->flags & V_PVSYNC)
17931+ output_dtd.part2.dtd_flags |= 0x4;
17932+
17933+ output_dtd.part2.sdvo_flags = 0;
17934+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
17935+ output_dtd.part2.reserved = 0;
17936+#else
17937+ /* disable and enable the display output */
17938+ intel_sdvo_set_target_output(output, 0);
17939+
17940+ //intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);
17941+ memset(&output_dtd, 0, sizeof(struct intel_sdvo_dtd));
17942+ /* check if this mode can be supported or not */
17943+
17944+ i830_translate_timing2dtd(mode, &output_dtd);
17945+#endif
17946+ intel_sdvo_set_target_output(output, 0);
17947+ /* set the target input & output first */
17948+ /* Set the input timing to the screen. Assume always input 0. */
17949+ intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
17950+ intel_sdvo_set_output_timing(output, &output_dtd);
17951+ intel_sdvo_set_target_input(output, true, false);
17952+
17953+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
17954+ i830_tv_set_overscan_parameters(output);
17955+ /* Set TV standard */
17956+ #if 0
17957+ if (sdvo_priv->TVMode == TVMODE_HDTV)
17958+ i830_sdvo_map_hdtvstd_bitmask(output);
17959+ else
17960+ i830_sdvo_map_sdtvstd_bitmask(output);
17961+ #endif
17962+ /* Set TV format */
17963+ i830_sdvo_set_tvoutputs_formats(output);
17964+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
17965+ * provide the device with a timing it can support, if it supports that
17966+ * feature. However, presumably we would need to adjust the CRTC to output
17967+ * the preferred timing, and we don't support that currently.
17968+ */
17969+ success = i830_sdvo_create_preferred_input_timing(output, mode);
17970+ if (success) {
17971+ i830_sdvo_get_preferred_input_timing(output, &output_dtd);
17972+ }
17973+ /* Set the overscan values now as input timing is dependent on overscan values */
17974+
17975+ }
17976+
17977+
17978+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
17979+ * provide the device with a timing it can support, if it supports that
17980+ * feature. However, presumably we would need to adjust the CRTC to
17981+ * output the preferred timing, and we don't support that currently.
17982+ */
17983+#if 0
17984+ success = intel_sdvo_create_preferred_input_timing(output, clock,
17985+ width, height);
17986+ if (success) {
17987+ struct intel_sdvo_dtd *input_dtd;
17988+
17989+ intel_sdvo_get_preferred_input_timing(output, &input_dtd);
17990+ intel_sdvo_set_input_timing(output, &input_dtd);
17991+ }
17992+#else
17993+ /* Set input timing (in DTD) */
17994+ intel_sdvo_set_input_timing(output, &output_dtd);
17995+#endif
17996+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
17997+
17998+ DRM_DEBUG("xxintel_sdvo_mode_set tv path\n");
17999+ i830_tv_program_display_params(output);
18000+ /* translate dtd 2 timing */
18001+ i830_translate_dtd2timing(mode, &output_dtd);
18002+ /* Program clock rate multiplier, 2x,clock is = 0x360b730 */
18003+ if ((mode->clock * 1000 >= 24000000)
18004+ && (mode->clock * 1000 < 50000000)) {
18005+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_4X);
18006+ } else if ((mode->clock * 1000 >= 50000000)
18007+ && (mode->clock * 1000 < 100000000)) {
18008+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_2X);
18009+ } else if ((mode->clock * 1000 >= 100000000)
18010+ && (mode->clock * 1000 < 200000000)) {
18011+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_1X);
18012+ } else
18013+ DRM_DEBUG("i830_sdvo_set_clock_rate is failed\n");
18014+
18015+ i830_sdvo_tv_settiming(output->crtc, mode, adjusted_mode);
18016+ //intel_crtc_mode_set(output->crtc, mode,adjusted_mode,0,0);
18017+ mode = save_mode;
18018+ } else {
18019+ DRM_DEBUG("xxintel_sdvo_mode_set - non tv path\n");
18020+ switch (intel_sdvo_get_pixel_multiplier(mode)) {
18021+ case 1:
18022+ intel_sdvo_set_clock_rate_mult(output,
18023+ SDVO_CLOCK_RATE_MULT_1X);
18024+ break;
18025+ case 2:
18026+ intel_sdvo_set_clock_rate_mult(output,
18027+ SDVO_CLOCK_RATE_MULT_2X);
18028+ break;
18029+ case 4:
18030+ intel_sdvo_set_clock_rate_mult(output,
18031+ SDVO_CLOCK_RATE_MULT_4X);
18032+ break;
18033+ }
18034+ }
18035+ /* Set the SDVO control regs. */
18036+ if (0/*IS_I965GM(dev)*/) {
18037+ sdvox = SDVO_BORDER_ENABLE;
18038+ } else {
18039+ sdvox = I915_READ(sdvo_priv->output_device);
18040+ switch (sdvo_priv->output_device) {
18041+ case SDVOB:
18042+ sdvox &= SDVOB_PRESERVE_MASK;
18043+ break;
18044+ case SDVOC:
18045+ sdvox &= SDVOC_PRESERVE_MASK;
18046+ break;
18047+ }
18048+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
18049+ }
18050+ if (intel_crtc->pipe == 1)
18051+ sdvox |= SDVO_PIPE_B_SELECT;
18052+
18053+ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
18054+ if (IS_I965G(dev)) {
18055+ /* done in crtc_mode_set as the dpll_md reg must be written
18056+ early */
18057+ } else if (IS_POULSBO(dev) || IS_I945G(dev) || IS_I945GM(dev)) {
18058+ /* done in crtc_mode_set as it lives inside the
18059+ dpll register */
18060+ } else {
18061+ sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
18062+ }
18063+
18064+ intel_sdvo_write_sdvox(output, sdvox);
18065+ i830_sdvo_set_iomap(output);
18066+}
18067+
18068+static void intel_sdvo_dpms(struct drm_output *output, int mode)
18069+{
18070+ struct drm_device *dev = output->dev;
18071+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
18072+ struct intel_output *intel_output = output->driver_private;
18073+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18074+ u32 temp;
18075+
18076+ DRM_DEBUG("xxintel_sdvo_dpms, dpms mode is %d, active output is %d\n",mode,sdvo_priv->active_outputs);
18077+
18078+#ifdef SII_1392_WA
18079+ if((SII_1392==1) && (drm_psb_no_fb ==1)) {
18080+ DRM_DEBUG("don't touch 1392 card when no_fb=1\n");
18081+ return;
18082+ }
18083+#endif
18084+
18085+ if (mode != DPMSModeOn) {
18086+ intel_sdvo_set_active_outputs(output, sdvo_priv->output_device);
18087+ if (0)
18088+ intel_sdvo_set_encoder_power_state(output, mode);
18089+
18090+ if (mode == DPMSModeOff) {
18091+ temp = I915_READ(sdvo_priv->output_device);
18092+ if ((temp & SDVO_ENABLE) != 0) {
18093+ intel_sdvo_write_sdvox(output, temp & ~SDVO_ENABLE);
18094+ }
18095+ }
18096+ } else {
18097+ bool input1, input2;
18098+ int i;
18099+ u8 status;
18100+
18101+ temp = I915_READ(sdvo_priv->output_device);
18102+ if ((temp & SDVO_ENABLE) == 0)
18103+ intel_sdvo_write_sdvox(output, temp | SDVO_ENABLE);
18104+ for (i = 0; i < 2; i++)
18105+ intel_wait_for_vblank(dev);
18106+
18107+ status = intel_sdvo_get_trained_inputs(output, &input1,
18108+ &input2);
18109+
18110+
18111+ /* Warn if the device reported failure to sync.
18112+ * A lot of SDVO devices fail to notify of sync, but it's
18113+ * a given it the status is a success, we succeeded.
18114+ */
18115+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
18116+ DRM_DEBUG("First %s output reported failure to sync\n",
18117+ SDVO_NAME(sdvo_priv));
18118+ }
18119+
18120+ if (0)
18121+ intel_sdvo_set_encoder_power_state(output, mode);
18122+
18123+ DRM_DEBUG("xiaolin active output is %d\n",sdvo_priv->active_outputs);
18124+ intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);
18125+ }
18126+ return;
18127+}
18128+
18129+static void intel_sdvo_save(struct drm_output *output)
18130+{
18131+ struct drm_device *dev = output->dev;
18132+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
18133+ struct intel_output *intel_output = output->driver_private;
18134+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18135+
18136+ DRM_DEBUG("xxintel_sdvo_save\n");
18137+
18138+ sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(output);
18139+ intel_sdvo_get_active_outputs(output, &sdvo_priv->save_active_outputs);
18140+
18141+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
18142+ intel_sdvo_set_target_input(output, true, false);
18143+ intel_sdvo_get_input_timing(output,
18144+ &sdvo_priv->save_input_dtd_1);
18145+ }
18146+
18147+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
18148+ intel_sdvo_set_target_input(output, false, true);
18149+ intel_sdvo_get_input_timing(output,
18150+ &sdvo_priv->save_input_dtd_2);
18151+ }
18152+
18153+ intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
18154+ intel_sdvo_get_output_timing(output,
18155+ &sdvo_priv->save_output_dtd[sdvo_priv->active_outputs]);
18156+ sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
18157+}
18158+
18159+static void intel_sdvo_restore(struct drm_output *output)
18160+{
18161+ struct drm_device *dev = output->dev;
18162+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
18163+ struct intel_output *intel_output = output->driver_private;
18164+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18165+ int i;
18166+ bool input1, input2;
18167+ u8 status;
18168+ DRM_DEBUG("xxintel_sdvo_restore\n");
18169+
18170+ intel_sdvo_set_active_outputs(output, 0);
18171+
18172+ intel_sdvo_set_target_output(output, sdvo_priv->save_active_outputs);
18173+ intel_sdvo_set_output_timing(output,
18174+ &sdvo_priv->save_output_dtd[sdvo_priv->save_active_outputs]);
18175+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
18176+ intel_sdvo_set_target_input(output, true, false);
18177+ intel_sdvo_set_input_timing(output, &sdvo_priv->save_input_dtd_1);
18178+ }
18179+
18180+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
18181+ intel_sdvo_set_target_input(output, false, true);
18182+ intel_sdvo_set_input_timing(output, &sdvo_priv->save_input_dtd_2);
18183+ }
18184+
18185+ intel_sdvo_set_clock_rate_mult(output, sdvo_priv->save_sdvo_mult);
18186+
18187+ I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
18188+
18189+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
18190+ {
18191+ for (i = 0; i < 2; i++)
18192+ intel_wait_for_vblank(dev);
18193+ status = intel_sdvo_get_trained_inputs(output, &input1, &input2);
18194+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
18195+ DRM_DEBUG("First %s output reported failure to sync\n",
18196+ SDVO_NAME(sdvo_priv));
18197+ }
18198+
18199+ i830_sdvo_set_iomap(output);
18200+ intel_sdvo_set_active_outputs(output, sdvo_priv->save_active_outputs);
18201+}
18202+
18203+static bool i830_tv_mode_find(struct drm_output * output,struct drm_display_mode * pMode)
18204+{
18205+ struct intel_output *intel_output = output->driver_private;
18206+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18207+
18208+ bool find = FALSE;
18209+ int i;
18210+
18211+ DRM_DEBUG("i830_tv_mode_find,0x%x\n", sdvo_priv->TVStandard);
18212+
18213+ for (i = 0; i < NUM_TV_MODES; i++)
18214+ {
18215+ const tv_mode_t *tv_mode = &tv_modes[i];
18216+ if (strcmp (tv_mode->mode_entry.name, pMode->name) == 0
18217+ && (pMode->type & M_T_TV)) {
18218+ find = TRUE;
18219+ break;
18220+ }
18221+ }
18222+ return find;
18223+}
18224+
18225+
18226+static int intel_sdvo_mode_valid(struct drm_output *output,
18227+ struct drm_display_mode *mode)
18228+{
18229+ struct intel_output *intel_output = output->driver_private;
18230+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18231+
18232+ bool status = TRUE;
18233+ DRM_DEBUG("xxintel_sdvo_mode_valid\n");
18234+
18235+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
18236+ status = i830_tv_mode_check_support(output, mode);
18237+ if (status) {
18238+ if(i830_tv_mode_find(output,mode)) {
18239+ DRM_DEBUG("%s is ok\n", mode->name);
18240+ return MODE_OK;
18241+ }
18242+ else
18243+ return MODE_CLOCK_RANGE;
18244+ } else {
18245+ DRM_DEBUG("%s is failed\n",
18246+ mode->name);
18247+ return MODE_CLOCK_RANGE;
18248+ }
18249+ }
18250+
18251+ if (mode->flags & V_DBLSCAN)
18252+ return MODE_NO_DBLESCAN;
18253+
18254+ if (sdvo_priv->pixel_clock_min > mode->clock)
18255+ return MODE_CLOCK_LOW;
18256+
18257+ if (sdvo_priv->pixel_clock_max < mode->clock)
18258+ return MODE_CLOCK_HIGH;
18259+
18260+ return MODE_OK;
18261+}
18262+
18263+static bool intel_sdvo_get_capabilities(struct drm_output *output, struct intel_sdvo_caps *caps)
18264+{
18265+ u8 status;
18266+
18267+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
18268+ status = intel_sdvo_read_response(output, caps, sizeof(*caps));
18269+ if (status != SDVO_CMD_STATUS_SUCCESS)
18270+ return false;
18271+
18272+ return true;
18273+}
18274+
18275+void i830_tv_get_default_params(struct drm_output * output)
18276+{
18277+ u32 dwSupportedSDTVBitMask = 0;
18278+ u32 dwSupportedHDTVBitMask = 0;
18279+ u32 dwTVStdBitmask = 0;
18280+
18281+ struct intel_output *intel_output = output->driver_private;
18282+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18283+
18284+
18285+ /* Get supported TV Standard */
18286+ i830_sdvo_get_supported_tvoutput_formats(output, &dwSupportedSDTVBitMask,
18287+ &dwSupportedHDTVBitMask,&dwTVStdBitmask);
18288+
18289+ sdvo_priv->dwSDVOSDTVBitMask = dwSupportedSDTVBitMask;
18290+ sdvo_priv->dwSDVOHDTVBitMask = dwSupportedHDTVBitMask;
18291+ sdvo_priv->TVStdBitmask = dwTVStdBitmask;
18292+
18293+}
18294+
18295+static enum drm_output_status intel_sdvo_detect(struct drm_output *output)
18296+{
18297+ u8 response[2];
18298+ u8 status;
18299+ u8 count = 5;
18300+
18301+ char deviceName[256];
18302+ char *name_suffix;
18303+ char *name_prefix;
18304+ unsigned char bytes[2];
18305+
18306+ struct drm_device *dev = output->dev;
18307+
18308+ struct intel_output *intel_output = output->driver_private;
18309+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18310+
18311+ DRM_DEBUG("xxintel_sdvo_detect\n");
18312+ intel_sdvo_dpms(output, DPMSModeOn);
18313+
18314+ if (!intel_sdvo_get_capabilities(output, &sdvo_priv->caps)) {
18315+ /*No SDVO support, power down the pipe */
18316+ intel_sdvo_dpms(output, DPMSModeOff);
18317+ return output_status_disconnected;
18318+ }
18319+
18320+#ifdef SII_1392_WA
18321+ if ((sdvo_priv->caps.vendor_id == 0x04) && (sdvo_priv->caps.device_id==0xAE)){
18322+ /*Leave the control of 1392 to X server*/
18323+ SII_1392=1;
18324+ printk("%s: detect 1392 card, leave the setting to up level\n", __FUNCTION__);
18325+ if (drm_psb_no_fb == 0)
18326+ intel_sdvo_dpms(output, DPMSModeOff);
18327+ return output_status_disconnected;
18328+ }
18329+#endif
18330+ while (count--) {
18331+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
18332+ status = intel_sdvo_read_response(output, &response, 2);
18333+
18334+ if(count >3 && status == SDVO_CMD_STATUS_PENDING) {
18335+ intel_sdvo_write_cmd(output,SDVO_CMD_RESET,NULL,0);
18336+ intel_sdvo_read_response(output, &response, 2);
18337+ continue;
18338+ }
18339+
18340+ if ((status != SDVO_CMD_STATUS_SUCCESS) || (response[0] == 0 && response[1] == 0)) {
18341+ udelay(500);
18342+ continue;
18343+ } else
18344+ break;
18345+ }
18346+ if (response[0] != 0 || response[1] != 0) {
18347+ /*Check what device types are connected to the hardware CRT/HDTV/S-Video/Composite */
18348+ /*in case of CRT and multiple TV's attached give preference in the order mentioned below */
18349+ /* 1. RGB */
18350+ /* 2. HDTV */
18351+ /* 3. S-Video */
18352+ /* 4. composite */
18353+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
18354+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
18355+ output->subpixel_order = SubPixelHorizontalRGB;
18356+ name_prefix = "TMDS";
18357+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
18358+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
18359+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
18360+ output->subpixel_order = SubPixelHorizontalRGB;
18361+ name_prefix = "TMDS";
18362+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
18363+ } else if (response[0] & SDVO_OUTPUT_RGB0) {
18364+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
18365+ output->subpixel_order = SubPixelHorizontalRGB;
18366+ name_prefix = "RGB0";
18367+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
18368+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_RGB1) {
18369+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
18370+ output->subpixel_order = SubPixelHorizontalRGB;
18371+ name_prefix = "RGB1";
18372+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
18373+ } else if (response[0] & SDVO_OUTPUT_YPRPB0) {
18374+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB0;
18375+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_YPRPB1) {
18376+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB1;
18377+ }
18378+ /* SCART is given Second preference */
18379+ else if (response[0] & SDVO_OUTPUT_SCART0) {
18380+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART0;
18381+
18382+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SCART1) {
18383+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART1;
18384+ }
18385+ /* if S-Video type TV is connected along with Composite type TV give preference to S-Video */
18386+ else if (response[0] & SDVO_OUTPUT_SVID0) {
18387+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID0;
18388+
18389+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SVID1) {
18390+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID1;
18391+ }
18392+ /* Composite is given least preference */
18393+ else if (response[0] & SDVO_OUTPUT_CVBS0) {
18394+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS0;
18395+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_CVBS1) {
18396+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS1;
18397+ } else {
18398+ DRM_DEBUG("no display attached\n");
18399+
18400+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
18401+ DRM_DEBUG("%s: No active TMDS or RGB outputs (0x%02x%02x) 0x%08x\n",
18402+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1],
18403+ sdvo_priv->caps.output_flags);
18404+ name_prefix = "Unknown";
18405+ }
18406+
18407+ /* init para for TV connector */
18408+ if (sdvo_priv->active_outputs & SDVO_OUTPUT_TV0) {
18409+ DRM_INFO("TV is attaced\n");
18410+ output->subpixel_order = SubPixelHorizontalRGB;
18411+ name_prefix = "TV0";
18412+ /* Init TV mode setting para */
18413+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TV;
18414+ sdvo_priv->bGetClk = TRUE;
18415+ if (sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB0 ||
18416+ sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB1) {
18417+ /*sdvo_priv->TVStandard = HDTV_SMPTE_274M_1080i60;*/
18418+ sdvo_priv->TVMode = TVMODE_HDTV;
18419+ } else {
18420+ /*sdvo_priv->TVStandard = TVSTANDARD_NTSC_M;*/
18421+ sdvo_priv->TVMode = TVMODE_SDTV;
18422+ }
18423+
18424+ /*intel_output->pDevice->TVEnabled = TRUE;*/
18425+
18426+ i830_tv_get_default_params(output);
18427+ /*Init Display parameter for TV */
18428+ sdvo_priv->OverScanX.Value = 0xffffffff;
18429+ sdvo_priv->OverScanY.Value = 0xffffffff;
18430+ sdvo_priv->dispParams.Brightness.Value = 0x80;
18431+ sdvo_priv->dispParams.FlickerFilter.Value = 0xffffffff;
18432+ sdvo_priv->dispParams.AdaptiveFF.Value = 7;
18433+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = 0xffffffff;
18434+ sdvo_priv->dispParams.Contrast.Value = 0x40;
18435+ sdvo_priv->dispParams.PositionX.Value = 0x200;
18436+ sdvo_priv->dispParams.PositionY.Value = 0x200;
18437+ sdvo_priv->dispParams.DotCrawl.Value = 1;
18438+ sdvo_priv->dispParams.ChromaFilter.Value = 1;
18439+ sdvo_priv->dispParams.LumaFilter.Value = 2;
18440+ sdvo_priv->dispParams.Sharpness.Value = 4;
18441+ sdvo_priv->dispParams.Saturation.Value = 0x45;
18442+ sdvo_priv->dispParams.Hue.Value = 0x40;
18443+ sdvo_priv->dispParams.Dither.Value = 0;
18444+
18445+ }
18446+ else {
18447+ name_prefix = "RGB0";
18448+ DRM_INFO("non TV is attaced\n");
18449+ }
18450+ if (sdvo_priv->output_device == SDVOB) {
18451+ name_suffix = "-1";
18452+ } else {
18453+ name_suffix = "-2";
18454+ }
18455+
18456+ strcpy(deviceName, name_prefix);
18457+ strcat(deviceName, name_suffix);
18458+
18459+ if(output->name && (strcmp(output->name,deviceName) != 0)){
18460+ DRM_DEBUG("change the output name to %s\n", deviceName);
18461+ if (!drm_output_rename(output, deviceName)) {
18462+ drm_output_destroy(output);
18463+ return output_status_disconnected;
18464+ }
18465+
18466+ }
18467+ i830_sdvo_set_iomap(output);
18468+
18469+ DRM_INFO("get attached displays=0x%x,0x%x,connectedouputs=0x%x\n",
18470+ response[0], response[1], sdvo_priv->active_outputs);
18471+ return output_status_connected;
18472+ } else {
18473+ /*No SDVO display device attached */
18474+ intel_sdvo_dpms(output, DPMSModeOff);
18475+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
18476+ return output_status_disconnected;
18477+ }
18478+}
18479+
18480+static int i830_sdvo_get_tvmode_from_table(struct drm_output *output)
18481+{
18482+ struct intel_output *intel_output = output->driver_private;
18483+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18484+ struct drm_device *dev = output->dev;
18485+
18486+ int i, modes = 0;
18487+
18488+ for (i = 0; i < NUM_TV_MODES; i++)
18489+ if (((sdvo_priv->TVMode == TVMODE_HDTV) && /*hdtv mode list */
18490+ (tv_modes[i].dwSupportedHDTVvss & TVSTANDARD_HDTV_ALL)) ||
18491+ ((sdvo_priv->TVMode == TVMODE_SDTV) && /*sdtv mode list */
18492+ (tv_modes[i].dwSupportedSDTVvss & TVSTANDARD_SDTV_ALL))) {
18493+ struct drm_display_mode *newmode;
18494+ newmode = drm_mode_duplicate(dev, &tv_modes[i].mode_entry);
18495+ drm_mode_set_crtcinfo(newmode,0);
18496+ drm_mode_probed_add(output, newmode);
18497+ modes++;
18498+ }
18499+
18500+ return modes;
18501+
18502+}
18503+
18504+static int intel_sdvo_get_modes(struct drm_output *output)
18505+{
18506+ struct intel_output *intel_output = output->driver_private;
18507+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
18508+
18509+ DRM_DEBUG("xxintel_sdvo_get_modes\n");
18510+
18511+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
18512+ DRM_DEBUG("SDVO_DEVICE_TV\n");
18513+ i830_sdvo_get_tvmode_from_table(output);
18514+ if (list_empty(&output->probed_modes))
18515+ return 0;
18516+ return 1;
18517+
18518+ } else {
18519+ /* set the bus switch and get the modes */
18520+ intel_sdvo_set_control_bus_switch(output, SDVO_CONTROL_BUS_DDC2);
18521+ intel_ddc_get_modes(output);
18522+
18523+ if (list_empty(&output->probed_modes))
18524+ return 0;
18525+ return 1;
18526+ }
18527+#if 0
18528+ /* Mac mini hack. On this device, I get DDC through the analog, which
18529+ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
18530+ * but it does load-detect as connected. So, just steal the DDC bits
18531+ * from analog when we fail at finding it the right way.
18532+ */
18533+ /* TODO */
18534+ return NULL;
18535+
18536+ return NULL;
18537+#endif
18538+}
18539+
18540+static void intel_sdvo_destroy(struct drm_output *output)
18541+{
18542+ struct intel_output *intel_output = output->driver_private;
18543+ DRM_DEBUG("xxintel_sdvo_destroy\n");
18544+
18545+ if (intel_output->i2c_bus)
18546+ intel_i2c_destroy(intel_output->i2c_bus);
18547+
18548+ if (intel_output) {
18549+ kfree(intel_output);
18550+ output->driver_private = NULL;
18551+ }
18552+}
18553+
18554+static const struct drm_output_funcs intel_sdvo_output_funcs = {
18555+ .dpms = intel_sdvo_dpms,
18556+ .save = intel_sdvo_save,
18557+ .restore = intel_sdvo_restore,
18558+ .mode_valid = intel_sdvo_mode_valid,
18559+ .mode_fixup = intel_sdvo_mode_fixup,
18560+ .prepare = intel_output_prepare,
18561+ .mode_set = intel_sdvo_mode_set,
18562+ .commit = intel_output_commit,
18563+ .detect = intel_sdvo_detect,
18564+ .get_modes = intel_sdvo_get_modes,
18565+ .cleanup = intel_sdvo_destroy
18566+};
18567+
18568+void intel_sdvo_init(struct drm_device *dev, int output_device)
18569+{
18570+ struct drm_output *output;
18571+ struct intel_output *intel_output;
18572+ struct intel_sdvo_priv *sdvo_priv;
18573+ struct intel_i2c_chan *i2cbus = NULL;
18574+ u8 ch[0x40];
18575+ int i;
18576+ char name[DRM_OUTPUT_LEN];
18577+ char *name_prefix;
18578+ char *name_suffix;
18579+
18580+ int count = 3;
18581+ u8 response[2];
18582+ u8 status;
18583+ unsigned char bytes[2];
18584+
18585+ DRM_DEBUG("xxintel_sdvo_init\n");
18586+
18587+ if (IS_POULSBO(dev)) {
18588+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
18589+ u32 sku_value = 0;
18590+ bool sku_bSDVOEnable = true;
18591+ if(pci_root)
18592+ {
18593+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
18594+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
18595+ sku_bSDVOEnable = (sku_value & PCI_PORT5_REG80_SDVO_DISABLE)?false : true;
18596+ DRM_INFO("intel_sdvo_init: sku_value is 0x%08x\n", sku_value);
18597+ DRM_INFO("intel_sdvo_init: sku_bSDVOEnable is %d\n", sku_bSDVOEnable);
18598+ if (sku_bSDVOEnable == false)
18599+ return;
18600+ }
18601+ }
18602+
18603+ output = drm_output_create(dev, &intel_sdvo_output_funcs, NULL);
18604+ if (!output)
18605+ return;
18606+
18607+ intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
18608+ if (!intel_output) {
18609+ drm_output_destroy(output);
18610+ return;
18611+ }
18612+
18613+ sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
18614+ intel_output->type = INTEL_OUTPUT_SDVO;
18615+ output->driver_private = intel_output;
18616+ output->interlace_allowed = 0;
18617+ output->doublescan_allowed = 0;
18618+
18619+ /* setup the DDC bus. */
18620+ if (output_device == SDVOB)
18621+ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
18622+ else
18623+ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
18624+
18625+ if (i2cbus == NULL) {
18626+ drm_output_destroy(output);
18627+ return;
18628+ }
18629+
18630+ sdvo_priv->i2c_bus = i2cbus;
18631+
18632+ if (output_device == SDVOB) {
18633+ name_suffix = "-1";
18634+ sdvo_priv->i2c_bus->slave_addr = 0x38;
18635+ sdvo_priv->byInputWiring = SDVOB_IN0;
18636+ } else {
18637+ name_suffix = "-2";
18638+ sdvo_priv->i2c_bus->slave_addr = 0x39;
18639+ }
18640+
18641+ sdvo_priv->output_device = output_device;
18642+ intel_output->i2c_bus = i2cbus;
18643+ intel_output->dev_priv = sdvo_priv;
18644+
18645+
18646+ /* Read the regs to test if we can talk to the device */
18647+ for (i = 0; i < 0x40; i++) {
18648+ if (!intel_sdvo_read_byte(output, i, &ch[i])) {
18649+ DRM_DEBUG("No SDVO device found on SDVO%c\n",
18650+ output_device == SDVOB ? 'B' : 'C');
18651+ drm_output_destroy(output);
18652+ return;
18653+ }
18654+ }
18655+
18656+ intel_sdvo_get_capabilities(output, &sdvo_priv->caps);
18657+
18658+#ifdef SII_1392_WA
18659+ if ((sdvo_priv->caps.vendor_id == 0x04) && (sdvo_priv->caps.device_id==0xAE)){
18660+ /*Leave the control of 1392 to X server*/
18661+ SII_1392=1;
18662+ printk("%s: detect 1392 card, leave the setting to up level\n", __FUNCTION__);
18663+ if (drm_psb_no_fb == 0)
18664+ intel_sdvo_dpms(output, DPMSModeOff);
18665+ sdvo_priv->active_outputs = 0;
18666+ output->subpixel_order = SubPixelHorizontalRGB;
18667+ name_prefix = "SDVO";
18668+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
18669+ strcpy(name, name_prefix);
18670+ strcat(name, name_suffix);
18671+ if (!drm_output_rename(output, name)) {
18672+ drm_output_destroy(output);
18673+ return;
18674+ }
18675+ return;
18676+ }
18677+#endif
18678+ memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs));
18679+
18680+ while (count--) {
18681+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
18682+ status = intel_sdvo_read_response(output, &response, 2);
18683+
18684+ if (status != SDVO_CMD_STATUS_SUCCESS) {
18685+ udelay(1000);
18686+ continue;
18687+ }
18688+ if (status == SDVO_CMD_STATUS_SUCCESS)
18689+ break;
18690+ }
18691+ if (response[0] != 0 || response[1] != 0) {
18692+ /*Check what device types are connected to the hardware CRT/HDTV/S-Video/Composite */
18693+ /*in case of CRT and multiple TV's attached give preference in the order mentioned below */
18694+ /* 1. RGB */
18695+ /* 2. HDTV */
18696+ /* 3. S-Video */
18697+ /* 4. composite */
18698+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
18699+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
18700+ output->subpixel_order = SubPixelHorizontalRGB;
18701+ name_prefix = "TMDS";
18702+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
18703+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
18704+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
18705+ output->subpixel_order = SubPixelHorizontalRGB;
18706+ name_prefix = "TMDS";
18707+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
18708+ } else if (response[0] & SDVO_OUTPUT_RGB0) {
18709+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
18710+ output->subpixel_order = SubPixelHorizontalRGB;
18711+ name_prefix = "RGB0";
18712+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
18713+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_RGB1) {
18714+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
18715+ output->subpixel_order = SubPixelHorizontalRGB;
18716+ name_prefix = "RGB1";
18717+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
18718+ } else if (response[0] & SDVO_OUTPUT_YPRPB0) {
18719+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB0;
18720+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_YPRPB1) {
18721+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB1;
18722+ }
18723+ /* SCART is given Second preference */
18724+ else if (response[0] & SDVO_OUTPUT_SCART0) {
18725+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART0;
18726+
18727+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SCART1) {
18728+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART1;
18729+ }
18730+ /* if S-Video type TV is connected along with Composite type TV give preference to S-Video */
18731+ else if (response[0] & SDVO_OUTPUT_SVID0) {
18732+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID0;
18733+
18734+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SVID1) {
18735+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID1;
18736+ }
18737+ /* Composite is given least preference */
18738+ else if (response[0] & SDVO_OUTPUT_CVBS0) {
18739+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS0;
18740+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_CVBS1) {
18741+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS1;
18742+ } else {
18743+ DRM_DEBUG("no display attached\n");
18744+
18745+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
18746+ DRM_INFO("%s: No active TMDS or RGB outputs (0x%02x%02x) 0x%08x\n",
18747+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1],
18748+ sdvo_priv->caps.output_flags);
18749+ name_prefix = "Unknown";
18750+ }
18751+
18752+ /* init para for TV connector */
18753+ if (sdvo_priv->active_outputs & SDVO_OUTPUT_TV0) {
18754+ DRM_INFO("TV is attaced\n");
18755+ output->subpixel_order = SubPixelHorizontalRGB;
18756+ name_prefix = "TV0";
18757+ /* Init TV mode setting para */
18758+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TV;
18759+ sdvo_priv->bGetClk = TRUE;
18760+ if (sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB0 ||
18761+ sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB1) {
18762+ sdvo_priv->TVStandard = HDTV_SMPTE_274M_1080i60;
18763+ sdvo_priv->TVMode = TVMODE_HDTV;
18764+ } else {
18765+ sdvo_priv->TVStandard = TVSTANDARD_NTSC_M;
18766+ sdvo_priv->TVMode = TVMODE_SDTV;
18767+ }
18768+ /*intel_output->pDevice->TVEnabled = TRUE;*/
18769+ /*Init Display parameter for TV */
18770+ sdvo_priv->OverScanX.Value = 0xffffffff;
18771+ sdvo_priv->OverScanY.Value = 0xffffffff;
18772+ sdvo_priv->dispParams.Brightness.Value = 0x80;
18773+ sdvo_priv->dispParams.FlickerFilter.Value = 0xffffffff;
18774+ sdvo_priv->dispParams.AdaptiveFF.Value = 7;
18775+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = 0xffffffff;
18776+ sdvo_priv->dispParams.Contrast.Value = 0x40;
18777+ sdvo_priv->dispParams.PositionX.Value = 0x200;
18778+ sdvo_priv->dispParams.PositionY.Value = 0x200;
18779+ sdvo_priv->dispParams.DotCrawl.Value = 1;
18780+ sdvo_priv->dispParams.ChromaFilter.Value = 1;
18781+ sdvo_priv->dispParams.LumaFilter.Value = 2;
18782+ sdvo_priv->dispParams.Sharpness.Value = 4;
18783+ sdvo_priv->dispParams.Saturation.Value = 0x45;
18784+ sdvo_priv->dispParams.Hue.Value = 0x40;
18785+ sdvo_priv->dispParams.Dither.Value = 0;
18786+ }
18787+ else {
18788+ name_prefix = "RGB0";
18789+ DRM_INFO("non TV is attaced\n");
18790+ }
18791+
18792+ strcpy(name, name_prefix);
18793+ strcat(name, name_suffix);
18794+ if (!drm_output_rename(output, name)) {
18795+ drm_output_destroy(output);
18796+ return;
18797+ }
18798+ } else {
18799+ /*No SDVO display device attached */
18800+ intel_sdvo_dpms(output, DPMSModeOff);
18801+ sdvo_priv->active_outputs = 0;
18802+ output->subpixel_order = SubPixelHorizontalRGB;
18803+ name_prefix = "SDVO";
18804+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
18805+ strcpy(name, name_prefix);
18806+ strcat(name, name_suffix);
18807+ if (!drm_output_rename(output, name)) {
18808+ drm_output_destroy(output);
18809+ return;
18810+ }
18811+
18812+ }
18813+
18814+ /*(void)intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);*/
18815+
18816+ /* Set the input timing to the screen. Assume always input 0. */
18817+ intel_sdvo_set_target_input(output, true, false);
18818+
18819+ intel_sdvo_get_input_pixel_clock_range(output,
18820+ &sdvo_priv->pixel_clock_min,
18821+ &sdvo_priv->pixel_clock_max);
18822+
18823+
18824+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
18825+ "clock range %dMHz - %dMHz, "
18826+ "input 1: %c, input 2: %c, "
18827+ "output 1: %c, output 2: %c\n",
18828+ SDVO_NAME(sdvo_priv),
18829+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
18830+ sdvo_priv->caps.device_rev_id,
18831+ sdvo_priv->pixel_clock_min / 1000,
18832+ sdvo_priv->pixel_clock_max / 1000,
18833+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
18834+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
18835+ /* check currently supported outputs */
18836+ sdvo_priv->caps.output_flags &
18837+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
18838+ sdvo_priv->caps.output_flags &
18839+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
18840+
18841+ intel_output->ddc_bus = i2cbus;
18842+}
18843Index: linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo_regs.h
18844===================================================================
18845--- /dev/null 1970-01-01 00:00:00.000000000 +0000
18846+++ linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo_regs.h 2009-01-14 11:58:01.000000000 +0000
18847@@ -0,0 +1,580 @@
18848+/*
18849+ * Copyright ?2006-2007 Intel Corporation
18850+ *
18851+ * Permission is hereby granted, free of charge, to any person obtaining a
18852+ * copy of this software and associated documentation files (the "Software"),
18853+ * to deal in the Software without restriction, including without limitation
18854+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18855+ * and/or sell copies of the Software, and to permit persons to whom the
18856+ * Software is furnished to do so, subject to the following conditions:
18857+ *
18858+ * The above copyright notice and this permission notice (including the next
18859+ * paragraph) shall be included in all copies or substantial portions of the
18860+ * Software.
18861+ *
18862+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18863+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18864+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18865+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18866+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18867+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
18868+ * DEALINGS IN THE SOFTWARE.
18869+ *
18870+ * Authors:
18871+ * Eric Anholt <eric@anholt.net>
18872+ */
18873+
18874+/**
18875+ * @file SDVO command definitions and structures.
18876+ */
18877+
18878+#define SDVO_OUTPUT_FIRST (0)
18879+#define SDVO_OUTPUT_TMDS0 (1 << 0)
18880+#define SDVO_OUTPUT_RGB0 (1 << 1)
18881+#define SDVO_OUTPUT_CVBS0 (1 << 2)
18882+#define SDVO_OUTPUT_SVID0 (1 << 3)
18883+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
18884+#define SDVO_OUTPUT_SCART0 (1 << 5)
18885+#define SDVO_OUTPUT_LVDS0 (1 << 6)
18886+#define SDVO_OUTPUT_TMDS1 (1 << 8)
18887+#define SDVO_OUTPUT_RGB1 (1 << 9)
18888+#define SDVO_OUTPUT_CVBS1 (1 << 10)
18889+#define SDVO_OUTPUT_SVID1 (1 << 11)
18890+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
18891+#define SDVO_OUTPUT_SCART1 (1 << 13)
18892+#define SDVO_OUTPUT_LVDS1 (1 << 14)
18893+#define SDVO_OUTPUT_LAST (14)
18894+
18895+struct intel_sdvo_caps {
18896+ u8 vendor_id;
18897+ u8 device_id;
18898+ u8 device_rev_id;
18899+ u8 sdvo_version_major;
18900+ u8 sdvo_version_minor;
18901+ unsigned int sdvo_inputs_mask:2;
18902+ unsigned int smooth_scaling:1;
18903+ unsigned int sharp_scaling:1;
18904+ unsigned int up_scaling:1;
18905+ unsigned int down_scaling:1;
18906+ unsigned int stall_support:1;
18907+ unsigned int pad:1;
18908+ u16 output_flags;
18909+} __attribute__((packed));
18910+
18911+/** This matches the EDID DTD structure, more or less */
18912+struct intel_sdvo_dtd {
18913+ struct {
18914+ u16 clock; /**< pixel clock, in 10kHz units */
18915+ u8 h_active; /**< lower 8 bits (pixels) */
18916+ u8 h_blank; /**< lower 8 bits (pixels) */
18917+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
18918+ u8 v_active; /**< lower 8 bits (lines) */
18919+ u8 v_blank; /**< lower 8 bits (lines) */
18920+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
18921+ } part1;
18922+
18923+ struct {
18924+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
18925+ u8 h_sync_width; /**< lower 8 bits (pixels) */
18926+ /** lower 4 bits each vsync offset, vsync width */
18927+ u8 v_sync_off_width;
18928+ /**
18929+ * 2 high bits of hsync offset, 2 high bits of hsync width,
18930+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
18931+ */
18932+ u8 sync_off_width_high;
18933+ u8 dtd_flags;
18934+ u8 sdvo_flags;
18935+ /** bits 6-7 of vsync offset at bits 6-7 */
18936+ u8 v_sync_off_high;
18937+ u8 reserved;
18938+ } part2;
18939+} __attribute__((packed));
18940+
18941+struct intel_sdvo_pixel_clock_range {
18942+ u16 min; /**< pixel clock, in 10kHz units */
18943+ u16 max; /**< pixel clock, in 10kHz units */
18944+} __attribute__((packed));
18945+
18946+struct intel_sdvo_preferred_input_timing_args {
18947+ u16 clock;
18948+ u16 width;
18949+ u16 height;
18950+} __attribute__((packed));
18951+
18952+/* I2C registers for SDVO */
18953+#define SDVO_I2C_ARG_0 0x07
18954+#define SDVO_I2C_ARG_1 0x06
18955+#define SDVO_I2C_ARG_2 0x05
18956+#define SDVO_I2C_ARG_3 0x04
18957+#define SDVO_I2C_ARG_4 0x03
18958+#define SDVO_I2C_ARG_5 0x02
18959+#define SDVO_I2C_ARG_6 0x01
18960+#define SDVO_I2C_ARG_7 0x00
18961+#define SDVO_I2C_OPCODE 0x08
18962+#define SDVO_I2C_CMD_STATUS 0x09
18963+#define SDVO_I2C_RETURN_0 0x0a
18964+#define SDVO_I2C_RETURN_1 0x0b
18965+#define SDVO_I2C_RETURN_2 0x0c
18966+#define SDVO_I2C_RETURN_3 0x0d
18967+#define SDVO_I2C_RETURN_4 0x0e
18968+#define SDVO_I2C_RETURN_5 0x0f
18969+#define SDVO_I2C_RETURN_6 0x10
18970+#define SDVO_I2C_RETURN_7 0x11
18971+#define SDVO_I2C_VENDOR_BEGIN 0x20
18972+
18973+/* Status results */
18974+#define SDVO_CMD_STATUS_POWER_ON 0x0
18975+#define SDVO_CMD_STATUS_SUCCESS 0x1
18976+#define SDVO_CMD_STATUS_NOTSUPP 0x2
18977+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
18978+#define SDVO_CMD_STATUS_PENDING 0x4
18979+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
18980+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
18981+
18982+/* SDVO commands, argument/result registers */
18983+
18984+#define SDVO_CMD_RESET 0x01
18985+
18986+/** Returns a struct intel_sdvo_caps */
18987+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
18988+
18989+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
18990+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
18991+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
18992+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
18993+
18994+/**
18995+ * Reports which inputs are trained (managed to sync).
18996+ *
18997+ * Devices must have trained within 2 vsyncs of a mode change.
18998+ */
18999+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
19000+struct intel_sdvo_get_trained_inputs_response {
19001+ unsigned int input0_trained:1;
19002+ unsigned int input1_trained:1;
19003+ unsigned int pad:6;
19004+} __attribute__((packed));
19005+
19006+/** Returns a struct intel_sdvo_output_flags of active outputs. */
19007+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
19008+
19009+/**
19010+ * Sets the current set of active outputs.
19011+ *
19012+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
19013+ * on multi-output devices.
19014+ */
19015+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
19016+
19017+/**
19018+ * Returns the current mapping of SDVO inputs to outputs on the device.
19019+ *
19020+ * Returns two struct intel_sdvo_output_flags structures.
19021+ */
19022+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
19023+
19024+/**
19025+ * Sets the current mapping of SDVO inputs to outputs on the device.
19026+ *
19027+ * Takes two struct i380_sdvo_output_flags structures.
19028+ */
19029+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
19030+
19031+/**
19032+ * Returns a struct intel_sdvo_output_flags of attached displays.
19033+ */
19034+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
19035+
19036+/**
19037+ * Returns a struct intel_sdvo_ouptut_flags of displays supporting hot plugging.
19038+ */
19039+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
19040+
19041+/**
19042+ * Takes a struct intel_sdvo_output_flags.
19043+ */
19044+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
19045+
19046+/**
19047+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
19048+ * interrupts enabled.
19049+ */
19050+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
19051+
19052+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
19053+struct intel_sdvo_get_interrupt_event_source_response {
19054+ u16 interrupt_status;
19055+ unsigned int ambient_light_interrupt:1;
19056+ unsigned int pad:7;
19057+} __attribute__((packed));
19058+
19059+/**
19060+ * Selects which input is affected by future input commands.
19061+ *
19062+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
19063+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
19064+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
19065+ */
19066+#define SDVO_CMD_SET_TARGET_INPUT 0x10
19067+struct intel_sdvo_set_target_input_args {
19068+ unsigned int target_1:1;
19069+ unsigned int pad:7;
19070+} __attribute__((packed));
19071+
19072+/**
19073+ * Takes a struct intel_sdvo_output_flags of which outputs are targetted by
19074+ * future output commands.
19075+ *
19076+ * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12],
19077+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
19078+ */
19079+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
19080+
19081+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
19082+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
19083+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
19084+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
19085+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
19086+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
19087+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
19088+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
19089+/* Part 1 */
19090+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
19091+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
19092+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
19093+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
19094+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
19095+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
19096+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
19097+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
19098+/* Part 2 */
19099+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
19100+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
19101+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
19102+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
19103+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
19104+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
19105+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
19106+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
19107+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
19108+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
19109+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
19110+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
19111+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
19112+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
19113+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
19114+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
19115+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
19116+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
19117+
19118+/**
19119+ * Generates a DTD based on the given width, height, and flags.
19120+ *
19121+ * This will be supported by any device supporting scaling or interlaced
19122+ * modes.
19123+ */
19124+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
19125+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
19126+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
19127+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
19128+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
19129+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
19130+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
19131+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
19132+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
19133+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
19134+
19135+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
19136+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
19137+
19138+/** Returns a struct intel_sdvo_pixel_clock_range */
19139+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
19140+/** Returns a struct intel_sdvo_pixel_clock_range */
19141+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
19142+
19143+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
19144+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
19145+
19146+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
19147+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
19148+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
19149+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
19150+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
19151+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
19152+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
19153+
19154+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
19155+
19156+#define SDVO_CMD_GET_TV_FORMAT 0x28
19157+
19158+#define SDVO_CMD_SET_TV_FORMAT 0x29
19159+
19160+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
19161+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
19162+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
19163+# define SDVO_ENCODER_STATE_ON (1 << 0)
19164+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
19165+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
19166+# define SDVO_ENCODER_STATE_OFF (1 << 3)
19167+
19168+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
19169+
19170+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
19171+# define SDVO_CONTROL_BUS_PROM 0x0
19172+# define SDVO_CONTROL_BUS_DDC1 0x1
19173+# define SDVO_CONTROL_BUS_DDC2 0x2
19174+# define SDVO_CONTROL_BUS_DDC3 0x3
19175+
19176+/* xiaolin, to support add-on SDVO TV Encoder */
19177+/* SDVO Bus & SDVO Inputs wiring details*/
19178+/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no*/
19179+/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no*/
19180+/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no*/
19181+/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no*/
19182+#define SDVOB_IN0 0x01
19183+#define SDVOB_IN1 0x02
19184+#define SDVOC_IN0 0x04
19185+#define SDVOC_IN1 0x08
19186+
19187+#define SDVO_OUTPUT_TV0 0x003C
19188+#define SDVO_OUTPUT_TV1 0x3C00
19189+#define SDVO_OUTPUT_LAST (14)
19190+
19191+#define SDVO_OUTPUT_CRT (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1 )
19192+#define SDVO_OUTPUT_TV (SDVO_OUTPUT_TV0 | SDVO_OUTPUT_TV1)
19193+#define SDVO_OUTPUT_LVDS (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
19194+#define SDVO_OUTPUT_TMDS (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
19195+
19196+
19197+
19198+#define SDVO_DEVICE_NONE 0x00
19199+#define SDVO_DEVICE_CRT 0x01
19200+#define SDVO_DEVICE_TV 0x02
19201+#define SDVO_DEVICE_LVDS 0x04
19202+#define SDVO_DEVICE_TMDS 0x08
19203+
19204+/* Different TV mode*/
19205+#define TVMODE_OFF 0x0000
19206+#define TVMODE_SDTV 0x0001
19207+#define TVMODE_HDTV 0x0002
19208+
19209+#define TVSTANDARD_NONE 0x00
19210+#define TVSTANDARD_NTSC_M 0x0001 // 75 IRE Setup
19211+#define TVSTANDARD_NTSC_M_J 0x0002 // Japan, 0 IRE Setup
19212+#define TVSTANDARD_PAL_B 0x0004
19213+#define TVSTANDARD_PAL_D 0x0008
19214+#define TVSTANDARD_PAL_H 0x0010
19215+#define TVSTANDARD_PAL_I 0x0020
19216+#define TVSTANDARD_PAL_M 0x0040
19217+#define TVSTANDARD_PAL_N 0x0080
19218+#define TVSTANDARD_SECAM_B 0x0100
19219+#define TVSTANDARD_SECAM_D 0x0200
19220+#define TVSTANDARD_SECAM_G 0x0400
19221+#define TVSTANDARD_SECAM_H 0x0800
19222+#define TVSTANDARD_SECAM_K 0x1000
19223+#define TVSTANDARD_SECAM_K1 0x2000
19224+#define TVSTANDARD_SECAM_L 0x4000
19225+#define TVSTANDARD_WIN_VGA 0x8000
19226+/*and the rest*/
19227+#define TVSTANDARD_NTSC_433 0x00010000
19228+#define TVSTANDARD_PAL_G 0x00020000
19229+#define TVSTANDARD_PAL_60 0x00040000
19230+#define TVSTANDARD_SECAM_L1 0x00080000
19231+#define TVSTANDARD_SDTV_ALL 0x000FFFFF
19232+
19233+
19234+/*HDTV standard defination added using the unused upper 12 bits of dwTVStandard*/
19235+#define HDTV_SMPTE_170M_480i59 0x00100000
19236+#define HDTV_SMPTE_293M_480p60 0x00200000
19237+#define HDTV_SMPTE_293M_480p59 0x00400000
19238+#define HDTV_ITURBT601_576i50 0x00800000
19239+#define HDTV_ITURBT601_576p50 0x01000000
19240+#define HDTV_SMPTE_296M_720p50 0x02000000
19241+#define HDTV_SMPTE_296M_720p59 0x04000000
19242+#define HDTV_SMPTE_296M_720p60 0x08000000
19243+#define HDTV_SMPTE_274M_1080i50 0x10000000
19244+#define HDTV_SMPTE_274M_1080i59 0x20000000
19245+#define HDTV_SMPTE_274M_1080i60 0x40000000
19246+#define HDTV_SMPTE_274M_1080p60 0x80000000
19247+#define TVSTANDARD_HDTV_ALL 0xFFF00000
19248+
19249+
19250+#define TVSTANDARD_NTSC 0x01
19251+#define TVSTANDARD_PAL 0x02
19252+
19253+#define TVOUTPUT_NONE 0x00
19254+#define TVOUTPUT_COMPOSITE 0x01
19255+#define TVOUTPUT_SVIDEO 0x02
19256+#define TVOUTPUT_RGB 0x04
19257+#define TVOUTPUT_YCBCR 0x08
19258+#define TVOUTPUT_SC 0x16
19259+
19260+/* Encoder supported TV standard bit mask per SDVO ED*/
19261+#define SDVO_NTSC_M 0x00000001
19262+#define SDVO_NTSC_M_J 0x00000002
19263+#define SDVO_NTSC_433 0x00000004
19264+#define SDVO_PAL_B 0x00000008
19265+#define SDVO_PAL_D 0x00000010
19266+#define SDVO_PAL_G 0x00000020
19267+#define SDVO_PAL_H 0x00000040
19268+#define SDVO_PAL_I 0x00000080
19269+#define SDVO_PAL_M 0x00000100
19270+#define SDVO_PAL_N 0x00000200
19271+#define SDVO_PAL_NC 0x00000400
19272+#define SDVO_PAL_60 0x00000800
19273+#define SDVO_SECAM_B 0x00001000
19274+#define SDVO_SECAM_D 0x00002000
19275+#define SDVO_SECAM_G 0x00004000
19276+#define SDVO_SECAM_K 0x00008000
19277+#define SDVO_SECAM_K1 0x00010000
19278+#define SDVO_SECAM_L 0x00020000
19279+#define SDVO_SECAM_60 0x00040000
19280+
19281+/* Number of SDTV format*/
19282+#define SDTV_NUM_STANDARDS 19
19283+
19284+/* Encoder supported HDTV standard bit mask per SDVO ED*/
19285+#define SDVO_HDTV_STD_240M_1080i59 0x00000008
19286+#define SDVO_HDTV_STD_240M_1080i60 0x00000010
19287+#define SDVO_HDTV_STD_260M_1080i59 0x00000020
19288+#define SDVO_HDTV_STD_260M_1080i60 0x00000040
19289+#define SDVO_HDTV_STD_274M_1080i50 0x00000080
19290+#define SDVO_HDTV_STD_274M_1080i59 0x00000100
19291+#define SDVO_HDTV_STD_274M_1080i60 0x00000200
19292+#define SDVO_HDTV_STD_274M_1080p23 0x00000400
19293+#define SDVO_HDTV_STD_274M_1080p24 0x00000800
19294+#define SDVO_HDTV_STD_274M_1080p25 0x00001000
19295+#define SDVO_HDTV_STD_274M_1080p29 0x00002000
19296+#define SDVO_HDTV_STD_274M_1080p30 0x00004000
19297+#define SDVO_HDTV_STD_274M_1080p50 0x00008000
19298+#define SDVO_HDTV_STD_274M_1080p59 0x00010000
19299+#define SDVO_HDTV_STD_274M_1080p60 0x00020000
19300+#define SDVO_HDTV_STD_295M_1080i50 0x00040000
19301+#define SDVO_HDTV_STD_295M_1080p50 0x00080000
19302+#define SDVO_HDTV_STD_296M_720p59 0x00100000
19303+#define SDVO_HDTV_STD_296M_720p60 0x00200000
19304+#define SDVO_HDTV_STD_296M_720p50 0x00400000
19305+#define SDVO_HDTV_STD_293M_480p59 0x00800000
19306+#define SDVO_HDTV_STD_170M_480i59 0x01000000
19307+#define SDVO_HDTV_STD_ITURBT601_576i50 0x02000000
19308+#define SDVO_HDTV_STD_ITURBT601_576p50 0x04000000
19309+#define SDVO_HDTV_STD_EIA_7702A_480i60 0x08000000
19310+#define SDVO_HDTV_STD_EIA_7702A_480p60 0x10000000
19311+
19312+/* SDTV resolution*/
19313+#define SDVO_SDTV_320x200 0x00000001
19314+#define SDVO_SDTV_320x240 0x00000002
19315+#define SDVO_SDTV_400x300 0x00000004
19316+#define SDVO_SDTV_640x350 0x00000008
19317+#define SDVO_SDTV_640x400 0x00000010
19318+#define SDVO_SDTV_640x480 0x00000020
19319+#define SDVO_SDTV_704x480 0x00000040
19320+#define SDVO_SDTV_704x576 0x00000080
19321+#define SDVO_SDTV_720x350 0x00000100
19322+#define SDVO_SDTV_720x400 0x00000200
19323+#define SDVO_SDTV_720x480 0x00000400
19324+#define SDVO_SDTV_720x540 0x00000800
19325+#define SDVO_SDTV_720x576 0x00001000
19326+#define SDVO_SDTV_768x576 0x00002000
19327+#define SDVO_SDTV_800x600 0x00004000
19328+#define SDVO_SDTV_832x624 0x00008000
19329+#define SDVO_SDTV_920x766 0x00010000
19330+#define SDVO_SDTV_1024x768 0x00020000
19331+#define SDVO_SDTV_1280x1024 0x00040000
19332+
19333+
19334+#define SDVO_HDTV_640x480 0x00000001
19335+#define SDVO_HDTV_800x600 0x00000002
19336+#define SDVO_HDTV_1024x768 0x00000004
19337+#define SDVO_HDTV_1064x600 0x00020000
19338+#define SDVO_HDTV_1280x720 0x00040000
19339+#define SDVO_HDTV_1704x960 0x00100000
19340+#define SDVO_HDTV_1864x1050 0x00200000
19341+#define SDVO_HDTV_1920x1080 0x00400000
19342+#define SDVO_HDTV_640x400 0x02000000
19343+
19344+/* Number of SDTV mode*/
19345+#define SDTV_NUM_MODES 19
19346+
19347+/* sdvo cmd for sdvo tv */
19348+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMINGS 0x1A
19349+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
19350+#define SDVO_CMD_GET_TV_FORMATS 0x28
19351+#define SDVO_CMD_SET_TV_FORMATS 0x29
19352+
19353+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
19354+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
19355+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
19356+#define SDVO_ENCODER_STATE_ON (1 << 0)
19357+#define SDVO_ENCODER_STATE_STANDBY (1 << 1)
19358+#define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
19359+#define SDVO_ENCODER_STATE_OFF (1 << 3)
19360+
19361+/* Bit mask of picture enhancement*/
19362+#define SDVO_FLICKER_FILTER 0x00000001
19363+#define SDVO_ADAPTIVE_FLICKER_FILTER 0x00000002
19364+#define SDVO_2D_FLICKER_FILTER 0x00000004
19365+#define SDVO_SATURATION 0x00000008
19366+#define SDVO_HUE 0x00000010
19367+#define SDVO_BRIGHTNESS 0x00000020
19368+#define SDVO_CONTRAST 0x00000040
19369+#define SDVO_HORIZONTAL_OVERSCAN 0x00000080
19370+#define SDVO_VERTICAL_OVERSCAN 0x00000100
19371+#define SDVO_HORIZONTAL_POSITION 0x00000200
19372+#define SDVO_VERTICAL_POSITION 0x00000400
19373+#define SDVO_SHARPNESS 0x00000800
19374+#define SDVO_DOT_CRAWL 0x00001000
19375+#define SDVO_DITHER 0x00002000
19376+#define SDVO_MAX_TV_CHROMA_FILTER 0x00004000
19377+#define SDVO_TV_MAX_LUMA_FILTER 0x00008000
19378+
19379+#define SDVO_CMD_GET_ANCILLARY_VIDEO_INFORMATION 0x3A
19380+#define SDVO_CMD_SET_ANCILLARY_VIDEO_INFORMATION 0x3B
19381+
19382+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
19383+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4D
19384+#define SDVO_CMD_GET_FLICKER_FILTER 0x4E
19385+#define SDVO_CMD_SET_FLICKER_FILTER 0x4F
19386+#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FILTER 0x50
19387+#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FILTER 0x51
19388+#define SDVO_CMD_GET_MAX_2D_FLICKER_FILTER 0x52
19389+#define SDVO_CMD_GET_2D_FLICKER_FILTER 0x53
19390+#define SDVO_CMD_SET_2D_FLICKER_FILTER 0x54
19391+#define SDVO_CMD_GET_MAX_SATURATION 0x55
19392+#define SDVO_CMD_GET_SATURATION 0x56
19393+#define SDVO_CMD_SET_SATURATION 0x57
19394+#define SDVO_CMD_GET_MAX_HUE 0x58
19395+#define SDVO_CMD_GET_HUE 0x59
19396+#define SDVO_CMD_SET_HUE 0x5A
19397+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5B
19398+#define SDVO_CMD_GET_BRIGHTNESS 0x5C
19399+#define SDVO_CMD_SET_BRIGHTNESS 0x5D
19400+#define SDVO_CMD_GET_MAX_CONTRAST 0x5E
19401+#define SDVO_CMD_GET_CONTRAST 0x5F
19402+#define SDVO_CMD_SET_CONTRAST 0x60
19403+
19404+#define SDVO_CMD_GET_MAX_HORIZONTAL_OVERSCAN 0x61
19405+#define SDVO_CMD_GET_HORIZONTAL_OVERSCAN 0x62
19406+#define SDVO_CMD_SET_HORIZONTAL_OVERSCAN 0x63
19407+#define SDVO_CMD_GET_MAX_VERTICAL_OVERSCAN 0x64
19408+#define SDVO_CMD_GET_VERTICAL_OVERSCAN 0x65
19409+#define SDVO_CMD_SET_VERTICAL_OVERSCAN 0x66
19410+#define SDVO_CMD_GET_MAX_HORIZONTAL_POSITION 0x67
19411+#define SDVO_CMD_GET_HORIZONTAL_POSITION 0x68
19412+#define SDVO_CMD_SET_HORIZONTAL_POSITION 0x69
19413+#define SDVO_CMD_GET_MAX_VERTICAL_POSITION 0x6A
19414+#define SDVO_CMD_GET_VERTICAL_POSITION 0x6B
19415+#define SDVO_CMD_SET_VERTICAL_POSITION 0x6C
19416+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6D
19417+#define SDVO_CMD_GET_SHARPNESS 0x6E
19418+#define SDVO_CMD_SET_SHARPNESS 0x6F
19419+#define SDVO_CMD_GET_DOT_CRAWL 0x70
19420+#define SDVO_CMD_SET_DOT_CRAWL 0x71
19421+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
19422+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
19423+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
19424+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
19425+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
19426+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
19427+#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FILTER 0x7B
19428Index: linux-2.6.27/drivers/gpu/drm/psb/psb_buffer.c
19429===================================================================
19430--- /dev/null 1970-01-01 00:00:00.000000000 +0000
19431+++ linux-2.6.27/drivers/gpu/drm/psb/psb_buffer.c 2009-01-14 11:58:01.000000000 +0000
19432@@ -0,0 +1,437 @@
19433+/**************************************************************************
19434+ * Copyright (c) 2007, Intel Corporation.
19435+ * All Rights Reserved.
19436+ *
19437+ * This program is free software; you can redistribute it and/or modify it
19438+ * under the terms and conditions of the GNU General Public License,
19439+ * version 2, as published by the Free Software Foundation.
19440+ *
19441+ * This program is distributed in the hope it will be useful, but WITHOUT
19442+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19443+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19444+ * more details.
19445+ *
19446+ * You should have received a copy of the GNU General Public License along with
19447+ * this program; if not, write to the Free Software Foundation, Inc.,
19448+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19449+ *
19450+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19451+ * develop this driver.
19452+ *
19453+ **************************************************************************/
19454+/*
19455+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
19456+ */
19457+#include "drmP.h"
19458+#include "psb_drv.h"
19459+#include "psb_schedule.h"
19460+
19461+struct drm_psb_ttm_backend {
19462+ struct drm_ttm_backend base;
19463+ struct page **pages;
19464+ unsigned int desired_tile_stride;
19465+ unsigned int hw_tile_stride;
19466+ int mem_type;
19467+ unsigned long offset;
19468+ unsigned long num_pages;
19469+};
19470+
19471+int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
19472+ uint32_t * type)
19473+{
19474+ switch (*class) {
19475+ case PSB_ENGINE_TA:
19476+ *type = DRM_FENCE_TYPE_EXE |
19477+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
19478+ if (bo->mem.mask & PSB_BO_FLAG_TA)
19479+ *type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
19480+ if (bo->mem.mask & PSB_BO_FLAG_SCENE)
19481+ *type |= _PSB_FENCE_TYPE_SCENE_DONE;
19482+ if (bo->mem.mask & PSB_BO_FLAG_FEEDBACK)
19483+ *type |= _PSB_FENCE_TYPE_FEEDBACK;
19484+ break;
19485+ default:
19486+ *type = DRM_FENCE_TYPE_EXE;
19487+ }
19488+ return 0;
19489+}
19490+
19491+static inline size_t drm_size_align(size_t size)
19492+{
19493+ size_t tmpSize = 4;
19494+ if (size > PAGE_SIZE)
19495+ return PAGE_ALIGN(size);
19496+ while (tmpSize < size)
19497+ tmpSize <<= 1;
19498+
19499+ return (size_t) tmpSize;
19500+}
19501+
19502+/*
19503+ * Poulsbo GPU virtual space looks like this
19504+ * (We currently use only one MMU context).
19505+ *
19506+ * gatt_start = Start of GATT aperture in bus space.
19507+ * stolen_end = End of GATT populated by stolen memory in bus space.
19508+ * gatt_end = End of GATT
19509+ * twod_end = MIN(gatt_start + 256_MEM, gatt_end)
19510+ *
19511+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling- and copy operations.
19512+ * This space is not managed and is protected by the
19513+ * temp_mem mutex.
19514+ *
19515+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
19516+ *
19517+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
19518+ *
19519+ * gatt_start -> stolen_end DRM_BO_MEM_VRAM Pre-populated GATT pages.
19520+ *
19521+ * stolen_end -> twod_end DRM_BO_MEM_TT GATT memory usable by 2D engine.
19522+ *
19523+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not usable by 2D engine.
19524+ *
19525+ * gatt_end -> 0xffffffff Currently unused.
19526+ */
19527+
19528+int psb_init_mem_type(struct drm_device *dev, uint32_t type,
19529+ struct drm_mem_type_manager *man)
19530+{
19531+ struct drm_psb_private *dev_priv =
19532+ (struct drm_psb_private *)dev->dev_private;
19533+ struct psb_gtt *pg = dev_priv->pg;
19534+
19535+ switch (type) {
19536+ case DRM_BO_MEM_LOCAL:
19537+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19538+ _DRM_FLAG_MEMTYPE_CACHED;
19539+ man->drm_bus_maptype = 0;
19540+ break;
19541+ case DRM_PSB_MEM_KERNEL:
19542+ man->io_offset = 0x00000000;
19543+ man->io_size = 0x00000000;
19544+ man->io_addr = NULL;
19545+ man->drm_bus_maptype = _DRM_TTM;
19546+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19547+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19548+ man->gpu_offset = PSB_MEM_KERNEL_START;
19549+ break;
19550+ case DRM_PSB_MEM_MMU:
19551+ man->io_offset = 0x00000000;
19552+ man->io_size = 0x00000000;
19553+ man->io_addr = NULL;
19554+ man->drm_bus_maptype = _DRM_TTM;
19555+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19556+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19557+ man->gpu_offset = PSB_MEM_MMU_START;
19558+ break;
19559+ case DRM_PSB_MEM_PDS:
19560+ man->io_offset = 0x00000000;
19561+ man->io_size = 0x00000000;
19562+ man->io_addr = NULL;
19563+ man->drm_bus_maptype = _DRM_TTM;
19564+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19565+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19566+ man->gpu_offset = PSB_MEM_PDS_START;
19567+ break;
19568+ case DRM_PSB_MEM_RASTGEOM:
19569+ man->io_offset = 0x00000000;
19570+ man->io_size = 0x00000000;
19571+ man->io_addr = NULL;
19572+ man->drm_bus_maptype = _DRM_TTM;
19573+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19574+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19575+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
19576+ break;
19577+ case DRM_BO_MEM_VRAM:
19578+ man->io_addr = NULL;
19579+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19580+ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
19581+#ifdef PSB_WORKING_HOST_MMU_ACCESS
19582+ man->drm_bus_maptype = _DRM_AGP;
19583+ man->io_offset = pg->gatt_start;
19584+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
19585+#else
19586+ man->drm_bus_maptype = _DRM_TTM; /* Forces uncached */
19587+ man->io_offset = pg->stolen_base;
19588+ man->io_size = pg->stolen_size;
19589+#endif
19590+ man->gpu_offset = pg->gatt_start;
19591+ break;
19592+ case DRM_BO_MEM_TT: /* Mappable GATT memory */
19593+ man->io_offset = pg->gatt_start;
19594+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
19595+ man->io_addr = NULL;
19596+#ifdef PSB_WORKING_HOST_MMU_ACCESS
19597+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19598+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
19599+ man->drm_bus_maptype = _DRM_AGP;
19600+#else
19601+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19602+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19603+ man->drm_bus_maptype = _DRM_TTM;
19604+#endif
19605+ man->gpu_offset = pg->gatt_start;
19606+ break;
19607+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
19608+ man->io_offset = pg->gatt_start;
19609+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
19610+ man->io_addr = NULL;
19611+#ifdef PSB_WORKING_HOST_MMU_ACCESS
19612+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19613+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
19614+ man->drm_bus_maptype = _DRM_AGP;
19615+#else
19616+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
19617+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
19618+ man->drm_bus_maptype = _DRM_TTM;
19619+#endif
19620+ man->gpu_offset = pg->gatt_start;
19621+ break;
19622+ default:
19623+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
19624+ return -EINVAL;
19625+ }
19626+ return 0;
19627+}
19628+
19629+uint32_t psb_evict_mask(struct drm_buffer_object * bo)
19630+{
19631+ switch (bo->mem.mem_type) {
19632+ case DRM_BO_MEM_VRAM:
19633+ return DRM_BO_FLAG_MEM_TT;
19634+ default:
19635+ return DRM_BO_FLAG_MEM_LOCAL;
19636+ }
19637+}
19638+
19639+int psb_invalidate_caches(struct drm_device *dev, uint64_t flags)
19640+{
19641+ return 0;
19642+}
19643+
19644+static int psb_move_blit(struct drm_buffer_object *bo,
19645+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
19646+{
19647+ struct drm_bo_mem_reg *old_mem = &bo->mem;
19648+ int dir = 0;
19649+
19650+ if ((old_mem->mem_type == new_mem->mem_type) &&
19651+ (new_mem->mm_node->start <
19652+ old_mem->mm_node->start + old_mem->mm_node->size)) {
19653+ dir = 1;
19654+ }
19655+
19656+ psb_emit_2d_copy_blit(bo->dev,
19657+ old_mem->mm_node->start << PAGE_SHIFT,
19658+ new_mem->mm_node->start << PAGE_SHIFT,
19659+ new_mem->num_pages, dir);
19660+
19661+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
19662+ DRM_FENCE_TYPE_EXE, 0, new_mem);
19663+}
19664+
19665+/*
19666+ * Flip destination ttm into cached-coherent GATT,
19667+ * then blit and subsequently move out again.
19668+ */
19669+
19670+static int psb_move_flip(struct drm_buffer_object *bo,
19671+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
19672+{
19673+ struct drm_device *dev = bo->dev;
19674+ struct drm_bo_mem_reg tmp_mem;
19675+ int ret;
19676+
19677+ tmp_mem = *new_mem;
19678+ tmp_mem.mm_node = NULL;
19679+ tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
19680+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
19681+
19682+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
19683+ if (ret)
19684+ return ret;
19685+ ret = drm_bind_ttm(bo->ttm, &tmp_mem);
19686+ if (ret)
19687+ goto out_cleanup;
19688+ ret = psb_move_blit(bo, 1, no_wait, &tmp_mem);
19689+ if (ret)
19690+ goto out_cleanup;
19691+
19692+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
19693+ out_cleanup:
19694+ if (tmp_mem.mm_node) {
19695+ mutex_lock(&dev->struct_mutex);
19696+ if (tmp_mem.mm_node != bo->pinned_node)
19697+ drm_mm_put_block(tmp_mem.mm_node);
19698+ tmp_mem.mm_node = NULL;
19699+ mutex_unlock(&dev->struct_mutex);
19700+ }
19701+ return ret;
19702+}
19703+
19704+int psb_move(struct drm_buffer_object *bo,
19705+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
19706+{
19707+ struct drm_bo_mem_reg *old_mem = &bo->mem;
19708+
19709+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
19710+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
19711+ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
19712+ if (psb_move_flip(bo, evict, no_wait, new_mem))
19713+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
19714+ } else {
19715+ if (psb_move_blit(bo, evict, no_wait, new_mem))
19716+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
19717+ }
19718+ return 0;
19719+}
19720+
19721+static int drm_psb_tbe_nca(struct drm_ttm_backend *backend)
19722+{
19723+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
19724+}
19725+
19726+static int drm_psb_tbe_populate(struct drm_ttm_backend *backend,
19727+ unsigned long num_pages, struct page **pages)
19728+{
19729+ struct drm_psb_ttm_backend *psb_be =
19730+ container_of(backend, struct drm_psb_ttm_backend, base);
19731+
19732+ psb_be->pages = pages;
19733+ return 0;
19734+}
19735+
19736+static int drm_psb_tbe_unbind(struct drm_ttm_backend *backend)
19737+{
19738+ struct drm_device *dev = backend->dev;
19739+ struct drm_psb_private *dev_priv =
19740+ (struct drm_psb_private *)dev->dev_private;
19741+ struct drm_psb_ttm_backend *psb_be =
19742+ container_of(backend, struct drm_psb_ttm_backend, base);
19743+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
19744+ struct drm_mem_type_manager *man = &dev->bm.man[psb_be->mem_type];
19745+
19746+ PSB_DEBUG_RENDER("MMU unbind.\n");
19747+
19748+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
19749+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
19750+ PAGE_SHIFT;
19751+
19752+ (void)psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
19753+ psb_be->num_pages,
19754+ psb_be->desired_tile_stride,
19755+ psb_be->hw_tile_stride);
19756+ }
19757+
19758+ psb_mmu_remove_pages(pd, psb_be->offset,
19759+ psb_be->num_pages,
19760+ psb_be->desired_tile_stride,
19761+ psb_be->hw_tile_stride);
19762+
19763+ return 0;
19764+}
19765+
19766+static int drm_psb_tbe_bind(struct drm_ttm_backend *backend,
19767+ struct drm_bo_mem_reg *bo_mem)
19768+{
19769+ struct drm_device *dev = backend->dev;
19770+ struct drm_psb_private *dev_priv =
19771+ (struct drm_psb_private *)dev->dev_private;
19772+ struct drm_psb_ttm_backend *psb_be =
19773+ container_of(backend, struct drm_psb_ttm_backend, base);
19774+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
19775+ struct drm_mem_type_manager *man = &dev->bm.man[bo_mem->mem_type];
19776+ int type;
19777+ int ret = 0;
19778+
19779+ psb_be->mem_type = bo_mem->mem_type;
19780+ psb_be->num_pages = bo_mem->num_pages;
19781+ psb_be->desired_tile_stride = bo_mem->desired_tile_stride;
19782+ psb_be->hw_tile_stride = bo_mem->hw_tile_stride;
19783+ psb_be->desired_tile_stride = 0;
19784+ psb_be->hw_tile_stride = 0;
19785+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
19786+ man->gpu_offset;
19787+
19788+ type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
19789+
19790+ PSB_DEBUG_RENDER("MMU bind.\n");
19791+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
19792+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
19793+ PAGE_SHIFT;
19794+
19795+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
19796+ gatt_p_offset,
19797+ psb_be->num_pages,
19798+ psb_be->desired_tile_stride,
19799+ psb_be->hw_tile_stride, type);
19800+ }
19801+
19802+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
19803+ psb_be->offset, psb_be->num_pages,
19804+ psb_be->desired_tile_stride,
19805+ psb_be->hw_tile_stride, type);
19806+ if (ret)
19807+ goto out_err;
19808+
19809+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
19810+ DRM_BE_FLAG_BOUND_CACHED : 0, DRM_BE_FLAG_BOUND_CACHED);
19811+
19812+ return 0;
19813+ out_err:
19814+ drm_psb_tbe_unbind(backend);
19815+ return ret;
19816+
19817+}
19818+
19819+static void drm_psb_tbe_clear(struct drm_ttm_backend *backend)
19820+{
19821+ struct drm_psb_ttm_backend *psb_be =
19822+ container_of(backend, struct drm_psb_ttm_backend, base);
19823+
19824+ psb_be->pages = NULL;
19825+ return;
19826+}
19827+
19828+static void drm_psb_tbe_destroy(struct drm_ttm_backend *backend)
19829+{
19830+ struct drm_psb_ttm_backend *psb_be =
19831+ container_of(backend, struct drm_psb_ttm_backend, base);
19832+
19833+ if (backend)
19834+ drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
19835+}
19836+
19837+static struct drm_ttm_backend_func psb_ttm_backend = {
19838+ .needs_ub_cache_adjust = drm_psb_tbe_nca,
19839+ .populate = drm_psb_tbe_populate,
19840+ .clear = drm_psb_tbe_clear,
19841+ .bind = drm_psb_tbe_bind,
19842+ .unbind = drm_psb_tbe_unbind,
19843+ .destroy = drm_psb_tbe_destroy,
19844+};
19845+
19846+struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev)
19847+{
19848+ struct drm_psb_ttm_backend *psb_be;
19849+
19850+ psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
19851+ if (!psb_be)
19852+ return NULL;
19853+ psb_be->pages = NULL;
19854+ psb_be->base.func = &psb_ttm_backend;
19855+ psb_be->base.dev = dev;
19856+
19857+ return &psb_be->base;
19858+}
19859+
19860+int psb_tbe_size(struct drm_device *dev, unsigned long num_pages)
19861+{
19862+ /*
19863+ * Return the size of the structures themselves and the
19864+ * estimated size of the pagedir and pagetable entries.
19865+ */
19866+
19867+ return drm_size_align(sizeof(struct drm_psb_ttm_backend)) +
19868+ 8*num_pages;
19869+}
19870Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drm.h
19871===================================================================
19872--- /dev/null 1970-01-01 00:00:00.000000000 +0000
19873+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drm.h 2009-01-14 11:58:01.000000000 +0000
19874@@ -0,0 +1,370 @@
19875+/**************************************************************************
19876+ * Copyright (c) 2007, Intel Corporation.
19877+ * All Rights Reserved.
19878+ *
19879+ * This program is free software; you can redistribute it and/or modify it
19880+ * under the terms and conditions of the GNU General Public License,
19881+ * version 2, as published by the Free Software Foundation.
19882+ *
19883+ * This program is distributed in the hope it will be useful, but WITHOUT
19884+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19885+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19886+ * more details.
19887+ *
19888+ * You should have received a copy of the GNU General Public License along with
19889+ * this program; if not, write to the Free Software Foundation, Inc.,
19890+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19891+ *
19892+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
19893+ * develop this driver.
19894+ *
19895+ **************************************************************************/
19896+/*
19897+ */
19898+
19899+#ifndef _PSB_DRM_H_
19900+#define _PSB_DRM_H_
19901+
19902+#if defined(__linux__) && !defined(__KERNEL__)
19903+#include<stdint.h>
19904+#endif
19905+
19906+/*
19907+ * Intel Poulsbo driver package version.
19908+ *
19909+ */
19910+/* #define PSB_PACKAGE_VERSION "ED"__DATE__*/
19911+#define PSB_PACKAGE_VERSION "2.1.0.32L.0019"
19912+
19913+#define DRM_PSB_SAREA_MAJOR 0
19914+#define DRM_PSB_SAREA_MINOR 1
19915+#define PSB_FIXED_SHIFT 16
19916+
19917+/*
19918+ * Public memory types.
19919+ */
19920+
19921+#define DRM_PSB_MEM_MMU DRM_BO_MEM_PRIV1
19922+#define DRM_PSB_FLAG_MEM_MMU DRM_BO_FLAG_MEM_PRIV1
19923+#define DRM_PSB_MEM_PDS DRM_BO_MEM_PRIV2
19924+#define DRM_PSB_FLAG_MEM_PDS DRM_BO_FLAG_MEM_PRIV2
19925+#define DRM_PSB_MEM_APER DRM_BO_MEM_PRIV3
19926+#define DRM_PSB_FLAG_MEM_APER DRM_BO_FLAG_MEM_PRIV3
19927+#define DRM_PSB_MEM_RASTGEOM DRM_BO_MEM_PRIV4
19928+#define DRM_PSB_FLAG_MEM_RASTGEOM DRM_BO_FLAG_MEM_PRIV4
19929+#define PSB_MEM_RASTGEOM_START 0x30000000
19930+
19931+typedef int32_t psb_fixed;
19932+typedef uint32_t psb_ufixed;
19933+
19934+static inline psb_fixed psb_int_to_fixed(int a)
19935+{
19936+ return a * (1 << PSB_FIXED_SHIFT);
19937+}
19938+
19939+static inline psb_ufixed psb_unsigned_to_ufixed(unsigned int a)
19940+{
19941+ return a << PSB_FIXED_SHIFT;
19942+}
19943+
19944+/*Status of the command sent to the gfx device.*/
19945+typedef enum {
19946+ DRM_CMD_SUCCESS,
19947+ DRM_CMD_FAILED,
19948+ DRM_CMD_HANG
19949+} drm_cmd_status_t;
19950+
19951+struct drm_psb_scanout {
19952+ uint32_t buffer_id; /* DRM buffer object ID */
19953+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
19954+ uint32_t stride; /* Buffer stride in bytes */
19955+ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
19956+ uint32_t width; /* Buffer width in pixels */
19957+ uint32_t height; /* Buffer height in lines */
19958+ psb_fixed transform[3][3]; /* Buffer composite transform */
19959+ /* (scaling, rot, reflect) */
19960+};
19961+
19962+#define DRM_PSB_SAREA_OWNERS 16
19963+#define DRM_PSB_SAREA_OWNER_2D 0
19964+#define DRM_PSB_SAREA_OWNER_3D 1
19965+
19966+#define DRM_PSB_SAREA_SCANOUTS 3
19967+
19968+struct drm_psb_sarea {
19969+ /* Track changes of this data structure */
19970+
19971+ uint32_t major;
19972+ uint32_t minor;
19973+
19974+ /* Last context to touch part of hw */
19975+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
19976+
19977+ /* Definition of front- and rotated buffers */
19978+ uint32_t num_scanouts;
19979+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
19980+
19981+ int planeA_x;
19982+ int planeA_y;
19983+ int planeA_w;
19984+ int planeA_h;
19985+ int planeB_x;
19986+ int planeB_y;
19987+ int planeB_w;
19988+ int planeB_h;
19989+ uint32_t msvdx_state;
19990+ uint32_t msvdx_context;
19991+};
19992+
19993+#define PSB_RELOC_MAGIC 0x67676767
19994+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
19995+#define PSB_RELOC_SHIFT_SHIFT 0
19996+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
19997+#define PSB_RELOC_ALSHIFT_SHIFT 16
19998+
19999+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
20000+ * buffer
20001+ */
20002+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
20003+ * buffer, relative to 2D
20004+ * base address
20005+ */
20006+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
20007+ * relative to PDS base address
20008+ */
20009+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
20010+ * buffer (for tiling)
20011+ */
20012+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
20013+ * relative to base reg
20014+ */
20015+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
20016+
20017+struct drm_psb_reloc {
20018+ uint32_t reloc_op;
20019+ uint32_t where; /* offset in destination buffer */
20020+ uint32_t buffer; /* Buffer reloc applies to */
20021+ uint32_t mask; /* Destination format: */
20022+ uint32_t shift; /* Destination format: */
20023+ uint32_t pre_add; /* Destination format: */
20024+ uint32_t background; /* Destination add */
20025+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
20026+ uint32_t arg0; /* Reloc-op dependant */
20027+ uint32_t arg1;
20028+};
20029+
20030+#define PSB_BO_FLAG_TA (1ULL << 48)
20031+#define PSB_BO_FLAG_SCENE (1ULL << 49)
20032+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
20033+#define PSB_BO_FLAG_USSE (1ULL << 51)
20034+
20035+#define PSB_ENGINE_2D 0
20036+#define PSB_ENGINE_VIDEO 1
20037+#define PSB_ENGINE_RASTERIZER 2
20038+#define PSB_ENGINE_TA 3
20039+#define PSB_ENGINE_HPRAST 4
20040+
20041+/*
20042+ * For this fence class we have a couple of
20043+ * fence types.
20044+ */
20045+
20046+#define _PSB_FENCE_EXE_SHIFT 0
20047+#define _PSB_FENCE_TA_DONE_SHIFT 1
20048+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
20049+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
20050+#define _PSB_FENCE_FEEDBACK_SHIFT 4
20051+
20052+#define _PSB_ENGINE_TA_FENCE_TYPES 5
20053+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
20054+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
20055+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
20056+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
20057+
20058+#define PSB_ENGINE_HPRAST 4
20059+#define PSB_NUM_ENGINES 5
20060+
20061+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
20062+#define PSB_TA_FLAG_LASTPASS (1 << 1)
20063+
20064+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
20065+
20066+struct drm_psb_scene {
20067+ int handle_valid;
20068+ uint32_t handle;
20069+ uint32_t w;
20070+ uint32_t h;
20071+ uint32_t num_buffers;
20072+};
20073+
20074+struct drm_psb_hw_info
20075+{
20076+ uint32_t rev_id;
20077+ uint32_t caps;
20078+};
20079+
20080+typedef struct drm_psb_cmdbuf_arg {
20081+ uint64_t buffer_list; /* List of buffers to validate */
20082+ uint64_t clip_rects; /* See i915 counterpart */
20083+ uint64_t scene_arg;
20084+ uint64_t fence_arg;
20085+
20086+ uint32_t ta_flags;
20087+
20088+ uint32_t ta_handle; /* TA reg-value pairs */
20089+ uint32_t ta_offset;
20090+ uint32_t ta_size;
20091+
20092+ uint32_t oom_handle;
20093+ uint32_t oom_offset;
20094+ uint32_t oom_size;
20095+
20096+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
20097+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
20098+ uint32_t cmdbuf_size;
20099+
20100+ uint32_t reloc_handle; /* Reloc buffer object */
20101+ uint32_t reloc_offset;
20102+ uint32_t num_relocs;
20103+
20104+ int32_t damage; /* Damage front buffer with cliprects */
20105+ /* Not implemented yet */
20106+ uint32_t fence_flags;
20107+ uint32_t engine;
20108+
20109+ /*
20110+ * Feedback;
20111+ */
20112+
20113+ uint32_t feedback_ops;
20114+ uint32_t feedback_handle;
20115+ uint32_t feedback_offset;
20116+ uint32_t feedback_breakpoints;
20117+ uint32_t feedback_size;
20118+} drm_psb_cmdbuf_arg_t;
20119+
20120+struct drm_psb_xhw_init_arg {
20121+ uint32_t operation;
20122+ uint32_t buffer_handle;
20123+};
20124+
20125+/*
20126+ * Feedback components:
20127+ */
20128+
20129+/*
20130+ * Vistest component. The number of these in the feedback buffer
20131+ * equals the number of vistest breakpoints + 1.
20132+ * This is currently the only feedback component.
20133+ */
20134+
20135+struct drm_psb_vistest {
20136+ uint32_t vt[8];
20137+};
20138+
20139+#define PSB_HW_COOKIE_SIZE 16
20140+#define PSB_HW_FEEDBACK_SIZE 8
20141+#define PSB_HW_OOM_CMD_SIZE 6
20142+
20143+struct drm_psb_xhw_arg {
20144+ uint32_t op;
20145+ int ret;
20146+ uint32_t irq_op;
20147+ uint32_t issue_irq;
20148+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
20149+ union {
20150+ struct {
20151+ uint32_t w;
20152+ uint32_t h;
20153+ uint32_t size;
20154+ uint32_t clear_p_start;
20155+ uint32_t clear_num_pages;
20156+ } si;
20157+ struct {
20158+ uint32_t fire_flags;
20159+ uint32_t hw_context;
20160+ uint32_t offset;
20161+ uint32_t engine;
20162+ uint32_t flags;
20163+ uint32_t rca;
20164+ uint32_t num_oom_cmds;
20165+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
20166+ } sb;
20167+ struct {
20168+ uint32_t pages;
20169+ uint32_t size;
20170+ } bi;
20171+ struct {
20172+ uint32_t bca;
20173+ uint32_t rca;
20174+ uint32_t flags;
20175+ } oom;
20176+ struct {
20177+ uint32_t pt_offset;
20178+ uint32_t param_offset;
20179+ uint32_t flags;
20180+ } bl;
20181+ struct {
20182+ uint32_t value;
20183+ } cl;
20184+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
20185+ } arg;
20186+};
20187+
20188+#define DRM_PSB_CMDBUF 0x00
20189+#define DRM_PSB_XHW_INIT 0x01
20190+#define DRM_PSB_XHW 0x02
20191+#define DRM_PSB_SCENE_UNREF 0x03
20192+/* Controlling the kernel modesetting buffers */
20193+#define DRM_PSB_KMS_OFF 0x04
20194+#define DRM_PSB_KMS_ON 0x05
20195+#define DRM_PSB_HW_INFO 0x06
20196+
20197+#define PSB_XHW_INIT 0x00
20198+#define PSB_XHW_TAKEDOWN 0x01
20199+
20200+#define PSB_XHW_FIRE_RASTER 0x00
20201+#define PSB_XHW_SCENE_INFO 0x01
20202+#define PSB_XHW_SCENE_BIND_FIRE 0x02
20203+#define PSB_XHW_TA_MEM_INFO 0x03
20204+#define PSB_XHW_RESET_DPM 0x04
20205+#define PSB_XHW_OOM 0x05
20206+#define PSB_XHW_TERMINATE 0x06
20207+#define PSB_XHW_VISTEST 0x07
20208+#define PSB_XHW_RESUME 0x08
20209+#define PSB_XHW_TA_MEM_LOAD 0x09
20210+#define PSB_XHW_CHECK_LOCKUP 0x0a
20211+
20212+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
20213+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
20214+#define PSB_SCENE_FLAG_SETUP (1 << 2)
20215+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
20216+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
20217+
20218+#define PSB_TA_MEM_FLAG_TA (1 << 0)
20219+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
20220+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
20221+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
20222+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
20223+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
20224+
20225+/*Raster fire will deallocate memory */
20226+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
20227+/*Isp reset needed due to change in ZLS format */
20228+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
20229+/*These are set by Xpsb. */
20230+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
20231+/*The task has had at least one OOM and Xpsb will
20232+ send back messages on each fire. */
20233+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
20234+
20235+#define PSB_SCENE_ENGINE_TA 0
20236+#define PSB_SCENE_ENGINE_RASTER 1
20237+#define PSB_SCENE_NUM_ENGINES 2
20238+
20239+struct drm_psb_dev_info_arg {
20240+ uint32_t num_use_attribute_registers;
20241+};
20242+#define DRM_PSB_DEVINFO 0x01
20243+
20244+#endif
20245Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drv.c
20246===================================================================
20247--- /dev/null 1970-01-01 00:00:00.000000000 +0000
20248+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drv.c 2009-01-14 11:58:01.000000000 +0000
20249@@ -0,0 +1,1006 @@
20250+/**************************************************************************
20251+ * Copyright (c) 2007, Intel Corporation.
20252+ * All Rights Reserved.
20253+ *
20254+ * This program is free software; you can redistribute it and/or modify it
20255+ * under the terms and conditions of the GNU General Public License,
20256+ * version 2, as published by the Free Software Foundation.
20257+ *
20258+ * This program is distributed in the hope it will be useful, but WITHOUT
20259+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20260+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20261+ * more details.
20262+ *
20263+ * You should have received a copy of the GNU General Public License along with
20264+ * this program; if not, write to the Free Software Foundation, Inc.,
20265+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20266+ *
20267+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
20268+ * develop this driver.
20269+ *
20270+ **************************************************************************/
20271+/*
20272+ */
20273+
20274+#include "drmP.h"
20275+#include "drm.h"
20276+#include "psb_drm.h"
20277+#include "psb_drv.h"
20278+#include "psb_reg.h"
20279+#include "i915_reg.h"
20280+#include "psb_msvdx.h"
20281+#include "drm_pciids.h"
20282+#include "psb_scene.h"
20283+#include <linux/cpu.h>
20284+#include <linux/notifier.h>
20285+#include <linux/fb.h>
20286+
20287+int drm_psb_debug = 0;
20288+EXPORT_SYMBOL(drm_psb_debug);
20289+static int drm_psb_trap_pagefaults = 0;
20290+static int drm_psb_clock_gating = 0;
20291+static int drm_psb_ta_mem_size = 32 * 1024;
20292+int drm_psb_disable_vsync = 1;
20293+int drm_psb_no_fb = 0;
20294+int drm_psb_force_pipeb = 0;
20295+char* psb_init_mode;
20296+int psb_init_xres;
20297+int psb_init_yres;
20298+/*
20299+ *
20300+ */
20301+#define SII_1392_WA
20302+#ifdef SII_1392_WA
20303+extern int SII_1392;
20304+#endif
20305+
20306+MODULE_PARM_DESC(debug, "Enable debug output");
20307+MODULE_PARM_DESC(clock_gating, "clock gating");
20308+MODULE_PARM_DESC(no_fb, "Disable FBdev");
20309+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
20310+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
20311+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
20312+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
20313+MODULE_PARM_DESC(mode, "initial mode name");
20314+MODULE_PARM_DESC(xres, "initial mode width");
20315+MODULE_PARM_DESC(yres, "initial mode height");
20316+
20317+module_param_named(debug, drm_psb_debug, int, 0600);
20318+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
20319+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
20320+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
20321+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
20322+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
20323+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
20324+module_param_named(mode, psb_init_mode, charp, 0600);
20325+module_param_named(xres, psb_init_xres, int, 0600);
20326+module_param_named(yres, psb_init_yres, int, 0600);
20327+
20328+static struct pci_device_id pciidlist[] = {
20329+ psb_PCI_IDS
20330+};
20331+
20332+#define DRM_PSB_CMDBUF_IOCTL DRM_IOW(DRM_PSB_CMDBUF, \
20333+ struct drm_psb_cmdbuf_arg)
20334+#define DRM_PSB_XHW_INIT_IOCTL DRM_IOR(DRM_PSB_XHW_INIT, \
20335+ struct drm_psb_xhw_init_arg)
20336+#define DRM_PSB_XHW_IOCTL DRM_IO(DRM_PSB_XHW)
20337+
20338+#define DRM_PSB_SCENE_UNREF_IOCTL DRM_IOWR(DRM_PSB_SCENE_UNREF, \
20339+ struct drm_psb_scene)
20340+#define DRM_PSB_HW_INFO_IOCTL DRM_IOR(DRM_PSB_HW_INFO, \
20341+ struct drm_psb_hw_info)
20342+
20343+#define DRM_PSB_KMS_OFF_IOCTL DRM_IO(DRM_PSB_KMS_OFF)
20344+#define DRM_PSB_KMS_ON_IOCTL DRM_IO(DRM_PSB_KMS_ON)
20345+
20346+static struct drm_ioctl_desc psb_ioctls[] = {
20347+ DRM_IOCTL_DEF(DRM_PSB_CMDBUF_IOCTL, psb_cmdbuf_ioctl, DRM_AUTH),
20348+ DRM_IOCTL_DEF(DRM_PSB_XHW_INIT_IOCTL, psb_xhw_init_ioctl,
20349+ DRM_ROOT_ONLY),
20350+ DRM_IOCTL_DEF(DRM_PSB_XHW_IOCTL, psb_xhw_ioctl, DRM_ROOT_ONLY),
20351+ DRM_IOCTL_DEF(DRM_PSB_SCENE_UNREF_IOCTL, drm_psb_scene_unref_ioctl,
20352+ DRM_AUTH),
20353+ DRM_IOCTL_DEF(DRM_PSB_KMS_OFF_IOCTL, psbfb_kms_off_ioctl,
20354+ DRM_ROOT_ONLY),
20355+ DRM_IOCTL_DEF(DRM_PSB_KMS_ON_IOCTL, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
20356+ DRM_IOCTL_DEF(DRM_PSB_HW_INFO_IOCTL, psb_hw_info_ioctl, DRM_AUTH),
20357+};
20358+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
20359+
20360+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
20361+
20362+#ifdef USE_PAT_WC
20363+#warning Init pat
20364+static int __cpuinit psb_cpu_callback(struct notifier_block *nfb,
20365+ unsigned long action,
20366+ void *hcpu)
20367+{
20368+ if (action == CPU_ONLINE)
20369+ drm_init_pat();
20370+
20371+ return 0;
20372+}
20373+
20374+static struct notifier_block __cpuinitdata psb_nb = {
20375+ .notifier_call = psb_cpu_callback,
20376+ .priority = 1
20377+};
20378+#endif
20379+
20380+static int dri_library_name(struct drm_device *dev, char *buf)
20381+{
20382+ return snprintf(buf, PAGE_SIZE, "psb\n");
20383+}
20384+
20385+static void psb_set_uopt(struct drm_psb_uopt *uopt)
20386+{
20387+ uopt->clock_gating = drm_psb_clock_gating;
20388+}
20389+
20390+static void psb_lastclose(struct drm_device *dev)
20391+{
20392+ struct drm_psb_private *dev_priv =
20393+ (struct drm_psb_private *)dev->dev_private;
20394+
20395+ if (!dev->dev_private)
20396+ return;
20397+
20398+ mutex_lock(&dev->struct_mutex);
20399+ if (dev_priv->ta_mem)
20400+ psb_ta_mem_unref_devlocked(&dev_priv->ta_mem);
20401+ mutex_unlock(&dev->struct_mutex);
20402+ mutex_lock(&dev_priv->cmdbuf_mutex);
20403+ if (dev_priv->buffers) {
20404+ vfree(dev_priv->buffers);
20405+ dev_priv->buffers = NULL;
20406+ }
20407+ mutex_unlock(&dev_priv->cmdbuf_mutex);
20408+}
20409+
20410+static void psb_do_takedown(struct drm_device *dev)
20411+{
20412+ struct drm_psb_private *dev_priv =
20413+ (struct drm_psb_private *)dev->dev_private;
20414+
20415+ mutex_lock(&dev->struct_mutex);
20416+ if (dev->bm.initialized) {
20417+ if (dev_priv->have_mem_rastgeom) {
20418+ drm_bo_clean_mm(dev, DRM_PSB_MEM_RASTGEOM);
20419+ dev_priv->have_mem_rastgeom = 0;
20420+ }
20421+ if (dev_priv->have_mem_mmu) {
20422+ drm_bo_clean_mm(dev, DRM_PSB_MEM_MMU);
20423+ dev_priv->have_mem_mmu = 0;
20424+ }
20425+ if (dev_priv->have_mem_aper) {
20426+ drm_bo_clean_mm(dev, DRM_PSB_MEM_APER);
20427+ dev_priv->have_mem_aper = 0;
20428+ }
20429+ if (dev_priv->have_tt) {
20430+ drm_bo_clean_mm(dev, DRM_BO_MEM_TT);
20431+ dev_priv->have_tt = 0;
20432+ }
20433+ if (dev_priv->have_vram) {
20434+ drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM);
20435+ dev_priv->have_vram = 0;
20436+ }
20437+ }
20438+ mutex_unlock(&dev->struct_mutex);
20439+
20440+ if (dev_priv->has_msvdx)
20441+ psb_msvdx_uninit(dev);
20442+
20443+ if (dev_priv->comm) {
20444+ kunmap(dev_priv->comm_page);
20445+ dev_priv->comm = NULL;
20446+ }
20447+ if (dev_priv->comm_page) {
20448+ __free_page(dev_priv->comm_page);
20449+ dev_priv->comm_page = NULL;
20450+ }
20451+}
20452+
20453+void psb_clockgating(struct drm_psb_private *dev_priv)
20454+{
20455+ uint32_t clock_gating;
20456+
20457+ if (dev_priv->uopt.clock_gating == 1) {
20458+ PSB_DEBUG_INIT("Disabling clock gating.\n");
20459+
20460+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20461+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
20462+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20463+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
20464+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20465+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
20466+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20467+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
20468+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20469+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
20470+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20471+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
20472+
20473+ } else if (dev_priv->uopt.clock_gating == 2) {
20474+ PSB_DEBUG_INIT("Enabling clock gating.\n");
20475+
20476+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20477+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
20478+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20479+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
20480+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20481+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
20482+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20483+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
20484+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20485+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
20486+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
20487+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
20488+ } else
20489+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
20490+
20491+#ifdef FIX_TG_2D_CLOCKGATE
20492+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
20493+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
20494+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
20495+#endif
20496+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
20497+ (void)PSB_RSGX32(PSB_CR_CLKGATECTL);
20498+}
20499+
20500+static int psb_do_init(struct drm_device *dev)
20501+{
20502+ struct drm_psb_private *dev_priv =
20503+ (struct drm_psb_private *)dev->dev_private;
20504+ struct psb_gtt *pg = dev_priv->pg;
20505+
20506+ uint32_t stolen_gtt;
20507+ uint32_t tt_start;
20508+ uint32_t tt_pages;
20509+
20510+ int ret = -ENOMEM;
20511+
20512+ DRM_ERROR("Debug is 0x%08x\n", drm_psb_debug);
20513+
20514+ dev_priv->ta_mem_pages =
20515+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, PAGE_SIZE) >> PAGE_SHIFT;
20516+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
20517+ if (!dev_priv->comm_page)
20518+ goto out_err;
20519+
20520+ dev_priv->comm = kmap(dev_priv->comm_page);
20521+ memset((void *)dev_priv->comm, 0, PAGE_SIZE);
20522+
20523+ dev_priv->has_msvdx = 1;
20524+ if (psb_msvdx_init(dev))
20525+ dev_priv->has_msvdx = 0;
20526+
20527+ /*
20528+ * Initialize sequence numbers for the different command
20529+ * submission mechanisms.
20530+ */
20531+
20532+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
20533+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
20534+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
20535+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
20536+
20537+ if (pg->gatt_start & 0x0FFFFFFF) {
20538+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
20539+ ret = -EINVAL;
20540+ goto out_err;
20541+ }
20542+
20543+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
20544+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
20545+ stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
20546+
20547+ dev_priv->gatt_free_offset = pg->gatt_start +
20548+ (stolen_gtt << PAGE_SHIFT) * 1024;
20549+
20550+ /*
20551+ * Insert a cache-coherent communications page in mmu space
20552+ * just after the stolen area. Will be used for fencing etc.
20553+ */
20554+
20555+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
20556+ dev_priv->gatt_free_offset += PAGE_SIZE;
20557+
20558+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
20559+ &dev_priv->comm_page,
20560+ dev_priv->comm_mmu_offset, 1, 0, 0,
20561+ PSB_MMU_CACHED_MEMORY);
20562+
20563+ if (ret)
20564+ goto out_err;
20565+
20566+ if (1 || drm_debug) {
20567+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
20568+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
20569+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
20570+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
20571+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
20572+ _PSB_CC_REVISION_MAJOR_SHIFT,
20573+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
20574+ _PSB_CC_REVISION_MINOR_SHIFT);
20575+ DRM_INFO
20576+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
20577+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
20578+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
20579+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
20580+ _PSB_CC_REVISION_DESIGNER_SHIFT);
20581+ }
20582+
20583+ dev_priv->irqmask_lock = SPIN_LOCK_UNLOCKED;
20584+ dev_priv->fence0_irq_on = 0;
20585+
20586+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
20587+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
20588+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
20589+ tt_pages -= tt_start >> PAGE_SHIFT;
20590+
20591+ mutex_lock(&dev->struct_mutex);
20592+
20593+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
20594+ pg->stolen_size >> PAGE_SHIFT)) {
20595+ dev_priv->have_vram = 1;
20596+ }
20597+
20598+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_TT, tt_start >> PAGE_SHIFT,
20599+ tt_pages)) {
20600+ dev_priv->have_tt = 1;
20601+ }
20602+
20603+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_MMU, 0x00000000,
20604+ (pg->gatt_start -
20605+ PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
20606+ dev_priv->have_mem_mmu = 1;
20607+ }
20608+
20609+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
20610+ (PSB_MEM_MMU_START -
20611+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
20612+ dev_priv->have_mem_rastgeom = 1;
20613+ }
20614+#if 0
20615+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
20616+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
20617+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT)) {
20618+ dev_priv->have_mem_aper = 1;
20619+ }
20620+ }
20621+#endif
20622+
20623+ mutex_unlock(&dev->struct_mutex);
20624+
20625+ return 0;
20626+ out_err:
20627+ psb_do_takedown(dev);
20628+ return ret;
20629+}
20630+
20631+static int psb_driver_unload(struct drm_device *dev)
20632+{
20633+ struct drm_psb_private *dev_priv =
20634+ (struct drm_psb_private *)dev->dev_private;
20635+
20636+ intel_modeset_cleanup(dev);
20637+
20638+ if (dev_priv) {
20639+ psb_watchdog_takedown(dev_priv);
20640+ psb_do_takedown(dev);
20641+ psb_xhw_takedown(dev_priv);
20642+ psb_scheduler_takedown(&dev_priv->scheduler);
20643+
20644+ mutex_lock(&dev->struct_mutex);
20645+ if (dev_priv->have_mem_pds) {
20646+ drm_bo_clean_mm(dev, DRM_PSB_MEM_PDS);
20647+ dev_priv->have_mem_pds = 0;
20648+ }
20649+ if (dev_priv->have_mem_kernel) {
20650+ drm_bo_clean_mm(dev, DRM_PSB_MEM_KERNEL);
20651+ dev_priv->have_mem_kernel = 0;
20652+ }
20653+ mutex_unlock(&dev->struct_mutex);
20654+
20655+ (void)drm_bo_driver_finish(dev);
20656+
20657+ if (dev_priv->pf_pd) {
20658+ psb_mmu_free_pagedir(dev_priv->pf_pd);
20659+ dev_priv->pf_pd = NULL;
20660+ }
20661+ if (dev_priv->mmu) {
20662+ struct psb_gtt *pg = dev_priv->pg;
20663+
20664+ down_read(&pg->sem);
20665+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
20666+ (dev_priv->mmu),
20667+ pg->gatt_start,
20668+ pg->
20669+ stolen_size >> PAGE_SHIFT);
20670+ up_read(&pg->sem);
20671+ psb_mmu_driver_takedown(dev_priv->mmu);
20672+ dev_priv->mmu = NULL;
20673+ }
20674+ psb_gtt_takedown(dev_priv->pg, 1);
20675+ if (dev_priv->scratch_page) {
20676+ __free_page(dev_priv->scratch_page);
20677+ dev_priv->scratch_page = NULL;
20678+ }
20679+ psb_takedown_use_base(dev_priv);
20680+ if (dev_priv->vdc_reg) {
20681+ iounmap(dev_priv->vdc_reg);
20682+ dev_priv->vdc_reg = NULL;
20683+ }
20684+ if (dev_priv->sgx_reg) {
20685+ iounmap(dev_priv->sgx_reg);
20686+ dev_priv->sgx_reg = NULL;
20687+ }
20688+ if (dev_priv->msvdx_reg) {
20689+ iounmap(dev_priv->msvdx_reg);
20690+ dev_priv->msvdx_reg = NULL;
20691+ }
20692+
20693+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
20694+ dev->dev_private = NULL;
20695+ }
20696+ return 0;
20697+}
20698+
20699+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
20700+extern int drm_pick_crtcs(struct drm_device *dev);
20701+extern char drm_init_mode[32];
20702+extern int drm_init_xres;
20703+extern int drm_init_yres;
20704+
20705+static int psb_initial_config(struct drm_device *dev, bool can_grow)
20706+{
20707+ struct drm_psb_private *dev_priv = dev->dev_private;
20708+ struct drm_output *output;
20709+ struct drm_crtc *crtc;
20710+ int ret = false;
20711+
20712+ mutex_lock(&dev->mode_config.mutex);
20713+
20714+ drm_crtc_probe_output_modes(dev, 2048, 2048);
20715+
20716+ /* strncpy(drm_init_mode, psb_init_mode, strlen(psb_init_mode)); */
20717+ drm_init_xres = psb_init_xres;
20718+ drm_init_yres = psb_init_yres;
20719+
20720+ drm_pick_crtcs(dev);
20721+
20722+ if ((I915_READ(PIPEACONF) & PIPEACONF_ENABLE) && !drm_psb_force_pipeb)
20723+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
20724+ if (!crtc->desired_mode)
20725+ continue;
20726+
20727+ dev->driver->fb_probe(dev, crtc);
20728+ } else
20729+ list_for_each_entry_reverse(crtc, &dev->mode_config.crtc_list,
20730+ head) {
20731+ if (!crtc->desired_mode)
20732+ continue;
20733+
20734+ dev->driver->fb_probe(dev, crtc);
20735+ }
20736+
20737+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
20738+
20739+ if (!output->crtc || !output->crtc->desired_mode)
20740+ continue;
20741+
20742+ if (output->crtc->fb)
20743+ drm_crtc_set_mode(output->crtc,
20744+ output->crtc->desired_mode, 0, 0);
20745+ }
20746+
20747+#ifdef SII_1392_WA
20748+ if((SII_1392 != 1) || (drm_psb_no_fb==0))
20749+ drm_disable_unused_functions(dev);
20750+#else
20751+ drm_disable_unused_functions(dev);
20752+#endif
20753+
20754+
20755+ mutex_unlock(&dev->mode_config.mutex);
20756+
20757+ return ret;
20758+
20759+}
20760+
20761+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
20762+{
20763+ struct drm_psb_private *dev_priv;
20764+ unsigned long resource_start;
20765+ struct psb_gtt *pg;
20766+ int ret = -ENOMEM;
20767+
20768+ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
20769+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
20770+ if (dev_priv == NULL)
20771+ return -ENOMEM;
20772+
20773+ mutex_init(&dev_priv->temp_mem);
20774+ mutex_init(&dev_priv->cmdbuf_mutex);
20775+ mutex_init(&dev_priv->reset_mutex);
20776+ psb_init_disallowed();
20777+
20778+ atomic_set(&dev_priv->msvdx_mmu_invaldc, 0);
20779+
20780+#ifdef FIX_TG_16
20781+ atomic_set(&dev_priv->lock_2d, 0);
20782+ atomic_set(&dev_priv->ta_wait_2d, 0);
20783+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
20784+ atomic_set(&dev_priv->waiters_2d, 0);;
20785+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
20786+#else
20787+ mutex_init(&dev_priv->mutex_2d);
20788+#endif
20789+
20790+ spin_lock_init(&dev_priv->reloc_lock);
20791+
20792+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
20793+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
20794+
20795+ dev->dev_private = (void *)dev_priv;
20796+ dev_priv->chipset = chipset;
20797+ psb_set_uopt(&dev_priv->uopt);
20798+
20799+ psb_watchdog_init(dev_priv);
20800+ psb_scheduler_init(dev, &dev_priv->scheduler);
20801+
20802+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
20803+
20804+ dev_priv->msvdx_reg =
20805+ ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
20806+ if (!dev_priv->msvdx_reg)
20807+ goto out_err;
20808+
20809+ dev_priv->vdc_reg =
20810+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
20811+ if (!dev_priv->vdc_reg)
20812+ goto out_err;
20813+
20814+ dev_priv->sgx_reg =
20815+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
20816+ if (!dev_priv->sgx_reg)
20817+ goto out_err;
20818+
20819+ psb_clockgating(dev_priv);
20820+ if (psb_init_use_base(dev_priv, 3, 13))
20821+ goto out_err;
20822+
20823+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
20824+ if (!dev_priv->scratch_page)
20825+ goto out_err;
20826+
20827+ dev_priv->pg = psb_gtt_alloc(dev);
20828+ if (!dev_priv->pg)
20829+ goto out_err;
20830+
20831+ ret = psb_gtt_init(dev_priv->pg, 0);
20832+ if (ret)
20833+ goto out_err;
20834+
20835+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
20836+ drm_psb_trap_pagefaults, 0,
20837+ &dev_priv->msvdx_mmu_invaldc);
20838+ if (!dev_priv->mmu)
20839+ goto out_err;
20840+
20841+ pg = dev_priv->pg;
20842+
20843+ /*
20844+ * Make sgx MMU aware of the stolen memory area we call VRAM.
20845+ */
20846+
20847+ down_read(&pg->sem);
20848+ ret =
20849+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
20850+ pg->stolen_base >> PAGE_SHIFT,
20851+ pg->gatt_start,
20852+ pg->stolen_size >> PAGE_SHIFT, 0);
20853+ up_read(&pg->sem);
20854+ if (ret)
20855+ goto out_err;
20856+
20857+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
20858+ if (!dev_priv->pf_pd)
20859+ goto out_err;
20860+
20861+ /*
20862+ * Make all presumably unused requestors page-fault by making them
20863+ * use context 1 which does not have any valid mappings.
20864+ */
20865+
20866+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
20867+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
20868+ PSB_RSGX32(PSB_CR_BIF_BANK1);
20869+
20870+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
20871+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
20872+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
20873+
20874+ psb_init_2d(dev_priv);
20875+
20876+ ret = drm_bo_driver_init(dev);
20877+ if (ret)
20878+ goto out_err;
20879+
20880+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_KERNEL, 0x00000000,
20881+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
20882+ >> PAGE_SHIFT);
20883+ if (ret)
20884+ goto out_err;
20885+ dev_priv->have_mem_kernel = 1;
20886+
20887+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_PDS, 0x00000000,
20888+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
20889+ >> PAGE_SHIFT);
20890+ if (ret)
20891+ goto out_err;
20892+ dev_priv->have_mem_pds = 1;
20893+
20894+ ret = psb_do_init(dev);
20895+ if (ret)
20896+ return ret;
20897+
20898+ ret = psb_xhw_init(dev);
20899+ if (ret)
20900+ return ret;
20901+
20902+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
20903+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
20904+
20905+ intel_modeset_init(dev);
20906+ psb_initial_config(dev, false);
20907+
20908+#ifdef USE_PAT_WC
20909+#warning Init pat
20910+ register_cpu_notifier(&psb_nb);
20911+#endif
20912+
20913+ return 0;
20914+ out_err:
20915+ psb_driver_unload(dev);
20916+ return ret;
20917+}
20918+
20919+int psb_driver_device_is_agp(struct drm_device *dev)
20920+{
20921+ return 0;
20922+}
20923+
20924+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
20925+{
20926+ struct drm_psb_private *dev_priv =
20927+ (struct drm_psb_private *)dev->dev_private;
20928+ struct drm_fence_manager *fm = &dev->fm;
20929+ struct drm_fence_class_manager *fc = &fm->fence_class[PSB_ENGINE_VIDEO];
20930+ struct drm_fence_object *fence;
20931+ int ret = 0;
20932+ int signaled = 0;
20933+ int count = 0;
20934+ unsigned long _end = jiffies + 3 * DRM_HZ;
20935+
20936+ PSB_DEBUG_GENERAL("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
20937+
20938+ /*set the msvdx-reset flag here.. */
20939+ dev_priv->msvdx_needs_reset = 1;
20940+
20941+ /*Ensure that all pending IRQs are serviced, */
20942+ list_for_each_entry(fence, &fc->ring, ring) {
20943+ count++;
20944+ do {
20945+ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
20946+ (signaled =
20947+ drm_fence_object_signaled(fence,
20948+ DRM_FENCE_TYPE_EXE)));
20949+ if (signaled)
20950+ break;
20951+ if (time_after_eq(jiffies, _end))
20952+ PSB_DEBUG_GENERAL
20953+ ("MSVDXACPI: fence 0x%x didn't get signaled for 3 secs; we will suspend anyways\n",
20954+ (unsigned int)fence);
20955+ } while (ret == -EINTR);
20956+
20957+ }
20958+
20959+ /* Issue software reset */
20960+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
20961+
20962+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0,
20963+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
20964+
20965+ PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
20966+ count);
20967+ return 0;
20968+}
20969+
20970+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
20971+{
20972+ struct drm_device *dev = pci_get_drvdata(pdev);
20973+ struct drm_psb_private *dev_priv =
20974+ (struct drm_psb_private *)dev->dev_private;
20975+ struct drm_output *output;
20976+
20977+ if (drm_psb_no_fb == 0)
20978+ psbfb_suspend(dev);
20979+#ifdef WA_NO_FB_GARBAGE_DISPLAY
20980+ else {
20981+ if(num_registered_fb)
20982+ {
20983+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
20984+ if(output->crtc != NULL)
20985+ intel_crtc_mode_save(output->crtc);
20986+ //if(output->funcs->save)
20987+ // output->funcs->save(output);
20988+ }
20989+ }
20990+ }
20991+#endif
20992+
20993+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
20994+ (void)psb_idle_3d(dev);
20995+ (void)psb_idle_2d(dev);
20996+ flush_scheduled_work();
20997+
20998+ psb_takedown_use_base(dev_priv);
20999+
21000+ if (dev_priv->has_msvdx)
21001+ psb_prepare_msvdx_suspend(dev);
21002+
21003+ pci_save_state(pdev);
21004+ pci_disable_device(pdev);
21005+ pci_set_power_state(pdev, PCI_D3hot);
21006+
21007+ return 0;
21008+}
21009+
21010+static int psb_resume(struct pci_dev *pdev)
21011+{
21012+ struct drm_device *dev = pci_get_drvdata(pdev);
21013+ struct drm_psb_private *dev_priv =
21014+ (struct drm_psb_private *)dev->dev_private;
21015+ struct psb_gtt *pg = dev_priv->pg;
21016+ struct drm_output *output;
21017+ int ret;
21018+
21019+ pci_set_power_state(pdev, PCI_D0);
21020+ pci_restore_state(pdev);
21021+ ret = pci_enable_device(pdev);
21022+ if (ret)
21023+ return ret;
21024+
21025+#ifdef USE_PAT_WC
21026+#warning Init pat
21027+ /* for single CPU's we do it here, then for more than one CPU we
21028+ * use the CPU notifier to reinit PAT on those CPU's.
21029+ */
21030+ drm_init_pat();
21031+#endif
21032+
21033+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
21034+ dev_priv->msvdx_needs_reset = 1;
21035+
21036+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
21037+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
21038+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
21039+
21040+ /*
21041+ * The GTT page tables are probably not saved.
21042+ * However, TT and VRAM is empty at this point.
21043+ */
21044+
21045+ psb_gtt_init(dev_priv->pg, 1);
21046+
21047+ /*
21048+ * The SGX loses it's register contents.
21049+ * Restore BIF registers. The MMU page tables are
21050+ * "normal" pages, so their contents should be kept.
21051+ */
21052+
21053+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
21054+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
21055+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
21056+ PSB_RSGX32(PSB_CR_BIF_BANK1);
21057+
21058+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
21059+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
21060+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
21061+
21062+ /*
21063+ * 2D Base registers..
21064+ */
21065+ psb_init_2d(dev_priv);
21066+
21067+ if (drm_psb_no_fb == 0) {
21068+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
21069+ if(output->crtc != NULL)
21070+ drm_crtc_set_mode(output->crtc, &output->crtc->mode,
21071+ output->crtc->x, output->crtc->y);
21072+ }
21073+ }
21074+
21075+ /*
21076+ * Persistant 3D base registers and USSE base registers..
21077+ */
21078+
21079+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
21080+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
21081+ psb_init_use_base(dev_priv, 3, 13);
21082+
21083+ /*
21084+ * Now, re-initialize the 3D engine.
21085+ */
21086+
21087+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
21088+
21089+ psb_scheduler_ta_mem_check(dev_priv);
21090+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
21091+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
21092+ PSB_TA_MEM_FLAG_TA |
21093+ PSB_TA_MEM_FLAG_RASTER |
21094+ PSB_TA_MEM_FLAG_HOSTA |
21095+ PSB_TA_MEM_FLAG_HOSTD |
21096+ PSB_TA_MEM_FLAG_INIT,
21097+ dev_priv->ta_mem->ta_memory->offset,
21098+ dev_priv->ta_mem->hw_data->offset,
21099+ dev_priv->ta_mem->hw_cookie);
21100+ }
21101+
21102+ if (drm_psb_no_fb == 0)
21103+ psbfb_resume(dev);
21104+#ifdef WA_NO_FB_GARBAGE_DISPLAY
21105+ else {
21106+ if(num_registered_fb)
21107+ {
21108+ struct fb_info *fb_info=registered_fb[0];
21109+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
21110+ if(output->crtc != NULL)
21111+ intel_crtc_mode_restore(output->crtc);
21112+ }
21113+ if(fb_info)
21114+ {
21115+ fb_set_suspend(fb_info, 0);
21116+ printk("set the fb_set_suspend resume end\n");
21117+ }
21118+ }
21119+ }
21120+#endif
21121+
21122+ return 0;
21123+}
21124+
21125+/* always available as we are SIGIO'd */
21126+static unsigned int psb_poll(struct file *filp, struct poll_table_struct *wait)
21127+{
21128+ return (POLLIN | POLLRDNORM);
21129+}
21130+
21131+static int psb_release(struct inode *inode, struct file *filp)
21132+{
21133+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
21134+ struct drm_device *dev = file_priv->minor->dev;
21135+ struct drm_psb_private *dev_priv =
21136+ (struct drm_psb_private *)dev->dev_private;
21137+
21138+ if (dev_priv && dev_priv->xhw_file) {
21139+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
21140+ }
21141+ return drm_release(inode, filp);
21142+}
21143+
21144+extern struct drm_fence_driver psb_fence_driver;
21145+
21146+/*
21147+ * Use this memory type priority if no eviction is needed.
21148+ */
21149+static uint32_t psb_mem_prios[] = { DRM_BO_MEM_VRAM,
21150+ DRM_BO_MEM_TT,
21151+ DRM_PSB_MEM_KERNEL,
21152+ DRM_PSB_MEM_MMU,
21153+ DRM_PSB_MEM_RASTGEOM,
21154+ DRM_PSB_MEM_PDS,
21155+ DRM_PSB_MEM_APER,
21156+ DRM_BO_MEM_LOCAL
21157+};
21158+
21159+/*
21160+ * Use this memory type priority if need to evict.
21161+ */
21162+static uint32_t psb_busy_prios[] = { DRM_BO_MEM_TT,
21163+ DRM_BO_MEM_VRAM,
21164+ DRM_PSB_MEM_KERNEL,
21165+ DRM_PSB_MEM_MMU,
21166+ DRM_PSB_MEM_RASTGEOM,
21167+ DRM_PSB_MEM_PDS,
21168+ DRM_PSB_MEM_APER,
21169+ DRM_BO_MEM_LOCAL
21170+};
21171+
21172+static struct drm_bo_driver psb_bo_driver = {
21173+ .mem_type_prio = psb_mem_prios,
21174+ .mem_busy_prio = psb_busy_prios,
21175+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
21176+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
21177+ .create_ttm_backend_entry = drm_psb_tbe_init,
21178+ .fence_type = psb_fence_types,
21179+ .invalidate_caches = psb_invalidate_caches,
21180+ .init_mem_type = psb_init_mem_type,
21181+ .evict_mask = psb_evict_mask,
21182+ .move = psb_move,
21183+ .backend_size = psb_tbe_size,
21184+ .command_stream_barrier = NULL,
21185+};
21186+
21187+static struct drm_driver driver = {
21188+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
21189+ DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
21190+ .load = psb_driver_load,
21191+ .unload = psb_driver_unload,
21192+ .dri_library_name = dri_library_name,
21193+ .get_reg_ofs = drm_core_get_reg_ofs,
21194+ .ioctls = psb_ioctls,
21195+ .device_is_agp = psb_driver_device_is_agp,
21196+ .vblank_wait = psb_vblank_wait,
21197+ .vblank_wait2 = psb_vblank_wait2,
21198+ .irq_preinstall = psb_irq_preinstall,
21199+ .irq_postinstall = psb_irq_postinstall,
21200+ .irq_uninstall = psb_irq_uninstall,
21201+ .irq_handler = psb_irq_handler,
21202+ .fb_probe = psbfb_probe,
21203+ .fb_remove = psbfb_remove,
21204+ .firstopen = NULL,
21205+ .lastclose = psb_lastclose,
21206+ .fops = {
21207+ .owner = THIS_MODULE,
21208+ .open = drm_open,
21209+ .release = psb_release,
21210+ .ioctl = drm_ioctl,
21211+ .mmap = drm_mmap,
21212+ .poll = psb_poll,
21213+ .fasync = drm_fasync,
21214+ },
21215+ .pci_driver = {
21216+ .name = DRIVER_NAME,
21217+ .id_table = pciidlist,
21218+ .probe = probe,
21219+ .remove = __devexit_p(drm_cleanup_pci),
21220+ .resume = psb_resume,
21221+ .suspend = psb_suspend,
21222+ },
21223+ .fence_driver = &psb_fence_driver,
21224+ .bo_driver = &psb_bo_driver,
21225+ .name = DRIVER_NAME,
21226+ .desc = DRIVER_DESC,
21227+ .date = PSB_DRM_DRIVER_DATE,
21228+ .major = PSB_DRM_DRIVER_MAJOR,
21229+ .minor = PSB_DRM_DRIVER_MINOR,
21230+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
21231+};
21232+
21233+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
21234+{
21235+ return drm_get_dev(pdev, ent, &driver);
21236+}
21237+
21238+static int __init psb_init(void)
21239+{
21240+ driver.num_ioctls = psb_max_ioctl;
21241+
21242+ return drm_init(&driver, pciidlist);
21243+}
21244+
21245+static void __exit psb_exit(void)
21246+{
21247+ drm_exit(&driver);
21248+}
21249+
21250+module_init(psb_init);
21251+module_exit(psb_exit);
21252+
21253+MODULE_AUTHOR(DRIVER_AUTHOR);
21254+MODULE_DESCRIPTION(DRIVER_DESC);
21255+MODULE_LICENSE("GPL");
21256Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drv.h
21257===================================================================
21258--- /dev/null 1970-01-01 00:00:00.000000000 +0000
21259+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drv.h 2009-01-14 11:58:01.000000000 +0000
21260@@ -0,0 +1,775 @@
21261+/**************************************************************************
21262+ * Copyright (c) 2007, Intel Corporation.
21263+ * All Rights Reserved.
21264+ *
21265+ * This program is free software; you can redistribute it and/or modify it
21266+ * under the terms and conditions of the GNU General Public License,
21267+ * version 2, as published by the Free Software Foundation.
21268+ *
21269+ * This program is distributed in the hope it will be useful, but WITHOUT
21270+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21271+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21272+ * more details.
21273+ *
21274+ * You should have received a copy of the GNU General Public License along with
21275+ * this program; if not, write to the Free Software Foundation, Inc.,
21276+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21277+ *
21278+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
21279+ * develop this driver.
21280+ *
21281+ **************************************************************************/
21282+/*
21283+ */
21284+#ifndef _PSB_DRV_H_
21285+#define _PSB_DRV_H_
21286+
21287+#include "drmP.h"
21288+#include "psb_drm.h"
21289+#include "psb_reg.h"
21290+#include "psb_schedule.h"
21291+#include "intel_drv.h"
21292+
21293+enum {
21294+ CHIP_PSB_8108 = 0,
21295+ CHIP_PSB_8109 = 1
21296+};
21297+
21298+/*
21299+ * Hardware bugfixes
21300+ */
21301+
21302+#define FIX_TG_16
21303+#define FIX_TG_2D_CLOCKGATE
21304+
21305+#define DRIVER_NAME "psb"
21306+#define DRIVER_DESC "drm driver for the Intel GMA500"
21307+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
21308+
21309+#define PSB_DRM_DRIVER_DATE "20080613"
21310+#define PSB_DRM_DRIVER_MAJOR 4
21311+#define PSB_DRM_DRIVER_MINOR 12
21312+#define PSB_DRM_DRIVER_PATCHLEVEL 0
21313+
21314+#define PSB_VDC_OFFSET 0x00000000
21315+#define PSB_VDC_SIZE 0x000080000
21316+#define PSB_SGX_SIZE 0x8000
21317+#define PSB_SGX_OFFSET 0x00040000
21318+#define PSB_MMIO_RESOURCE 0
21319+#define PSB_GATT_RESOURCE 2
21320+#define PSB_GTT_RESOURCE 3
21321+#define PSB_GMCH_CTRL 0x52
21322+#define PSB_BSM 0x5C
21323+#define _PSB_GMCH_ENABLED 0x4
21324+#define PSB_PGETBL_CTL 0x2020
21325+#define _PSB_PGETBL_ENABLED 0x00000001
21326+#define PSB_SGX_2D_SLAVE_PORT 0x4000
21327+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
21328+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
21329+#define PSB_NUM_VALIDATE_BUFFERS 1024
21330+#define PSB_MEM_KERNEL_START 0x10000000
21331+#define PSB_MEM_PDS_START 0x20000000
21332+#define PSB_MEM_MMU_START 0x40000000
21333+
21334+#define DRM_PSB_MEM_KERNEL DRM_BO_MEM_PRIV0
21335+#define DRM_PSB_FLAG_MEM_KERNEL DRM_BO_FLAG_MEM_PRIV0
21336+
21337+/*
21338+ * Flags for external memory type field.
21339+ */
21340+
21341+#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
21342+#define PSB_MSVDX_SIZE 0x8000 /*MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
21343+
21344+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
21345+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
21346+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
21347+
21348+/*
21349+ * PTE's and PDE's
21350+ */
21351+
21352+#define PSB_PDE_MASK 0x003FFFFF
21353+#define PSB_PDE_SHIFT 22
21354+#define PSB_PTE_SHIFT 12
21355+
21356+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
21357+#define PSB_PTE_WO 0x0002 /* Write only */
21358+#define PSB_PTE_RO 0x0004 /* Read only */
21359+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
21360+
21361+/*
21362+ * VDC registers and bits
21363+ */
21364+#define PSB_HWSTAM 0x2098
21365+#define PSB_INSTPM 0x20C0
21366+#define PSB_INT_IDENTITY_R 0x20A4
21367+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
21368+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
21369+#define _PSB_IRQ_SGX_FLAG (1<<18)
21370+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
21371+#define PSB_INT_MASK_R 0x20A8
21372+#define PSB_INT_ENABLE_R 0x20A0
21373+#define PSB_PIPEASTAT 0x70024
21374+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
21375+#define _PSB_VBLANK_CLEAR (1 << 1)
21376+#define PSB_PIPEBSTAT 0x71024
21377+
21378+#define _PSB_MMU_ER_MASK 0x0001FF00
21379+#define _PSB_MMU_ER_HOST (1 << 16)
21380+#define GPIOA 0x5010
21381+#define GPIOB 0x5014
21382+#define GPIOC 0x5018
21383+#define GPIOD 0x501c
21384+#define GPIOE 0x5020
21385+#define GPIOF 0x5024
21386+#define GPIOG 0x5028
21387+#define GPIOH 0x502c
21388+#define GPIO_CLOCK_DIR_MASK (1 << 0)
21389+#define GPIO_CLOCK_DIR_IN (0 << 1)
21390+#define GPIO_CLOCK_DIR_OUT (1 << 1)
21391+#define GPIO_CLOCK_VAL_MASK (1 << 2)
21392+#define GPIO_CLOCK_VAL_OUT (1 << 3)
21393+#define GPIO_CLOCK_VAL_IN (1 << 4)
21394+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
21395+#define GPIO_DATA_DIR_MASK (1 << 8)
21396+#define GPIO_DATA_DIR_IN (0 << 9)
21397+#define GPIO_DATA_DIR_OUT (1 << 9)
21398+#define GPIO_DATA_VAL_MASK (1 << 10)
21399+#define GPIO_DATA_VAL_OUT (1 << 11)
21400+#define GPIO_DATA_VAL_IN (1 << 12)
21401+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
21402+
21403+#define VCLK_DIVISOR_VGA0 0x6000
21404+#define VCLK_DIVISOR_VGA1 0x6004
21405+#define VCLK_POST_DIV 0x6010
21406+
21407+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
21408+#define I915_WRITE(_offs, _val) \
21409+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
21410+#define I915_READ(_offs) \
21411+ ioread32(dev_priv->vdc_reg + (_offs))
21412+
21413+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
21414+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
21415+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
21416+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
21417+#define PSB_COMM_USER_IRQ (1024 >> 2)
21418+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
21419+#define PSB_COMM_FW (2048 >> 2)
21420+
21421+#define PSB_UIRQ_VISTEST 1
21422+#define PSB_UIRQ_OOM_REPLY 2
21423+#define PSB_UIRQ_FIRE_TA_REPLY 3
21424+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
21425+
21426+#define PSB_2D_SIZE (256*1024*1024)
21427+#define PSB_MAX_RELOC_PAGES 1024
21428+
21429+#define PSB_LOW_REG_OFFS 0x0204
21430+#define PSB_HIGH_REG_OFFS 0x0600
21431+
21432+#define PSB_NUM_VBLANKS 2
21433+
21434+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
21435+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
21436+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
21437+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
21438+#define PSB_COMM_FW (2048 >> 2)
21439+
21440+#define PSB_2D_SIZE (256*1024*1024)
21441+#define PSB_MAX_RELOC_PAGES 1024
21442+
21443+#define PSB_LOW_REG_OFFS 0x0204
21444+#define PSB_HIGH_REG_OFFS 0x0600
21445+
21446+#define PSB_NUM_VBLANKS 2
21447+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
21448+
21449+/*
21450+ * User options.
21451+ */
21452+
21453+struct drm_psb_uopt {
21454+ int clock_gating;
21455+};
21456+
21457+struct psb_gtt {
21458+ struct drm_device *dev;
21459+ int initialized;
21460+ uint32_t gatt_start;
21461+ uint32_t gtt_start;
21462+ uint32_t gtt_phys_start;
21463+ unsigned gtt_pages;
21464+ unsigned gatt_pages;
21465+ uint32_t stolen_base;
21466+ uint32_t pge_ctl;
21467+ u16 gmch_ctrl;
21468+ unsigned long stolen_size;
21469+ uint32_t *gtt_map;
21470+ struct rw_semaphore sem;
21471+};
21472+
21473+struct psb_use_base {
21474+ struct list_head head;
21475+ struct drm_fence_object *fence;
21476+ unsigned int reg;
21477+ unsigned long offset;
21478+ unsigned int dm;
21479+};
21480+
21481+struct psb_buflist_item;
21482+
21483+struct psb_msvdx_cmd_queue {
21484+ struct list_head head;
21485+ void *cmd;
21486+ unsigned long cmd_size;
21487+ uint32_t sequence;
21488+};
21489+
21490+struct drm_psb_private {
21491+ unsigned long chipset;
21492+ uint8_t psb_rev_id;
21493+
21494+ struct psb_xhw_buf resume_buf;
21495+ struct drm_psb_dev_info_arg dev_info;
21496+ struct drm_psb_uopt uopt;
21497+
21498+ struct psb_gtt *pg;
21499+
21500+ struct page *scratch_page;
21501+ struct page *comm_page;
21502+
21503+ volatile uint32_t *comm;
21504+ uint32_t comm_mmu_offset;
21505+ uint32_t mmu_2d_offset;
21506+ uint32_t sequence[PSB_NUM_ENGINES];
21507+ uint32_t last_sequence[PSB_NUM_ENGINES];
21508+ int idle[PSB_NUM_ENGINES];
21509+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
21510+ int engine_lockup_2d;
21511+
21512+ struct psb_mmu_driver *mmu;
21513+ struct psb_mmu_pd *pf_pd;
21514+
21515+ uint8_t *sgx_reg;
21516+ uint8_t *vdc_reg;
21517+ uint8_t *msvdx_reg;
21518+
21519+ /*
21520+ * MSVDX
21521+ */
21522+ int msvdx_needs_reset;
21523+ int has_msvdx;
21524+ uint32_t gatt_free_offset;
21525+ atomic_t msvdx_mmu_invaldc;
21526+
21527+ /*
21528+ * Fencing / irq.
21529+ */
21530+
21531+ uint32_t sgx_irq_mask;
21532+ uint32_t sgx2_irq_mask;
21533+ uint32_t vdc_irq_mask;
21534+
21535+ spinlock_t irqmask_lock;
21536+ spinlock_t sequence_lock;
21537+ int fence0_irq_on;
21538+ int irq_enabled;
21539+ unsigned int irqen_count_2d;
21540+ wait_queue_head_t event_2d_queue;
21541+
21542+#ifdef FIX_TG_16
21543+ wait_queue_head_t queue_2d;
21544+ atomic_t lock_2d;
21545+ atomic_t ta_wait_2d;
21546+ atomic_t ta_wait_2d_irq;
21547+ atomic_t waiters_2d;
21548+#else
21549+ struct mutex mutex_2d;
21550+#endif
21551+ uint32_t msvdx_current_sequence;
21552+ uint32_t msvdx_last_sequence;
21553+#define MSVDX_MAX_IDELTIME HZ*30
21554+ uint32_t msvdx_finished_sequence;
21555+ uint32_t msvdx_start_idle;
21556+ unsigned long msvdx_idle_start_jiffies;
21557+
21558+ int fence2_irq_on;
21559+
21560+ /*
21561+ * MSVDX Rendec Memory
21562+ */
21563+ struct drm_buffer_object *ccb0;
21564+ uint32_t base_addr0;
21565+ struct drm_buffer_object *ccb1;
21566+ uint32_t base_addr1;
21567+
21568+ /*
21569+ * Memory managers
21570+ */
21571+
21572+ int have_vram;
21573+ int have_tt;
21574+ int have_mem_mmu;
21575+ int have_mem_aper;
21576+ int have_mem_kernel;
21577+ int have_mem_pds;
21578+ int have_mem_rastgeom;
21579+ struct mutex temp_mem;
21580+
21581+ /*
21582+ * Relocation buffer mapping.
21583+ */
21584+
21585+ spinlock_t reloc_lock;
21586+ unsigned int rel_mapped_pages;
21587+ wait_queue_head_t rel_mapped_queue;
21588+
21589+ /*
21590+ * SAREA
21591+ */
21592+ struct drm_psb_sarea *sarea_priv;
21593+
21594+ /*
21595+ * LVDS info
21596+ */
21597+ int backlight_duty_cycle; /* restore backlight to this value */
21598+ bool panel_wants_dither;
21599+ struct drm_display_mode *panel_fixed_mode;
21600+
21601+ /*
21602+ * Register state
21603+ */
21604+ uint32_t saveDSPACNTR;
21605+ uint32_t saveDSPBCNTR;
21606+ uint32_t savePIPEACONF;
21607+ uint32_t savePIPEBCONF;
21608+ uint32_t savePIPEASRC;
21609+ uint32_t savePIPEBSRC;
21610+ uint32_t saveFPA0;
21611+ uint32_t saveFPA1;
21612+ uint32_t saveDPLL_A;
21613+ uint32_t saveDPLL_A_MD;
21614+ uint32_t saveHTOTAL_A;
21615+ uint32_t saveHBLANK_A;
21616+ uint32_t saveHSYNC_A;
21617+ uint32_t saveVTOTAL_A;
21618+ uint32_t saveVBLANK_A;
21619+ uint32_t saveVSYNC_A;
21620+ uint32_t saveDSPASTRIDE;
21621+ uint32_t saveDSPASIZE;
21622+ uint32_t saveDSPAPOS;
21623+ uint32_t saveDSPABASE;
21624+ uint32_t saveDSPASURF;
21625+ uint32_t saveFPB0;
21626+ uint32_t saveFPB1;
21627+ uint32_t saveDPLL_B;
21628+ uint32_t saveDPLL_B_MD;
21629+ uint32_t saveHTOTAL_B;
21630+ uint32_t saveHBLANK_B;
21631+ uint32_t saveHSYNC_B;
21632+ uint32_t saveVTOTAL_B;
21633+ uint32_t saveVBLANK_B;
21634+ uint32_t saveVSYNC_B;
21635+ uint32_t saveDSPBSTRIDE;
21636+ uint32_t saveDSPBSIZE;
21637+ uint32_t saveDSPBPOS;
21638+ uint32_t saveDSPBBASE;
21639+ uint32_t saveDSPBSURF;
21640+ uint32_t saveVCLK_DIVISOR_VGA0;
21641+ uint32_t saveVCLK_DIVISOR_VGA1;
21642+ uint32_t saveVCLK_POST_DIV;
21643+ uint32_t saveVGACNTRL;
21644+ uint32_t saveADPA;
21645+ uint32_t saveLVDS;
21646+ uint32_t saveDVOA;
21647+ uint32_t saveDVOB;
21648+ uint32_t saveDVOC;
21649+ uint32_t savePP_ON;
21650+ uint32_t savePP_OFF;
21651+ uint32_t savePP_CONTROL;
21652+ uint32_t savePP_CYCLE;
21653+ uint32_t savePFIT_CONTROL;
21654+ uint32_t savePaletteA[256];
21655+ uint32_t savePaletteB[256];
21656+ uint32_t saveBLC_PWM_CTL;
21657+ uint32_t saveCLOCKGATING;
21658+
21659+ /*
21660+ * USE code base register management.
21661+ */
21662+
21663+ struct drm_reg_manager use_manager;
21664+
21665+ /*
21666+ * Xhw
21667+ */
21668+
21669+ uint32_t *xhw;
21670+ struct drm_buffer_object *xhw_bo;
21671+ struct drm_bo_kmap_obj xhw_kmap;
21672+ struct list_head xhw_in;
21673+ spinlock_t xhw_lock;
21674+ atomic_t xhw_client;
21675+ struct drm_file *xhw_file;
21676+ wait_queue_head_t xhw_queue;
21677+ wait_queue_head_t xhw_caller_queue;
21678+ struct mutex xhw_mutex;
21679+ struct psb_xhw_buf *xhw_cur_buf;
21680+ int xhw_submit_ok;
21681+ int xhw_on;
21682+
21683+ /*
21684+ * Scheduling.
21685+ */
21686+
21687+ struct mutex reset_mutex;
21688+ struct mutex cmdbuf_mutex;
21689+ struct psb_scheduler scheduler;
21690+ struct psb_buflist_item *buffers;
21691+ uint32_t ta_mem_pages;
21692+ struct psb_ta_mem *ta_mem;
21693+ int force_ta_mem_load;
21694+
21695+ /*
21696+ * Watchdog
21697+ */
21698+
21699+ spinlock_t watchdog_lock;
21700+ struct timer_list watchdog_timer;
21701+ struct work_struct watchdog_wq;
21702+ struct work_struct msvdx_watchdog_wq;
21703+ int timer_available;
21704+
21705+ /*
21706+ * msvdx command queue
21707+ */
21708+ spinlock_t msvdx_lock;
21709+ struct mutex msvdx_mutex;
21710+ struct list_head msvdx_queue;
21711+ int msvdx_busy;
21712+
21713+};
21714+
21715+struct psb_mmu_driver;
21716+
21717+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
21718+ int trap_pagefaults,
21719+ int invalid_type,
21720+ atomic_t *msvdx_mmu_invaldc);
21721+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
21722+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver);
21723+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
21724+ uint32_t gtt_start, uint32_t gtt_pages);
21725+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
21726+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
21727+ int trap_pagefaults,
21728+ int invalid_type);
21729+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
21730+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
21731+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
21732+ unsigned long address,
21733+ uint32_t num_pages);
21734+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
21735+ uint32_t start_pfn,
21736+ unsigned long address,
21737+ uint32_t num_pages, int type);
21738+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
21739+ unsigned long *pfn);
21740+
21741+/*
21742+ * Enable / disable MMU for different requestors.
21743+ */
21744+
21745+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
21746+ uint32_t mask);
21747+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
21748+ uint32_t mask);
21749+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
21750+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
21751+ unsigned long address, uint32_t num_pages,
21752+ uint32_t desired_tile_stride,
21753+ uint32_t hw_tile_stride, int type);
21754+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
21755+ uint32_t num_pages,
21756+ uint32_t desired_tile_stride,
21757+ uint32_t hw_tile_stride);
21758+/*
21759+ * psb_sgx.c
21760+ */
21761+
21762+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
21763+ uint32_t sequence);
21764+extern void psb_init_2d(struct drm_psb_private *dev_priv);
21765+extern int psb_idle_2d(struct drm_device *dev);
21766+extern int psb_idle_3d(struct drm_device *dev);
21767+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
21768+ uint32_t src_offset,
21769+ uint32_t dst_offset, uint32_t pages,
21770+ int direction);
21771+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
21772+ struct drm_file *file_priv);
21773+extern int psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
21774+ unsigned int cmds);
21775+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
21776+ struct drm_buffer_object *cmd_buffer,
21777+ unsigned long cmd_offset,
21778+ unsigned long cmd_size, int engine,
21779+ uint32_t * copy_buffer);
21780+extern void psb_fence_or_sync(struct drm_file *priv,
21781+ int engine,
21782+ struct drm_psb_cmdbuf_arg *arg,
21783+ struct drm_fence_arg *fence_arg,
21784+ struct drm_fence_object **fence_p);
21785+extern void psb_init_disallowed(void);
21786+
21787+/*
21788+ * psb_irq.c
21789+ */
21790+
21791+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
21792+extern void psb_irq_preinstall(struct drm_device *dev);
21793+extern int psb_irq_postinstall(struct drm_device *dev);
21794+extern void psb_irq_uninstall(struct drm_device *dev);
21795+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
21796+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
21797+
21798+/*
21799+ * psb_fence.c
21800+ */
21801+
21802+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
21803+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
21804+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
21805+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
21806+ uint32_t class);
21807+extern int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
21808+ uint32_t flags, uint32_t * sequence,
21809+ uint32_t * native_type);
21810+extern void psb_fence_error(struct drm_device *dev,
21811+ uint32_t class,
21812+ uint32_t sequence, uint32_t type, int error);
21813+
21814+/*MSVDX stuff*/
21815+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
21816+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
21817+extern int psb_hw_info_ioctl(struct drm_device *dev, void *data,
21818+ struct drm_file *file_priv);
21819+
21820+/*
21821+ * psb_buffer.c
21822+ */
21823+extern struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev);
21824+extern int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
21825+ uint32_t * type);
21826+extern uint32_t psb_evict_mask(struct drm_buffer_object *bo);
21827+extern int psb_invalidate_caches(struct drm_device *dev, uint64_t flags);
21828+extern int psb_init_mem_type(struct drm_device *dev, uint32_t type,
21829+ struct drm_mem_type_manager *man);
21830+extern int psb_move(struct drm_buffer_object *bo,
21831+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
21832+extern int psb_tbe_size(struct drm_device *dev, unsigned long num_pages);
21833+
21834+/*
21835+ * psb_gtt.c
21836+ */
21837+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
21838+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
21839+ unsigned offset_pages, unsigned num_pages,
21840+ unsigned desired_tile_stride,
21841+ unsigned hw_tile_stride, int type);
21842+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
21843+ unsigned num_pages,
21844+ unsigned desired_tile_stride,
21845+ unsigned hw_tile_stride);
21846+
21847+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
21848+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
21849+
21850+/*
21851+ * psb_fb.c
21852+ */
21853+extern int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
21854+extern int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
21855+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
21856+ struct drm_file *file_priv);
21857+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
21858+ struct drm_file *file_priv);
21859+extern void psbfb_suspend(struct drm_device *dev);
21860+extern void psbfb_resume(struct drm_device *dev);
21861+
21862+/*
21863+ * psb_reset.c
21864+ */
21865+
21866+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
21867+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
21868+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
21869+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
21870+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
21871+
21872+/*
21873+ * psb_regman.c
21874+ */
21875+
21876+extern void psb_takedown_use_base(struct drm_psb_private *dev_priv);
21877+extern int psb_grab_use_base(struct drm_psb_private *dev_priv,
21878+ unsigned long dev_virtual,
21879+ unsigned long size,
21880+ unsigned int data_master,
21881+ uint32_t fence_class,
21882+ uint32_t fence_type,
21883+ int no_wait,
21884+ int ignore_signals,
21885+ int *r_reg, uint32_t * r_offset);
21886+extern int psb_init_use_base(struct drm_psb_private *dev_priv,
21887+ unsigned int reg_start, unsigned int reg_num);
21888+
21889+/*
21890+ * psb_xhw.c
21891+ */
21892+
21893+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
21894+ struct drm_file *file_priv);
21895+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
21896+ struct drm_file *file_priv);
21897+extern int psb_xhw_init(struct drm_device *dev);
21898+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
21899+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
21900+ struct drm_file *file_priv, int closing);
21901+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
21902+ struct psb_xhw_buf *buf,
21903+ uint32_t fire_flags,
21904+ uint32_t hw_context,
21905+ uint32_t * cookie,
21906+ uint32_t * oom_cmds,
21907+ uint32_t num_oom_cmds,
21908+ uint32_t offset,
21909+ uint32_t engine, uint32_t flags);
21910+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
21911+ struct psb_xhw_buf *buf, uint32_t fire_flags);
21912+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
21913+ struct psb_xhw_buf *buf,
21914+ uint32_t w,
21915+ uint32_t h,
21916+ uint32_t * hw_cookie,
21917+ uint32_t * bo_size,
21918+ uint32_t * clear_p_start,
21919+ uint32_t * clear_num_pages);
21920+
21921+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
21922+ struct psb_xhw_buf *buf);
21923+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
21924+ struct psb_xhw_buf *buf, uint32_t * value);
21925+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
21926+ struct psb_xhw_buf *buf,
21927+ uint32_t pages,
21928+ uint32_t * hw_cookie, uint32_t * size);
21929+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
21930+ struct psb_xhw_buf *buf, uint32_t * cookie);
21931+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
21932+ struct psb_xhw_buf *buf,
21933+ uint32_t * cookie,
21934+ uint32_t * bca,
21935+ uint32_t * rca, uint32_t * flags);
21936+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
21937+ struct psb_xhw_buf *buf);
21938+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
21939+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
21940+ struct psb_xhw_buf *buf);
21941+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
21942+ struct psb_xhw_buf *buf, uint32_t * cookie);
21943+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
21944+ struct psb_xhw_buf *buf,
21945+ uint32_t flags,
21946+ uint32_t param_offset,
21947+ uint32_t pt_offset, uint32_t * hw_cookie);
21948+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
21949+ struct psb_xhw_buf *buf);
21950+
21951+/*
21952+ * psb_schedule.c: HW bug fixing.
21953+ */
21954+
21955+#ifdef FIX_TG_16
21956+
21957+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
21958+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
21959+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
21960+
21961+#else
21962+
21963+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
21964+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
21965+
21966+#endif
21967+
21968+/*
21969+ * Utilities
21970+ */
21971+
21972+#define PSB_ALIGN_TO(_val, _align) \
21973+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
21974+#define PSB_WVDC32(_val, _offs) \
21975+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
21976+#define PSB_RVDC32(_offs) \
21977+ ioread32(dev_priv->vdc_reg + (_offs))
21978+#define PSB_WSGX32(_val, _offs) \
21979+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
21980+#define PSB_RSGX32(_offs) \
21981+ ioread32(dev_priv->sgx_reg + (_offs))
21982+#define PSB_WMSVDX32(_val, _offs) \
21983+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
21984+#define PSB_RMSVDX32(_offs) \
21985+ ioread32(dev_priv->msvdx_reg + (_offs))
21986+
21987+#define PSB_ALPL(_val, _base) \
21988+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
21989+#define PSB_ALPLM(_val, _base) \
21990+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
21991+
21992+#define PSB_D_RENDER (1 << 16)
21993+
21994+#define PSB_D_GENERAL (1 << 0)
21995+#define PSB_D_INIT (1 << 1)
21996+#define PSB_D_IRQ (1 << 2)
21997+#define PSB_D_FW (1 << 3)
21998+#define PSB_D_PERF (1 << 4)
21999+#define PSB_D_TMP (1 << 5)
22000+#define PSB_D_RELOC (1 << 6)
22001+
22002+extern int drm_psb_debug;
22003+extern int drm_psb_no_fb;
22004+extern int drm_psb_disable_vsync;
22005+
22006+#define PSB_DEBUG_FW(_fmt, _arg...) \
22007+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
22008+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
22009+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
22010+#define PSB_DEBUG_INIT(_fmt, _arg...) \
22011+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
22012+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
22013+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
22014+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
22015+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
22016+#define PSB_DEBUG_PERF(_fmt, _arg...) \
22017+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
22018+#define PSB_DEBUG_TMP(_fmt, _arg...) \
22019+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
22020+#define PSB_DEBUG_RELOC(_fmt, _arg...) \
22021+ PSB_DEBUG(PSB_D_RELOC, _fmt, ##_arg)
22022+
22023+#if DRM_DEBUG_CODE
22024+#define PSB_DEBUG(_flag, _fmt, _arg...) \
22025+ do { \
22026+ if (unlikely((_flag) & drm_psb_debug)) \
22027+ printk(KERN_DEBUG \
22028+ "[psb:0x%02x:%s] " _fmt , _flag, \
22029+ __FUNCTION__ , ##_arg); \
22030+ } while (0)
22031+#else
22032+#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
22033+#endif
22034+
22035+#endif
22036Index: linux-2.6.27/drivers/gpu/drm/psb/psb_fb.c
22037===================================================================
22038--- /dev/null 1970-01-01 00:00:00.000000000 +0000
22039+++ linux-2.6.27/drivers/gpu/drm/psb/psb_fb.c 2009-01-14 12:03:18.000000000 +0000
22040@@ -0,0 +1,1330 @@
22041+/**************************************************************************
22042+ * Copyright (c) 2007, Intel Corporation.
22043+ * All Rights Reserved.
22044+ *
22045+ * This program is free software; you can redistribute it and/or modify it
22046+ * under the terms and conditions of the GNU General Public License,
22047+ * version 2, as published by the Free Software Foundation.
22048+ *
22049+ * This program is distributed in the hope it will be useful, but WITHOUT
22050+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22051+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22052+ * more details.
22053+ *
22054+ * You should have received a copy of the GNU General Public License along with
22055+ * this program; if not, write to the Free Software Foundation, Inc.,
22056+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22057+ *
22058+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
22059+ * develop this driver.
22060+ *
22061+ **************************************************************************/
22062+
22063+#include <linux/module.h>
22064+#include <linux/kernel.h>
22065+#include <linux/errno.h>
22066+#include <linux/string.h>
22067+#include <linux/mm.h>
22068+#include <linux/tty.h>
22069+#include <linux/slab.h>
22070+#include <linux/delay.h>
22071+#include <linux/fb.h>
22072+#include <linux/init.h>
22073+#include <linux/console.h>
22074+
22075+#include "drmP.h"
22076+#include "drm.h"
22077+#include "drm_crtc.h"
22078+#include "psb_drv.h"
22079+
22080+#define SII_1392_WA
22081+#ifdef SII_1392_WA
22082+extern int SII_1392;
22083+#endif
22084+
22085+struct psbfb_vm_info {
22086+ struct drm_buffer_object *bo;
22087+ struct address_space *f_mapping;
22088+ struct mutex vm_mutex;
22089+ atomic_t refcount;
22090+};
22091+
22092+struct psbfb_par {
22093+ struct drm_device *dev;
22094+ struct drm_crtc *crtc;
22095+ struct drm_output *output;
22096+ struct psbfb_vm_info *vi;
22097+ int dpms_state;
22098+};
22099+
22100+static void psbfb_vm_info_deref(struct psbfb_vm_info **vi)
22101+{
22102+ struct psbfb_vm_info *tmp = *vi;
22103+ *vi = NULL;
22104+ if (atomic_dec_and_test(&tmp->refcount)) {
22105+ drm_bo_usage_deref_unlocked(&tmp->bo);
22106+ drm_free(tmp, sizeof(*tmp), DRM_MEM_MAPS);
22107+ }
22108+}
22109+
22110+static struct psbfb_vm_info *psbfb_vm_info_ref(struct psbfb_vm_info *vi)
22111+{
22112+ atomic_inc(&vi->refcount);
22113+ return vi;
22114+}
22115+
22116+static struct psbfb_vm_info *psbfb_vm_info_create(void)
22117+{
22118+ struct psbfb_vm_info *vi;
22119+
22120+ vi = drm_calloc(1, sizeof(*vi), DRM_MEM_MAPS);
22121+ if (!vi)
22122+ return NULL;
22123+
22124+ mutex_init(&vi->vm_mutex);
22125+ atomic_set(&vi->refcount, 1);
22126+ return vi;
22127+}
22128+
22129+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
22130+
22131+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
22132+ unsigned blue, unsigned transp, struct fb_info *info)
22133+{
22134+ struct psbfb_par *par = info->par;
22135+ struct drm_crtc *crtc = par->crtc;
22136+ uint32_t v;
22137+
22138+ if (!crtc->fb)
22139+ return -ENOMEM;
22140+
22141+ if (regno > 255)
22142+ return 1;
22143+
22144+ if (crtc->funcs->gamma_set)
22145+ crtc->funcs->gamma_set(crtc, red, green, blue, regno);
22146+
22147+ red = CMAP_TOHW(red, info->var.red.length);
22148+ blue = CMAP_TOHW(blue, info->var.blue.length);
22149+ green = CMAP_TOHW(green, info->var.green.length);
22150+ transp = CMAP_TOHW(transp, info->var.transp.length);
22151+
22152+ v = (red << info->var.red.offset) |
22153+ (green << info->var.green.offset) |
22154+ (blue << info->var.blue.offset) |
22155+ (transp << info->var.transp.offset);
22156+
22157+ switch (crtc->fb->bits_per_pixel) {
22158+ case 16:
22159+ ((uint32_t *) info->pseudo_palette)[regno] = v;
22160+ break;
22161+ case 24:
22162+ case 32:
22163+ ((uint32_t *) info->pseudo_palette)[regno] = v;
22164+ break;
22165+ }
22166+
22167+ return 0;
22168+}
22169+
22170+static int psbfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
22171+{
22172+ struct psbfb_par *par = info->par;
22173+ struct drm_device *dev = par->dev;
22174+ struct drm_framebuffer *fb = par->crtc->fb;
22175+ struct drm_display_mode *drm_mode;
22176+ struct drm_output *output;
22177+ int depth;
22178+ int pitch;
22179+ int bpp = var->bits_per_pixel;
22180+
22181+ if (!fb)
22182+ return -ENOMEM;
22183+
22184+ if (!var->pixclock)
22185+ return -EINVAL;
22186+
22187+ /* don't support virtuals for now */
22188+ if (var->xres_virtual > var->xres)
22189+ return -EINVAL;
22190+
22191+ if (var->yres_virtual > var->yres)
22192+ return -EINVAL;
22193+
22194+ switch (bpp) {
22195+ case 8:
22196+ depth = 8;
22197+ break;
22198+ case 16:
22199+ depth = (var->green.length == 6) ? 16 : 15;
22200+ break;
22201+ case 24: /* assume this is 32bpp / depth 24 */
22202+ bpp = 32;
22203+ /* fallthrough */
22204+ case 32:
22205+ depth = (var->transp.length > 0) ? 32 : 24;
22206+ break;
22207+ default:
22208+ return -EINVAL;
22209+ }
22210+
22211+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
22212+
22213+ /* Check that we can resize */
22214+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
22215+#if 1
22216+ /* Need to resize the fb object.
22217+ * But the generic fbdev code doesn't really understand
22218+ * that we can do this. So disable for now.
22219+ */
22220+ DRM_INFO("Can't support requested size, too big!\n");
22221+ return -EINVAL;
22222+#else
22223+ int ret;
22224+ struct drm_buffer_object *fbo = NULL;
22225+ struct drm_bo_kmap_obj tmp_kmap;
22226+
22227+ /* a temporary BO to check if we could resize in setpar.
22228+ * Therefore no need to set NO_EVICT.
22229+ */
22230+ ret = drm_buffer_object_create(dev,
22231+ pitch * var->yres,
22232+ drm_bo_type_kernel,
22233+ DRM_BO_FLAG_READ |
22234+ DRM_BO_FLAG_WRITE |
22235+ DRM_BO_FLAG_MEM_TT |
22236+ DRM_BO_FLAG_MEM_VRAM,
22237+ DRM_BO_HINT_DONT_FENCE,
22238+ 0, 0, &fbo);
22239+ if (ret || !fbo)
22240+ return -ENOMEM;
22241+
22242+ ret = drm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
22243+ if (ret) {
22244+ drm_bo_usage_deref_unlocked(&fbo);
22245+ return -EINVAL;
22246+ }
22247+
22248+ drm_bo_kunmap(&tmp_kmap);
22249+ /* destroy our current fbo! */
22250+ drm_bo_usage_deref_unlocked(&fbo);
22251+#endif
22252+ }
22253+
22254+ switch (depth) {
22255+ case 8:
22256+ var->red.offset = 0;
22257+ var->green.offset = 0;
22258+ var->blue.offset = 0;
22259+ var->red.length = 8;
22260+ var->green.length = 8;
22261+ var->blue.length = 8;
22262+ var->transp.length = 0;
22263+ var->transp.offset = 0;
22264+ break;
22265+ case 15:
22266+ var->red.offset = 10;
22267+ var->green.offset = 5;
22268+ var->blue.offset = 0;
22269+ var->red.length = 5;
22270+ var->green.length = 5;
22271+ var->blue.length = 5;
22272+ var->transp.length = 1;
22273+ var->transp.offset = 15;
22274+ break;
22275+ case 16:
22276+ var->red.offset = 11;
22277+ var->green.offset = 5;
22278+ var->blue.offset = 0;
22279+ var->red.length = 5;
22280+ var->green.length = 6;
22281+ var->blue.length = 5;
22282+ var->transp.length = 0;
22283+ var->transp.offset = 0;
22284+ break;
22285+ case 24:
22286+ var->red.offset = 16;
22287+ var->green.offset = 8;
22288+ var->blue.offset = 0;
22289+ var->red.length = 8;
22290+ var->green.length = 8;
22291+ var->blue.length = 8;
22292+ var->transp.length = 0;
22293+ var->transp.offset = 0;
22294+ break;
22295+ case 32:
22296+ var->red.offset = 16;
22297+ var->green.offset = 8;
22298+ var->blue.offset = 0;
22299+ var->red.length = 8;
22300+ var->green.length = 8;
22301+ var->blue.length = 8;
22302+ var->transp.length = 8;
22303+ var->transp.offset = 24;
22304+ break;
22305+ default:
22306+ return -EINVAL;
22307+ }
22308+
22309+#if 0
22310+ /* Here we walk the output mode list and look for modes. If we haven't
22311+ * got it, then bail. Not very nice, so this is disabled.
22312+ * In the set_par code, we create our mode based on the incoming
22313+ * parameters. Nicer, but may not be desired by some.
22314+ */
22315+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
22316+ if (output->crtc == par->crtc)
22317+ break;
22318+ }
22319+
22320+ list_for_each_entry(drm_mode, &output->modes, head) {
22321+ if (drm_mode->hdisplay == var->xres &&
22322+ drm_mode->vdisplay == var->yres && drm_mode->clock != 0)
22323+ break;
22324+ }
22325+
22326+ if (!drm_mode)
22327+ return -EINVAL;
22328+#else
22329+ (void)dev; /* silence warnings */
22330+ (void)output;
22331+ (void)drm_mode;
22332+#endif
22333+
22334+ return 0;
22335+}
22336+
22337+static int psbfb_move_fb_bo(struct fb_info *info, struct drm_buffer_object *bo,
22338+ uint64_t mem_type_flags)
22339+{
22340+ struct psbfb_par *par;
22341+ loff_t holelen;
22342+ int ret;
22343+
22344+ /*
22345+ * Kill all user-space mappings of this device. They will be
22346+ * faulted back using nopfn when accessed.
22347+ */
22348+
22349+ par = info->par;
22350+ holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
22351+ mutex_lock(&par->vi->vm_mutex);
22352+ if (par->vi->f_mapping) {
22353+ unmap_mapping_range(par->vi->f_mapping, 0, holelen, 1);
22354+ }
22355+
22356+ ret = drm_bo_do_validate(bo,
22357+ mem_type_flags,
22358+ DRM_BO_MASK_MEM |
22359+ DRM_BO_FLAG_NO_EVICT,
22360+ DRM_BO_HINT_DONT_FENCE, 0, 1, NULL);
22361+
22362+ mutex_unlock(&par->vi->vm_mutex);
22363+ return ret;
22364+}
22365+
22366+/* this will let fbcon do the mode init */
22367+static int psbfb_set_par(struct fb_info *info)
22368+{
22369+ struct psbfb_par *par = info->par;
22370+ struct drm_framebuffer *fb = par->crtc->fb;
22371+ struct drm_device *dev = par->dev;
22372+ struct drm_display_mode *drm_mode;
22373+ struct fb_var_screeninfo *var = &info->var;
22374+ struct drm_psb_private *dev_priv = dev->dev_private;
22375+ struct drm_output *output;
22376+ int pitch;
22377+ int depth;
22378+ int bpp = var->bits_per_pixel;
22379+
22380+ if (!fb)
22381+ return -ENOMEM;
22382+
22383+ switch (bpp) {
22384+ case 8:
22385+ depth = 8;
22386+ break;
22387+ case 16:
22388+ depth = (var->green.length == 6) ? 16 : 15;
22389+ break;
22390+ case 24: /* assume this is 32bpp / depth 24 */
22391+ bpp = 32;
22392+ /* fallthrough */
22393+ case 32:
22394+ depth = (var->transp.length > 0) ? 32 : 24;
22395+ break;
22396+ default:
22397+ return -EINVAL;
22398+ }
22399+
22400+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
22401+
22402+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
22403+#if 1
22404+ /* Need to resize the fb object.
22405+ * But the generic fbdev code doesn't really understand
22406+ * that we can do this. So disable for now.
22407+ */
22408+ DRM_INFO("Can't support requested size, too big!\n");
22409+ return -EINVAL;
22410+#else
22411+ int ret;
22412+ struct drm_buffer_object *fbo = NULL, *tfbo;
22413+ struct drm_bo_kmap_obj tmp_kmap, tkmap;
22414+
22415+ ret = drm_buffer_object_create(dev,
22416+ pitch * var->yres,
22417+ drm_bo_type_kernel,
22418+ DRM_BO_FLAG_READ |
22419+ DRM_BO_FLAG_WRITE |
22420+ DRM_BO_FLAG_MEM_TT |
22421+ DRM_BO_FLAG_MEM_VRAM |
22422+ DRM_BO_FLAG_NO_EVICT,
22423+ DRM_BO_HINT_DONT_FENCE,
22424+ 0, 0, &fbo);
22425+ if (ret || !fbo) {
22426+ DRM_ERROR
22427+ ("failed to allocate new resized framebuffer\n");
22428+ return -ENOMEM;
22429+ }
22430+
22431+ ret = drm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
22432+ if (ret) {
22433+ DRM_ERROR("failed to kmap framebuffer.\n");
22434+ drm_bo_usage_deref_unlocked(&fbo);
22435+ return -EINVAL;
22436+ }
22437+
22438+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
22439+ fb->height, fb->offset, fbo);
22440+
22441+ /* set new screen base */
22442+ info->screen_base = tmp_kmap.virtual;
22443+
22444+ tkmap = fb->kmap;
22445+ fb->kmap = tmp_kmap;
22446+ drm_bo_kunmap(&tkmap);
22447+
22448+ tfbo = fb->bo;
22449+ fb->bo = fbo;
22450+ drm_bo_usage_deref_unlocked(&tfbo);
22451+#endif
22452+ }
22453+
22454+ fb->offset = fb->bo->offset - dev_priv->pg->gatt_start;
22455+ fb->width = var->xres;
22456+ fb->height = var->yres;
22457+ fb->bits_per_pixel = bpp;
22458+ fb->pitch = pitch;
22459+ fb->depth = depth;
22460+
22461+ info->fix.line_length = fb->pitch;
22462+ info->fix.visual =
22463+ (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
22464+
22465+ /* some fbdev's apps don't want these to change */
22466+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
22467+
22468+ /* we have to align the output base address because the fb->bo
22469+ may be moved in the previous drm_bo_do_validate().
22470+ Otherwise the output screens may go black when exit the X
22471+ window and re-enter the console */
22472+ info->screen_base = fb->kmap.virtual;
22473+
22474+#if 0
22475+ /* relates to resize - disable */
22476+ info->fix.smem_len = info->fix.line_length * var->yres;
22477+ info->screen_size = info->fix.smem_len; /* ??? */
22478+#endif
22479+
22480+ /* Should we walk the output's modelist or just create our own ???
22481+ * For now, we create and destroy a mode based on the incoming
22482+ * parameters. But there's commented out code below which scans
22483+ * the output list too.
22484+ */
22485+#if 0
22486+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
22487+ if (output->crtc == par->crtc)
22488+ break;
22489+ }
22490+
22491+ list_for_each_entry(drm_mode, &output->modes, head) {
22492+ if (drm_mode->hdisplay == var->xres &&
22493+ drm_mode->vdisplay == var->yres && drm_mode->clock != 0)
22494+ break;
22495+ }
22496+#else
22497+ (void)output; /* silence warning */
22498+
22499+ drm_mode = drm_mode_create(dev);
22500+ drm_mode->hdisplay = var->xres;
22501+ drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
22502+ drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
22503+ drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
22504+ drm_mode->vdisplay = var->yres;
22505+ drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
22506+ drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
22507+ drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
22508+ drm_mode->clock = PICOS2KHZ(var->pixclock);
22509+ drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
22510+ drm_mode_set_name(drm_mode);
22511+ drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
22512+#endif
22513+
22514+ if (!drm_crtc_set_mode(par->crtc, drm_mode, 0, 0))
22515+ return -EINVAL;
22516+
22517+ /* Have to destroy our created mode if we're not searching the mode
22518+ * list for it.
22519+ */
22520+#if 1
22521+ drm_mode_destroy(dev, drm_mode);
22522+#endif
22523+
22524+ return 0;
22525+}
22526+
22527+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);;
22528+
22529+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
22530+ uint32_t dst_offset, uint32_t dst_stride,
22531+ uint32_t dst_format, uint16_t dst_x,
22532+ uint16_t dst_y, uint16_t size_x,
22533+ uint16_t size_y, uint32_t fill)
22534+{
22535+ uint32_t buffer[10];
22536+ uint32_t *buf;
22537+ int ret;
22538+
22539+ buf = buffer;
22540+
22541+ *buf++ = PSB_2D_FENCE_BH;
22542+
22543+ *buf++ =
22544+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
22545+ PSB_2D_DST_STRIDE_SHIFT);
22546+ *buf++ = dst_offset;
22547+
22548+ *buf++ =
22549+ PSB_2D_BLIT_BH |
22550+ PSB_2D_ROT_NONE |
22551+ PSB_2D_COPYORDER_TL2BR |
22552+ PSB_2D_DSTCK_DISABLE |
22553+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
22554+
22555+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
22556+ *buf++ =
22557+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
22558+ PSB_2D_DST_YSTART_SHIFT);
22559+ *buf++ =
22560+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
22561+ PSB_2D_DST_YSIZE_SHIFT);
22562+ *buf++ = PSB_2D_FLUSH_BH;
22563+
22564+ psb_2d_lock(dev_priv);
22565+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
22566+ psb_2d_unlock(dev_priv);
22567+
22568+ return ret;
22569+}
22570+
22571+static void psbfb_fillrect_accel(struct fb_info *info,
22572+ const struct fb_fillrect *r)
22573+{
22574+ struct psbfb_par *par = info->par;
22575+ struct drm_framebuffer *fb = par->crtc->fb;
22576+ struct drm_psb_private *dev_priv = par->dev->dev_private;
22577+ uint32_t offset;
22578+ uint32_t stride;
22579+ uint32_t format;
22580+
22581+ if (!fb)
22582+ return;
22583+
22584+ offset = fb->offset;
22585+ stride = fb->pitch;
22586+
22587+ switch (fb->depth) {
22588+ case 8:
22589+ format = PSB_2D_DST_332RGB;
22590+ break;
22591+ case 15:
22592+ format = PSB_2D_DST_555RGB;
22593+ break;
22594+ case 16:
22595+ format = PSB_2D_DST_565RGB;
22596+ break;
22597+ case 24:
22598+ case 32:
22599+ /* this is wrong but since we don't do blending its okay */
22600+ format = PSB_2D_DST_8888ARGB;
22601+ break;
22602+ default:
22603+ /* software fallback */
22604+ cfb_fillrect(info, r);
22605+ return;
22606+ }
22607+
22608+ psb_accel_2d_fillrect(dev_priv,
22609+ offset, stride, format,
22610+ r->dx, r->dy, r->width, r->height, r->color);
22611+}
22612+
22613+static void psbfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
22614+{
22615+ if (info->state != FBINFO_STATE_RUNNING)
22616+ return;
22617+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
22618+ cfb_fillrect(info, rect);
22619+ return;
22620+ }
22621+ if (in_interrupt() || in_atomic()) {
22622+ /*
22623+ * Catch case when we're shutting down.
22624+ */
22625+ cfb_fillrect(info, rect);
22626+ return;
22627+ }
22628+ psbfb_fillrect_accel(info, rect);
22629+}
22630+
22631+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
22632+{
22633+ if (xdir < 0)
22634+ return ((ydir <
22635+ 0) ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TR2BL);
22636+ else
22637+ return ((ydir <
22638+ 0) ? PSB_2D_COPYORDER_BL2TR : PSB_2D_COPYORDER_TL2BR);
22639+}
22640+
22641+/*
22642+ * @srcOffset in bytes
22643+ * @srcStride in bytes
22644+ * @srcFormat psb 2D format defines
22645+ * @dstOffset in bytes
22646+ * @dstStride in bytes
22647+ * @dstFormat psb 2D format defines
22648+ * @srcX offset in pixels
22649+ * @srcY offset in pixels
22650+ * @dstX offset in pixels
22651+ * @dstY offset in pixels
22652+ * @sizeX of the copied area
22653+ * @sizeY of the copied area
22654+ */
22655+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
22656+ uint32_t src_offset, uint32_t src_stride,
22657+ uint32_t src_format, uint32_t dst_offset,
22658+ uint32_t dst_stride, uint32_t dst_format,
22659+ uint16_t src_x, uint16_t src_y, uint16_t dst_x,
22660+ uint16_t dst_y, uint16_t size_x, uint16_t size_y)
22661+{
22662+ uint32_t blit_cmd;
22663+ uint32_t buffer[10];
22664+ uint32_t *buf;
22665+ uint32_t direction;
22666+ int ret;
22667+
22668+ buf = buffer;
22669+
22670+ direction = psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
22671+
22672+ if (direction == PSB_2D_COPYORDER_BR2TL ||
22673+ direction == PSB_2D_COPYORDER_TR2BL) {
22674+ src_x += size_x - 1;
22675+ dst_x += size_x - 1;
22676+ }
22677+ if (direction == PSB_2D_COPYORDER_BR2TL ||
22678+ direction == PSB_2D_COPYORDER_BL2TR) {
22679+ src_y += size_y - 1;
22680+ dst_y += size_y - 1;
22681+ }
22682+
22683+ blit_cmd =
22684+ PSB_2D_BLIT_BH |
22685+ PSB_2D_ROT_NONE |
22686+ PSB_2D_DSTCK_DISABLE |
22687+ PSB_2D_SRCCK_DISABLE |
22688+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
22689+
22690+ *buf++ = PSB_2D_FENCE_BH;
22691+ *buf++ =
22692+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
22693+ PSB_2D_DST_STRIDE_SHIFT);
22694+ *buf++ = dst_offset;
22695+ *buf++ =
22696+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
22697+ PSB_2D_SRC_STRIDE_SHIFT);
22698+ *buf++ = src_offset;
22699+ *buf++ =
22700+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) | (src_y
22701+ <<
22702+ PSB_2D_SRCOFF_YSTART_SHIFT);
22703+ *buf++ = blit_cmd;
22704+ *buf++ =
22705+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
22706+ PSB_2D_DST_YSTART_SHIFT);
22707+ *buf++ =
22708+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
22709+ PSB_2D_DST_YSIZE_SHIFT);
22710+ *buf++ = PSB_2D_FLUSH_BH;
22711+
22712+ psb_2d_lock(dev_priv);
22713+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
22714+ psb_2d_unlock(dev_priv);
22715+ return ret;
22716+}
22717+
22718+static void psbfb_copyarea_accel(struct fb_info *info,
22719+ const struct fb_copyarea *a)
22720+{
22721+ struct psbfb_par *par = info->par;
22722+ struct drm_framebuffer *fb = par->crtc->fb;
22723+ struct drm_psb_private *dev_priv = par->dev->dev_private;
22724+ uint32_t offset;
22725+ uint32_t stride;
22726+ uint32_t src_format;
22727+ uint32_t dst_format;
22728+
22729+ if (!fb)
22730+ return;
22731+
22732+ offset = fb->offset;
22733+ stride = fb->pitch;
22734+
22735+ if (a->width == 8 || a->height == 8) {
22736+ psb_2d_lock(dev_priv);
22737+ psb_idle_2d(par->dev);
22738+ psb_2d_unlock(dev_priv);
22739+ cfb_copyarea(info, a);
22740+ return;
22741+ }
22742+
22743+ switch (fb->depth) {
22744+ case 8:
22745+ src_format = PSB_2D_SRC_332RGB;
22746+ dst_format = PSB_2D_DST_332RGB;
22747+ break;
22748+ case 15:
22749+ src_format = PSB_2D_SRC_555RGB;
22750+ dst_format = PSB_2D_DST_555RGB;
22751+ break;
22752+ case 16:
22753+ src_format = PSB_2D_SRC_565RGB;
22754+ dst_format = PSB_2D_DST_565RGB;
22755+ break;
22756+ case 24:
22757+ case 32:
22758+ /* this is wrong but since we don't do blending its okay */
22759+ src_format = PSB_2D_SRC_8888ARGB;
22760+ dst_format = PSB_2D_DST_8888ARGB;
22761+ break;
22762+ default:
22763+ /* software fallback */
22764+ cfb_copyarea(info, a);
22765+ return;
22766+ }
22767+
22768+ psb_accel_2d_copy(dev_priv,
22769+ offset, stride, src_format,
22770+ offset, stride, dst_format,
22771+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
22772+}
22773+
22774+static void psbfb_copyarea(struct fb_info *info,
22775+ const struct fb_copyarea *region)
22776+{
22777+ if (info->state != FBINFO_STATE_RUNNING)
22778+ return;
22779+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
22780+ cfb_copyarea(info, region);
22781+ return;
22782+ }
22783+ if (in_interrupt() || in_atomic()) {
22784+ /*
22785+ * Catch case when we're shutting down.
22786+ */
22787+ cfb_copyarea(info, region);
22788+ return;
22789+ }
22790+
22791+ psbfb_copyarea_accel(info, region);
22792+}
22793+
22794+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
22795+{
22796+ if (info->state != FBINFO_STATE_RUNNING)
22797+ return;
22798+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
22799+ cfb_imageblit(info, image);
22800+ return;
22801+ }
22802+ if (in_interrupt() || in_atomic()) {
22803+ cfb_imageblit(info, image);
22804+ return;
22805+ }
22806+
22807+ cfb_imageblit(info, image);
22808+}
22809+
22810+static int psbfb_blank(int blank_mode, struct fb_info *info)
22811+{
22812+ int dpms_mode;
22813+ struct psbfb_par *par = info->par;
22814+ struct drm_output *output;
22815+
22816+ par->dpms_state = blank_mode;
22817+
22818+ switch(blank_mode) {
22819+ case FB_BLANK_UNBLANK:
22820+ dpms_mode = DPMSModeOn;
22821+ break;
22822+ case FB_BLANK_NORMAL:
22823+ if (!par->crtc)
22824+ return 0;
22825+ (*par->crtc->funcs->dpms)(par->crtc, DPMSModeStandby);
22826+ return 0;
22827+ case FB_BLANK_HSYNC_SUSPEND:
22828+ default:
22829+ dpms_mode = DPMSModeStandby;
22830+ break;
22831+ case FB_BLANK_VSYNC_SUSPEND:
22832+ dpms_mode = DPMSModeSuspend;
22833+ break;
22834+ case FB_BLANK_POWERDOWN:
22835+ dpms_mode = DPMSModeOff;
22836+ break;
22837+ }
22838+
22839+ if (!par->crtc)
22840+ return 0;
22841+
22842+ list_for_each_entry(output, &par->dev->mode_config.output_list, head) {
22843+ if (output->crtc == par->crtc)
22844+ (*output->funcs->dpms)(output, dpms_mode);
22845+ }
22846+
22847+ (*par->crtc->funcs->dpms)(par->crtc, dpms_mode);
22848+ return 0;
22849+}
22850+
22851+
22852+static int psbfb_kms_off(struct drm_device *dev, int suspend)
22853+{
22854+ struct drm_framebuffer *fb = 0;
22855+ struct drm_buffer_object *bo = 0;
22856+ struct drm_psb_private *dev_priv = dev->dev_private;
22857+ int ret = 0;
22858+
22859+ DRM_DEBUG("psbfb_kms_off_ioctl\n");
22860+
22861+ mutex_lock(&dev->mode_config.mutex);
22862+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
22863+ struct fb_info *info = fb->fbdev;
22864+ struct psbfb_par *par = info->par;
22865+ int save_dpms_state;
22866+
22867+ if (suspend)
22868+ fb_set_suspend(info, 1);
22869+ else
22870+ info->state &= ~FBINFO_STATE_RUNNING;
22871+
22872+ info->screen_base = NULL;
22873+
22874+ bo = fb->bo;
22875+
22876+ if (!bo)
22877+ continue;
22878+
22879+ drm_bo_kunmap(&fb->kmap);
22880+
22881+ /*
22882+ * We don't take the 2D lock here as we assume that the
22883+ * 2D engine will eventually idle anyway.
22884+ */
22885+
22886+ if (!suspend) {
22887+ uint32_t dummy2 = 0;
22888+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
22889+ &dummy2, &dummy2);
22890+ psb_2d_lock(dev_priv);
22891+ (void)psb_idle_2d(dev);
22892+ psb_2d_unlock(dev_priv);
22893+ } else
22894+ psb_idle_2d(dev);
22895+
22896+ save_dpms_state = par->dpms_state;
22897+ psbfb_blank(FB_BLANK_NORMAL, info);
22898+ par->dpms_state = save_dpms_state;
22899+
22900+ ret = psbfb_move_fb_bo(info, bo, DRM_BO_FLAG_MEM_LOCAL);
22901+
22902+ if (ret)
22903+ goto out_err;
22904+ }
22905+ out_err:
22906+ mutex_unlock(&dev->mode_config.mutex);
22907+
22908+ return ret;
22909+}
22910+
22911+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
22912+ struct drm_file *file_priv)
22913+{
22914+ int ret;
22915+
22916+ acquire_console_sem();
22917+ ret = psbfb_kms_off(dev, 0);
22918+ release_console_sem();
22919+
22920+ return ret;
22921+}
22922+
22923+static int psbfb_kms_on(struct drm_device *dev, int resume)
22924+{
22925+ struct drm_framebuffer *fb = 0;
22926+ struct drm_buffer_object *bo = 0;
22927+ struct drm_psb_private *dev_priv = dev->dev_private;
22928+ int ret = 0;
22929+ int dummy;
22930+
22931+ DRM_DEBUG("psbfb_kms_on_ioctl\n");
22932+
22933+ if (!resume) {
22934+ uint32_t dummy2 = 0;
22935+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
22936+ &dummy2, &dummy2);
22937+ psb_2d_lock(dev_priv);
22938+ (void)psb_idle_2d(dev);
22939+ psb_2d_unlock(dev_priv);
22940+ } else
22941+ psb_idle_2d(dev);
22942+
22943+ mutex_lock(&dev->mode_config.mutex);
22944+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
22945+ struct fb_info *info = fb->fbdev;
22946+ struct psbfb_par *par = info->par;
22947+
22948+ bo = fb->bo;
22949+ if (!bo)
22950+ continue;
22951+
22952+ ret = psbfb_move_fb_bo(info, bo,
22953+ DRM_BO_FLAG_MEM_TT |
22954+ DRM_BO_FLAG_MEM_VRAM |
22955+ DRM_BO_FLAG_NO_EVICT);
22956+ if (ret)
22957+ goto out_err;
22958+
22959+ ret = drm_bo_kmap(bo, 0, bo->num_pages, &fb->kmap);
22960+ if (ret)
22961+ goto out_err;
22962+
22963+ info->screen_base = drm_bmo_virtual(&fb->kmap, &dummy);
22964+ fb->offset = bo->offset - dev_priv->pg->gatt_start;
22965+
22966+ if (ret)
22967+ goto out_err;
22968+
22969+ if (resume)
22970+ fb_set_suspend(info, 0);
22971+ else
22972+ info->state |= FBINFO_STATE_RUNNING;
22973+
22974+ /*
22975+ * Re-run modesetting here, since the VDS scanout offset may
22976+ * have changed.
22977+ */
22978+
22979+ if (par->crtc->enabled) {
22980+ psbfb_set_par(info);
22981+ psbfb_blank(par->dpms_state, info);
22982+ }
22983+ }
22984+ out_err:
22985+ mutex_unlock(&dev->mode_config.mutex);
22986+
22987+ return ret;
22988+}
22989+
22990+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
22991+ struct drm_file *file_priv)
22992+{
22993+ int ret;
22994+
22995+ acquire_console_sem();
22996+ ret = psbfb_kms_on(dev, 0);
22997+ release_console_sem();
22998+#ifdef SII_1392_WA
22999+ if((SII_1392 != 1) || (drm_psb_no_fb==0))
23000+ drm_disable_unused_functions(dev);
23001+#else
23002+ drm_disable_unused_functions(dev);
23003+#endif
23004+ return ret;
23005+}
23006+
23007+void psbfb_suspend(struct drm_device *dev)
23008+{
23009+ acquire_console_sem();
23010+ psbfb_kms_off(dev, 1);
23011+ release_console_sem();
23012+}
23013+
23014+void psbfb_resume(struct drm_device *dev)
23015+{
23016+ acquire_console_sem();
23017+ psbfb_kms_on(dev, 1);
23018+ release_console_sem();
23019+#ifdef SII_1392_WA
23020+ if((SII_1392 != 1) || (drm_psb_no_fb==0))
23021+ drm_disable_unused_functions(dev);
23022+#else
23023+ drm_disable_unused_functions(dev);
23024+#endif
23025+}
23026+
23027+/*
23028+ * FIXME: Before kernel inclusion, migrate nopfn to fault.
23029+ * Also, these should be the default vm ops for buffer object type fbs.
23030+ */
23031+
23032+extern unsigned long drm_bo_vm_fault(struct vm_area_struct *vma,
23033+ struct vm_fault *vmf);
23034+
23035+/*
23036+ * This wrapper is a bit ugly and is here because we need access to a mutex
23037+ * that we can lock both around nopfn and around unmap_mapping_range + move.
23038+ * Normally, this would've been done using the bo mutex, but unfortunately
23039+ * we cannot lock it around drm_bo_do_validate(), since that would imply
23040+ * recursive locking.
23041+ */
23042+
23043+static int psbfb_fault(struct vm_area_struct *vma,
23044+ struct vm_fault *vmf)
23045+{
23046+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
23047+ struct vm_area_struct tmp_vma;
23048+ int ret;
23049+
23050+ mutex_lock(&vi->vm_mutex);
23051+ tmp_vma = *vma;
23052+ tmp_vma.vm_private_data = vi->bo;
23053+ ret = drm_bo_vm_fault(&tmp_vma, vmf);
23054+ mutex_unlock(&vi->vm_mutex);
23055+ return ret;
23056+}
23057+
23058+static void psbfb_vm_open(struct vm_area_struct *vma)
23059+{
23060+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
23061+
23062+ atomic_inc(&vi->refcount);
23063+}
23064+
23065+static void psbfb_vm_close(struct vm_area_struct *vma)
23066+{
23067+ psbfb_vm_info_deref((struct psbfb_vm_info **)&vma->vm_private_data);
23068+}
23069+
23070+static struct vm_operations_struct psbfb_vm_ops = {
23071+ .fault = psbfb_fault,
23072+ .open = psbfb_vm_open,
23073+ .close = psbfb_vm_close,
23074+};
23075+
23076+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
23077+{
23078+ struct psbfb_par *par = info->par;
23079+ struct drm_framebuffer *fb = par->crtc->fb;
23080+ struct drm_buffer_object *bo = fb->bo;
23081+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
23082+ unsigned long offset = vma->vm_pgoff;
23083+
23084+ if (vma->vm_pgoff != 0)
23085+ return -EINVAL;
23086+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
23087+ return -EINVAL;
23088+ if (offset + size > bo->num_pages)
23089+ return -EINVAL;
23090+
23091+ mutex_lock(&par->vi->vm_mutex);
23092+ if (!par->vi->f_mapping)
23093+ par->vi->f_mapping = vma->vm_file->f_mapping;
23094+ mutex_unlock(&par->vi->vm_mutex);
23095+
23096+ vma->vm_private_data = psbfb_vm_info_ref(par->vi);
23097+
23098+ vma->vm_ops = &psbfb_vm_ops;
23099+ vma->vm_flags |= VM_PFNMAP;
23100+
23101+ return 0;
23102+}
23103+
23104+int psbfb_sync(struct fb_info *info)
23105+{
23106+ struct psbfb_par *par = info->par;
23107+ struct drm_psb_private *dev_priv = par->dev->dev_private;
23108+
23109+ psb_2d_lock(dev_priv);
23110+ psb_idle_2d(par->dev);
23111+ psb_2d_unlock(dev_priv);
23112+
23113+ return 0;
23114+}
23115+
23116+static struct fb_ops psbfb_ops = {
23117+ .owner = THIS_MODULE,
23118+ .fb_check_var = psbfb_check_var,
23119+ .fb_set_par = psbfb_set_par,
23120+ .fb_setcolreg = psbfb_setcolreg,
23121+ .fb_fillrect = psbfb_fillrect,
23122+ .fb_copyarea = psbfb_copyarea,
23123+ .fb_imageblit = psbfb_imageblit,
23124+ .fb_mmap = psbfb_mmap,
23125+ .fb_sync = psbfb_sync,
23126+ .fb_blank = psbfb_blank,
23127+};
23128+
23129+int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
23130+{
23131+ struct fb_info *info;
23132+ struct psbfb_par *par;
23133+ struct device *device = &dev->pdev->dev;
23134+ struct drm_framebuffer *fb;
23135+ struct drm_display_mode *mode = crtc->desired_mode;
23136+ struct drm_psb_private *dev_priv =
23137+ (struct drm_psb_private *)dev->dev_private;
23138+ struct drm_buffer_object *fbo = NULL;
23139+ int ret;
23140+ int is_iomem;
23141+
23142+ if (drm_psb_no_fb) {
23143+ /* need to do this as the DRM will disable the output */
23144+ crtc->enabled = 1;
23145+ return 0;
23146+ }
23147+
23148+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
23149+ if (!info) {
23150+ return -ENOMEM;
23151+ }
23152+
23153+ fb = drm_framebuffer_create(dev);
23154+ if (!fb) {
23155+ framebuffer_release(info);
23156+ DRM_ERROR("failed to allocate fb.\n");
23157+ return -ENOMEM;
23158+ }
23159+ crtc->fb = fb;
23160+
23161+ fb->width = mode->hdisplay;
23162+ fb->height = mode->vdisplay;
23163+
23164+ fb->bits_per_pixel = 32;
23165+ fb->depth = 24;
23166+ fb->pitch =
23167+ ((fb->width * ((fb->bits_per_pixel + 1) / 8)) + 0x3f) & ~0x3f;
23168+
23169+ ret = drm_buffer_object_create(dev,
23170+ fb->pitch * fb->height,
23171+ drm_bo_type_kernel,
23172+ DRM_BO_FLAG_READ |
23173+ DRM_BO_FLAG_WRITE |
23174+ DRM_BO_FLAG_MEM_TT |
23175+ DRM_BO_FLAG_MEM_VRAM |
23176+ DRM_BO_FLAG_NO_EVICT,
23177+ DRM_BO_HINT_DONT_FENCE, 0, 0, &fbo);
23178+ if (ret || !fbo) {
23179+ DRM_ERROR("failed to allocate framebuffer\n");
23180+ goto out_err0;
23181+ }
23182+
23183+ fb->offset = fbo->offset - dev_priv->pg->gatt_start;
23184+ fb->bo = fbo;
23185+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
23186+ fb->height, fb->offset, fbo);
23187+
23188+ fb->fbdev = info;
23189+
23190+ par = info->par;
23191+
23192+ par->dev = dev;
23193+ par->crtc = crtc;
23194+ par->vi = psbfb_vm_info_create();
23195+ if (!par->vi)
23196+ goto out_err1;
23197+
23198+ mutex_lock(&dev->struct_mutex);
23199+ par->vi->bo = fbo;
23200+ atomic_inc(&fbo->usage);
23201+ mutex_unlock(&dev->struct_mutex);
23202+
23203+ par->vi->f_mapping = NULL;
23204+ info->fbops = &psbfb_ops;
23205+
23206+ strcpy(info->fix.id, "psbfb");
23207+ info->fix.type = FB_TYPE_PACKED_PIXELS;
23208+ info->fix.visual = FB_VISUAL_DIRECTCOLOR;
23209+ info->fix.type_aux = 0;
23210+ info->fix.xpanstep = 1;
23211+ info->fix.ypanstep = 1;
23212+ info->fix.ywrapstep = 0;
23213+ info->fix.accel = FB_ACCEL_NONE; /* ??? */
23214+ info->fix.type_aux = 0;
23215+ info->fix.mmio_start = 0;
23216+ info->fix.mmio_len = 0;
23217+ info->fix.line_length = fb->pitch;
23218+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
23219+ info->fix.smem_len = info->fix.line_length * fb->height;
23220+
23221+ info->flags = FBINFO_DEFAULT |
23222+ FBINFO_PARTIAL_PAN_OK /*| FBINFO_MISC_ALWAYS_SETPAR */ ;
23223+
23224+ ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
23225+ if (ret) {
23226+ DRM_ERROR("error mapping fb: %d\n", ret);
23227+ goto out_err2;
23228+ }
23229+
23230+ info->screen_base = drm_bmo_virtual(&fb->kmap, &is_iomem);
23231+ memset(info->screen_base, 0x00, fb->pitch*fb->height);
23232+ info->screen_size = info->fix.smem_len; /* FIXME */
23233+ info->pseudo_palette = fb->pseudo_palette;
23234+ info->var.xres_virtual = fb->width;
23235+ info->var.yres_virtual = fb->height;
23236+ info->var.bits_per_pixel = fb->bits_per_pixel;
23237+ info->var.xoffset = 0;
23238+ info->var.yoffset = 0;
23239+ info->var.activate = FB_ACTIVATE_NOW;
23240+ info->var.height = -1;
23241+ info->var.width = -1;
23242+ info->var.vmode = FB_VMODE_NONINTERLACED;
23243+
23244+ info->var.xres = mode->hdisplay;
23245+ info->var.right_margin = mode->hsync_start - mode->hdisplay;
23246+ info->var.hsync_len = mode->hsync_end - mode->hsync_start;
23247+ info->var.left_margin = mode->htotal - mode->hsync_end;
23248+ info->var.yres = mode->vdisplay;
23249+ info->var.lower_margin = mode->vsync_start - mode->vdisplay;
23250+ info->var.vsync_len = mode->vsync_end - mode->vsync_start;
23251+ info->var.upper_margin = mode->vtotal - mode->vsync_end;
23252+ info->var.pixclock = 10000000 / mode->htotal * 1000 /
23253+ mode->vtotal * 100;
23254+ /* avoid overflow */
23255+ info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
23256+
23257+ info->pixmap.size = 64 * 1024;
23258+ info->pixmap.buf_align = 8;
23259+ info->pixmap.access_align = 32;
23260+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
23261+ info->pixmap.scan_align = 1;
23262+
23263+ DRM_DEBUG("fb depth is %d\n", fb->depth);
23264+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
23265+ switch (fb->depth) {
23266+ case 8:
23267+ info->var.red.offset = 0;
23268+ info->var.green.offset = 0;
23269+ info->var.blue.offset = 0;
23270+ info->var.red.length = 8; /* 8bit DAC */
23271+ info->var.green.length = 8;
23272+ info->var.blue.length = 8;
23273+ info->var.transp.offset = 0;
23274+ info->var.transp.length = 0;
23275+ break;
23276+ case 15:
23277+ info->var.red.offset = 10;
23278+ info->var.green.offset = 5;
23279+ info->var.blue.offset = 0;
23280+ info->var.red.length = info->var.green.length =
23281+ info->var.blue.length = 5;
23282+ info->var.transp.offset = 15;
23283+ info->var.transp.length = 1;
23284+ break;
23285+ case 16:
23286+ info->var.red.offset = 11;
23287+ info->var.green.offset = 5;
23288+ info->var.blue.offset = 0;
23289+ info->var.red.length = 5;
23290+ info->var.green.length = 6;
23291+ info->var.blue.length = 5;
23292+ info->var.transp.offset = 0;
23293+ break;
23294+ case 24:
23295+ info->var.red.offset = 16;
23296+ info->var.green.offset = 8;
23297+ info->var.blue.offset = 0;
23298+ info->var.red.length = info->var.green.length =
23299+ info->var.blue.length = 8;
23300+ info->var.transp.offset = 0;
23301+ info->var.transp.length = 0;
23302+ break;
23303+ case 32:
23304+ info->var.red.offset = 16;
23305+ info->var.green.offset = 8;
23306+ info->var.blue.offset = 0;
23307+ info->var.red.length = info->var.green.length =
23308+ info->var.blue.length = 8;
23309+ info->var.transp.offset = 24;
23310+ info->var.transp.length = 8;
23311+ break;
23312+ default:
23313+ break;
23314+ }
23315+
23316+ if (register_framebuffer(info) < 0)
23317+ goto out_err3;
23318+
23319+ if (psbfb_check_var(&info->var, info) < 0)
23320+ goto out_err4;
23321+
23322+ psbfb_set_par(info);
23323+
23324+ DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id);
23325+
23326+ return 0;
23327+ out_err4:
23328+ unregister_framebuffer(info);
23329+ out_err3:
23330+ drm_bo_kunmap(&fb->kmap);
23331+ out_err2:
23332+ psbfb_vm_info_deref(&par->vi);
23333+ out_err1:
23334+ drm_bo_usage_deref_unlocked(&fb->bo);
23335+ out_err0:
23336+ drm_framebuffer_destroy(fb);
23337+ framebuffer_release(info);
23338+ crtc->fb = NULL;
23339+ return -EINVAL;
23340+}
23341+
23342+EXPORT_SYMBOL(psbfb_probe);
23343+
23344+int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
23345+{
23346+ struct drm_framebuffer *fb;
23347+ struct fb_info *info;
23348+ struct psbfb_par *par;
23349+
23350+ if (drm_psb_no_fb)
23351+ return 0;
23352+
23353+ fb = crtc->fb;
23354+ info = fb->fbdev;
23355+
23356+ if (info) {
23357+ unregister_framebuffer(info);
23358+ drm_bo_kunmap(&fb->kmap);
23359+ par = info->par;
23360+ if (par)
23361+ psbfb_vm_info_deref(&par->vi);
23362+ drm_bo_usage_deref_unlocked(&fb->bo);
23363+ drm_framebuffer_destroy(fb);
23364+ framebuffer_release(info);
23365+ }
23366+ return 0;
23367+}
23368+
23369+EXPORT_SYMBOL(psbfb_remove);
23370+
23371Index: linux-2.6.27/drivers/gpu/drm/psb/psb_fence.c
23372===================================================================
23373--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23374+++ linux-2.6.27/drivers/gpu/drm/psb/psb_fence.c 2009-01-14 11:58:01.000000000 +0000
23375@@ -0,0 +1,285 @@
23376+/**************************************************************************
23377+ * Copyright (c) 2007, Intel Corporation.
23378+ * All Rights Reserved.
23379+ *
23380+ * This program is free software; you can redistribute it and/or modify it
23381+ * under the terms and conditions of the GNU General Public License,
23382+ * version 2, as published by the Free Software Foundation.
23383+ *
23384+ * This program is distributed in the hope it will be useful, but WITHOUT
23385+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23386+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
23387+ * more details.
23388+ *
23389+ * You should have received a copy of the GNU General Public License along with
23390+ * this program; if not, write to the Free Software Foundation, Inc.,
23391+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23392+ *
23393+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
23394+ * develop this driver.
23395+ *
23396+ **************************************************************************/
23397+/*
23398+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
23399+ */
23400+
23401+#include "drmP.h"
23402+#include "psb_drv.h"
23403+
23404+static void psb_poll_ta(struct drm_device *dev, uint32_t waiting_types)
23405+{
23406+ struct drm_psb_private *dev_priv =
23407+ (struct drm_psb_private *)dev->dev_private;
23408+ struct drm_fence_driver *driver = dev->driver->fence_driver;
23409+ uint32_t cur_flag = 1;
23410+ uint32_t flags = 0;
23411+ uint32_t sequence = 0;
23412+ uint32_t remaining = 0xFFFFFFFF;
23413+ uint32_t diff;
23414+
23415+ struct psb_scheduler *scheduler;
23416+ struct psb_scheduler_seq *seq;
23417+ struct drm_fence_class_manager *fc =
23418+ &dev->fm.fence_class[PSB_ENGINE_TA];
23419+
23420+ if (unlikely(!dev_priv))
23421+ return;
23422+
23423+ scheduler = &dev_priv->scheduler;
23424+ seq = scheduler->seq;
23425+
23426+ while (likely(waiting_types & remaining)) {
23427+ if (!(waiting_types & cur_flag))
23428+ goto skip;
23429+ if (seq->reported)
23430+ goto skip;
23431+ if (flags == 0)
23432+ sequence = seq->sequence;
23433+ else if (sequence != seq->sequence) {
23434+ drm_fence_handler(dev, PSB_ENGINE_TA,
23435+ sequence, flags, 0);
23436+ sequence = seq->sequence;
23437+ flags = 0;
23438+ }
23439+ flags |= cur_flag;
23440+
23441+ /*
23442+ * Sequence may not have ended up on the ring yet.
23443+ * In that case, report it but don't mark it as
23444+ * reported. A subsequent poll will report it again.
23445+ */
23446+
23447+ diff = (fc->latest_queued_sequence - sequence) &
23448+ driver->sequence_mask;
23449+ if (diff < driver->wrap_diff)
23450+ seq->reported = 1;
23451+
23452+ skip:
23453+ cur_flag <<= 1;
23454+ remaining <<= 1;
23455+ seq++;
23456+ }
23457+
23458+ if (flags) {
23459+ drm_fence_handler(dev, PSB_ENGINE_TA, sequence, flags, 0);
23460+ }
23461+}
23462+
23463+static void psb_poll_other(struct drm_device *dev, uint32_t fence_class,
23464+ uint32_t waiting_types)
23465+{
23466+ struct drm_psb_private *dev_priv =
23467+ (struct drm_psb_private *)dev->dev_private;
23468+ struct drm_fence_manager *fm = &dev->fm;
23469+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
23470+ uint32_t sequence;
23471+
23472+ if (unlikely(!dev_priv))
23473+ return;
23474+
23475+ if (waiting_types) {
23476+ if (fence_class == PSB_ENGINE_VIDEO)
23477+ sequence = dev_priv->msvdx_current_sequence;
23478+ else
23479+ sequence = dev_priv->comm[fence_class << 4];
23480+
23481+ drm_fence_handler(dev, fence_class, sequence,
23482+ DRM_FENCE_TYPE_EXE, 0);
23483+
23484+ switch (fence_class) {
23485+ case PSB_ENGINE_2D:
23486+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
23487+ psb_2D_irq_off(dev_priv);
23488+ dev_priv->fence0_irq_on = 0;
23489+ } else if (!dev_priv->fence0_irq_on
23490+ && fc->waiting_types) {
23491+ psb_2D_irq_on(dev_priv);
23492+ dev_priv->fence0_irq_on = 1;
23493+ }
23494+ break;
23495+#if 0
23496+ /*
23497+ * FIXME: MSVDX irq switching
23498+ */
23499+
23500+ case PSB_ENGINE_VIDEO:
23501+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
23502+ psb_msvdx_irq_off(dev_priv);
23503+ dev_priv->fence2_irq_on = 0;
23504+ } else if (!dev_priv->fence2_irq_on
23505+ && fc->pending_exe_flush) {
23506+ psb_msvdx_irq_on(dev_priv);
23507+ dev_priv->fence2_irq_on = 1;
23508+ }
23509+ break;
23510+#endif
23511+ default:
23512+ return;
23513+ }
23514+ }
23515+}
23516+
23517+static void psb_fence_poll(struct drm_device *dev,
23518+ uint32_t fence_class, uint32_t waiting_types)
23519+{
23520+ switch (fence_class) {
23521+ case PSB_ENGINE_TA:
23522+ psb_poll_ta(dev, waiting_types);
23523+ break;
23524+ default:
23525+ psb_poll_other(dev, fence_class, waiting_types);
23526+ break;
23527+ }
23528+}
23529+
23530+void psb_fence_error(struct drm_device *dev,
23531+ uint32_t fence_class,
23532+ uint32_t sequence, uint32_t type, int error)
23533+{
23534+ struct drm_fence_manager *fm = &dev->fm;
23535+ unsigned long irq_flags;
23536+
23537+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
23538+ write_lock_irqsave(&fm->lock, irq_flags);
23539+ drm_fence_handler(dev, fence_class, sequence, type, error);
23540+ write_unlock_irqrestore(&fm->lock, irq_flags);
23541+}
23542+
23543+int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
23544+ uint32_t flags, uint32_t * sequence,
23545+ uint32_t * native_type)
23546+{
23547+ struct drm_psb_private *dev_priv =
23548+ (struct drm_psb_private *)dev->dev_private;
23549+ uint32_t seq = 0;
23550+ int ret;
23551+
23552+ if (!dev_priv)
23553+ return -EINVAL;
23554+
23555+ if (fence_class >= PSB_NUM_ENGINES)
23556+ return -EINVAL;
23557+
23558+ switch (fence_class) {
23559+ case PSB_ENGINE_2D:
23560+ spin_lock(&dev_priv->sequence_lock);
23561+ seq = ++dev_priv->sequence[fence_class];
23562+ spin_unlock(&dev_priv->sequence_lock);
23563+ ret = psb_blit_sequence(dev_priv, seq);
23564+ if (ret)
23565+ return ret;
23566+ break;
23567+ case PSB_ENGINE_VIDEO:
23568+ spin_lock(&dev_priv->sequence_lock);
23569+ seq = ++dev_priv->sequence[fence_class];
23570+ spin_unlock(&dev_priv->sequence_lock);
23571+ break;
23572+ default:
23573+ spin_lock(&dev_priv->sequence_lock);
23574+ seq = dev_priv->sequence[fence_class];
23575+ spin_unlock(&dev_priv->sequence_lock);
23576+ }
23577+
23578+ *sequence = seq;
23579+ *native_type = DRM_FENCE_TYPE_EXE;
23580+
23581+ return 0;
23582+}
23583+
23584+uint32_t psb_fence_advance_sequence(struct drm_device * dev,
23585+ uint32_t fence_class)
23586+{
23587+ struct drm_psb_private *dev_priv =
23588+ (struct drm_psb_private *)dev->dev_private;
23589+ uint32_t sequence;
23590+
23591+ spin_lock(&dev_priv->sequence_lock);
23592+ sequence = ++dev_priv->sequence[fence_class];
23593+ spin_unlock(&dev_priv->sequence_lock);
23594+
23595+ return sequence;
23596+}
23597+
23598+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
23599+{
23600+ struct drm_fence_manager *fm = &dev->fm;
23601+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
23602+
23603+#ifdef FIX_TG_16
23604+ if (fence_class == 0) {
23605+ struct drm_psb_private *dev_priv =
23606+ (struct drm_psb_private *)dev->dev_private;
23607+
23608+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
23609+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
23610+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
23611+ _PSB_C2B_STATUS_BUSY) == 0))
23612+ psb_resume_ta_2d_idle(dev_priv);
23613+ }
23614+#endif
23615+ write_lock(&fm->lock);
23616+ psb_fence_poll(dev, fence_class, fc->waiting_types);
23617+ write_unlock(&fm->lock);
23618+}
23619+
23620+static int psb_fence_wait(struct drm_fence_object *fence,
23621+ int lazy, int interruptible, uint32_t mask)
23622+{
23623+ struct drm_device *dev = fence->dev;
23624+ struct drm_fence_class_manager *fc =
23625+ &dev->fm.fence_class[fence->fence_class];
23626+ int ret = 0;
23627+ unsigned long timeout = DRM_HZ *
23628+ ((fence->fence_class == PSB_ENGINE_TA) ? 30 : 3);
23629+
23630+ drm_fence_object_flush(fence, mask);
23631+ if (interruptible)
23632+ ret = wait_event_interruptible_timeout
23633+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
23634+ timeout);
23635+ else
23636+ ret = wait_event_timeout
23637+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
23638+ timeout);
23639+
23640+ if (unlikely(ret == -ERESTARTSYS))
23641+ return -EAGAIN;
23642+
23643+ if (unlikely(ret == 0))
23644+ return -EBUSY;
23645+
23646+ return 0;
23647+}
23648+
23649+struct drm_fence_driver psb_fence_driver = {
23650+ .num_classes = PSB_NUM_ENGINES,
23651+ .wrap_diff = (1 << 30),
23652+ .flush_diff = (1 << 29),
23653+ .sequence_mask = 0xFFFFFFFFU,
23654+ .has_irq = NULL,
23655+ .emit = psb_fence_emit_sequence,
23656+ .flush = NULL,
23657+ .poll = psb_fence_poll,
23658+ .needed_flush = NULL,
23659+ .wait = psb_fence_wait
23660+};
23661Index: linux-2.6.27/drivers/gpu/drm/psb/psb_gtt.c
23662===================================================================
23663--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23664+++ linux-2.6.27/drivers/gpu/drm/psb/psb_gtt.c 2009-01-14 11:58:01.000000000 +0000
23665@@ -0,0 +1,233 @@
23666+/**************************************************************************
23667+ * Copyright (c) 2007, Intel Corporation.
23668+ * All Rights Reserved.
23669+ *
23670+ * This program is free software; you can redistribute it and/or modify it
23671+ * under the terms and conditions of the GNU General Public License,
23672+ * version 2, as published by the Free Software Foundation.
23673+ *
23674+ * This program is distributed in the hope it will be useful, but WITHOUT
23675+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23676+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
23677+ * more details.
23678+ *
23679+ * You should have received a copy of the GNU General Public License along with
23680+ * this program; if not, write to the Free Software Foundation, Inc.,
23681+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23682+ *
23683+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
23684+ * develop this driver.
23685+ *
23686+ **************************************************************************/
23687+/*
23688+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
23689+ */
23690+#include "drmP.h"
23691+#include "psb_drv.h"
23692+
23693+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
23694+{
23695+ uint32_t mask = PSB_PTE_VALID;
23696+
23697+ if (type & PSB_MMU_CACHED_MEMORY)
23698+ mask |= PSB_PTE_CACHED;
23699+ if (type & PSB_MMU_RO_MEMORY)
23700+ mask |= PSB_PTE_RO;
23701+ if (type & PSB_MMU_WO_MEMORY)
23702+ mask |= PSB_PTE_WO;
23703+
23704+ return (pfn << PAGE_SHIFT) | mask;
23705+}
23706+
23707+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
23708+{
23709+ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
23710+
23711+ if (!tmp)
23712+ return NULL;
23713+
23714+ init_rwsem(&tmp->sem);
23715+ tmp->dev = dev;
23716+
23717+ return tmp;
23718+}
23719+
23720+void psb_gtt_takedown(struct psb_gtt *pg, int free)
23721+{
23722+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
23723+
23724+ if (!pg)
23725+ return;
23726+
23727+ if (pg->gtt_map) {
23728+ iounmap(pg->gtt_map);
23729+ pg->gtt_map = NULL;
23730+ }
23731+ if (pg->initialized) {
23732+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
23733+ pg->gmch_ctrl);
23734+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
23735+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
23736+ }
23737+ if (free)
23738+ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
23739+}
23740+
23741+int psb_gtt_init(struct psb_gtt *pg, int resume)
23742+{
23743+ struct drm_device *dev = pg->dev;
23744+ struct drm_psb_private *dev_priv = dev->dev_private;
23745+ unsigned gtt_pages;
23746+ unsigned long stolen_size;
23747+ unsigned i, num_pages;
23748+ unsigned pfn_base;
23749+
23750+ int ret = 0;
23751+ uint32_t pte;
23752+
23753+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
23754+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
23755+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
23756+
23757+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
23758+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
23759+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
23760+
23761+ pg->initialized = 1;
23762+
23763+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
23764+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
23765+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
23766+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
23767+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
23768+ >> PAGE_SHIFT;
23769+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
23770+ stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
23771+
23772+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
23773+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
23774+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
23775+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
23776+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
23777+
23778+ if (resume && (gtt_pages != pg->gtt_pages) &&
23779+ (stolen_size != pg->stolen_size)) {
23780+ DRM_ERROR("GTT resume error.\n");
23781+ ret = -EINVAL;
23782+ goto out_err;
23783+ }
23784+
23785+ pg->gtt_pages = gtt_pages;
23786+ pg->stolen_size = stolen_size;
23787+ pg->gtt_map =
23788+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
23789+ if (!pg->gtt_map) {
23790+ DRM_ERROR("Failure to map gtt.\n");
23791+ ret = -ENOMEM;
23792+ goto out_err;
23793+ }
23794+
23795+ /*
23796+ * insert stolen pages.
23797+ */
23798+
23799+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
23800+ num_pages = stolen_size >> PAGE_SHIFT;
23801+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
23802+ num_pages, pfn_base);
23803+ for (i = 0; i < num_pages; ++i) {
23804+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
23805+ iowrite32(pte, pg->gtt_map + i);
23806+ }
23807+
23808+ /*
23809+ * Init rest of gtt.
23810+ */
23811+
23812+ pfn_base = page_to_pfn(dev_priv->scratch_page);
23813+ pte = psb_gtt_mask_pte(pfn_base, 0);
23814+ PSB_DEBUG_INIT("Initializing the rest of a total "
23815+ "of %d gtt pages.\n", pg->gatt_pages);
23816+
23817+ for (; i < pg->gatt_pages; ++i)
23818+ iowrite32(pte, pg->gtt_map + i);
23819+ (void)ioread32(pg->gtt_map + i - 1);
23820+
23821+ return 0;
23822+
23823+ out_err:
23824+ psb_gtt_takedown(pg, 0);
23825+ return ret;
23826+}
23827+
23828+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
23829+ unsigned offset_pages, unsigned num_pages,
23830+ unsigned desired_tile_stride, unsigned hw_tile_stride,
23831+ int type)
23832+{
23833+ unsigned rows = 1;
23834+ unsigned add;
23835+ unsigned row_add;
23836+ unsigned i;
23837+ unsigned j;
23838+ uint32_t *cur_page = NULL;
23839+ uint32_t pte;
23840+
23841+ if (hw_tile_stride)
23842+ rows = num_pages / desired_tile_stride;
23843+ else
23844+ desired_tile_stride = num_pages;
23845+
23846+ add = desired_tile_stride;
23847+ row_add = hw_tile_stride;
23848+
23849+ down_read(&pg->sem);
23850+ for (i = 0; i < rows; ++i) {
23851+ cur_page = pg->gtt_map + offset_pages;
23852+ for (j = 0; j < desired_tile_stride; ++j) {
23853+ pte = psb_gtt_mask_pte(page_to_pfn(*pages++), type);
23854+ iowrite32(pte, cur_page++);
23855+ }
23856+ offset_pages += add;
23857+ }
23858+ (void)ioread32(cur_page - 1);
23859+ up_read(&pg->sem);
23860+
23861+ return 0;
23862+}
23863+
23864+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
23865+ unsigned num_pages, unsigned desired_tile_stride,
23866+ unsigned hw_tile_stride)
23867+{
23868+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
23869+ unsigned rows = 1;
23870+ unsigned add;
23871+ unsigned row_add;
23872+ unsigned i;
23873+ unsigned j;
23874+ uint32_t *cur_page = NULL;
23875+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
23876+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
23877+
23878+ if (hw_tile_stride)
23879+ rows = num_pages / desired_tile_stride;
23880+ else
23881+ desired_tile_stride = num_pages;
23882+
23883+ add = desired_tile_stride;
23884+ row_add = hw_tile_stride;
23885+
23886+ down_read(&pg->sem);
23887+ for (i = 0; i < rows; ++i) {
23888+ cur_page = pg->gtt_map + offset_pages;
23889+ for (j = 0; j < desired_tile_stride; ++j) {
23890+ iowrite32(pte, cur_page++);
23891+ }
23892+ offset_pages += add;
23893+ }
23894+ (void)ioread32(cur_page - 1);
23895+ up_read(&pg->sem);
23896+
23897+ return 0;
23898+}
23899Index: linux-2.6.27/drivers/gpu/drm/psb/psb_i2c.c
23900===================================================================
23901--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23902+++ linux-2.6.27/drivers/gpu/drm/psb/psb_i2c.c 2009-01-14 11:58:01.000000000 +0000
23903@@ -0,0 +1,179 @@
23904+/*
23905+ * Copyright © 2006-2007 Intel Corporation
23906+ *
23907+ * Permission is hereby granted, free of charge, to any person obtaining a
23908+ * copy of this software and associated documentation files (the "Software"),
23909+ * to deal in the Software without restriction, including without limitation
23910+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
23911+ * and/or sell copies of the Software, and to permit persons to whom the
23912+ * Software is furnished to do so, subject to the following conditions:
23913+ *
23914+ * The above copyright notice and this permission notice (including the next
23915+ * paragraph) shall be included in all copies or substantial portions of the
23916+ * Software.
23917+ *
23918+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23919+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23920+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23921+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23922+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23923+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23924+ * DEALINGS IN THE SOFTWARE.
23925+ *
23926+ * Authors:
23927+ * Eric Anholt <eric@anholt.net>
23928+ */
23929+/*
23930+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
23931+ * Jesse Barnes <jesse.barnes@intel.com>
23932+ */
23933+
23934+#include <linux/i2c.h>
23935+#include <linux/i2c-id.h>
23936+#include <linux/i2c-algo-bit.h>
23937+#include "drmP.h"
23938+#include "drm.h"
23939+#include "intel_drv.h"
23940+#include "psb_drv.h"
23941+
23942+/*
23943+ * Intel GPIO access functions
23944+ */
23945+
23946+#define I2C_RISEFALL_TIME 20
23947+
23948+static int get_clock(void *data)
23949+{
23950+ struct intel_i2c_chan *chan = data;
23951+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
23952+ uint32_t val;
23953+
23954+ val = PSB_RVDC32(chan->reg);
23955+ return ((val & GPIO_CLOCK_VAL_IN) != 0);
23956+}
23957+
23958+static int get_data(void *data)
23959+{
23960+ struct intel_i2c_chan *chan = data;
23961+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
23962+ uint32_t val;
23963+
23964+ val = PSB_RVDC32(chan->reg);
23965+ return ((val & GPIO_DATA_VAL_IN) != 0);
23966+}
23967+
23968+static void set_clock(void *data, int state_high)
23969+{
23970+ struct intel_i2c_chan *chan = data;
23971+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
23972+ uint32_t reserved = 0, clock_bits;
23973+
23974+ /* On most chips, these bits must be preserved in software. */
23975+ reserved = PSB_RVDC32(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
23976+ GPIO_CLOCK_PULLUP_DISABLE);
23977+
23978+ if (state_high)
23979+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
23980+ else
23981+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
23982+ GPIO_CLOCK_VAL_MASK;
23983+ PSB_WVDC32(reserved | clock_bits, chan->reg);
23984+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
23985+}
23986+
23987+static void set_data(void *data, int state_high)
23988+{
23989+ struct intel_i2c_chan *chan = data;
23990+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
23991+ uint32_t reserved = 0, data_bits;
23992+
23993+ /* On most chips, these bits must be preserved in software. */
23994+ reserved = PSB_RVDC32(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
23995+ GPIO_CLOCK_PULLUP_DISABLE);
23996+
23997+ if (state_high)
23998+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
23999+ else
24000+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
24001+ GPIO_DATA_VAL_MASK;
24002+
24003+ PSB_WVDC32(data_bits, chan->reg);
24004+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
24005+}
24006+
24007+/**
24008+ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
24009+ * @dev: DRM device
24010+ * @output: driver specific output device
24011+ * @reg: GPIO reg to use
24012+ * @name: name for this bus
24013+ *
24014+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
24015+ * in output probing and control (e.g. DDC or SDVO control functions).
24016+ *
24017+ * Possible values for @reg include:
24018+ * %GPIOA
24019+ * %GPIOB
24020+ * %GPIOC
24021+ * %GPIOD
24022+ * %GPIOE
24023+ * %GPIOF
24024+ * %GPIOG
24025+ * %GPIOH
24026+ * see PRM for details on how these different busses are used.
24027+ */
24028+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev,
24029+ const uint32_t reg, const char *name)
24030+{
24031+ struct intel_i2c_chan *chan;
24032+
24033+ chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
24034+ if (!chan)
24035+ goto out_free;
24036+
24037+ chan->drm_dev = dev;
24038+ chan->reg = reg;
24039+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
24040+ chan->adapter.owner = THIS_MODULE;
24041+ chan->adapter.id = I2C_HW_B_INTELFB;
24042+ chan->adapter.algo_data = &chan->algo;
24043+ chan->adapter.dev.parent = &dev->pdev->dev;
24044+ chan->algo.setsda = set_data;
24045+ chan->algo.setscl = set_clock;
24046+ chan->algo.getsda = get_data;
24047+ chan->algo.getscl = get_clock;
24048+ chan->algo.udelay = 20;
24049+ chan->algo.timeout = usecs_to_jiffies(2200);
24050+ chan->algo.data = chan;
24051+
24052+ i2c_set_adapdata(&chan->adapter, chan);
24053+
24054+ if (i2c_bit_add_bus(&chan->adapter))
24055+ goto out_free;
24056+
24057+ /* JJJ: raise SCL and SDA? */
24058+ set_data(chan, 1);
24059+ set_clock(chan, 1);
24060+ udelay(20);
24061+
24062+ return chan;
24063+
24064+ out_free:
24065+ kfree(chan);
24066+ return NULL;
24067+}
24068+
24069+/**
24070+ * intel_i2c_destroy - unregister and free i2c bus resources
24071+ * @output: channel to free
24072+ *
24073+ * Unregister the adapter from the i2c layer, then free the structure.
24074+ */
24075+void intel_i2c_destroy(struct intel_i2c_chan *chan)
24076+{
24077+ if (!chan)
24078+ return;
24079+
24080+ i2c_del_adapter(&chan->adapter);
24081+ kfree(chan);
24082+}
24083Index: linux-2.6.27/drivers/gpu/drm/psb/psb_irq.c
24084===================================================================
24085--- /dev/null 1970-01-01 00:00:00.000000000 +0000
24086+++ linux-2.6.27/drivers/gpu/drm/psb/psb_irq.c 2009-01-14 11:58:01.000000000 +0000
24087@@ -0,0 +1,382 @@
24088+/**************************************************************************
24089+ * Copyright (c) 2007, Intel Corporation.
24090+ * All Rights Reserved.
24091+ *
24092+ * This program is free software; you can redistribute it and/or modify it
24093+ * under the terms and conditions of the GNU General Public License,
24094+ * version 2, as published by the Free Software Foundation.
24095+ *
24096+ * This program is distributed in the hope it will be useful, but WITHOUT
24097+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24098+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24099+ * more details.
24100+ *
24101+ * You should have received a copy of the GNU General Public License along with
24102+ * this program; if not, write to the Free Software Foundation, Inc.,
24103+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24104+ *
24105+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
24106+ * develop this driver.
24107+ *
24108+ **************************************************************************/
24109+/*
24110+ */
24111+
24112+#include "drmP.h"
24113+#include "psb_drv.h"
24114+#include "psb_reg.h"
24115+#include "psb_msvdx.h"
24116+
24117+/*
24118+ * Video display controller interrupt.
24119+ */
24120+
24121+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
24122+{
24123+ struct drm_psb_private *dev_priv =
24124+ (struct drm_psb_private *)dev->dev_private;
24125+ uint32_t pipe_stats;
24126+ int wake = 0;
24127+
24128+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
24129+ pipe_stats = PSB_RVDC32(PSB_PIPEASTAT);
24130+ atomic_inc(&dev->vbl_received);
24131+ wake = 1;
24132+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
24133+ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
24134+ }
24135+
24136+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
24137+ pipe_stats = PSB_RVDC32(PSB_PIPEBSTAT);
24138+ atomic_inc(&dev->vbl_received2);
24139+ wake = 1;
24140+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
24141+ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
24142+ }
24143+
24144+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
24145+ (void)PSB_RVDC32(PSB_INT_IDENTITY_R);
24146+ DRM_READMEMORYBARRIER();
24147+
24148+ if (wake) {
24149+ DRM_WAKEUP(&dev->vbl_queue);
24150+ drm_vbl_send_signals(dev);
24151+ }
24152+}
24153+
24154+/*
24155+ * SGX interrupt source 1.
24156+ */
24157+
24158+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
24159+ uint32_t sgx_stat2)
24160+{
24161+ struct drm_psb_private *dev_priv =
24162+ (struct drm_psb_private *)dev->dev_private;
24163+
24164+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
24165+ DRM_WAKEUP(&dev_priv->event_2d_queue);
24166+ psb_fence_handler(dev, 0);
24167+ }
24168+
24169+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
24170+ psb_print_pagefault(dev_priv);
24171+
24172+ psb_scheduler_handler(dev_priv, sgx_stat);
24173+}
24174+
24175+/*
24176+ * MSVDX interrupt.
24177+ */
24178+static void psb_msvdx_interrupt(struct drm_device *dev, uint32_t msvdx_stat)
24179+{
24180+ struct drm_psb_private *dev_priv =
24181+ (struct drm_psb_private *)dev->dev_private;
24182+
24183+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
24184+		/* Ideally we should never get to this */
24185+ PSB_DEBUG_GENERAL
24186+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MMU FAULT)\n",
24187+ msvdx_stat, dev_priv->fence2_irq_on);
24188+
24189+ /* Pause MMU */
24190+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
24191+ MSVDX_MMU_CONTROL0);
24192+ DRM_WRITEMEMORYBARRIER();
24193+
24194+		/* Clear this interrupt bit only */
24195+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
24196+ MSVDX_INTERRUPT_CLEAR);
24197+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
24198+ DRM_READMEMORYBARRIER();
24199+
24200+ dev_priv->msvdx_needs_reset = 1;
24201+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
24202+ PSB_DEBUG_GENERAL
24203+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MTX)\n",
24204+ msvdx_stat, dev_priv->fence2_irq_on);
24205+
24206+		/* Clear all interrupt bits */
24207+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
24208+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
24209+ DRM_READMEMORYBARRIER();
24210+
24211+ psb_msvdx_mtx_interrupt(dev);
24212+ }
24213+}
24214+
24215+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
24216+{
24217+ struct drm_device *dev = (struct drm_device *)arg;
24218+ struct drm_psb_private *dev_priv =
24219+ (struct drm_psb_private *)dev->dev_private;
24220+
24221+ uint32_t vdc_stat;
24222+ uint32_t sgx_stat;
24223+ uint32_t sgx_stat2;
24224+ uint32_t msvdx_stat;
24225+ int handled = 0;
24226+
24227+ spin_lock(&dev_priv->irqmask_lock);
24228+
24229+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
24230+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
24231+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
24232+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
24233+
24234+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
24235+ sgx_stat &= dev_priv->sgx_irq_mask;
24236+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
24237+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
24238+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
24239+
24240+ vdc_stat &= dev_priv->vdc_irq_mask;
24241+ spin_unlock(&dev_priv->irqmask_lock);
24242+
24243+ if (msvdx_stat) {
24244+ psb_msvdx_interrupt(dev, msvdx_stat);
24245+ handled = 1;
24246+ }
24247+
24248+ if (vdc_stat) {
24249+ /* MSVDX IRQ status is part of vdc_irq_mask */
24250+ psb_vdc_interrupt(dev, vdc_stat);
24251+ handled = 1;
24252+ }
24253+
24254+ if (sgx_stat || sgx_stat2) {
24255+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
24256+ handled = 1;
24257+ }
24258+
24259+ if (!handled) {
24260+ return IRQ_NONE;
24261+ }
24262+
24263+ return IRQ_HANDLED;
24264+}
24265+
24266+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
24267+{
24268+ unsigned long mtx_int = 0;
24269+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
24270+
24271+ /*Clear MTX interrupt */
24272+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
24273+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
24274+}
24275+
24276+void psb_irq_preinstall(struct drm_device *dev)
24277+{
24278+ struct drm_psb_private *dev_priv =
24279+ (struct drm_psb_private *)dev->dev_private;
24280+ spin_lock(&dev_priv->irqmask_lock);
24281+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
24282+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
24283+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
24284+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
24285+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
24286+
24287+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
24288+ _PSB_CE_DPM_3D_MEM_FREE |
24289+ _PSB_CE_TA_FINISHED |
24290+ _PSB_CE_DPM_REACHED_MEM_THRESH |
24291+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
24292+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
24293+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
24294+
24295+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
24296+
24297+ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG;
24298+
24299+ if (!drm_psb_disable_vsync)
24300+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
24301+ _PSB_VSYNC_PIPEB_FLAG;
24302+
24303+ /*Clear MTX interrupt */
24304+ {
24305+ unsigned long mtx_int = 0;
24306+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
24307+ CR_MTX_IRQ, 1);
24308+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
24309+ }
24310+ spin_unlock(&dev_priv->irqmask_lock);
24311+}
24312+
24313+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
24314+{
24315+	/* Enable Mtx Interrupt to host */
24316+ unsigned long enables = 0;
24317+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
24318+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
24319+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
24320+}
24321+
24322+int psb_irq_postinstall(struct drm_device *dev)
24323+{
24324+ struct drm_psb_private *dev_priv =
24325+ (struct drm_psb_private *)dev->dev_private;
24326+ unsigned long irqflags;
24327+
24328+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24329+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
24330+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
24331+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
24332+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
24333+ /****MSVDX IRQ Setup...*****/
24334+	/* Enable Mtx Interrupt to host */
24335+ {
24336+ unsigned long enables = 0;
24337+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
24338+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
24339+ CR_MTX_IRQ, 1);
24340+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
24341+ }
24342+ dev_priv->irq_enabled = 1;
24343+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24344+ return 0;
24345+}
24346+
24347+void psb_irq_uninstall(struct drm_device *dev)
24348+{
24349+ struct drm_psb_private *dev_priv =
24350+ (struct drm_psb_private *)dev->dev_private;
24351+ unsigned long irqflags;
24352+
24353+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24354+
24355+ dev_priv->sgx_irq_mask = 0x00000000;
24356+ dev_priv->sgx2_irq_mask = 0x00000000;
24357+ dev_priv->vdc_irq_mask = 0x00000000;
24358+
24359+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
24360+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
24361+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
24362+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
24363+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
24364+ wmb();
24365+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
24366+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), PSB_CR_EVENT_HOST_CLEAR);
24367+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), PSB_CR_EVENT_HOST_CLEAR2);
24368+
24369+ /****MSVDX IRQ Setup...*****/
24370+ /* Clear interrupt enabled flag */
24371+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
24372+
24373+ dev_priv->irq_enabled = 0;
24374+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24375+
24376+}
24377+
24378+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
24379+{
24380+ unsigned long irqflags;
24381+ uint32_t old_mask;
24382+ uint32_t cleared_mask;
24383+
24384+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24385+ --dev_priv->irqen_count_2d;
24386+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
24387+
24388+ old_mask = dev_priv->sgx_irq_mask;
24389+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
24390+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
24391+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
24392+
24393+ cleared_mask = (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
24394+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
24395+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
24396+ }
24397+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24398+}
24399+
24400+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
24401+{
24402+ unsigned long irqflags;
24403+
24404+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24405+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
24406+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
24407+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
24408+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
24409+ }
24410+ ++dev_priv->irqen_count_2d;
24411+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24412+}
24413+
24414+static int psb_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
24415+ atomic_t * counter)
24416+{
24417+ unsigned int cur_vblank;
24418+ int ret = 0;
24419+
24420+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
24421+ (((cur_vblank = atomic_read(counter))
24422+ - *sequence) <= (1 << 23)));
24423+
24424+ *sequence = cur_vblank;
24425+
24426+ return ret;
24427+}
24428+
24429+int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence)
24430+{
24431+ int ret;
24432+
24433+ ret = psb_vblank_do_wait(dev, sequence, &dev->vbl_received);
24434+ return ret;
24435+}
24436+
24437+int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
24438+{
24439+ int ret;
24440+
24441+ ret = psb_vblank_do_wait(dev, sequence, &dev->vbl_received2);
24442+ return ret;
24443+}
24444+
24445+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
24446+{
24447+ unsigned long irqflags;
24448+
24449+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24450+ if (dev_priv->irq_enabled) {
24451+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
24452+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
24453+ (void)PSB_RSGX32(PSB_INT_ENABLE_R);
24454+ }
24455+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24456+}
24457+
24458+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
24459+{
24460+ unsigned long irqflags;
24461+
24462+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
24463+ if (dev_priv->irq_enabled) {
24464+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
24465+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
24466+ (void)PSB_RSGX32(PSB_INT_ENABLE_R);
24467+ }
24468+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
24469+}
24470Index: linux-2.6.27/drivers/gpu/drm/psb/psb_mmu.c
24471===================================================================
24472--- /dev/null 1970-01-01 00:00:00.000000000 +0000
24473+++ linux-2.6.27/drivers/gpu/drm/psb/psb_mmu.c 2009-01-14 11:58:01.000000000 +0000
24474@@ -0,0 +1,1037 @@
24475+/**************************************************************************
24476+ * Copyright (c) 2007, Intel Corporation.
24477+ * All Rights Reserved.
24478+ *
24479+ * This program is free software; you can redistribute it and/or modify it
24480+ * under the terms and conditions of the GNU General Public License,
24481+ * version 2, as published by the Free Software Foundation.
24482+ *
24483+ * This program is distributed in the hope it will be useful, but WITHOUT
24484+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24485+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24486+ * more details.
24487+ *
24488+ * You should have received a copy of the GNU General Public License along with
24489+ * this program; if not, write to the Free Software Foundation, Inc.,
24490+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24491+ *
24492+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
24493+ * develop this driver.
24494+ *
24495+ **************************************************************************/
24496+#include "drmP.h"
24497+#include "psb_drv.h"
24498+#include "psb_reg.h"
24499+
24500+/*
24501+ * Code for the SGX MMU:
24502+ */
24503+
24504+/*
24505+ * clflush on one processor only:
24506+ * clflush should apparently flush the cache line on all processors in an
24507+ * SMP system.
24508+ */
24509+
24510+/*
24511+ * kmap atomic:
24512+ * The usage of the slots must be completely encapsulated within a spinlock, and
24513+ * no other functions that may be using the locks for other purposes may be
24514+ * called from within the locked region.
24515+ * Since the slots are per processor, this will guarantee that we are the only
24516+ * user.
24517+ */
24518+
24519+/*
24520+ * TODO: Inserting ptes from an interrupt handler:
24521+ * This may be desirable for some SGX functionality where the GPU can fault in
24522+ * needed pages. For that, we need to make an atomic insert_pages function, that
24523+ * may fail.
24524+ * If it fails, the caller need to insert the page using a workqueue function,
24525+ * but on average it should be fast.
24526+ */
24527+
24528+struct psb_mmu_driver {
24529+ /* protects driver- and pd structures. Always take in read mode
24530+ * before taking the page table spinlock.
24531+ */
24532+ struct rw_semaphore sem;
24533+
24534+ /* protects page tables, directory tables and pt tables.
24535+ * and pt structures.
24536+ */
24537+ spinlock_t lock;
24538+
24539+ atomic_t needs_tlbflush;
24540+ atomic_t *msvdx_mmu_invaldc;
24541+ uint8_t __iomem *register_map;
24542+ struct psb_mmu_pd *default_pd;
24543+ uint32_t bif_ctrl;
24544+ int has_clflush;
24545+ int clflush_add;
24546+ unsigned long clflush_mask;
24547+};
24548+
24549+struct psb_mmu_pd;
24550+
24551+struct psb_mmu_pt {
24552+ struct psb_mmu_pd *pd;
24553+ uint32_t index;
24554+ uint32_t count;
24555+ struct page *p;
24556+ uint32_t *v;
24557+};
24558+
24559+struct psb_mmu_pd {
24560+ struct psb_mmu_driver *driver;
24561+ int hw_context;
24562+ struct psb_mmu_pt **tables;
24563+ struct page *p;
24564+ struct page *dummy_pt;
24565+ struct page *dummy_page;
24566+ uint32_t pd_mask;
24567+ uint32_t invalid_pde;
24568+ uint32_t invalid_pte;
24569+};
24570+
24571+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
24572+{
24573+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
24574+}
24575+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
24576+{
24577+ return (offset >> PSB_PDE_SHIFT);
24578+}
24579+
24580+#if defined(CONFIG_X86)
24581+static inline void psb_clflush(void *addr)
24582+{
24583+ __asm__ __volatile__("clflush (%0)\n"::"r"(addr):"memory");
24584+}
24585+
24586+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
24587+{
24588+ if (!driver->has_clflush)
24589+ return;
24590+
24591+ mb();
24592+ psb_clflush(addr);
24593+ mb();
24594+}
24595+#else
24596+
24597+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
24598+{;
24599+}
24600+
24601+#endif
24602+
24603+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
24604+ uint32_t val, uint32_t offset)
24605+{
24606+ iowrite32(val, d->register_map + offset);
24607+}
24608+
24609+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
24610+ uint32_t offset)
24611+{
24612+ return ioread32(d->register_map + offset);
24613+}
24614+
24615+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
24616+{
24617+ if (atomic_read(&driver->needs_tlbflush) || force) {
24618+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
24619+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
24620+ PSB_CR_BIF_CTRL);
24621+ wmb();
24622+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
24623+ PSB_CR_BIF_CTRL);
24624+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
24625+ if (driver->msvdx_mmu_invaldc)
24626+ atomic_set(driver->msvdx_mmu_invaldc, 1);
24627+ }
24628+ atomic_set(&driver->needs_tlbflush, 0);
24629+}
24630+
24631+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
24632+{
24633+ down_write(&driver->sem);
24634+ psb_mmu_flush_pd_locked(driver, force);
24635+ up_write(&driver->sem);
24636+}
24637+
24638+void psb_mmu_flush(struct psb_mmu_driver *driver)
24639+{
24640+ uint32_t val;
24641+
24642+ down_write(&driver->sem);
24643+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
24644+ if (atomic_read(&driver->needs_tlbflush))
24645+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
24646+ PSB_CR_BIF_CTRL);
24647+ else
24648+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
24649+ PSB_CR_BIF_CTRL);
24650+ wmb();
24651+ psb_iowrite32(driver,
24652+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
24653+ PSB_CR_BIF_CTRL);
24654+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
24655+ atomic_set(&driver->needs_tlbflush, 0);
24656+ if (driver->msvdx_mmu_invaldc)
24657+ atomic_set(driver->msvdx_mmu_invaldc, 1);
24658+ up_write(&driver->sem);
24659+}
24660+
24661+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
24662+{
24663+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
24664+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
24665+
24666+ drm_ttm_cache_flush();
24667+ down_write(&pd->driver->sem);
24668+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), offset);
24669+ wmb();
24670+ psb_mmu_flush_pd_locked(pd->driver, 1);
24671+ pd->hw_context = hw_context;
24672+ up_write(&pd->driver->sem);
24673+
24674+}
24675+
24676+static inline unsigned long psb_pd_addr_end(unsigned long addr,
24677+ unsigned long end)
24678+{
24679+
24680+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
24681+ return (addr < end) ? addr : end;
24682+}
24683+
24684+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
24685+{
24686+ uint32_t mask = PSB_PTE_VALID;
24687+
24688+ if (type & PSB_MMU_CACHED_MEMORY)
24689+ mask |= PSB_PTE_CACHED;
24690+ if (type & PSB_MMU_RO_MEMORY)
24691+ mask |= PSB_PTE_RO;
24692+ if (type & PSB_MMU_WO_MEMORY)
24693+ mask |= PSB_PTE_WO;
24694+
24695+ return (pfn << PAGE_SHIFT) | mask;
24696+}
24697+
24698+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
24699+ int trap_pagefaults, int invalid_type)
24700+{
24701+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
24702+ uint32_t *v;
24703+ int i;
24704+
24705+ if (!pd)
24706+ return NULL;
24707+
24708+ pd->p = alloc_page(GFP_DMA32);
24709+ if (!pd->p)
24710+ goto out_err1;
24711+ pd->dummy_pt = alloc_page(GFP_DMA32);
24712+ if (!pd->dummy_pt)
24713+ goto out_err2;
24714+ pd->dummy_page = alloc_page(GFP_DMA32);
24715+ if (!pd->dummy_page)
24716+ goto out_err3;
24717+
24718+ if (!trap_pagefaults) {
24719+ pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
24720+ invalid_type |
24721+ PSB_MMU_CACHED_MEMORY);
24722+ pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
24723+ invalid_type |
24724+ PSB_MMU_CACHED_MEMORY);
24725+ } else {
24726+ pd->invalid_pde = 0;
24727+ pd->invalid_pte = 0;
24728+ }
24729+
24730+ v = kmap(pd->dummy_pt);
24731+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
24732+ v[i] = pd->invalid_pte;
24733+ }
24734+ kunmap(pd->dummy_pt);
24735+
24736+ v = kmap(pd->p);
24737+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
24738+ v[i] = pd->invalid_pde;
24739+ }
24740+ kunmap(pd->p);
24741+
24742+ clear_page(kmap(pd->dummy_page));
24743+ kunmap(pd->dummy_page);
24744+
24745+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
24746+ if (!pd->tables)
24747+ goto out_err4;
24748+
24749+ pd->hw_context = -1;
24750+ pd->pd_mask = PSB_PTE_VALID;
24751+ pd->driver = driver;
24752+
24753+ return pd;
24754+
24755+ out_err4:
24756+ __free_page(pd->dummy_page);
24757+ out_err3:
24758+ __free_page(pd->dummy_pt);
24759+ out_err2:
24760+ __free_page(pd->p);
24761+ out_err1:
24762+ kfree(pd);
24763+ return NULL;
24764+}
24765+
24766+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
24767+{
24768+ __free_page(pt->p);
24769+ kfree(pt);
24770+}
24771+
24772+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
24773+{
24774+ struct psb_mmu_driver *driver = pd->driver;
24775+ struct psb_mmu_pt *pt;
24776+ int i;
24777+
24778+ down_write(&driver->sem);
24779+ if (pd->hw_context != -1) {
24780+ psb_iowrite32(driver, 0,
24781+ PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
24782+ psb_mmu_flush_pd_locked(driver, 1);
24783+ }
24784+
24785+ /* Should take the spinlock here, but we don't need to do that
24786+ since we have the semaphore in write mode. */
24787+
24788+ for (i = 0; i < 1024; ++i) {
24789+ pt = pd->tables[i];
24790+ if (pt)
24791+ psb_mmu_free_pt(pt);
24792+ }
24793+
24794+ vfree(pd->tables);
24795+ __free_page(pd->dummy_page);
24796+ __free_page(pd->dummy_pt);
24797+ __free_page(pd->p);
24798+ kfree(pd);
24799+ up_write(&driver->sem);
24800+}
24801+
24802+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
24803+{
24804+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
24805+ void *v;
24806+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
24807+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
24808+ spinlock_t *lock = &pd->driver->lock;
24809+ uint8_t *clf;
24810+ uint32_t *ptes;
24811+ int i;
24812+
24813+ if (!pt)
24814+ return NULL;
24815+
24816+ pt->p = alloc_page(GFP_DMA32);
24817+ if (!pt->p) {
24818+ kfree(pt);
24819+ return NULL;
24820+ }
24821+
24822+ spin_lock(lock);
24823+
24824+ v = kmap_atomic(pt->p, KM_USER0);
24825+ clf = (uint8_t *) v;
24826+ ptes = (uint32_t *) v;
24827+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
24828+ *ptes++ = pd->invalid_pte;
24829+ }
24830+
24831+#if defined(CONFIG_X86)
24832+ if (pd->driver->has_clflush && pd->hw_context != -1) {
24833+ mb();
24834+ for (i = 0; i < clflush_count; ++i) {
24835+ psb_clflush(clf);
24836+ clf += clflush_add;
24837+ }
24838+ mb();
24839+ }
24840+#endif
24841+ kunmap_atomic(v, KM_USER0);
24842+ spin_unlock(lock);
24843+
24844+ pt->count = 0;
24845+ pt->pd = pd;
24846+ pt->index = 0;
24847+
24848+ return pt;
24849+}
24850+
24851+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
24852+ unsigned long addr)
24853+{
24854+ uint32_t index = psb_mmu_pd_index(addr);
24855+ struct psb_mmu_pt *pt;
24856+ volatile uint32_t *v;
24857+ spinlock_t *lock = &pd->driver->lock;
24858+
24859+ spin_lock(lock);
24860+ pt = pd->tables[index];
24861+ while (!pt) {
24862+ spin_unlock(lock);
24863+ pt = psb_mmu_alloc_pt(pd);
24864+ if (!pt)
24865+ return NULL;
24866+ spin_lock(lock);
24867+
24868+ if (pd->tables[index]) {
24869+ spin_unlock(lock);
24870+ psb_mmu_free_pt(pt);
24871+ spin_lock(lock);
24872+ pt = pd->tables[index];
24873+ continue;
24874+ }
24875+
24876+ v = kmap_atomic(pd->p, KM_USER0);
24877+ pd->tables[index] = pt;
24878+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
24879+ pt->index = index;
24880+ kunmap_atomic((void *)v, KM_USER0);
24881+
24882+ if (pd->hw_context != -1) {
24883+ psb_mmu_clflush(pd->driver, (void *)&v[index]);
24884+ atomic_set(&pd->driver->needs_tlbflush, 1);
24885+ }
24886+ }
24887+ pt->v = kmap_atomic(pt->p, KM_USER0);
24888+ return pt;
24889+}
24890+
24891+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
24892+ unsigned long addr)
24893+{
24894+ uint32_t index = psb_mmu_pd_index(addr);
24895+ struct psb_mmu_pt *pt;
24896+ spinlock_t *lock = &pd->driver->lock;
24897+
24898+ spin_lock(lock);
24899+ pt = pd->tables[index];
24900+ if (!pt) {
24901+ spin_unlock(lock);
24902+ return NULL;
24903+ }
24904+ pt->v = kmap_atomic(pt->p, KM_USER0);
24905+ return pt;
24906+}
24907+
24908+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
24909+{
24910+ struct psb_mmu_pd *pd = pt->pd;
24911+ volatile uint32_t *v;
24912+
24913+ kunmap_atomic(pt->v, KM_USER0);
24914+ if (pt->count == 0) {
24915+ v = kmap_atomic(pd->p, KM_USER0);
24916+ v[pt->index] = pd->invalid_pde;
24917+ pd->tables[pt->index] = NULL;
24918+
24919+ if (pd->hw_context != -1) {
24920+ psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
24921+ atomic_set(&pd->driver->needs_tlbflush, 1);
24922+ }
24923+ kunmap_atomic(pt->v, KM_USER0);
24924+ spin_unlock(&pd->driver->lock);
24925+ psb_mmu_free_pt(pt);
24926+ return;
24927+ }
24928+ spin_unlock(&pd->driver->lock);
24929+}
24930+
24931+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
24932+ uint32_t pte)
24933+{
24934+ pt->v[psb_mmu_pt_index(addr)] = pte;
24935+}
24936+
24937+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
24938+ unsigned long addr)
24939+{
24940+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
24941+}
24942+
24943+#if 0
24944+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
24945+ uint32_t mmu_offset)
24946+{
24947+ uint32_t *v;
24948+ uint32_t pfn;
24949+
24950+ v = kmap_atomic(pd->p, KM_USER0);
24951+ if (!v) {
24952+ printk(KERN_INFO "Could not kmap pde page.\n");
24953+ return 0;
24954+ }
24955+ pfn = v[psb_mmu_pd_index(mmu_offset)];
24956+ // printk(KERN_INFO "pde is 0x%08x\n",pfn);
24957+ kunmap_atomic(v, KM_USER0);
24958+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
24959+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
24960+ mmu_offset, pfn);
24961+ }
24962+ v = ioremap(pfn & 0xFFFFF000, 4096);
24963+ if (!v) {
24964+ printk(KERN_INFO "Could not kmap pte page.\n");
24965+ return 0;
24966+ }
24967+ pfn = v[psb_mmu_pt_index(mmu_offset)];
24968+ // printk(KERN_INFO "pte is 0x%08x\n",pfn);
24969+ iounmap(v);
24970+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
24971+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
24972+ mmu_offset, pfn);
24973+ }
24974+ return pfn >> PAGE_SHIFT;
24975+}
24976+
24977+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
24978+ uint32_t mmu_offset, uint32_t gtt_pages)
24979+{
24980+ uint32_t start;
24981+ uint32_t next;
24982+
24983+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
24984+ mmu_offset, gtt_pages);
24985+ down_read(&pd->driver->sem);
24986+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
24987+ mmu_offset += PAGE_SIZE;
24988+ gtt_pages -= 1;
24989+ while (gtt_pages--) {
24990+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
24991+ if (next != start + 1) {
24992+ printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n",
24993+ start, next);
24994+ }
24995+ start = next;
24996+ mmu_offset += PAGE_SIZE;
24997+ }
24998+ up_read(&pd->driver->sem);
24999+}
25000+
25001+#endif
25002+
25003+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
25004+ uint32_t mmu_offset, uint32_t gtt_start,
25005+ uint32_t gtt_pages)
25006+{
25007+ uint32_t *v;
25008+ uint32_t start = psb_mmu_pd_index(mmu_offset);
25009+ struct psb_mmu_driver *driver = pd->driver;
25010+
25011+ down_read(&driver->sem);
25012+ spin_lock(&driver->lock);
25013+
25014+ v = kmap_atomic(pd->p, KM_USER0);
25015+ v += start;
25016+
25017+ while (gtt_pages--) {
25018+ *v++ = gtt_start | pd->pd_mask;
25019+ gtt_start += PAGE_SIZE;
25020+ }
25021+
25022+ drm_ttm_cache_flush();
25023+ kunmap_atomic(v, KM_USER0);
25024+ spin_unlock(&driver->lock);
25025+
25026+ if (pd->hw_context != -1)
25027+ atomic_set(&pd->driver->needs_tlbflush, 1);
25028+
25029+ up_read(&pd->driver->sem);
25030+ psb_mmu_flush_pd(pd->driver, 0);
25031+}
25032+
25033+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
25034+{
25035+ struct psb_mmu_pd *pd;
25036+
25037+ down_read(&driver->sem);
25038+ pd = driver->default_pd;
25039+ up_read(&driver->sem);
25040+
25041+ return pd;
25042+}
25043+
25044+/* Returns the physical address of the PD shared by sgx/msvdx */
25045+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver * driver)
25046+{
25047+ struct psb_mmu_pd *pd;
25048+
25049+ pd = psb_mmu_get_default_pd(driver);
25050+ return ((page_to_pfn(pd->p) << PAGE_SHIFT));
25051+}
25052+
25053+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
25054+{
25055+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
25056+ psb_mmu_free_pagedir(driver->default_pd);
25057+ kfree(driver);
25058+}
25059+
25060+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
25061+ int trap_pagefaults,
25062+ int invalid_type,
25063+ atomic_t *msvdx_mmu_invaldc)
25064+{
25065+ struct psb_mmu_driver *driver;
25066+
25067+ driver = (struct psb_mmu_driver *)kmalloc(sizeof(*driver), GFP_KERNEL);
25068+
25069+ if (!driver)
25070+ return NULL;
25071+
25072+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
25073+ invalid_type);
25074+ if (!driver->default_pd)
25075+ goto out_err1;
25076+
25077+ spin_lock_init(&driver->lock);
25078+ init_rwsem(&driver->sem);
25079+ down_write(&driver->sem);
25080+ driver->register_map = registers;
25081+ atomic_set(&driver->needs_tlbflush, 1);
25082+ driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
25083+
25084+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
25085+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
25086+ PSB_CR_BIF_CTRL);
25087+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
25088+ PSB_CR_BIF_CTRL);
25089+
25090+ driver->has_clflush = 0;
25091+
25092+#if defined(CONFIG_X86)
25093+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
25094+ uint32_t tfms, misc, cap0, cap4, clflush_size;
25095+
25096+ /*
25097+ * clflush size is determined at kernel setup for x86_64 but not for
25098+ * i386. We have to do it here.
25099+ */
25100+
25101+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
25102+ clflush_size = ((misc >> 8) & 0xff) * 8;
25103+ driver->has_clflush = 1;
25104+ driver->clflush_add =
25105+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
25106+ driver->clflush_mask = driver->clflush_add - 1;
25107+ driver->clflush_mask = ~driver->clflush_mask;
25108+ }
25109+#endif
25110+
25111+ up_write(&driver->sem);
25112+ return driver;
25113+
25114+ out_err1:
25115+ kfree(driver);
25116+ return NULL;
25117+}
25118+
25119+#if defined(CONFIG_X86)
25120+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
25121+ uint32_t num_pages, uint32_t desired_tile_stride,
25122+ uint32_t hw_tile_stride)
25123+{
25124+ struct psb_mmu_pt *pt;
25125+ uint32_t rows = 1;
25126+ uint32_t i;
25127+ unsigned long addr;
25128+ unsigned long end;
25129+ unsigned long next;
25130+ unsigned long add;
25131+ unsigned long row_add;
25132+ unsigned long clflush_add = pd->driver->clflush_add;
25133+ unsigned long clflush_mask = pd->driver->clflush_mask;
25134+
25135+ if (!pd->driver->has_clflush) {
25136+ drm_ttm_cache_flush();
25137+ return;
25138+ }
25139+
25140+ if (hw_tile_stride)
25141+ rows = num_pages / desired_tile_stride;
25142+ else
25143+ desired_tile_stride = num_pages;
25144+
25145+ add = desired_tile_stride << PAGE_SHIFT;
25146+ row_add = hw_tile_stride << PAGE_SHIFT;
25147+ mb();
25148+ for (i = 0; i < rows; ++i) {
25149+
25150+ addr = address;
25151+ end = addr + add;
25152+
25153+ do {
25154+ next = psb_pd_addr_end(addr, end);
25155+ pt = psb_mmu_pt_map_lock(pd, addr);
25156+ if (!pt)
25157+ continue;
25158+ do {
25159+ psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
25160+ } while (addr += clflush_add,
25161+ (addr & clflush_mask) < next);
25162+
25163+ psb_mmu_pt_unmap_unlock(pt);
25164+ } while (addr = next, next != end);
25165+ address += row_add;
25166+ }
25167+ mb();
25168+}
25169+#else
25170+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
25171+ uint32_t num_pages, uint32_t desired_tile_stride,
25172+ uint32_t hw_tile_stride)
25173+{
25174+ drm_ttm_cache_flush();
25175+}
25176+#endif
25177+
25178+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
25179+ unsigned long address, uint32_t num_pages)
25180+{
25181+ struct psb_mmu_pt *pt;
25182+ unsigned long addr;
25183+ unsigned long end;
25184+ unsigned long next;
25185+ unsigned long f_address = address;
25186+
25187+ down_read(&pd->driver->sem);
25188+
25189+ addr = address;
25190+ end = addr + (num_pages << PAGE_SHIFT);
25191+
25192+ do {
25193+ next = psb_pd_addr_end(addr, end);
25194+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
25195+ if (!pt)
25196+ goto out;
25197+ do {
25198+ psb_mmu_invalidate_pte(pt, addr);
25199+ --pt->count;
25200+ } while (addr += PAGE_SIZE, addr < next);
25201+ psb_mmu_pt_unmap_unlock(pt);
25202+
25203+ } while (addr = next, next != end);
25204+
25205+ out:
25206+ if (pd->hw_context != -1)
25207+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
25208+
25209+ up_read(&pd->driver->sem);
25210+
25211+ if (pd->hw_context != -1)
25212+ psb_mmu_flush(pd->driver);
25213+
25214+ return;
25215+}
25216+
25217+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
25218+ uint32_t num_pages, uint32_t desired_tile_stride,
25219+ uint32_t hw_tile_stride)
25220+{
25221+ struct psb_mmu_pt *pt;
25222+ uint32_t rows = 1;
25223+ uint32_t i;
25224+ unsigned long addr;
25225+ unsigned long end;
25226+ unsigned long next;
25227+ unsigned long add;
25228+ unsigned long row_add;
25229+ unsigned long f_address = address;
25230+
25231+ if (hw_tile_stride)
25232+ rows = num_pages / desired_tile_stride;
25233+ else
25234+ desired_tile_stride = num_pages;
25235+
25236+ add = desired_tile_stride << PAGE_SHIFT;
25237+ row_add = hw_tile_stride << PAGE_SHIFT;
25238+
25239+ down_read(&pd->driver->sem);
25240+
25241+ /* Make sure we only need to flush this processor's cache */
25242+
25243+ for (i = 0; i < rows; ++i) {
25244+
25245+ addr = address;
25246+ end = addr + add;
25247+
25248+ do {
25249+ next = psb_pd_addr_end(addr, end);
25250+ pt = psb_mmu_pt_map_lock(pd, addr);
25251+ if (!pt)
25252+ continue;
25253+ do {
25254+ psb_mmu_invalidate_pte(pt, addr);
25255+ --pt->count;
25256+
25257+ } while (addr += PAGE_SIZE, addr < next);
25258+ psb_mmu_pt_unmap_unlock(pt);
25259+
25260+ } while (addr = next, next != end);
25261+ address += row_add;
25262+ }
25263+ if (pd->hw_context != -1)
25264+ psb_mmu_flush_ptes(pd, f_address, num_pages,
25265+ desired_tile_stride, hw_tile_stride);
25266+
25267+ up_read(&pd->driver->sem);
25268+
25269+ if (pd->hw_context != -1)
25270+ psb_mmu_flush(pd->driver);
25271+}
25272+
25273+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
25274+ unsigned long address, uint32_t num_pages,
25275+ int type)
25276+{
25277+ struct psb_mmu_pt *pt;
25278+ uint32_t pte;
25279+ unsigned long addr;
25280+ unsigned long end;
25281+ unsigned long next;
25282+ unsigned long f_address = address;
25283+ int ret = -ENOMEM;
25284+
25285+ down_read(&pd->driver->sem);
25286+
25287+ addr = address;
25288+ end = addr + (num_pages << PAGE_SHIFT);
25289+
25290+ do {
25291+ next = psb_pd_addr_end(addr, end);
25292+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
25293+ if (!pt) {
25294+ ret = -ENOMEM;
25295+ goto out;
25296+ }
25297+ do {
25298+ pte = psb_mmu_mask_pte(start_pfn++, type);
25299+ psb_mmu_set_pte(pt, addr, pte);
25300+ pt->count++;
25301+ } while (addr += PAGE_SIZE, addr < next);
25302+ psb_mmu_pt_unmap_unlock(pt);
25303+
25304+ } while (addr = next, next != end);
25305+ ret = 0;
25306+
25307+ out:
25308+ if (pd->hw_context != -1)
25309+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
25310+
25311+ up_read(&pd->driver->sem);
25312+
25313+ if (pd->hw_context != -1)
25314+ psb_mmu_flush(pd->driver);
25315+
25316+ return 0;
25317+}
25318+
25319+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
25320+ unsigned long address, uint32_t num_pages,
25321+ uint32_t desired_tile_stride, uint32_t hw_tile_stride,
25322+ int type)
25323+{
25324+ struct psb_mmu_pt *pt;
25325+ uint32_t rows = 1;
25326+ uint32_t i;
25327+ uint32_t pte;
25328+ unsigned long addr;
25329+ unsigned long end;
25330+ unsigned long next;
25331+ unsigned long add;
25332+ unsigned long row_add;
25333+ unsigned long f_address = address;
25334+ int ret = -ENOMEM;
25335+
25336+ if (hw_tile_stride) {
25337+ if (num_pages % desired_tile_stride != 0)
25338+ return -EINVAL;
25339+ rows = num_pages / desired_tile_stride;
25340+ } else {
25341+ desired_tile_stride = num_pages;
25342+ }
25343+
25344+ add = desired_tile_stride << PAGE_SHIFT;
25345+ row_add = hw_tile_stride << PAGE_SHIFT;
25346+
25347+ down_read(&pd->driver->sem);
25348+
25349+ for (i = 0; i < rows; ++i) {
25350+
25351+ addr = address;
25352+ end = addr + add;
25353+
25354+ do {
25355+ next = psb_pd_addr_end(addr, end);
25356+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
25357+ if (!pt)
25358+ goto out;
25359+ do {
25360+ pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
25361+ type);
25362+ psb_mmu_set_pte(pt, addr, pte);
25363+ pt->count++;
25364+ } while (addr += PAGE_SIZE, addr < next);
25365+ psb_mmu_pt_unmap_unlock(pt);
25366+
25367+ } while (addr = next, next != end);
25368+
25369+ address += row_add;
25370+ }
25371+ ret = 0;
25372+ out:
25373+ if (pd->hw_context != -1)
25374+ psb_mmu_flush_ptes(pd, f_address, num_pages,
25375+ desired_tile_stride, hw_tile_stride);
25376+
25377+ up_read(&pd->driver->sem);
25378+
25379+ if (pd->hw_context != -1)
25380+ psb_mmu_flush(pd->driver);
25381+
25382+ return 0;
25383+}
25384+
25385+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
25386+{
25387+ mask &= _PSB_MMU_ER_MASK;
25388+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
25389+ PSB_CR_BIF_CTRL);
25390+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
25391+}
25392+
25393+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
25394+{
25395+ mask &= _PSB_MMU_ER_MASK;
25396+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
25397+ PSB_CR_BIF_CTRL);
25398+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
25399+}
25400+
25401+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
25402+ unsigned long *pfn)
25403+{
25404+ int ret;
25405+ struct psb_mmu_pt *pt;
25406+ uint32_t tmp;
25407+ spinlock_t *lock = &pd->driver->lock;
25408+
25409+ down_read(&pd->driver->sem);
25410+ pt = psb_mmu_pt_map_lock(pd, virtual);
25411+ if (!pt) {
25412+ uint32_t *v;
25413+
25414+ spin_lock(lock);
25415+ v = kmap_atomic(pd->p, KM_USER0);
25416+ tmp = v[psb_mmu_pd_index(virtual)];
25417+ kunmap_atomic(v, KM_USER0);
25418+ spin_unlock(lock);
25419+
25420+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
25421+ !(pd->invalid_pte & PSB_PTE_VALID)) {
25422+ ret = -EINVAL;
25423+ goto out;
25424+ }
25425+ ret = 0;
25426+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
25427+ goto out;
25428+ }
25429+ tmp = pt->v[psb_mmu_pt_index(virtual)];
25430+ if (!(tmp & PSB_PTE_VALID)) {
25431+ ret = -EINVAL;
25432+ } else {
25433+ ret = 0;
25434+ *pfn = tmp >> PAGE_SHIFT;
25435+ }
25436+ psb_mmu_pt_unmap_unlock(pt);
25437+ out:
25438+ up_read(&pd->driver->sem);
25439+ return ret;
25440+}
25441+
25442+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
25443+{
25444+ struct page *p;
25445+ unsigned long pfn;
25446+ int ret = 0;
25447+ struct psb_mmu_pd *pd;
25448+ uint32_t *v;
25449+ uint32_t *vmmu;
25450+
25451+ pd = driver->default_pd;
25452+ if (!pd) {
25453+ printk(KERN_WARNING "Could not get default pd\n");
25454+ }
25455+
25456+ p = alloc_page(GFP_DMA32);
25457+
25458+ if (!p) {
25459+ printk(KERN_WARNING "Failed allocating page\n");
25460+ return;
25461+ }
25462+
25463+ v = kmap(p);
25464+ memset(v, 0x67, PAGE_SIZE);
25465+
25466+ pfn = (offset >> PAGE_SHIFT);
25467+
25468+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0,
25469+ PSB_MMU_CACHED_MEMORY);
25470+ if (ret) {
25471+ printk(KERN_WARNING "Failed inserting mmu page\n");
25472+ goto out_err1;
25473+ }
25474+
25475+ /* Ioremap the page through the GART aperture */
25476+
25477+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
25478+ if (!vmmu) {
25479+ printk(KERN_WARNING "Failed ioremapping page\n");
25480+ goto out_err2;
25481+ }
25482+
25483+ /* Read from the page with mmu disabled. */
25484+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
25485+
25486+ /* Enable the mmu for host accesses and read again. */
25487+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
25488+
25489+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
25490+ ioread32(vmmu));
25491+ *v = 0x15243705;
25492+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
25493+ ioread32(vmmu));
25494+ iowrite32(0x16243355, vmmu);
25495+ (void)ioread32(vmmu);
25496+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
25497+
25498+ printk(KERN_INFO "Int stat is 0x%08x\n",
25499+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
25500+ printk(KERN_INFO "Fault is 0x%08x\n",
25501+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
25502+
25503+ /* Disable MMU for host accesses and clear page fault register */
25504+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
25505+ iounmap(vmmu);
25506+ out_err2:
25507+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
25508+ out_err1:
25509+ kunmap(p);
25510+ __free_page(p);
25511+}
25512Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.c
25513===================================================================
25514--- /dev/null 1970-01-01 00:00:00.000000000 +0000
25515+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.c 2009-01-14 11:58:01.000000000 +0000
25516@@ -0,0 +1,676 @@
25517+/**
25518+ * file psb_msvdx.c
25519+ * MSVDX I/O operations and IRQ handling
25520+ *
25521+ */
25522+
25523+/**************************************************************************
25524+ *
25525+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
25526+ * Copyright (c) Imagination Technologies Limited, UK
25527+ * All Rights Reserved.
25528+ *
25529+ * Permission is hereby granted, free of charge, to any person obtaining a
25530+ * copy of this software and associated documentation files (the
25531+ * "Software"), to deal in the Software without restriction, including
25532+ * without limitation the rights to use, copy, modify, merge, publish,
25533+ * distribute, sub license, and/or sell copies of the Software, and to
25534+ * permit persons to whom the Software is furnished to do so, subject to
25535+ * the following conditions:
25536+ *
25537+ * The above copyright notice and this permission notice (including the
25538+ * next paragraph) shall be included in all copies or substantial portions
25539+ * of the Software.
25540+ *
25541+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25542+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25543+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
25544+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
25545+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25546+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25547+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
25548+ *
25549+ **************************************************************************/
25550+
25551+#include "drmP.h"
25552+#include "drm_os_linux.h"
25553+#include "psb_drv.h"
25554+#include "psb_drm.h"
25555+#include "psb_msvdx.h"
25556+
25557+#include <asm/io.h>
25558+#include <linux/delay.h>
25559+
25560+#ifndef list_first_entry
25561+#define list_first_entry(ptr, type, member) \
25562+ list_entry((ptr)->next, type, member)
25563+#endif
25564+
25565+static int psb_msvdx_send (struct drm_device *dev, void *cmd,
25566+ unsigned long cmd_size);
25567+
25568+int
25569+psb_msvdx_dequeue_send (struct drm_device *dev)
25570+{
25571+ struct drm_psb_private *dev_priv = dev->dev_private;
25572+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
25573+ int ret = 0;
25574+
25575+ if (list_empty (&dev_priv->msvdx_queue))
25576+ {
25577+ PSB_DEBUG_GENERAL ("MSVDXQUE: msvdx list empty.\n");
25578+ dev_priv->msvdx_busy = 0;
25579+ return -EINVAL;
25580+ }
25581+ msvdx_cmd =
25582+ list_first_entry (&dev_priv->msvdx_queue, struct psb_msvdx_cmd_queue,
25583+ head);
25584+ PSB_DEBUG_GENERAL ("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
25585+ ret = psb_msvdx_send (dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
25586+ if (ret)
25587+ {
25588+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
25589+ ret = -EINVAL;
25590+ }
25591+ list_del (&msvdx_cmd->head);
25592+ kfree (msvdx_cmd->cmd);
25593+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
25594+ return ret;
25595+}
25596+
25597+int
25598+psb_msvdx_map_command (struct drm_device *dev,
25599+ struct drm_buffer_object *cmd_buffer,
25600+ unsigned long cmd_offset, unsigned long cmd_size,
25601+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
25602+{
25603+ struct drm_psb_private *dev_priv = dev->dev_private;
25604+ int ret = 0;
25605+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
25606+ unsigned long cmd_size_remaining;
25607+ struct drm_bo_kmap_obj cmd_kmap;
25608+ void *cmd, *tmp, *cmd_start;
25609+ int is_iomem;
25610+
25611+ /* command buffers may not exceed page boundary */
25612+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
25613+ return -EINVAL;
25614+
25615+ ret = drm_bo_kmap (cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
25616+
25617+ if (ret)
25618+ {
25619+ PSB_DEBUG_GENERAL ("MSVDXQUE:ret:%d\n", ret);
25620+ return ret;
25621+ }
25622+
25623+ cmd_start =
25624+ (void *) drm_bmo_virtual (&cmd_kmap, &is_iomem) + cmd_page_offset;
25625+ cmd = cmd_start;
25626+ cmd_size_remaining = cmd_size;
25627+
25628+ while (cmd_size_remaining > 0)
25629+ {
25630+ uint32_t mmu_ptd;
25631+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
25632+ uint32_t cur_cmd_id = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_ID);
25633+ PSB_DEBUG_GENERAL
25634+ ("cmd start at %08x cur_cmd_size = %d cur_cmd_id = %02x fence = %08x\n",
25635+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
25636+ if ((cur_cmd_size % sizeof (uint32_t))
25637+ || (cur_cmd_size > cmd_size_remaining))
25638+ {
25639+ ret = -EINVAL;
25640+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25641+ goto out;
25642+ }
25643+
25644+ switch (cur_cmd_id)
25645+ {
25646+ case VA_MSGID_RENDER:
25647+ /* Fence ID */
25648+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_FENCE_VALUE, sequence);
25649+
25650+ mmu_ptd = psb_get_default_pd_addr (dev_priv->mmu);
25651+ if (atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc, 1, 0) == 1)
25652+ {
25653+ mmu_ptd |= 1;
25654+ PSB_DEBUG_GENERAL ("MSVDX: Setting MMU invalidate flag\n");
25655+ }
25656+ /* PTD */
25657+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
25658+ break;
25659+
25660+ default:
25661+ /* Msg not supported */
25662+ ret = -EINVAL;
25663+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25664+ goto out;
25665+ }
25666+
25667+ cmd += cur_cmd_size;
25668+ cmd_size_remaining -= cur_cmd_size;
25669+ }
25670+
25671+ if (copy_cmd)
25672+ {
25673+ PSB_DEBUG_GENERAL
25674+ ("MSVDXQUE: psb_msvdx_map_command copying command...\n");
25675+ tmp = drm_calloc (1, cmd_size, DRM_MEM_DRIVER);
25676+ if (tmp == NULL)
25677+ {
25678+ ret = -ENOMEM;
25679+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25680+ goto out;
25681+ }
25682+ memcpy (tmp, cmd_start, cmd_size);
25683+ *msvdx_cmd = tmp;
25684+ }
25685+ else
25686+ {
25687+ PSB_DEBUG_GENERAL
25688+ ("MSVDXQUE: psb_msvdx_map_command did NOT copy command...\n");
25689+ ret = psb_msvdx_send (dev, cmd_start, cmd_size);
25690+ if (ret)
25691+ {
25692+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
25693+ ret = -EINVAL;
25694+ }
25695+ }
25696+
25697+out:
25698+ drm_bo_kunmap (&cmd_kmap);
25699+
25700+ return ret;
25701+}
25702+
25703+int
25704+psb_submit_video_cmdbuf (struct drm_device *dev,
25705+ struct drm_buffer_object *cmd_buffer,
25706+ unsigned long cmd_offset, unsigned long cmd_size,
25707+ struct drm_fence_object *fence)
25708+{
25709+ struct drm_psb_private *dev_priv = dev->dev_private;
25710+ uint32_t sequence = fence->sequence;
25711+ unsigned long irq_flags;
25712+ int ret = 0;
25713+
25714+ mutex_lock (&dev_priv->msvdx_mutex);
25715+ psb_schedule_watchdog (dev_priv);
25716+
25717+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
25718+ if (dev_priv->msvdx_needs_reset)
25719+ {
25720+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25721+ PSB_DEBUG_GENERAL ("MSVDX: Needs reset\n");
25722+ if (psb_msvdx_reset (dev_priv))
25723+ {
25724+ mutex_unlock (&dev_priv->msvdx_mutex);
25725+ ret = -EBUSY;
25726+ PSB_DEBUG_GENERAL ("MSVDX: Reset failed\n");
25727+ return ret;
25728+ }
25729+ PSB_DEBUG_GENERAL ("MSVDX: Reset ok\n");
25730+ dev_priv->msvdx_needs_reset = 0;
25731+ dev_priv->msvdx_busy = 0;
25732+ dev_priv->msvdx_start_idle = 0;
25733+
25734+ psb_msvdx_init (dev);
25735+ psb_msvdx_irq_preinstall (dev_priv);
25736+ psb_msvdx_irq_postinstall (dev_priv);
25737+ PSB_DEBUG_GENERAL ("MSVDX: Init ok\n");
25738+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
25739+ }
25740+
25741+ if (!dev_priv->msvdx_busy)
25742+ {
25743+ dev_priv->msvdx_busy = 1;
25744+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25745+ PSB_DEBUG_GENERAL
25746+ ("MSVDXQUE: nothing in the queue sending sequence:%08x..\n",
25747+ sequence);
25748+ ret =
25749+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
25750+ NULL, sequence, 0);
25751+ if (ret)
25752+ {
25753+ mutex_unlock (&dev_priv->msvdx_mutex);
25754+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
25755+ return ret;
25756+ }
25757+ }
25758+ else
25759+ {
25760+ struct psb_msvdx_cmd_queue *msvdx_cmd;
25761+ void *cmd = NULL;
25762+
25763+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25764+ /*queue the command to be sent when the h/w is ready */
25765+ PSB_DEBUG_GENERAL ("MSVDXQUE: queueing sequence:%08x..\n", sequence);
25766+ msvdx_cmd =
25767+ drm_calloc (1, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
25768+ if (msvdx_cmd == NULL)
25769+ {
25770+ mutex_unlock (&dev_priv->msvdx_mutex);
25771+ PSB_DEBUG_GENERAL ("MSVDXQUE: Out of memory...\n");
25772+ return -ENOMEM;
25773+ }
25774+
25775+ ret =
25776+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
25777+ &cmd, sequence, 1);
25778+ if (ret)
25779+ {
25780+ mutex_unlock (&dev_priv->msvdx_mutex);
25781+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
25782+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue),
25783+ DRM_MEM_DRIVER);
25784+ return ret;
25785+ }
25786+ msvdx_cmd->cmd = cmd;
25787+ msvdx_cmd->cmd_size = cmd_size;
25788+ msvdx_cmd->sequence = sequence;
25789+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
25790+ list_add_tail (&msvdx_cmd->head, &dev_priv->msvdx_queue);
25791+ if (!dev_priv->msvdx_busy)
25792+ {
25793+ dev_priv->msvdx_busy = 1;
25794+ PSB_DEBUG_GENERAL ("MSVDXQUE: Need immediate dequeue\n");
25795+ psb_msvdx_dequeue_send (dev);
25796+ }
25797+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
25798+ }
25799+ mutex_unlock (&dev_priv->msvdx_mutex);
25800+ return ret;
25801+}
25802+
25803+int
25804+psb_msvdx_send (struct drm_device *dev, void *cmd, unsigned long cmd_size)
25805+{
25806+ int ret = 0;
25807+ struct drm_psb_private *dev_priv = dev->dev_private;
25808+
25809+ while (cmd_size > 0)
25810+ {
25811+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
25812+ if (cur_cmd_size > cmd_size)
25813+ {
25814+ ret = -EINVAL;
25815+ PSB_DEBUG_GENERAL
25816+ ("MSVDX: cmd_size = %d cur_cmd_size = %d\n",
25817+ (int) cmd_size, cur_cmd_size);
25818+ goto out;
25819+ }
25820+ /* Send the message to h/w */
25821+ ret = psb_mtx_send (dev_priv, cmd);
25822+ if (ret)
25823+ {
25824+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25825+ goto out;
25826+ }
25827+ cmd += cur_cmd_size;
25828+ cmd_size -= cur_cmd_size;
25829+ }
25830+
25831+out:
25832+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25833+ return ret;
25834+}
25835+
25836+/***********************************************************************************
25837+ * Function Name : psb_mtx_send
25838+ * Inputs :
25839+ * Outputs :
25840+ * Returns :
25841+ * Description :
25842+ ************************************************************************************/
25843+int
25844+psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg)
25845+{
25846+
25847+ static uint32_t padMessage[FWRK_PADMSG_SIZE];
25848+
25849+ const uint32_t *pui32Msg = (uint32_t *) pvMsg;
25850+ uint32_t msgNumWords, wordsFree, readIndex, writeIndex;
25851+ int ret = 0;
25852+
25853+ PSB_DEBUG_GENERAL ("MSVDX: psb_mtx_send\n");
25854+
25855+ /* we need clocks enabled before we touch VEC local ram */
25856+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25857+
25858+ msgNumWords = (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_SIZE) + 3) / 4;
25859+
25860+ if (msgNumWords > NUM_WORDS_MTX_BUF)
25861+ {
25862+ ret = -EINVAL;
25863+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25864+ goto out;
25865+ }
25866+
25867+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_RD_INDEX);
25868+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
25869+
25870+ if (writeIndex + msgNumWords > NUM_WORDS_MTX_BUF)
25871+ { /* message would wrap, need to send a pad message */
25872+ BUG_ON (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_ID) == FWRK_MSGID_PADDING); /* Shouldn't happen for a PAD message itself */
25873+ /* if the read pointer is at zero then we must wait for it to change otherwise the write
25874+ * pointer will equal the read pointer,which should only happen when the buffer is empty
25875+ *
25876+ * This will only happens if we try to overfill the queue, queue management should make
25877+ * sure this never happens in the first place.
25878+ */
25879+ BUG_ON (0 == readIndex);
25880+ if (0 == readIndex)
25881+ {
25882+ ret = -EINVAL;
25883+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25884+ goto out;
25885+ }
25886+ /* Send a pad message */
25887+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_SIZE,
25888+ (NUM_WORDS_MTX_BUF - writeIndex) << 2);
25889+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_ID, FWRK_MSGID_PADDING);
25890+ psb_mtx_send (dev_priv, padMessage);
25891+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
25892+ }
25893+
25894+ wordsFree =
25895+ (writeIndex >=
25896+ readIndex) ? NUM_WORDS_MTX_BUF - (writeIndex -
25897+ readIndex) : readIndex - writeIndex;
25898+
25899+ BUG_ON (msgNumWords > wordsFree);
25900+ if (msgNumWords > wordsFree)
25901+ {
25902+ ret = -EINVAL;
25903+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
25904+ goto out;
25905+ }
25906+
25907+ while (msgNumWords > 0)
25908+ {
25909+ PSB_WMSVDX32 (*pui32Msg++, MSVDX_COMMS_TO_MTX_BUF + (writeIndex << 2));
25910+ msgNumWords--;
25911+ writeIndex++;
25912+ if (NUM_WORDS_MTX_BUF == writeIndex)
25913+ {
25914+ writeIndex = 0;
25915+ }
25916+ }
25917+ PSB_WMSVDX32 (writeIndex, MSVDX_COMMS_TO_MTX_WRT_INDEX);
25918+
25919+ /* Make sure clocks are enabled before we kick */
25920+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25921+
25922+ /* signal an interrupt to let the mtx know there is a new message */
25923+ PSB_WMSVDX32 (1, MSVDX_MTX_KICKI);
25924+
25925+out:
25926+ return ret;
25927+}
25928+
25929+/*
25930+ * MSVDX MTX interrupt
25931+ */
25932+void
25933+psb_msvdx_mtx_interrupt (struct drm_device *dev)
25934+{
25935+ static uint32_t msgBuffer[128];
25936+ uint32_t readIndex, writeIndex;
25937+ uint32_t msgNumWords, msgWordOffset;
25938+ struct drm_psb_private *dev_priv =
25939+ (struct drm_psb_private *) dev->dev_private;
25940+
25941+ /* Are clocks enabled - If not enable before attempting to read from VLR */
25942+ if (PSB_RMSVDX32 (MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
25943+ {
25944+ PSB_DEBUG_GENERAL
25945+ ("MSVDX: Warning - Clocks disabled when Interupt set\n");
25946+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
25947+ }
25948+
25949+ for (;;)
25950+ {
25951+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_RD_INDEX);
25952+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_WRT_INDEX);
25953+
25954+ if (readIndex != writeIndex)
25955+ {
25956+ msgWordOffset = 0;
25957+
25958+ msgBuffer[msgWordOffset] =
25959+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
25960+
25961+ msgNumWords = (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_SIZE) + 3) / 4; /* round to nearest word */
25962+
25963+ /*ASSERT(msgNumWords <= sizeof(msgBuffer) / sizeof(uint32_t)); */
25964+
25965+ if (++readIndex >= NUM_WORDS_HOST_BUF)
25966+ readIndex = 0;
25967+
25968+ for (msgWordOffset++; msgWordOffset < msgNumWords; msgWordOffset++)
25969+ {
25970+ msgBuffer[msgWordOffset] =
25971+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
25972+
25973+ if (++readIndex >= NUM_WORDS_HOST_BUF)
25974+ {
25975+ readIndex = 0;
25976+ }
25977+ }
25978+
25979+ /* Update the Read index */
25980+ PSB_WMSVDX32 (readIndex, MSVDX_COMMS_TO_HOST_RD_INDEX);
25981+
25982+ if (!dev_priv->msvdx_needs_reset)
25983+ switch (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID))
25984+ {
25985+ case VA_MSGID_CMD_HW_PANIC:
25986+ case VA_MSGID_CMD_FAILED:
25987+ {
25988+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
25989+ FW_VA_CMD_FAILED_FENCE_VALUE);
25990+ uint32_t ui32FaultStatus = MEMIO_READ_FIELD (msgBuffer,
25991+ FW_VA_CMD_FAILED_IRQSTATUS);
25992+
25993+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC )
25994+ PSB_DEBUG_GENERAL
25995+ ("MSVDX: VA_MSGID_CMD_HW_PANIC: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
25996+ ui32Fence, ui32FaultStatus);
25997+ else
25998+ PSB_DEBUG_GENERAL
25999+ ("MSVDX: VA_MSGID_CMD_FAILED: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
26000+ ui32Fence, ui32FaultStatus);
26001+
26002+ dev_priv->msvdx_needs_reset = 1;
26003+
26004+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC)
26005+ {
26006+ if (dev_priv->
26007+ msvdx_current_sequence
26008+ - dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
26009+ dev_priv->msvdx_current_sequence++;
26010+ PSB_DEBUG_GENERAL
26011+ ("MSVDX: Fence ID missing, assuming %08x\n",
26012+ dev_priv->msvdx_current_sequence);
26013+ }
26014+ else
26015+ dev_priv->msvdx_current_sequence = ui32Fence;
26016+
26017+ psb_fence_error (dev,
26018+ PSB_ENGINE_VIDEO,
26019+ dev_priv->
26020+ msvdx_current_sequence,
26021+ DRM_FENCE_TYPE_EXE, DRM_CMD_FAILED);
26022+
26023+ /* Flush the command queue */
26024+ psb_msvdx_flush_cmd_queue (dev);
26025+
26026+ goto isrExit;
26027+ break;
26028+ }
26029+ case VA_MSGID_CMD_COMPLETED:
26030+ {
26031+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
26032+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
26033+ uint32_t ui32Flags =
26034+ MEMIO_READ_FIELD (msgBuffer, FW_VA_CMD_COMPLETED_FLAGS);
26035+
26036+ PSB_DEBUG_GENERAL
26037+ ("msvdx VA_MSGID_CMD_COMPLETED: FenceID: %08x, flags: 0x%x\n",
26038+ ui32Fence, ui32Flags);
26039+ dev_priv->msvdx_current_sequence = ui32Fence;
26040+
26041+ psb_fence_handler (dev, PSB_ENGINE_VIDEO);
26042+
26043+
26044+ if (ui32Flags & FW_VA_RENDER_HOST_INT)
26045+ {
26046+ /*Now send the next command from the msvdx cmd queue */
26047+ psb_msvdx_dequeue_send (dev);
26048+ goto isrExit;
26049+ }
26050+ break;
26051+ }
26052+ case VA_MSGID_ACK:
26053+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_ACK\n");
26054+ break;
26055+
26056+ case VA_MSGID_TEST1:
26057+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST1\n");
26058+ break;
26059+
26060+ case VA_MSGID_TEST2:
26061+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST2\n");
26062+ break;
26063+ /* Don't need to do anything with these messages */
26064+
26065+ case VA_MSGID_DEBLOCK_REQUIRED:
26066+ {
26067+ uint32_t ui32ContextId = MEMIO_READ_FIELD (msgBuffer,
26068+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
26069+
26070+ /* The BE we now be locked. */
26071+
26072+ /* Unblock rendec by reading the mtx2mtx end of slice */
26073+ (void) PSB_RMSVDX32 (MSVDX_RENDEC_READ_DATA);
26074+
26075+ PSB_DEBUG_GENERAL
26076+ ("msvdx VA_MSGID_DEBLOCK_REQUIRED Context=%08x\n",
26077+ ui32ContextId);
26078+ goto isrExit;
26079+ break;
26080+ }
26081+
26082+ default:
26083+ {
26084+ PSB_DEBUG_GENERAL
26085+ ("ERROR: msvdx Unknown message from MTX \n");
26086+ }
26087+ break;
26088+
26089+ }
26090+ }
26091+ else
26092+ {
26093+ /* Get out of here if nothing */
26094+ break;
26095+ }
26096+ }
26097+isrExit:
26098+
26099+#if 1
26100+ if (!dev_priv->msvdx_busy)
26101+ {
26102+ /* check that clocks are enabled before reading VLR */
26103+ if( PSB_RMSVDX32( MSVDX_MAN_CLK_ENABLE ) != (clk_enable_all) )
26104+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
26105+
26106+ /* If the firmware says the hardware is idle and the CCB is empty then we can power down */
26107+ uint32_t ui32FWStatus = PSB_RMSVDX32( MSVDX_COMMS_FW_STATUS );
26108+ uint32_t ui32CCBRoff = PSB_RMSVDX32 ( MSVDX_COMMS_TO_MTX_RD_INDEX );
26109+ uint32_t ui32CCBWoff = PSB_RMSVDX32 ( MSVDX_COMMS_TO_MTX_WRT_INDEX );
26110+
26111+ if( (ui32FWStatus & MSVDX_FW_STATUS_HW_IDLE) && (ui32CCBRoff == ui32CCBWoff))
26112+ {
26113+ PSB_DEBUG_GENERAL("MSVDX_CLOCK: Setting clock to minimal...\n");
26114+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
26115+ }
26116+ }
26117+#endif
26118+ DRM_MEMORYBARRIER ();
26119+}
26120+
26121+void
26122+psb_msvdx_lockup (struct drm_psb_private *dev_priv,
26123+ int *msvdx_lockup, int *msvdx_idle)
26124+{
26125+ unsigned long irq_flags;
26126+// struct psb_scheduler *scheduler = &dev_priv->scheduler;
26127+
26128+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
26129+ *msvdx_lockup = 0;
26130+ *msvdx_idle = 1;
26131+
26132+ if (!dev_priv->has_msvdx)
26133+ {
26134+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
26135+ return;
26136+ }
26137+#if 0
26138+ PSB_DEBUG_GENERAL ("MSVDXTimer: current_sequence:%d "
26139+ "last_sequence:%d and last_submitted_sequence :%d\n",
26140+ dev_priv->msvdx_current_sequence,
26141+ dev_priv->msvdx_last_sequence,
26142+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
26143+#endif
26144+ if (dev_priv->msvdx_current_sequence -
26145+ dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
26146+ {
26147+
26148+ if (dev_priv->msvdx_current_sequence == dev_priv->msvdx_last_sequence)
26149+ {
26150+ PSB_DEBUG_GENERAL
26151+ ("MSVDXTimer: msvdx locked-up for sequence:%d\n",
26152+ dev_priv->msvdx_current_sequence);
26153+ *msvdx_lockup = 1;
26154+ }
26155+ else
26156+ {
26157+ PSB_DEBUG_GENERAL ("MSVDXTimer: msvdx responded fine so far...\n");
26158+ dev_priv->msvdx_last_sequence = dev_priv->msvdx_current_sequence;
26159+ *msvdx_idle = 0;
26160+ }
26161+ if (dev_priv->msvdx_start_idle)
26162+ dev_priv->msvdx_start_idle = 0;
26163+ }
26164+ else
26165+ {
26166+ if (dev_priv->msvdx_needs_reset == 0)
26167+ {
26168+ if (dev_priv->msvdx_start_idle && (dev_priv->msvdx_finished_sequence == dev_priv->msvdx_current_sequence))
26169+ {
26170+ //if (dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME >= jiffies)
26171+ if (time_after_eq(jiffies, dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME))
26172+ {
26173+ printk("set the msvdx clock to 0 in the %s\n", __FUNCTION__);
26174+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
26175+ dev_priv->msvdx_needs_reset = 1;
26176+ }
26177+ else
26178+ {
26179+ *msvdx_idle = 0;
26180+ }
26181+ }
26182+ else
26183+ {
26184+ dev_priv->msvdx_start_idle = 1;
26185+ dev_priv->msvdx_idle_start_jiffies = jiffies;
26186+ dev_priv->msvdx_finished_sequence = dev_priv->msvdx_current_sequence;
26187+ *msvdx_idle = 0;
26188+ }
26189+ }
26190+ }
26191+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
26192+}
26193Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.h
26194===================================================================
26195--- /dev/null 1970-01-01 00:00:00.000000000 +0000
26196+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.h 2009-01-14 11:58:01.000000000 +0000
26197@@ -0,0 +1,564 @@
26198+/**************************************************************************
26199+ *
26200+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
26201+ * Copyright (c) Imagination Technologies Limited, UK
26202+ * All Rights Reserved.
26203+ *
26204+ * Permission is hereby granted, free of charge, to any person obtaining a
26205+ * copy of this software and associated documentation files (the
26206+ * "Software"), to deal in the Software without restriction, including
26207+ * without limitation the rights to use, copy, modify, merge, publish,
26208+ * distribute, sub license, and/or sell copies of the Software, and to
26209+ * permit persons to whom the Software is furnished to do so, subject to
26210+ * the following conditions:
26211+ *
26212+ * The above copyright notice and this permission notice (including the
26213+ * next paragraph) shall be included in all copies or substantial portions
26214+ * of the Software.
26215+ *
26216+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26217+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26218+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26219+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
26220+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
26221+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26222+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
26223+ *
26224+ **************************************************************************/
26225+
26226+#ifndef _PSB_MSVDX_H_
26227+#define _PSB_MSVDX_H_
26228+
26229+#define assert(expr) \
26230+ if(unlikely(!(expr))) { \
26231+ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
26232+ #expr,__FILE__,__FUNCTION__,__LINE__); \
26233+ }
26234+
26235+#define PSB_ASSERT(x) assert (x)
26236+#define IMG_ASSERT(x) assert (x)
26237+
26238+#include "psb_drv.h"
26239+int
26240+psb_wait_for_register (struct drm_psb_private *dev_priv,
26241+ uint32_t ui32Offset,
26242+ uint32_t ui32Value, uint32_t ui32Enable);
26243+
26244+void psb_msvdx_mtx_interrupt (struct drm_device *dev);
26245+int psb_msvdx_init (struct drm_device *dev);
26246+int psb_msvdx_uninit (struct drm_device *dev);
26247+int psb_msvdx_reset (struct drm_psb_private *dev_priv);
26248+uint32_t psb_get_default_pd_addr (struct psb_mmu_driver *driver);
26249+int psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg);
26250+void psb_msvdx_irq_preinstall (struct drm_psb_private *dev_priv);
26251+void psb_msvdx_irq_postinstall (struct drm_psb_private *dev_priv);
26252+void psb_msvdx_flush_cmd_queue (struct drm_device *dev);
26253+extern void psb_msvdx_lockup (struct drm_psb_private *dev_priv,
26254+ int *msvdx_lockup, int *msvdx_idle);
26255+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2 /* Non-Optimal Invalidation is not default */
26256+#define FW_VA_RENDER_HOST_INT 0x00004000
26257+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
26258+
26259+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
26260+
26261+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
26262+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
26263+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
26264+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
26265+
26266+
26267+#define POULSBO_D0 0x5
26268+#define POULSBO_D1 0x6
26269+#define PSB_REVID_OFFSET 0x8
26270+
26271+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 /* There is no work currently underway on the hardware*/
26272+
26273+#define clk_enable_all MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
26274+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
26275+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
26276+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
26277+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
26278+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
26279+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
26280+
26281+#define clk_enable_minimal MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
26282+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
26283+
26284+#define clk_enable_auto MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
26285+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
26286+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
26287+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
26288+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
26289+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
26290+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
26291+
26292+#define msvdx_sw_reset_all MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
26293+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
26294+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
26295+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
26296+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK
26297+
26298+
26299+#define PCI_PORT5_REG80_FFUSE 0xD0058000
26300+#define MTX_CODE_BASE (0x80900000)
26301+#define MTX_DATA_BASE (0x82880000)
26302+#define PC_START_ADDRESS (0x80900000)
26303+
26304+#define MTX_CORE_CODE_MEM (0x10 )
26305+#define MTX_CORE_DATA_MEM (0x18 )
26306+
26307+#define MTX_INTERNAL_REG( R_SPECIFIER , U_SPECIFIER ) ( ((R_SPECIFIER)<<4) | (U_SPECIFIER) )
26308+#define MTX_PC MTX_INTERNAL_REG( 0 , 5 )
26309+
26310+#define RENDEC_A_SIZE ( 2 * 1024* 1024 )
26311+#define RENDEC_B_SIZE ( RENDEC_A_SIZE / 4 )
26312+
26313+#define MEMIO_READ_FIELD(vpMem, field) \
26314+ ((uint32_t)(((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & field##_MASK) >> field##_SHIFT))
26315+
26316+#define MEMIO_WRITE_FIELD(vpMem, field, ui32Value) \
26317+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
26318+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & (field##_TYPE)~field##_MASK) | \
26319+ (field##_TYPE)(( (uint32_t) (ui32Value) << field##_SHIFT) & field##_MASK);
26320+
26321+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, ui32Value) \
26322+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
26323+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) | \
26324+ (field##_TYPE) (( (uint32_t) (ui32Value) << field##_SHIFT)) );
26325+
26326+#define REGIO_READ_FIELD(ui32RegValue, reg, field) \
26327+ ((ui32RegValue & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
26328+
26329+#define REGIO_WRITE_FIELD(ui32RegValue, reg, field, ui32Value) \
26330+ (ui32RegValue) = \
26331+ ((ui32RegValue) & ~(reg##_##field##_MASK)) | \
26332+ (((ui32Value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
26333+
26334+#define REGIO_WRITE_FIELD_LITE(ui32RegValue, reg, field, ui32Value) \
26335+ (ui32RegValue) = \
26336+ ( (ui32RegValue) | ( (ui32Value) << (reg##_##field##_SHIFT) ) );
26337+
26338+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK (0x00000001)
26339+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK (0x00000002)
26340+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK (0x00000004)
26341+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK (0x00000008)
26342+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK (0x00000010)
26343+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK (0x00000020)
26344+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK (0x00000040)
26345+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK (0x00040000)
26346+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK (0x00080000)
26347+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK (0x00100000)
26348+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK (0x00200000)
26349+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
26350+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK (0x00010000)
26351+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK (0x00100000)
26352+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK (0x01000000)
26353+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK (0x10000000)
26354+
26355+/* MTX registers */
26356+#define MSVDX_MTX_ENABLE (0x0000)
26357+#define MSVDX_MTX_KICKI (0x0088)
26358+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
26359+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
26360+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
26361+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
26362+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
26363+#define MSVDX_MTX_SOFT_RESET (0x0200)
26364+
26365+/* MSVDX registers */
26366+#define MSVDX_CONTROL (0x0600)
26367+#define MSVDX_INTERRUPT_CLEAR (0x060C)
26368+#define MSVDX_INTERRUPT_STATUS (0x0608)
26369+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
26370+#define MSVDX_MMU_CONTROL0 (0x0680)
26371+#define MSVDX_MTX_RAM_BANK (0x06F0)
26372+#define MSVDX_MAN_CLK_ENABLE (0x0620)
26373+
26374+/* RENDEC registers */
26375+#define MSVDX_RENDEC_CONTROL0 (0x0868)
26376+#define MSVDX_RENDEC_CONTROL1 (0x086C)
26377+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
26378+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
26379+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
26380+#define MSVDX_RENDEC_READ_DATA (0x0898)
26381+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
26382+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
26383+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
26384+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
26385+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
26386+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
26387+
26388+/*
26389+ * This defines the MSVDX communication buffer
26390+ */
26391+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
26392+#define NUM_WORDS_HOST_BUF (100) /*!< Host buffer size (in 32-bit words) */
26393+#define NUM_WORDS_MTX_BUF (100) /*!< MTX buffer size (in 32-bit words) */
26394+
26395+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
26396+
26397+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
26398+#define MSVDX_COMMS_SCRATCH (MSVDX_COMMS_AREA_ADDR - 0x08)
26399+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
26400+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
26401+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
26402+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
26403+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
26404+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
26405+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
26406+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
26407+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
26408+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
26409+#define MSVDX_COMMS_TO_MTX_BUF (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
26410+
26411+#define MSVDX_COMMS_AREA_END (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
26412+
26413+#if (MSVDX_COMMS_AREA_END != 0x03000)
26414+#error
26415+#endif
26416+
26417+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
26418+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
26419+
26420+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
26421+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
26422+
26423+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
26424+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
26425+
26426+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
26427+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
26428+
26429+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
26430+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
26431+
26432+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
26433+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
26434+
26435+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
26436+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
26437+
26438+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
26439+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
26440+
26441+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
26442+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
26443+
26444+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
26445+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
26446+
26447+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
26448+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
26449+
26450+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
26451+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
26452+
26453+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
26454+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
26455+
26456+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
26457+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
26458+
26459+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
26460+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
26461+
26462+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
26463+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
26464+
26465+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
26466+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
26467+
26468+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
26469+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
26470+
26471+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
26472+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
26473+
26474+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
26475+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
26476+
26477+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80) /*!< Start of parser specific Host->MTX messages. */
26478+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0) /*!< Start of parser specific MTX->Host messages. */
26479+#define FWRK_MSGID_PADDING ( 0 )
26480+
26481+#define FWRK_GENMSG_SIZE_TYPE uint8_t
26482+#define FWRK_GENMSG_SIZE_MASK (0xFF)
26483+#define FWRK_GENMSG_SIZE_SHIFT (0)
26484+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
26485+#define FWRK_GENMSG_ID_TYPE uint8_t
26486+#define FWRK_GENMSG_ID_MASK (0xFF)
26487+#define FWRK_GENMSG_ID_SHIFT (0)
26488+#define FWRK_GENMSG_ID_OFFSET (0x0001)
26489+#define FWRK_PADMSG_SIZE (2)
26490+
26491+/*!
26492+******************************************************************************
26493+ This type defines the framework specified message ids
26494+******************************************************************************/
26495+enum
26496+{
26497+ /*! Sent by the DXVA driver on the host to the mtx firmware.
26498+ */
26499+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
26500+ VA_MSGID_RENDER,
26501+ VA_MSGID_DEBLOCK,
26502+ VA_MSGID_OOLD,
26503+
26504+ /* Test Messages */
26505+ VA_MSGID_TEST1,
26506+ VA_MSGID_TEST2,
26507+
26508+ /*! Sent by the mtx firmware to itself.
26509+ */
26510+ VA_MSGID_RENDER_MC_INTERRUPT,
26511+
26512+ /*! Sent by the DXVA firmware on the MTX to the host.
26513+ */
26514+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
26515+ VA_MSGID_CMD_COMPLETED_BATCH,
26516+ VA_MSGID_DEBLOCK_REQUIRED,
26517+ VA_MSGID_TEST_RESPONCE,
26518+ VA_MSGID_ACK,
26519+
26520+ VA_MSGID_CMD_FAILED,
26521+ VA_MSGID_CMD_UNSUPPORTED,
26522+ VA_MSGID_CMD_HW_PANIC,
26523+};
26524+
26525+/* MSVDX Firmware interface */
26526+
26527+#define FW_VA_RENDER_SIZE (32)
26528+
26529+// FW_VA_RENDER MSG_SIZE
26530+#define FW_VA_RENDER_MSG_SIZE_ALIGNMENT (1)
26531+#define FW_VA_RENDER_MSG_SIZE_TYPE uint8_t
26532+#define FW_VA_RENDER_MSG_SIZE_MASK (0xFF)
26533+#define FW_VA_RENDER_MSG_SIZE_LSBMASK (0xFF)
26534+#define FW_VA_RENDER_MSG_SIZE_OFFSET (0x0000)
26535+#define FW_VA_RENDER_MSG_SIZE_SHIFT (0)
26536+
26537+// FW_VA_RENDER ID
26538+#define FW_VA_RENDER_ID_ALIGNMENT (1)
26539+#define FW_VA_RENDER_ID_TYPE uint8_t
26540+#define FW_VA_RENDER_ID_MASK (0xFF)
26541+#define FW_VA_RENDER_ID_LSBMASK (0xFF)
26542+#define FW_VA_RENDER_ID_OFFSET (0x0001)
26543+#define FW_VA_RENDER_ID_SHIFT (0)
26544+
26545+// FW_VA_RENDER BUFFER_SIZE
26546+#define FW_VA_RENDER_BUFFER_SIZE_ALIGNMENT (2)
26547+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
26548+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
26549+#define FW_VA_RENDER_BUFFER_SIZE_LSBMASK (0x0FFF)
26550+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
26551+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
26552+
26553+// FW_VA_RENDER MMUPTD
26554+#define FW_VA_RENDER_MMUPTD_ALIGNMENT (4)
26555+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
26556+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
26557+#define FW_VA_RENDER_MMUPTD_LSBMASK (0xFFFFFFFF)
26558+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
26559+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
26560+
26561+// FW_VA_RENDER LLDMA_ADDRESS
26562+#define FW_VA_RENDER_LLDMA_ADDRESS_ALIGNMENT (4)
26563+#define FW_VA_RENDER_LLDMA_ADDRESS_TYPE uint32_t
26564+#define FW_VA_RENDER_LLDMA_ADDRESS_MASK (0xFFFFFFFF)
26565+#define FW_VA_RENDER_LLDMA_ADDRESS_LSBMASK (0xFFFFFFFF)
26566+#define FW_VA_RENDER_LLDMA_ADDRESS_OFFSET (0x0008)
26567+#define FW_VA_RENDER_LLDMA_ADDRESS_SHIFT (0)
26568+
26569+// FW_VA_RENDER CONTEXT
26570+#define FW_VA_RENDER_CONTEXT_ALIGNMENT (4)
26571+#define FW_VA_RENDER_CONTEXT_TYPE uint32_t
26572+#define FW_VA_RENDER_CONTEXT_MASK (0xFFFFFFFF)
26573+#define FW_VA_RENDER_CONTEXT_LSBMASK (0xFFFFFFFF)
26574+#define FW_VA_RENDER_CONTEXT_OFFSET (0x000C)
26575+#define FW_VA_RENDER_CONTEXT_SHIFT (0)
26576+
26577+// FW_VA_RENDER FENCE_VALUE
26578+#define FW_VA_RENDER_FENCE_VALUE_ALIGNMENT (4)
26579+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
26580+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
26581+#define FW_VA_RENDER_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
26582+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
26583+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
26584+
26585+// FW_VA_RENDER OPERATING_MODE
26586+#define FW_VA_RENDER_OPERATING_MODE_ALIGNMENT (4)
26587+#define FW_VA_RENDER_OPERATING_MODE_TYPE uint32_t
26588+#define FW_VA_RENDER_OPERATING_MODE_MASK (0xFFFFFFFF)
26589+#define FW_VA_RENDER_OPERATING_MODE_LSBMASK (0xFFFFFFFF)
26590+#define FW_VA_RENDER_OPERATING_MODE_OFFSET (0x0014)
26591+#define FW_VA_RENDER_OPERATING_MODE_SHIFT (0)
26592+
26593+// FW_VA_RENDER FIRST_MB_IN_SLICE
26594+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_ALIGNMENT (2)
26595+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_TYPE uint16_t
26596+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_MASK (0xFFFF)
26597+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_LSBMASK (0xFFFF)
26598+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_OFFSET (0x0018)
26599+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_SHIFT (0)
26600+
26601+// FW_VA_RENDER LAST_MB_IN_FRAME
26602+#define FW_VA_RENDER_LAST_MB_IN_FRAME_ALIGNMENT (2)
26603+#define FW_VA_RENDER_LAST_MB_IN_FRAME_TYPE uint16_t
26604+#define FW_VA_RENDER_LAST_MB_IN_FRAME_MASK (0xFFFF)
26605+#define FW_VA_RENDER_LAST_MB_IN_FRAME_LSBMASK (0xFFFF)
26606+#define FW_VA_RENDER_LAST_MB_IN_FRAME_OFFSET (0x001A)
26607+#define FW_VA_RENDER_LAST_MB_IN_FRAME_SHIFT (0)
26608+
26609+// FW_VA_RENDER FLAGS
26610+#define FW_VA_RENDER_FLAGS_ALIGNMENT (4)
26611+#define FW_VA_RENDER_FLAGS_TYPE uint32_t
26612+#define FW_VA_RENDER_FLAGS_MASK (0xFFFFFFFF)
26613+#define FW_VA_RENDER_FLAGS_LSBMASK (0xFFFFFFFF)
26614+#define FW_VA_RENDER_FLAGS_OFFSET (0x001C)
26615+#define FW_VA_RENDER_FLAGS_SHIFT (0)
26616+
26617+#define FW_VA_CMD_COMPLETED_SIZE (12)
26618+
26619+// FW_VA_CMD_COMPLETED MSG_SIZE
26620+#define FW_VA_CMD_COMPLETED_MSG_SIZE_ALIGNMENT (1)
26621+#define FW_VA_CMD_COMPLETED_MSG_SIZE_TYPE uint8_t
26622+#define FW_VA_CMD_COMPLETED_MSG_SIZE_MASK (0xFF)
26623+#define FW_VA_CMD_COMPLETED_MSG_SIZE_LSBMASK (0xFF)
26624+#define FW_VA_CMD_COMPLETED_MSG_SIZE_OFFSET (0x0000)
26625+#define FW_VA_CMD_COMPLETED_MSG_SIZE_SHIFT (0)
26626+
26627+// FW_VA_CMD_COMPLETED ID
26628+#define FW_VA_CMD_COMPLETED_ID_ALIGNMENT (1)
26629+#define FW_VA_CMD_COMPLETED_ID_TYPE uint8_t
26630+#define FW_VA_CMD_COMPLETED_ID_MASK (0xFF)
26631+#define FW_VA_CMD_COMPLETED_ID_LSBMASK (0xFF)
26632+#define FW_VA_CMD_COMPLETED_ID_OFFSET (0x0001)
26633+#define FW_VA_CMD_COMPLETED_ID_SHIFT (0)
26634+
26635+// FW_VA_CMD_COMPLETED FENCE_VALUE
26636+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_ALIGNMENT (4)
26637+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
26638+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
26639+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
26640+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
26641+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
26642+
26643+// FW_VA_CMD_COMPLETED FLAGS
26644+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
26645+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
26646+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
26647+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
26648+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
26649+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
26650+
26651+#define FW_VA_CMD_FAILED_SIZE (12)
26652+
26653+// FW_VA_CMD_FAILED MSG_SIZE
26654+#define FW_VA_CMD_FAILED_MSG_SIZE_ALIGNMENT (1)
26655+#define FW_VA_CMD_FAILED_MSG_SIZE_TYPE uint8_t
26656+#define FW_VA_CMD_FAILED_MSG_SIZE_MASK (0xFF)
26657+#define FW_VA_CMD_FAILED_MSG_SIZE_LSBMASK (0xFF)
26658+#define FW_VA_CMD_FAILED_MSG_SIZE_OFFSET (0x0000)
26659+#define FW_VA_CMD_FAILED_MSG_SIZE_SHIFT (0)
26660+
26661+// FW_VA_CMD_FAILED ID
26662+#define FW_VA_CMD_FAILED_ID_ALIGNMENT (1)
26663+#define FW_VA_CMD_FAILED_ID_TYPE uint8_t
26664+#define FW_VA_CMD_FAILED_ID_MASK (0xFF)
26665+#define FW_VA_CMD_FAILED_ID_LSBMASK (0xFF)
26666+#define FW_VA_CMD_FAILED_ID_OFFSET (0x0001)
26667+#define FW_VA_CMD_FAILED_ID_SHIFT (0)
26668+
26669+// FW_VA_CMD_FAILED FLAGS
26670+#define FW_VA_CMD_FAILED_FLAGS_ALIGNMENT (2)
26671+#define FW_VA_CMD_FAILED_FLAGS_TYPE uint16_t
26672+#define FW_VA_CMD_FAILED_FLAGS_MASK (0xFFFF)
26673+#define FW_VA_CMD_FAILED_FLAGS_LSBMASK (0xFFFF)
26674+#define FW_VA_CMD_FAILED_FLAGS_OFFSET (0x0002)
26675+#define FW_VA_CMD_FAILED_FLAGS_SHIFT (0)
26676+
26677+// FW_VA_CMD_FAILED FENCE_VALUE
26678+#define FW_VA_CMD_FAILED_FENCE_VALUE_ALIGNMENT (4)
26679+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
26680+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
26681+#define FW_VA_CMD_FAILED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
26682+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
26683+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
26684+
26685+// FW_VA_CMD_FAILED IRQSTATUS
26686+#define FW_VA_CMD_FAILED_IRQSTATUS_ALIGNMENT (4)
26687+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
26688+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
26689+#define FW_VA_CMD_FAILED_IRQSTATUS_LSBMASK (0xFFFFFFFF)
26690+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
26691+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
26692+
26693+#define FW_VA_DEBLOCK_REQUIRED_SIZE (8)
26694+
26695+// FW_VA_DEBLOCK_REQUIRED MSG_SIZE
26696+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_ALIGNMENT (1)
26697+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_TYPE uint8_t
26698+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_MASK (0xFF)
26699+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_LSBMASK (0xFF)
26700+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_OFFSET (0x0000)
26701+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_SHIFT (0)
26702+
26703+// FW_VA_DEBLOCK_REQUIRED ID
26704+#define FW_VA_DEBLOCK_REQUIRED_ID_ALIGNMENT (1)
26705+#define FW_VA_DEBLOCK_REQUIRED_ID_TYPE uint8_t
26706+#define FW_VA_DEBLOCK_REQUIRED_ID_MASK (0xFF)
26707+#define FW_VA_DEBLOCK_REQUIRED_ID_LSBMASK (0xFF)
26708+#define FW_VA_DEBLOCK_REQUIRED_ID_OFFSET (0x0001)
26709+#define FW_VA_DEBLOCK_REQUIRED_ID_SHIFT (0)
26710+
26711+// FW_VA_DEBLOCK_REQUIRED CONTEXT
26712+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_ALIGNMENT (4)
26713+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
26714+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
26715+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_LSBMASK (0xFFFFFFFF)
26716+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
26717+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
26718+
26719+#define FW_VA_HW_PANIC_SIZE (12)
26720+
26721+// FW_VA_HW_PANIC FLAGS
26722+#define FW_VA_HW_PANIC_FLAGS_ALIGNMENT (2)
26723+#define FW_VA_HW_PANIC_FLAGS_TYPE uint16_t
26724+#define FW_VA_HW_PANIC_FLAGS_MASK (0xFFFF)
26725+#define FW_VA_HW_PANIC_FLAGS_LSBMASK (0xFFFF)
26726+#define FW_VA_HW_PANIC_FLAGS_OFFSET (0x0002)
26727+#define FW_VA_HW_PANIC_FLAGS_SHIFT (0)
26728+
26729+// FW_VA_HW_PANIC MSG_SIZE
26730+#define FW_VA_HW_PANIC_MSG_SIZE_ALIGNMENT (1)
26731+#define FW_VA_HW_PANIC_MSG_SIZE_TYPE uint8_t
26732+#define FW_VA_HW_PANIC_MSG_SIZE_MASK (0xFF)
26733+#define FW_VA_HW_PANIC_MSG_SIZE_LSBMASK (0xFF)
26734+#define FW_VA_HW_PANIC_MSG_SIZE_OFFSET (0x0000)
26735+#define FW_VA_HW_PANIC_MSG_SIZE_SHIFT (0)
26736+
26737+// FW_VA_HW_PANIC ID
26738+#define FW_VA_HW_PANIC_ID_ALIGNMENT (1)
26739+#define FW_VA_HW_PANIC_ID_TYPE uint8_t
26740+#define FW_VA_HW_PANIC_ID_MASK (0xFF)
26741+#define FW_VA_HW_PANIC_ID_LSBMASK (0xFF)
26742+#define FW_VA_HW_PANIC_ID_OFFSET (0x0001)
26743+#define FW_VA_HW_PANIC_ID_SHIFT (0)
26744+
26745+// FW_VA_HW_PANIC FENCE_VALUE
26746+#define FW_VA_HW_PANIC_FENCE_VALUE_ALIGNMENT (4)
26747+#define FW_VA_HW_PANIC_FENCE_VALUE_TYPE uint32_t
26748+#define FW_VA_HW_PANIC_FENCE_VALUE_MASK (0xFFFFFFFF)
26749+#define FW_VA_HW_PANIC_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
26750+#define FW_VA_HW_PANIC_FENCE_VALUE_OFFSET (0x0004)
26751+#define FW_VA_HW_PANIC_FENCE_VALUE_SHIFT (0)
26752+
26753+// FW_VA_HW_PANIC IRQSTATUS
26754+#define FW_VA_HW_PANIC_IRQSTATUS_ALIGNMENT (4)
26755+#define FW_VA_HW_PANIC_IRQSTATUS_TYPE uint32_t
26756+#define FW_VA_HW_PANIC_IRQSTATUS_MASK (0xFFFFFFFF)
26757+#define FW_VA_HW_PANIC_IRQSTATUS_LSBMASK (0xFFFFFFFF)
26758+#define FW_VA_HW_PANIC_IRQSTATUS_OFFSET (0x0008)
26759+#define FW_VA_HW_PANIC_IRQSTATUS_SHIFT (0)
26760+
26761+#endif
26762Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdxinit.c
26763===================================================================
26764--- /dev/null 1970-01-01 00:00:00.000000000 +0000
26765+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-01-14 11:58:01.000000000 +0000
26766@@ -0,0 +1,625 @@
26767+/**
26768+ * file psb_msvdxinit.c
26769+ * MSVDX initialization and mtx-firmware upload
26770+ *
26771+ */
26772+
26773+/**************************************************************************
26774+ *
26775+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
26776+ * Copyright (c) Imagination Technologies Limited, UK
26777+ * All Rights Reserved.
26778+ *
26779+ * Permission is hereby granted, free of charge, to any person obtaining a
26780+ * copy of this software and associated documentation files (the
26781+ * "Software"), to deal in the Software without restriction, including
26782+ * without limitation the rights to use, copy, modify, merge, publish,
26783+ * distribute, sub license, and/or sell copies of the Software, and to
26784+ * permit persons to whom the Software is furnished to do so, subject to
26785+ * the following conditions:
26786+ *
26787+ * The above copyright notice and this permission notice (including the
26788+ * next paragraph) shall be included in all copies or substantial portions
26789+ * of the Software.
26790+ *
26791+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26792+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26793+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26794+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
26795+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
26796+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26797+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
26798+ *
26799+ **************************************************************************/
26800+
26801+#include "drmP.h"
26802+#include "drm.h"
26803+#include "psb_drv.h"
26804+#include "psb_msvdx.h"
26805+#include <linux/firmware.h>
26806+
26807+/*MSVDX FW header*/
26808+struct msvdx_fw
26809+{
26810+ uint32_t ver;
26811+ uint32_t text_size;
26812+ uint32_t data_size;
26813+ uint32_t data_location;
26814+};
26815+
26816+int
26817+psb_wait_for_register (struct drm_psb_private *dev_priv,
26818+ uint32_t ui32Offset,
26819+ uint32_t ui32Value, uint32_t ui32Enable)
26820+{
26821+ uint32_t ui32Temp;
26822+ uint32_t ui32PollCount = 1000;
26823+ while (ui32PollCount)
26824+ {
26825+ ui32Temp = PSB_RMSVDX32 (ui32Offset);
26826+ if (ui32Value == (ui32Temp & ui32Enable)) /* All the bits are reset */
26827+ return 0; /* So exit */
26828+
26829+ /* Wait a bit */
26830+ DRM_UDELAY (100);
26831+ ui32PollCount--;
26832+ }
26833+ PSB_DEBUG_GENERAL
26834+ ("MSVDX: Timeout while waiting for register %08x: expecting %08x (mask %08x), got %08x\n",
26835+ ui32Offset, ui32Value, ui32Enable, ui32Temp);
26836+ return 1;
26837+}
26838+
26839+int
26840+psb_poll_mtx_irq (struct drm_psb_private *dev_priv)
26841+{
26842+ int ret = 0;
26843+ uint32_t MtxInt = 0;
26844+ REGIO_WRITE_FIELD_LITE (MtxInt, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
26845+
26846+ ret = psb_wait_for_register (dev_priv, MSVDX_INTERRUPT_STATUS, MtxInt, /* Required value */
26847+ MtxInt /* Enabled bits */ );
26848+ if (ret)
26849+ {
26850+ PSB_DEBUG_GENERAL
26851+ ("MSVDX: Error Mtx did not return int within a resonable time\n");
26852+
26853+ return ret;
26854+ }
26855+
26856+ PSB_DEBUG_GENERAL ("MSVDX: Got MTX Int\n");
26857+
26858+ /* Got it so clear the bit */
26859+ PSB_WMSVDX32 (MtxInt, MSVDX_INTERRUPT_CLEAR);
26860+
26861+ return ret;
26862+}
26863+
26864+void
26865+psb_write_mtx_core_reg (struct drm_psb_private *dev_priv,
26866+ const uint32_t ui32CoreRegister,
26867+ const uint32_t ui32Val)
26868+{
26869+ uint32_t ui32Reg = 0;
26870+
26871+ /* Put data in MTX_RW_DATA */
26872+ PSB_WMSVDX32 (ui32Val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
26873+
26874+ /* DREADY is set to 0 and request a write */
26875+ ui32Reg = ui32CoreRegister;
26876+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
26877+ MTX_RNW, 0);
26878+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
26879+ MTX_DREADY, 0);
26880+ PSB_WMSVDX32 (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
26881+
26882+ psb_wait_for_register (dev_priv, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK, /* Required Value */
26883+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
26884+}
26885+
26886+void
26887+psb_upload_fw (struct drm_psb_private *dev_priv, const uint32_t ui32DataMem,
26888+ uint32_t ui32RamBankSize, uint32_t ui32Address,
26889+ const unsigned int uiWords, const uint32_t * const pui32Data)
26890+{
26891+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
26892+ (uint32_t) ~ 0;
26893+ uint32_t ui32AccessControl;
26894+
26895+ /* Save the access control register... */
26896+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
26897+
26898+ /* Wait for MCMSTAT to become be idle 1 */
26899+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
26900+ 0xffffffff /* Enables */ );
26901+
26902+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
26903+ {
26904+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
26905+
26906+ if (ui32RamId != ui32CurrBank)
26907+ {
26908+ ui32Addr = ui32Address >> 2;
26909+
26910+ ui32Ctrl = 0;
26911+
26912+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26913+ MSVDX_MTX_RAM_ACCESS_CONTROL,
26914+ MTX_MCMID, ui32RamId);
26915+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26916+ MSVDX_MTX_RAM_ACCESS_CONTROL,
26917+ MTX_MCM_ADDR, ui32Addr);
26918+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26919+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
26920+
26921+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
26922+
26923+ ui32CurrBank = ui32RamId;
26924+ }
26925+ ui32Address += 4;
26926+
26927+ PSB_WMSVDX32 (pui32Data[ui32Loop], MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
26928+
26929+ /* Wait for MCMSTAT to become be idle 1 */
26930+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
26931+ 0xffffffff /* Enables */ );
26932+ }
26933+ PSB_DEBUG_GENERAL ("MSVDX: Upload done\n");
26934+
26935+ /* Restore the access control register... */
26936+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
26937+}
26938+
26939+static int
26940+psb_verify_fw (struct drm_psb_private *dev_priv,
26941+ const uint32_t ui32RamBankSize,
26942+ const uint32_t ui32DataMem, uint32_t ui32Address,
26943+ const uint32_t uiWords, const uint32_t * const pui32Data)
26944+{
26945+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
26946+ (uint32_t) ~ 0;
26947+ uint32_t ui32AccessControl;
26948+ int ret = 0;
26949+
26950+ /* Save the access control register... */
26951+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
26952+
26953+ /* Wait for MCMSTAT to become be idle 1 */
26954+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
26955+ 0xffffffff /* Enables */ );
26956+
26957+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
26958+ {
26959+ uint32_t ui32ReadBackVal;
26960+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
26961+
26962+ if (ui32RamId != ui32CurrBank)
26963+ {
26964+ ui32Addr = ui32Address >> 2;
26965+ ui32Ctrl = 0;
26966+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26967+ MSVDX_MTX_RAM_ACCESS_CONTROL,
26968+ MTX_MCMID, ui32RamId);
26969+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26970+ MSVDX_MTX_RAM_ACCESS_CONTROL,
26971+ MTX_MCM_ADDR, ui32Addr);
26972+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26973+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
26974+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
26975+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMR, 1);
26976+
26977+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
26978+
26979+ ui32CurrBank = ui32RamId;
26980+ }
26981+ ui32Address += 4;
26982+
26983+ /* Wait for MCMSTAT to become be idle 1 */
26984+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
26985+ 0xffffffff /* Enables */ );
26986+
26987+ ui32ReadBackVal = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
26988+ if (pui32Data[ui32Loop] != ui32ReadBackVal)
26989+ {
26990+ DRM_ERROR
26991+ ("psb: Firmware validation fails at index=%08x\n", ui32Loop);
26992+ ret = 1;
26993+ break;
26994+ }
26995+ }
26996+
26997+ /* Restore the access control register... */
26998+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
26999+
27000+ return ret;
27001+}
27002+
27003+static uint32_t *
27004+msvdx_get_fw (struct drm_device *dev,
27005+ const struct firmware **raw, uint8_t * name)
27006+{
27007+ int rc;
27008+ int *ptr = NULL;
27009+
27010+ rc = request_firmware (raw, name, &dev->pdev->dev);
27011+ if (rc < 0)
27012+ {
27013+ DRM_ERROR ("MSVDX: %s request_firmware failed: Reason %d\n", name, rc);
27014+ return NULL;
27015+ }
27016+
27017+ if ((*raw)->size < sizeof (struct msvdx_fw))
27018+ {
27019+ PSB_DEBUG_GENERAL ("MSVDX: %s is is not correct size(%zd)\n",
27020+ name, (*raw)->size);
27021+ return NULL;
27022+ }
27023+
27024+ ptr = (int *) ((*raw))->data;
27025+
27026+ if (!ptr)
27027+ {
27028+ PSB_DEBUG_GENERAL ("MSVDX: Failed to load %s\n", name);
27029+ return NULL;
27030+ }
27031+ /*another sanity check... */
27032+ if ((*raw)->size !=
27033+ (sizeof (struct msvdx_fw) +
27034+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
27035+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->data_size))
27036+ {
27037+ PSB_DEBUG_GENERAL ("MSVDX: %s is is not correct size(%zd)\n",
27038+ name, (*raw)->size);
27039+ return NULL;
27040+ }
27041+ return ptr;
27042+}
27043+
27044+static int
27045+psb_setup_fw (struct drm_device *dev)
27046+{
27047+ struct drm_psb_private *dev_priv = dev->dev_private;
27048+ int ret = 0;
27049+
27050+ uint32_t ram_bank_size;
27051+ struct msvdx_fw *fw;
27052+ uint32_t *fw_ptr = NULL;
27053+ uint32_t *text_ptr = NULL;
27054+ uint32_t *data_ptr = NULL;
27055+ const struct firmware *raw = NULL;
27056+ /* todo : Assert the clock is on - if not turn it on to upload code */
27057+
27058+ PSB_DEBUG_GENERAL ("MSVDX: psb_setup_fw\n");
27059+
27060+ /* Reset MTX */
27061+ PSB_WMSVDX32 (MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK, MSVDX_MTX_SOFT_RESET);
27062+
27063+ /* Initialses Communication controll area to 0 */
27064+ if(dev_priv->psb_rev_id >= POULSBO_D1)
27065+ {
27066+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1 or later revision.\n");
27067+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1, MSVDX_COMMS_OFFSET_FLAGS);
27068+ }
27069+ else
27070+ {
27071+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0 or earlier revision.\n");
27072+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0, MSVDX_COMMS_OFFSET_FLAGS);
27073+ }
27074+
27075+ PSB_WMSVDX32 (0, MSVDX_COMMS_MSG_COUNTER);
27076+ PSB_WMSVDX32 (0, MSVDX_COMMS_SIGNATURE);
27077+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_RD_INDEX);
27078+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
27079+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_RD_INDEX);
27080+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
27081+ PSB_WMSVDX32 (0, MSVDX_COMMS_FW_STATUS);
27082+
27083+ /* read register bank size */
27084+ {
27085+ uint32_t ui32BankSize, ui32Reg;
27086+ ui32Reg = PSB_RMSVDX32 (MSVDX_MTX_RAM_BANK);
27087+ ui32BankSize =
27088+ REGIO_READ_FIELD (ui32Reg, MSVDX_MTX_RAM_BANK, CR_MTX_RAM_BANK_SIZE);
27089+ ram_bank_size = (uint32_t) (1 << (ui32BankSize + 2));
27090+ }
27091+
27092+ PSB_DEBUG_GENERAL ("MSVDX: RAM bank size = %d bytes\n", ram_bank_size);
27093+
27094+ fw_ptr = msvdx_get_fw (dev, &raw, "msvdx_fw.bin");
27095+
27096+ if (!fw_ptr)
27097+ {
27098+ DRM_ERROR ("psb: No valid msvdx_fw.bin firmware found.\n");
27099+ ret = 1;
27100+ goto out;
27101+ }
27102+
27103+ fw = (struct msvdx_fw *) fw_ptr;
27104+ if (fw->ver != 0x02)
27105+ {
27106+ DRM_ERROR
27107+ ("psb: msvdx_fw.bin firmware version mismatch, got version=%02x expected version=%02x\n",
27108+ fw->ver, 0x02);
27109+ ret = 1;
27110+ goto out;
27111+ }
27112+
27113+ text_ptr = (uint32_t *) ((uint8_t *) fw_ptr + sizeof (struct msvdx_fw));
27114+ data_ptr = text_ptr + fw->text_size;
27115+
27116+ PSB_DEBUG_GENERAL ("MSVDX: Retrieved pointers for firmware\n");
27117+ PSB_DEBUG_GENERAL ("MSVDX: text_size: %d\n", fw->text_size);
27118+ PSB_DEBUG_GENERAL ("MSVDX: data_size: %d\n", fw->data_size);
27119+ PSB_DEBUG_GENERAL ("MSVDX: data_location: 0x%x\n", fw->data_location);
27120+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of text: 0x%x\n", *text_ptr);
27121+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of data: 0x%x\n", *data_ptr);
27122+
27123+ PSB_DEBUG_GENERAL ("MSVDX: Uploading firmware\n");
27124+ psb_upload_fw (dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
27125+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr);
27126+ psb_upload_fw (dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
27127+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr);
27128+
27129+ /*todo : Verify code upload possibly only in debug */
27130+ if (psb_verify_fw
27131+ (dev_priv, ram_bank_size, MTX_CORE_CODE_MEM,
27132+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr))
27133+ {
27134+ /* Firmware code upload failed */
27135+ ret = 1;
27136+ goto out;
27137+ }
27138+ if (psb_verify_fw
27139+ (dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
27140+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr))
27141+ {
27142+ /* Firmware data upload failed */
27143+ ret = 1;
27144+ goto out;
27145+ }
27146+
27147+ /* -- Set starting PC address */
27148+ psb_write_mtx_core_reg (dev_priv, MTX_PC, PC_START_ADDRESS);
27149+
27150+ /* -- Turn on the thread */
27151+ PSB_WMSVDX32 (MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
27152+
27153+ /* Wait for the signature value to be written back */
27154+ ret = psb_wait_for_register (dev_priv, MSVDX_COMMS_SIGNATURE, MSVDX_COMMS_SIGNATURE_VALUE, /* Required value */
27155+ 0xffffffff /* Enabled bits */ );
27156+ if (ret)
27157+ {
27158+ DRM_ERROR ("psb: MSVDX firmware fails to initialize.\n");
27159+ goto out;
27160+ }
27161+
27162+ PSB_DEBUG_GENERAL ("MSVDX: MTX Initial indications OK\n");
27163+ PSB_DEBUG_GENERAL ("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
27164+ MSVDX_COMMS_AREA_ADDR);
27165+out:
27166+ if (raw)
27167+ {
27168+ PSB_DEBUG_GENERAL ("MSVDX releasing firmware resouces....\n");
27169+ release_firmware (raw);
27170+ }
27171+ return ret;
27172+}
27173+
27174+static void
27175+psb_free_ccb (struct drm_buffer_object **ccb)
27176+{
27177+ drm_bo_usage_deref_unlocked (ccb);
27178+ *ccb = NULL;
27179+}
27180+
27181+/*******************************************************************************
27182+
27183+ @Function psb_msvdx_reset
27184+
27185+ @Description
27186+
27187+ Reset chip and disable interrupts.
27188+
27189+ @Input psDeviceNode - device info. structure
27190+
27191+ @Return 0 - Success
27192+ 1 - Failure
27193+
27194+******************************************************************************/
27195+int
27196+psb_msvdx_reset (struct drm_psb_private *dev_priv)
27197+{
27198+ int ret = 0;
27199+
27200+ /* Issue software reset */
27201+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
27202+
27203+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0, /* Required value */
27204+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK
27205+ /* Enabled bits */ );
27206+
27207+ if (!ret)
27208+ {
27209+ /* Clear interrupt enabled flag */
27210+ PSB_WMSVDX32 (0, MSVDX_HOST_INTERRUPT_ENABLE);
27211+
27212+ /* Clear any pending interrupt flags */
27213+ PSB_WMSVDX32 (0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
27214+ }
27215+
27216+ mutex_destroy (&dev_priv->msvdx_mutex);
27217+
27218+ return ret;
27219+}
27220+
27221+static int
27222+psb_allocate_ccb (struct drm_device *dev,
27223+ struct drm_buffer_object **ccb,
27224+ uint32_t * base_addr, int size)
27225+{
27226+ int ret;
27227+ struct drm_bo_kmap_obj tmp_kmap;
27228+ int is_iomem;
27229+
27230+ ret = drm_buffer_object_create (dev, size,
27231+ drm_bo_type_kernel,
27232+ DRM_BO_FLAG_READ |
27233+ DRM_PSB_FLAG_MEM_KERNEL |
27234+ DRM_BO_FLAG_NO_EVICT,
27235+ DRM_BO_HINT_DONT_FENCE, 0, 0, ccb);
27236+ if (ret)
27237+ {
27238+ PSB_DEBUG_GENERAL ("Failed to allocate CCB.\n");
27239+ *ccb = NULL;
27240+ return 1;
27241+ }
27242+
27243+ ret = drm_bo_kmap (*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
27244+ if (ret)
27245+ {
27246+ PSB_DEBUG_GENERAL ("drm_bo_kmap failed ret: %d\n", ret);
27247+ drm_bo_usage_deref_unlocked (ccb);
27248+ *ccb = NULL;
27249+ return 1;
27250+ }
27251+
27252+ memset (drm_bmo_virtual (&tmp_kmap, &is_iomem), 0, size);
27253+ drm_bo_kunmap (&tmp_kmap);
27254+
27255+ *base_addr = (*ccb)->offset;
27256+ return 0;
27257+}
27258+
27259+int
27260+psb_msvdx_init (struct drm_device *dev)
27261+{
27262+ struct drm_psb_private *dev_priv = dev->dev_private;
27263+ uint32_t ui32Cmd;
27264+ int ret;
27265+
27266+ PSB_DEBUG_GENERAL ("MSVDX: psb_msvdx_init\n");
27267+
27268+ /*Initialize command msvdx queueing */
27269+ INIT_LIST_HEAD (&dev_priv->msvdx_queue);
27270+ mutex_init (&dev_priv->msvdx_mutex);
27271+ spin_lock_init (&dev_priv->msvdx_lock);
27272+ dev_priv->msvdx_busy = 0;
27273+
27274+ /*figure out the stepping*/
27275+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &dev_priv->psb_rev_id );
27276+
27277+ /* Enable Clocks */
27278+ PSB_DEBUG_GENERAL ("Enabling clocks\n");
27279+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
27280+
27281+ /* Enable MMU by removing all bypass bits */
27282+ PSB_WMSVDX32 (0, MSVDX_MMU_CONTROL0);
27283+
27284+ PSB_DEBUG_GENERAL ("MSVDX: Setting up RENDEC\n");
27285+ /* Allocate device virtual memory as required by rendec.... */
27286+ if (!dev_priv->ccb0)
27287+ {
27288+ ret =
27289+ psb_allocate_ccb (dev, &dev_priv->ccb0,
27290+ &dev_priv->base_addr0, RENDEC_A_SIZE);
27291+ if (ret)
27292+ goto err_exit;
27293+ }
27294+
27295+ if (!dev_priv->ccb1)
27296+ {
27297+ ret =
27298+ psb_allocate_ccb (dev, &dev_priv->ccb1,
27299+ &dev_priv->base_addr1, RENDEC_B_SIZE);
27300+ if (ret)
27301+ goto err_exit;
27302+ }
27303+
27304+ PSB_DEBUG_GENERAL ("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
27305+ dev_priv->base_addr0, dev_priv->base_addr1);
27306+
27307+ PSB_WMSVDX32 (dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
27308+ PSB_WMSVDX32 (dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
27309+
27310+ ui32Cmd = 0;
27311+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
27312+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
27313+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
27314+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
27315+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE);
27316+
27317+ ui32Cmd = 0;
27318+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
27319+ RENDEC_DECODE_START_SIZE, 0);
27320+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_W, 1);
27321+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_R, 1);
27322+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
27323+ RENDEC_EXTERNAL_MEMORY, 1);
27324+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL1);
27325+
27326+ ui32Cmd = 0x00101010;
27327+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT0);
27328+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT1);
27329+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT2);
27330+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT3);
27331+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT4);
27332+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT5);
27333+
27334+ ui32Cmd = 0;
27335+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, 1);
27336+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL0);
27337+
27338+ ret = psb_setup_fw (dev);
27339+ if (ret)
27340+ goto err_exit;
27341+
27342+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
27343+
27344+ return 0;
27345+
27346+err_exit:
27347+ if (dev_priv->ccb0)
27348+ psb_free_ccb (&dev_priv->ccb0);
27349+ if (dev_priv->ccb1)
27350+ psb_free_ccb (&dev_priv->ccb1);
27351+
27352+ return 1;
27353+}
27354+
27355+int
27356+psb_msvdx_uninit (struct drm_device *dev)
27357+{
27358+ struct drm_psb_private *dev_priv = dev->dev_private;
27359+
27360+ /*Reset MSVDX chip */
27361+ psb_msvdx_reset (dev_priv);
27362+
27363+// PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
27364+ printk("set the msvdx clock to 0 in the %s\n", __FUNCTION__);
27365+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
27366+
27367+ /*Clean up resources...*/
27368+ if (dev_priv->ccb0)
27369+ psb_free_ccb (&dev_priv->ccb0);
27370+ if (dev_priv->ccb1)
27371+ psb_free_ccb (&dev_priv->ccb1);
27372+
27373+ return 0;
27374+}
27375+
27376+int psb_hw_info_ioctl(struct drm_device *dev, void *data,
27377+ struct drm_file *file_priv)
27378+{
27379+ struct drm_psb_private *dev_priv = dev->dev_private;
27380+ struct drm_psb_hw_info *hw_info = data;
27381+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
27382+
27383+ hw_info->rev_id = dev_priv->psb_rev_id;
27384+
27385+ /*read the fuse info to determine the caps*/
27386+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
27387+ pci_read_config_dword(pci_root, 0xD4, &hw_info->caps);
27388+
27389+ PSB_DEBUG_GENERAL("MSVDX: PSB caps: 0x%x\n", hw_info->caps);
27390+ return 0;
27391+}
27392Index: linux-2.6.27/drivers/gpu/drm/psb/psb_reg.h
27393===================================================================
27394--- /dev/null 1970-01-01 00:00:00.000000000 +0000
27395+++ linux-2.6.27/drivers/gpu/drm/psb/psb_reg.h 2009-01-14 11:58:01.000000000 +0000
27396@@ -0,0 +1,562 @@
27397+/**************************************************************************
27398+ *
27399+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
27400+ * Copyright (c) 2007, Intel Corporation.
27401+ * All Rights Reserved.
27402+ *
27403+ * This program is free software; you can redistribute it and/or modify it
27404+ * under the terms and conditions of the GNU General Public License,
27405+ * version 2, as published by the Free Software Foundation.
27406+ *
27407+ * This program is distributed in the hope it will be useful, but WITHOUT
27408+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27409+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27410+ * more details.
27411+ *
27412+ * You should have received a copy of the GNU General Public License along with
27413+ * this program; if not, write to the Free Software Foundation, Inc.,
27414+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27415+ *
27416+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27417+ * develop this driver.
27418+ *
27419+ **************************************************************************/
27420+/*
27421+ */
27422+#ifndef _PSB_REG_H_
27423+#define _PSB_REG_H_
27424+
27425+#define PSB_CR_CLKGATECTL 0x0000
27426+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
27427+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
27428+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
27429+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
27430+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
27431+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
27432+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
27433+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
27434+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
27435+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
27436+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
27437+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
27438+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
27439+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
27440+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
27441+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
27442+
27443+#define PSB_CR_CORE_ID 0x0010
27444+#define _PSB_CC_ID_ID_SHIFT (16)
27445+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
27446+#define _PSB_CC_ID_CONFIG_SHIFT (0)
27447+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
27448+
27449+#define PSB_CR_CORE_REVISION 0x0014
27450+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
27451+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
27452+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
27453+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
27454+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
27455+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
27456+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
27457+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
27458+
27459+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
27460+
27461+#define PSB_CR_SOFT_RESET 0x0080
27462+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
27463+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
27464+#define _PSB_CS_RESET_USE_RESET (1 << 4)
27465+#define _PSB_CS_RESET_TA_RESET (1 << 3)
27466+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
27467+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
27468+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
27469+
27470+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
27471+
27472+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
27473+
27474+#define PSB_CR_EVENT_STATUS2 0x0118
27475+
27476+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
27477+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
27478+
27479+#define PSB_CR_EVENT_STATUS 0x012C
27480+
27481+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
27482+
27483+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
27484+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
27485+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
27486+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
27487+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
27488+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
27489+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
27490+#define _PSB_CE_SW_EVENT (1 << 14)
27491+#define _PSB_CE_TA_FINISHED (1 << 13)
27492+#define _PSB_CE_TA_TERMINATE (1 << 12)
27493+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
27494+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
27495+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
27496+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
27497+
27498+
27499+#define PSB_USE_OFFSET_MASK 0x0007FFFF
27500+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
27501+#define PSB_CR_USE_CODE_BASE0 0x0A0C
27502+#define PSB_CR_USE_CODE_BASE1 0x0A10
27503+#define PSB_CR_USE_CODE_BASE2 0x0A14
27504+#define PSB_CR_USE_CODE_BASE3 0x0A18
27505+#define PSB_CR_USE_CODE_BASE4 0x0A1C
27506+#define PSB_CR_USE_CODE_BASE5 0x0A20
27507+#define PSB_CR_USE_CODE_BASE6 0x0A24
27508+#define PSB_CR_USE_CODE_BASE7 0x0A28
27509+#define PSB_CR_USE_CODE_BASE8 0x0A2C
27510+#define PSB_CR_USE_CODE_BASE9 0x0A30
27511+#define PSB_CR_USE_CODE_BASE10 0x0A34
27512+#define PSB_CR_USE_CODE_BASE11 0x0A38
27513+#define PSB_CR_USE_CODE_BASE12 0x0A3C
27514+#define PSB_CR_USE_CODE_BASE13 0x0A40
27515+#define PSB_CR_USE_CODE_BASE14 0x0A44
27516+#define PSB_CR_USE_CODE_BASE15 0x0A48
27517+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
27518+#define _PSB_CUC_BASE_DM_SHIFT (25)
27519+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
27520+#define _PSB_CUC_BASE_ADDR_SHIFT (0) // 1024-bit aligned address?
27521+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
27522+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
27523+#define _PSB_CUC_DM_VERTEX (0)
27524+#define _PSB_CUC_DM_PIXEL (1)
27525+#define _PSB_CUC_DM_RESERVED (2)
27526+#define _PSB_CUC_DM_EDM (3)
27527+
27528+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
27529+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) // 1MB aligned address
27530+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
27531+
27532+#define PSB_CR_EVENT_KICKER 0x0AC4
27533+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) // 128-bit aligned address
27534+
27535+#define PSB_CR_EVENT_KICK 0x0AC8
27536+#define _PSB_CE_KICK_NOW (1 << 0)
27537+
27538+
27539+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
27540+
27541+#define PSB_CR_BIF_CTRL 0x0C00
27542+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
27543+#define _PSB_CB_CTRL_INVALDC (1 << 3)
27544+#define _PSB_CB_CTRL_FLUSH (1 << 2)
27545+
27546+#define PSB_CR_BIF_INT_STAT 0x0C04
27547+
27548+#define PSB_CR_BIF_FAULT 0x0C08
27549+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
27550+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
27551+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
27552+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
27553+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
27554+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
27555+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
27556+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
27557+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
27558+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
27559+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
27560+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
27561+
27562+#define PSB_CR_BIF_BANK0 0x0C78
27563+
27564+#define PSB_CR_BIF_BANK1 0x0C7C
27565+
27566+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
27567+
27568+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
27569+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
27570+
27571+#define PSB_CR_2D_SOCIF 0x0E18
27572+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
27573+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
27574+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
27575+
27576+#define PSB_CR_2D_BLIT_STATUS 0x0E04
27577+#define _PSB_C2B_STATUS_BUSY (1 << 24)
27578+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
27579+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
27580+
27581+/*
27582+ * 2D defs.
27583+ */
27584+
27585+/*
27586+ * 2D Slave Port Data : Block Header's Object Type
27587+ */
27588+
27589+#define PSB_2D_CLIP_BH (0x00000000)
27590+#define PSB_2D_PAT_BH (0x10000000)
27591+#define PSB_2D_CTRL_BH (0x20000000)
27592+#define PSB_2D_SRC_OFF_BH (0x30000000)
27593+#define PSB_2D_MASK_OFF_BH (0x40000000)
27594+#define PSB_2D_RESERVED1_BH (0x50000000)
27595+#define PSB_2D_RESERVED2_BH (0x60000000)
27596+#define PSB_2D_FENCE_BH (0x70000000)
27597+#define PSB_2D_BLIT_BH (0x80000000)
27598+#define PSB_2D_SRC_SURF_BH (0x90000000)
27599+#define PSB_2D_DST_SURF_BH (0xA0000000)
27600+#define PSB_2D_PAT_SURF_BH (0xB0000000)
27601+#define PSB_2D_SRC_PAL_BH (0xC0000000)
27602+#define PSB_2D_PAT_PAL_BH (0xD0000000)
27603+#define PSB_2D_MASK_SURF_BH (0xE0000000)
27604+#define PSB_2D_FLUSH_BH (0xF0000000)
27605+
27606+/*
27607+ * Clip Definition block (PSB_2D_CLIP_BH)
27608+ */
27609+#define PSB_2D_CLIPCOUNT_MAX (1)
27610+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
27611+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
27612+#define PSB_2D_CLIPCOUNT_SHIFT (0)
27613+// clip rectangle min & max
27614+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
27615+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
27616+#define PSB_2D_CLIP_XMAX_SHIFT (12)
27617+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
27618+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
27619+#define PSB_2D_CLIP_XMIN_SHIFT (0)
27620+// clip rectangle offset
27621+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
27622+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
27623+#define PSB_2D_CLIP_YMAX_SHIFT (12)
27624+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
27625+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
27626+#define PSB_2D_CLIP_YMIN_SHIFT (0)
27627+
27628+/*
27629+ * Pattern Control (PSB_2D_PAT_BH)
27630+ */
27631+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
27632+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
27633+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
27634+#define PSB_2D_PAT_WIDTH_SHIFT (5)
27635+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
27636+#define PSB_2D_PAT_YSTART_SHIFT (10)
27637+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
27638+#define PSB_2D_PAT_XSTART_SHIFT (15)
27639+
27640+/*
27641+ * 2D Control block (PSB_2D_CTRL_BH)
27642+ */
27643+// Present Flags
27644+#define PSB_2D_SRCCK_CTRL (0x00000001)
27645+#define PSB_2D_DSTCK_CTRL (0x00000002)
27646+#define PSB_2D_ALPHA_CTRL (0x00000004)
27647+// Colour Key Colour (SRC/DST)
27648+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
27649+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
27650+#define PSB_2D_CK_COL_SHIFT (0)
27651+// Colour Key Mask (SRC/DST)
27652+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
27653+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
27654+#define PSB_2D_CK_MASK_SHIFT (0)
27655+// Alpha Control (Alpha/RGB)
27656+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
27657+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
27658+#define PSB_2D_GBLALPHA_SHIFT (12)
27659+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
27660+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
27661+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
27662+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
27663+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
27664+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
27665+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
27666+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
27667+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
27668+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
27669+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
27670+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
27671+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
27672+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
27673+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
27674+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
27675+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
27676+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
27677+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
27678+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
27679+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
27680+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
27681+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
27682+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
27683+
27684+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
27685+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
27686+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
27687+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
27688+
27689+/*
27690+ *Source Offset (PSB_2D_SRC_OFF_BH)
27691+ */
27692+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
27693+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
27694+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
27695+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
27696+
27697+/*
27698+ * Mask Offset (PSB_2D_MASK_OFF_BH)
27699+ */
27700+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
27701+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
27702+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
27703+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
27704+
27705+/*
27706+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
27707+ */
27708+
27709+/*
27710+ *Blit Rectangle (PSB_2D_BLIT_BH)
27711+ */
27712+
27713+#define PSB_2D_ROT_MASK (3<<25)
27714+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
27715+#define PSB_2D_ROT_NONE (0<<25)
27716+#define PSB_2D_ROT_90DEGS (1<<25)
27717+#define PSB_2D_ROT_180DEGS (2<<25)
27718+#define PSB_2D_ROT_270DEGS (3<<25)
27719+
27720+#define PSB_2D_COPYORDER_MASK (3<<23)
27721+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
27722+#define PSB_2D_COPYORDER_TL2BR (0<<23)
27723+#define PSB_2D_COPYORDER_BR2TL (1<<23)
27724+#define PSB_2D_COPYORDER_TR2BL (2<<23)
27725+#define PSB_2D_COPYORDER_BL2TR (3<<23)
27726+
27727+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
27728+#define PSB_2D_DSTCK_DISABLE (0x00000000)
27729+#define PSB_2D_DSTCK_PASS (0x00200000)
27730+#define PSB_2D_DSTCK_REJECT (0x00400000)
27731+
27732+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
27733+#define PSB_2D_SRCCK_DISABLE (0x00000000)
27734+#define PSB_2D_SRCCK_PASS (0x00080000)
27735+#define PSB_2D_SRCCK_REJECT (0x00100000)
27736+
27737+#define PSB_2D_CLIP_ENABLE (0x00040000)
27738+
27739+#define PSB_2D_ALPHA_ENABLE (0x00020000)
27740+
27741+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
27742+#define PSB_2D_PAT_MASK (0x00010000)
27743+#define PSB_2D_USE_PAT (0x00010000)
27744+#define PSB_2D_USE_FILL (0x00000000)
27745+/*
27746+ * Tungsten Graphics note on rop codes: If rop A and rop B are
27747+ * identical, the mask surface will not be read and need not be
27748+ * set up.
27749+ */
27750+
27751+#define PSB_2D_ROP3B_MASK (0x0000FF00)
27752+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
27753+#define PSB_2D_ROP3B_SHIFT (8)
27754+// rop code A
27755+#define PSB_2D_ROP3A_MASK (0x000000FF)
27756+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
27757+#define PSB_2D_ROP3A_SHIFT (0)
27758+
27759+#define PSB_2D_ROP4_MASK (0x0000FFFF)
27760+/*
27761+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
27762+ * Fill Colour RGBA8888
27763+ */
27764+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
27765+#define PSB_2D_FILLCOLOUR_SHIFT (0)
27766+/*
27767+ * DWORD1: (Always Present)
27768+ * X Start (Dest)
27769+ * Y Start (Dest)
27770+ */
27771+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
27772+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
27773+#define PSB_2D_DST_XSTART_SHIFT (12)
27774+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
27775+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
27776+#define PSB_2D_DST_YSTART_SHIFT (0)
27777+/*
27778+ * DWORD2: (Always Present)
27779+ * X Size (Dest)
27780+ * Y Size (Dest)
27781+ */
27782+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
27783+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
27784+#define PSB_2D_DST_XSIZE_SHIFT (12)
27785+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
27786+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
27787+#define PSB_2D_DST_YSIZE_SHIFT (0)
27788+
27789+/*
27790+ * Source Surface (PSB_2D_SRC_SURF_BH)
27791+ */
27792+/*
27793+ * WORD 0
27794+ */
27795+
27796+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
27797+#define PSB_2D_SRC_1_PAL (0x00000000)
27798+#define PSB_2D_SRC_2_PAL (0x00008000)
27799+#define PSB_2D_SRC_4_PAL (0x00010000)
27800+#define PSB_2D_SRC_8_PAL (0x00018000)
27801+#define PSB_2D_SRC_8_ALPHA (0x00020000)
27802+#define PSB_2D_SRC_4_ALPHA (0x00028000)
27803+#define PSB_2D_SRC_332RGB (0x00030000)
27804+#define PSB_2D_SRC_4444ARGB (0x00038000)
27805+#define PSB_2D_SRC_555RGB (0x00040000)
27806+#define PSB_2D_SRC_1555ARGB (0x00048000)
27807+#define PSB_2D_SRC_565RGB (0x00050000)
27808+#define PSB_2D_SRC_0888ARGB (0x00058000)
27809+#define PSB_2D_SRC_8888ARGB (0x00060000)
27810+#define PSB_2D_SRC_8888UYVY (0x00068000)
27811+#define PSB_2D_SRC_RESERVED (0x00070000)
27812+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
27813+
27814+
27815+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
27816+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
27817+#define PSB_2D_SRC_STRIDE_SHIFT (0)
27818+/*
27819+ * WORD 1 - Base Address
27820+ */
27821+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
27822+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
27823+#define PSB_2D_SRC_ADDR_SHIFT (2)
27824+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
27825+
27826+/*
27827+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
27828+ */
27829+/*
27830+ * WORD 0
27831+ */
27832+
27833+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
27834+#define PSB_2D_PAT_1_PAL (0x00000000)
27835+#define PSB_2D_PAT_2_PAL (0x00008000)
27836+#define PSB_2D_PAT_4_PAL (0x00010000)
27837+#define PSB_2D_PAT_8_PAL (0x00018000)
27838+#define PSB_2D_PAT_8_ALPHA (0x00020000)
27839+#define PSB_2D_PAT_4_ALPHA (0x00028000)
27840+#define PSB_2D_PAT_332RGB (0x00030000)
27841+#define PSB_2D_PAT_4444ARGB (0x00038000)
27842+#define PSB_2D_PAT_555RGB (0x00040000)
27843+#define PSB_2D_PAT_1555ARGB (0x00048000)
27844+#define PSB_2D_PAT_565RGB (0x00050000)
27845+#define PSB_2D_PAT_0888ARGB (0x00058000)
27846+#define PSB_2D_PAT_8888ARGB (0x00060000)
27847+
27848+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
27849+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
27850+#define PSB_2D_PAT_STRIDE_SHIFT (0)
27851+/*
27852+ * WORD 1 - Base Address
27853+ */
27854+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
27855+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
27856+#define PSB_2D_PAT_ADDR_SHIFT (2)
27857+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
27858+
27859+/*
27860+ * Destination Surface (PSB_2D_DST_SURF_BH)
27861+ */
27862+/*
27863+ * WORD 0
27864+ */
27865+
27866+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
27867+#define PSB_2D_DST_332RGB (0x00030000)
27868+#define PSB_2D_DST_4444ARGB (0x00038000)
27869+#define PSB_2D_DST_555RGB (0x00040000)
27870+#define PSB_2D_DST_1555ARGB (0x00048000)
27871+#define PSB_2D_DST_565RGB (0x00050000)
27872+#define PSB_2D_DST_0888ARGB (0x00058000)
27873+#define PSB_2D_DST_8888ARGB (0x00060000)
27874+#define PSB_2D_DST_8888AYUV (0x00070000)
27875+
27876+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
27877+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
27878+#define PSB_2D_DST_STRIDE_SHIFT (0)
27879+/*
27880+ * WORD 1 - Base Address
27881+ */
27882+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
27883+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
27884+#define PSB_2D_DST_ADDR_SHIFT (2)
27885+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
27886+
27887+/*
27888+ * Mask Surface (PSB_2D_MASK_SURF_BH)
27889+ */
27890+/*
27891+ * WORD 0
27892+ */
27893+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
27894+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
27895+#define PSB_2D_MASK_STRIDE_SHIFT (0)
27896+/*
27897+ * WORD 1 - Base Address
27898+ */
27899+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
27900+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
27901+#define PSB_2D_MASK_ADDR_SHIFT (2)
27902+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
27903+
27904+/*
27905+ * Source Palette (PSB_2D_SRC_PAL_BH)
27906+ */
27907+
27908+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
27909+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
27910+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
27911+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
27912+
27913+/*
27914+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
27915+ */
27916+
27917+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
27918+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
27919+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
27920+#define PSB_2D_PATPAL_BYTEALIGN (1024)
27921+
27922+/*
27923+ * Rop3 Codes (2 LS bytes)
27924+ */
27925+
27926+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
27927+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
27928+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
27929+#define PSB_2D_ROP3_BLACKNESS (0x0000)
27930+#define PSB_2D_ROP3_SRC (0xCC)
27931+#define PSB_2D_ROP3_PAT (0xF0)
27932+#define PSB_2D_ROP3_DST (0xAA)
27933+
27934+
27935+/*
27936+ * Sizes.
27937+ */
27938+
27939+#define PSB_SCENE_HW_COOKIE_SIZE 16
27940+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
27941+
27942+/*
27943+ * Scene stuff.
27944+ */
27945+
27946+#define PSB_NUM_HW_SCENES 2
27947+
27948+/*
27949+ * Scheduler completion actions.
27950+ */
27951+
27952+#define PSB_RASTER_BLOCK 0
27953+#define PSB_RASTER 1
27954+#define PSB_RETURN 2
27955+#define PSB_TA 3
27956+
27957+
27958+#endif
27959Index: linux-2.6.27/drivers/gpu/drm/psb/psb_regman.c
27960===================================================================
27961--- /dev/null 1970-01-01 00:00:00.000000000 +0000
27962+++ linux-2.6.27/drivers/gpu/drm/psb/psb_regman.c 2009-01-14 11:58:01.000000000 +0000
27963@@ -0,0 +1,175 @@
27964+/**************************************************************************
27965+ * Copyright (c) 2007, Intel Corporation.
27966+ * All Rights Reserved.
27967+ *
27968+ * This program is free software; you can redistribute it and/or modify it
27969+ * under the terms and conditions of the GNU General Public License,
27970+ * version 2, as published by the Free Software Foundation.
27971+ *
27972+ * This program is distributed in the hope it will be useful, but WITHOUT
27973+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27974+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
27975+ * more details.
27976+ *
27977+ * You should have received a copy of the GNU General Public License along with
27978+ * this program; if not, write to the Free Software Foundation, Inc.,
27979+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
27980+ *
27981+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
27982+ * develop this driver.
27983+ *
27984+ **************************************************************************/
27985+/*
27986+ */
27987+
27988+#include "drmP.h"
27989+#include "psb_drv.h"
27990+
27991+struct psb_use_reg {
27992+ struct drm_reg reg;
27993+ struct drm_psb_private *dev_priv;
27994+ uint32_t reg_seq;
27995+ uint32_t base;
27996+ uint32_t data_master;
27997+};
27998+
27999+struct psb_use_reg_data {
28000+ uint32_t base;
28001+ uint32_t size;
28002+ uint32_t data_master;
28003+};
28004+
28005+static int psb_use_reg_reusable(const struct drm_reg *reg, const void *data)
28006+{
28007+ struct psb_use_reg *use_reg =
28008+ container_of(reg, struct psb_use_reg, reg);
28009+ struct psb_use_reg_data *use_data = (struct psb_use_reg_data *)data;
28010+
28011+ return ((use_reg->base <= use_data->base) &&
28012+ (use_reg->base + PSB_USE_OFFSET_SIZE >
28013+ use_data->base + use_data->size) &&
28014+ use_reg->data_master == use_data->data_master);
28015+}
28016+
28017+static int psb_use_reg_set(struct psb_use_reg *use_reg,
28018+ const struct psb_use_reg_data *use_data)
28019+{
28020+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
28021+
28022+ if (use_reg->reg.fence == NULL)
28023+ use_reg->data_master = use_data->data_master;
28024+
28025+ if (use_reg->reg.fence == NULL &&
28026+ !psb_use_reg_reusable(&use_reg->reg, (const void *)use_data)) {
28027+
28028+ use_reg->base = use_data->base & ~PSB_USE_OFFSET_MASK;
28029+ use_reg->data_master = use_data->data_master;
28030+
28031+ if (!psb_use_reg_reusable(&use_reg->reg,
28032+ (const void *)use_data)) {
28033+ DRM_ERROR("USE base mechanism didn't support "
28034+ "buffer size or alignment\n");
28035+ return -EINVAL;
28036+ }
28037+
28038+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
28039+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
28040+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
28041+ }
28042+ return 0;
28043+
28044+}
28045+
28046+int psb_grab_use_base(struct drm_psb_private *dev_priv,
28047+ unsigned long base,
28048+ unsigned long size,
28049+ unsigned int data_master,
28050+ uint32_t fence_class,
28051+ uint32_t fence_type,
28052+ int no_wait,
28053+ int interruptible, int *r_reg, uint32_t * r_offset)
28054+{
28055+ struct psb_use_reg_data use_data = {
28056+ .base = base,
28057+ .size = size,
28058+ .data_master = data_master
28059+ };
28060+ int ret;
28061+
28062+ struct drm_reg *reg;
28063+ struct psb_use_reg *use_reg;
28064+
28065+ ret = drm_regs_alloc(&dev_priv->use_manager,
28066+ (const void *)&use_data,
28067+ fence_class,
28068+ fence_type, interruptible, no_wait, &reg);
28069+ if (ret)
28070+ return ret;
28071+
28072+ use_reg = container_of(reg, struct psb_use_reg, reg);
28073+ ret = psb_use_reg_set(use_reg, &use_data);
28074+
28075+ if (ret)
28076+ return ret;
28077+
28078+ *r_reg = use_reg->reg_seq;
28079+ *r_offset = base - use_reg->base;
28080+
28081+ return 0;
28082+};
28083+
28084+static void psb_use_reg_destroy(struct drm_reg *reg)
28085+{
28086+ struct psb_use_reg *use_reg =
28087+ container_of(reg, struct psb_use_reg, reg);
28088+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
28089+
28090+ PSB_WSGX32(PSB_ALPL(0, _PSB_CUC_BASE_ADDR),
28091+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
28092+
28093+ drm_free(use_reg, sizeof(*use_reg), DRM_MEM_DRIVER);
28094+}
28095+
28096+int psb_init_use_base(struct drm_psb_private *dev_priv,
28097+ unsigned int reg_start, unsigned int reg_num)
28098+{
28099+ struct psb_use_reg *use_reg;
28100+ int i;
28101+ int ret = 0;
28102+
28103+ mutex_lock(&dev_priv->cmdbuf_mutex);
28104+
28105+ drm_regs_init(&dev_priv->use_manager,
28106+ &psb_use_reg_reusable, &psb_use_reg_destroy);
28107+
28108+ for (i = reg_start; i < reg_start + reg_num; ++i) {
28109+ use_reg = drm_calloc(1, sizeof(*use_reg), DRM_MEM_DRIVER);
28110+ if (!use_reg) {
28111+ ret = -ENOMEM;
28112+ goto out;
28113+ }
28114+
28115+ use_reg->dev_priv = dev_priv;
28116+ use_reg->reg_seq = i;
28117+ use_reg->base = 0;
28118+ use_reg->data_master = _PSB_CUC_DM_PIXEL;
28119+
28120+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
28121+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
28122+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
28123+
28124+ drm_regs_add(&dev_priv->use_manager, &use_reg->reg);
28125+ }
28126+ out:
28127+ mutex_unlock(&dev_priv->cmdbuf_mutex);
28128+
28129+ return ret;
28130+
28131+}
28132+
28133+void psb_takedown_use_base(struct drm_psb_private *dev_priv)
28134+{
28135+ mutex_lock(&dev_priv->cmdbuf_mutex);
28136+ drm_regs_free(&dev_priv->use_manager);
28137+ mutex_unlock(&dev_priv->cmdbuf_mutex);
28138+}
28139Index: linux-2.6.27/drivers/gpu/drm/psb/psb_reset.c
28140===================================================================
28141--- /dev/null 1970-01-01 00:00:00.000000000 +0000
28142+++ linux-2.6.27/drivers/gpu/drm/psb/psb_reset.c 2009-01-14 11:58:01.000000000 +0000
28143@@ -0,0 +1,374 @@
28144+/**************************************************************************
28145+ * Copyright (c) 2007, Intel Corporation.
28146+ * All Rights Reserved.
28147+ *
28148+ * This program is free software; you can redistribute it and/or modify it
28149+ * under the terms and conditions of the GNU General Public License,
28150+ * version 2, as published by the Free Software Foundation.
28151+ *
28152+ * This program is distributed in the hope it will be useful, but WITHOUT
28153+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28154+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28155+ * more details.
28156+ *
28157+ * You should have received a copy of the GNU General Public License along with
28158+ * this program; if not, write to the Free Software Foundation, Inc.,
28159+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28160+ *
28161+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28162+ * develop this driver.
28163+ *
28164+ **************************************************************************/
28165+/*
28166+ * Authors:
28167+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
28168+ */
28169+
28170+#include "drmP.h"
28171+#include "psb_drv.h"
28172+#include "psb_reg.h"
28173+#include "psb_scene.h"
28174+#include "psb_msvdx.h"
28175+
28176+#define PSB_2D_TIMEOUT_MSEC 100
28177+
28178+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
28179+{
28180+ uint32_t val;
28181+
28182+ val = _PSB_CS_RESET_BIF_RESET |
28183+ _PSB_CS_RESET_DPM_RESET |
28184+ _PSB_CS_RESET_TA_RESET |
28185+ _PSB_CS_RESET_USE_RESET |
28186+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
28187+
28188+ if (reset_2d)
28189+ val |= _PSB_CS_RESET_TWOD_RESET;
28190+
28191+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
28192+ (void)PSB_RSGX32(PSB_CR_SOFT_RESET);
28193+
28194+ msleep(1);
28195+
28196+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
28197+ wmb();
28198+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
28199+ PSB_CR_BIF_CTRL);
28200+ wmb();
28201+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
28202+
28203+ msleep(1);
28204+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
28205+ PSB_CR_BIF_CTRL);
28206+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
28207+}
28208+
28209+void psb_print_pagefault(struct drm_psb_private *dev_priv)
28210+{
28211+ uint32_t val;
28212+ uint32_t addr;
28213+
28214+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
28215+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
28216+
28217+ if (val) {
28218+ if (val & _PSB_CBI_STAT_PF_N_RW)
28219+ DRM_ERROR("Poulsbo MMU page fault:\n");
28220+ else
28221+ DRM_ERROR("Poulsbo MMU read / write "
28222+ "protection fault:\n");
28223+
28224+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
28225+ DRM_ERROR("\tCache requestor.\n");
28226+ if (val & _PSB_CBI_STAT_FAULT_TA)
28227+ DRM_ERROR("\tTA requestor.\n");
28228+ if (val & _PSB_CBI_STAT_FAULT_VDM)
28229+ DRM_ERROR("\tVDM requestor.\n");
28230+ if (val & _PSB_CBI_STAT_FAULT_2D)
28231+ DRM_ERROR("\t2D requestor.\n");
28232+ if (val & _PSB_CBI_STAT_FAULT_PBE)
28233+ DRM_ERROR("\tPBE requestor.\n");
28234+ if (val & _PSB_CBI_STAT_FAULT_TSP)
28235+ DRM_ERROR("\tTSP requestor.\n");
28236+ if (val & _PSB_CBI_STAT_FAULT_ISP)
28237+ DRM_ERROR("\tISP requestor.\n");
28238+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
28239+ DRM_ERROR("\tUSSEPDS requestor.\n");
28240+ if (val & _PSB_CBI_STAT_FAULT_HOST)
28241+ DRM_ERROR("\tHost requestor.\n");
28242+
28243+ DRM_ERROR("\tMMU failing address is 0x%08x.\n", (unsigned)addr);
28244+ }
28245+}
28246+
28247+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
28248+{
28249+ struct timer_list *wt = &dev_priv->watchdog_timer;
28250+ unsigned long irq_flags;
28251+
28252+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28253+ if (dev_priv->timer_available && !timer_pending(wt)) {
28254+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
28255+ add_timer(wt);
28256+ }
28257+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28258+}
28259+
28260+#if 0
28261+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
28262+ unsigned int engine, int *lockup, int *idle)
28263+{
28264+ uint32_t received_seq;
28265+
28266+ received_seq = dev_priv->comm[engine << 4];
28267+ spin_lock(&dev_priv->sequence_lock);
28268+ *idle = (received_seq == dev_priv->sequence[engine]);
28269+ spin_unlock(&dev_priv->sequence_lock);
28270+
28271+ if (*idle) {
28272+ dev_priv->idle[engine] = 1;
28273+ *lockup = 0;
28274+ return;
28275+ }
28276+
28277+ if (dev_priv->idle[engine]) {
28278+ dev_priv->idle[engine] = 0;
28279+ dev_priv->last_sequence[engine] = received_seq;
28280+ *lockup = 0;
28281+ return;
28282+ }
28283+
28284+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
28285+}
28286+
28287+#endif
28288+static void psb_watchdog_func(unsigned long data)
28289+{
28290+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
28291+ int lockup;
28292+ int msvdx_lockup;
28293+ int msvdx_idle;
28294+ int lockup_2d;
28295+ int idle_2d;
28296+ int idle;
28297+ unsigned long irq_flags;
28298+
28299+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
28300+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
28301+#if 0
28302+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
28303+#else
28304+ lockup_2d = 0;
28305+ idle_2d = 1;
28306+#endif
28307+ if (lockup || msvdx_lockup || lockup_2d) {
28308+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28309+ dev_priv->timer_available = 0;
28310+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28311+ if (lockup) {
28312+ psb_print_pagefault(dev_priv);
28313+ schedule_work(&dev_priv->watchdog_wq);
28314+ }
28315+ if (msvdx_lockup)
28316+ schedule_work(&dev_priv->msvdx_watchdog_wq);
28317+ }
28318+ if (!idle || !msvdx_idle || !idle_2d)
28319+ psb_schedule_watchdog(dev_priv);
28320+}
28321+
28322+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
28323+{
28324+ struct drm_psb_private *dev_priv = dev->dev_private;
28325+ struct psb_msvdx_cmd_queue *msvdx_cmd;
28326+ struct list_head *list, *next;
28327+ /*Flush the msvdx cmd queue and signal all fences in the queue */
28328+ list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
28329+ msvdx_cmd = list_entry(list, struct psb_msvdx_cmd_queue, head);
28330+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
28331+ msvdx_cmd->sequence);
28332+ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
28333+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
28334+ dev_priv->msvdx_current_sequence,
28335+ DRM_FENCE_TYPE_EXE, DRM_CMD_HANG);
28336+ list_del(list);
28337+ kfree(msvdx_cmd->cmd);
28338+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
28339+ DRM_MEM_DRIVER);
28340+ }
28341+}
28342+
28343+static void psb_msvdx_reset_wq(struct work_struct *work)
28344+{
28345+ struct drm_psb_private *dev_priv =
28346+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
28347+
28348+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
28349+ unsigned long irq_flags;
28350+
28351+ mutex_lock(&dev_priv->msvdx_mutex);
28352+ dev_priv->msvdx_needs_reset = 1;
28353+ dev_priv->msvdx_current_sequence++;
28354+ PSB_DEBUG_GENERAL
28355+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
28356+ dev_priv->msvdx_current_sequence);
28357+
28358+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
28359+ dev_priv->msvdx_current_sequence, DRM_FENCE_TYPE_EXE,
28360+ DRM_CMD_HANG);
28361+
28362+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28363+ dev_priv->timer_available = 1;
28364+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28365+
28366+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
28367+ psb_msvdx_flush_cmd_queue(scheduler->dev);
28368+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
28369+
28370+ psb_schedule_watchdog(dev_priv);
28371+ mutex_unlock(&dev_priv->msvdx_mutex);
28372+}
28373+
28374+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
28375+{
28376+ struct psb_xhw_buf buf;
28377+ uint32_t bif_ctrl;
28378+
28379+ INIT_LIST_HEAD(&buf.head);
28380+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
28381+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
28382+ PSB_WSGX32(bif_ctrl |
28383+ _PSB_CB_CTRL_CLEAR_FAULT |
28384+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
28385+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
28386+ msleep(1);
28387+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
28388+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
28389+ return psb_xhw_reset_dpm(dev_priv, &buf);
28390+}
28391+
28392+/*
28393+ * Block command submission and reset hardware and schedulers.
28394+ */
28395+
28396+static void psb_reset_wq(struct work_struct *work)
28397+{
28398+ struct drm_psb_private *dev_priv =
28399+ container_of(work, struct drm_psb_private, watchdog_wq);
28400+ int lockup_2d;
28401+ int idle_2d;
28402+ unsigned long irq_flags;
28403+ int ret;
28404+ int reset_count = 0;
28405+ struct psb_xhw_buf buf;
28406+ uint32_t xhw_lockup;
28407+
28408+ /*
28409+ * Block command submission.
28410+ */
28411+
28412+ mutex_lock(&dev_priv->reset_mutex);
28413+
28414+ INIT_LIST_HEAD(&buf.head);
28415+ if (psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup) == 0) {
28416+ if (xhw_lockup == 0 && psb_extend_raster_timeout(dev_priv) == 0) {
28417+ /*
28418+ * no lockup, just re-schedule
28419+ */
28420+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28421+ dev_priv->timer_available = 1;
28422+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
28423+ irq_flags);
28424+ psb_schedule_watchdog(dev_priv);
28425+ mutex_unlock(&dev_priv->reset_mutex);
28426+ return;
28427+ }
28428+ }
28429+#if 0
28430+ msleep(PSB_2D_TIMEOUT_MSEC);
28431+
28432+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
28433+
28434+ if (lockup_2d) {
28435+ uint32_t seq_2d;
28436+ spin_lock(&dev_priv->sequence_lock);
28437+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
28438+ spin_unlock(&dev_priv->sequence_lock);
28439+ psb_fence_error(dev_priv->scheduler.dev,
28440+ PSB_ENGINE_2D,
28441+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
28442+ DRM_INFO("Resetting 2D engine.\n");
28443+ }
28444+
28445+ psb_reset(dev_priv, lockup_2d);
28446+#else
28447+ (void)lockup_2d;
28448+ (void)idle_2d;
28449+ psb_reset(dev_priv, 0);
28450+#endif
28451+ (void)psb_xhw_mmu_reset(dev_priv);
28452+ DRM_INFO("Resetting scheduler.\n");
28453+ psb_scheduler_pause(dev_priv);
28454+ psb_scheduler_reset(dev_priv, -EBUSY);
28455+ psb_scheduler_ta_mem_check(dev_priv);
28456+
28457+ while (dev_priv->ta_mem &&
28458+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
28459+
28460+ /*
28461+ * TA memory is currently fenced so offsets
28462+ * are valid. Reload offsets into the dpm now.
28463+ */
28464+
28465+ struct psb_xhw_buf buf;
28466+ INIT_LIST_HEAD(&buf.head);
28467+
28468+ msleep(100);
28469+ DRM_INFO("Trying to reload TA memory.\n");
28470+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
28471+ PSB_TA_MEM_FLAG_TA |
28472+ PSB_TA_MEM_FLAG_RASTER |
28473+ PSB_TA_MEM_FLAG_HOSTA |
28474+ PSB_TA_MEM_FLAG_HOSTD |
28475+ PSB_TA_MEM_FLAG_INIT,
28476+ dev_priv->ta_mem->ta_memory->offset,
28477+ dev_priv->ta_mem->hw_data->offset,
28478+ dev_priv->ta_mem->hw_cookie);
28479+ if (!ret)
28480+ break;
28481+
28482+ psb_reset(dev_priv, 0);
28483+ (void)psb_xhw_mmu_reset(dev_priv);
28484+ }
28485+
28486+ psb_scheduler_restart(dev_priv);
28487+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28488+ dev_priv->timer_available = 1;
28489+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28490+ mutex_unlock(&dev_priv->reset_mutex);
28491+}
28492+
28493+void psb_watchdog_init(struct drm_psb_private *dev_priv)
28494+{
28495+ struct timer_list *wt = &dev_priv->watchdog_timer;
28496+ unsigned long irq_flags;
28497+
28498+ dev_priv->watchdog_lock = SPIN_LOCK_UNLOCKED;
28499+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28500+ init_timer(wt);
28501+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
28502+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
28503+ wt->data = (unsigned long)dev_priv;
28504+ wt->function = &psb_watchdog_func;
28505+ dev_priv->timer_available = 1;
28506+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28507+}
28508+
28509+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
28510+{
28511+ unsigned long irq_flags;
28512+
28513+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
28514+ dev_priv->timer_available = 0;
28515+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
28516+ (void)del_timer_sync(&dev_priv->watchdog_timer);
28517+}
28518Index: linux-2.6.27/drivers/gpu/drm/psb/psb_scene.c
28519===================================================================
28520--- /dev/null 1970-01-01 00:00:00.000000000 +0000
28521+++ linux-2.6.27/drivers/gpu/drm/psb/psb_scene.c 2009-01-14 11:58:01.000000000 +0000
28522@@ -0,0 +1,531 @@
28523+/**************************************************************************
28524+ * Copyright (c) 2007, Intel Corporation.
28525+ * All Rights Reserved.
28526+ *
28527+ * This program is free software; you can redistribute it and/or modify it
28528+ * under the terms and conditions of the GNU General Public License,
28529+ * version 2, as published by the Free Software Foundation.
28530+ *
28531+ * This program is distributed in the hope it will be useful, but WITHOUT
28532+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28533+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
28534+ * more details.
28535+ *
28536+ * You should have received a copy of the GNU General Public License along with
28537+ * this program; if not, write to the Free Software Foundation, Inc.,
28538+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28539+ *
28540+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
28541+ * develop this driver.
28542+ *
28543+ **************************************************************************/
28544+/*
28545+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
28546+ */
28547+
28548+#include "drmP.h"
28549+#include "psb_drv.h"
28550+#include "psb_scene.h"
28551+
28552+void psb_clear_scene_atomic(struct psb_scene *scene)
28553+{
28554+ int i;
28555+ struct page *page;
28556+ void *v;
28557+
28558+ for (i = 0; i < scene->clear_num_pages; ++i) {
28559+ page = drm_ttm_get_page(scene->hw_data->ttm,
28560+ scene->clear_p_start + i);
28561+ if (in_irq())
28562+ v = kmap_atomic(page, KM_IRQ0);
28563+ else
28564+ v = kmap_atomic(page, KM_USER0);
28565+
28566+ memset(v, 0, PAGE_SIZE);
28567+
28568+ if (in_irq())
28569+ kunmap_atomic(v, KM_IRQ0);
28570+ else
28571+ kunmap_atomic(v, KM_USER0);
28572+ }
28573+}
28574+
28575+int psb_clear_scene(struct psb_scene *scene)
28576+{
28577+ struct drm_bo_kmap_obj bmo;
28578+ int is_iomem;
28579+ void *addr;
28580+
28581+ int ret = drm_bo_kmap(scene->hw_data, scene->clear_p_start,
28582+ scene->clear_num_pages, &bmo);
28583+
28584+ PSB_DEBUG_RENDER("Scene clear\n");
28585+ if (ret)
28586+ return ret;
28587+
28588+ addr = drm_bmo_virtual(&bmo, &is_iomem);
28589+ BUG_ON(is_iomem);
28590+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
28591+ drm_bo_kunmap(&bmo);
28592+
28593+ return 0;
28594+}
28595+
28596+static void psb_destroy_scene_devlocked(struct psb_scene *scene)
28597+{
28598+ if (!scene)
28599+ return;
28600+
28601+ PSB_DEBUG_RENDER("Scene destroy\n");
28602+ drm_bo_usage_deref_locked(&scene->hw_data);
28603+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
28604+}
28605+
28606+void psb_scene_unref_devlocked(struct psb_scene **scene)
28607+{
28608+ struct psb_scene *tmp_scene = *scene;
28609+
28610+ PSB_DEBUG_RENDER("Scene unref\n");
28611+ *scene = NULL;
28612+ if (atomic_dec_and_test(&tmp_scene->ref_count)) {
28613+ psb_scheduler_remove_scene_refs(tmp_scene);
28614+ psb_destroy_scene_devlocked(tmp_scene);
28615+ }
28616+}
28617+
28618+struct psb_scene *psb_scene_ref(struct psb_scene *src)
28619+{
28620+ PSB_DEBUG_RENDER("Scene ref\n");
28621+ atomic_inc(&src->ref_count);
28622+ return src;
28623+}
28624+
28625+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
28626+ uint32_t w, uint32_t h)
28627+{
28628+ struct drm_psb_private *dev_priv =
28629+ (struct drm_psb_private *)dev->dev_private;
28630+ int ret = -EINVAL;
28631+ struct psb_scene *scene;
28632+ uint32_t bo_size;
28633+ struct psb_xhw_buf buf;
28634+
28635+ PSB_DEBUG_RENDER("Alloc scene w %u h %u\n", w, h);
28636+
28637+ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
28638+
28639+ if (!scene) {
28640+ DRM_ERROR("Out of memory allocating scene object.\n");
28641+ return NULL;
28642+ }
28643+
28644+ scene->dev = dev;
28645+ scene->w = w;
28646+ scene->h = h;
28647+ scene->hw_scene = NULL;
28648+ atomic_set(&scene->ref_count, 1);
28649+
28650+ INIT_LIST_HEAD(&buf.head);
28651+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
28652+ scene->hw_cookie, &bo_size,
28653+ &scene->clear_p_start,
28654+ &scene->clear_num_pages);
28655+ if (ret)
28656+ goto out_err;
28657+
28658+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
28659+ DRM_PSB_FLAG_MEM_MMU |
28660+ DRM_BO_FLAG_READ |
28661+ DRM_BO_FLAG_CACHED |
28662+ PSB_BO_FLAG_SCENE |
28663+ DRM_BO_FLAG_WRITE,
28664+ DRM_BO_HINT_DONT_FENCE,
28665+ 0, 0, &scene->hw_data);
28666+ if (ret)
28667+ goto out_err;
28668+
28669+ return scene;
28670+ out_err:
28671+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
28672+ return NULL;
28673+}
28674+
28675+int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
28676+ uint64_t mask,
28677+ uint32_t hint,
28678+ uint32_t w,
28679+ uint32_t h,
28680+ int final_pass, struct psb_scene **scene_p)
28681+{
28682+ struct drm_device *dev = pool->dev;
28683+ struct drm_psb_private *dev_priv =
28684+ (struct drm_psb_private *)dev->dev_private;
28685+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
28686+ int ret;
28687+ unsigned long irq_flags;
28688+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
28689+ uint32_t bin_pt_offset;
28690+ uint32_t bin_param_offset;
28691+
28692+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", pool->cur_scene);
28693+
28694+ if (unlikely(!dev_priv->ta_mem)) {
28695+ dev_priv->ta_mem =
28696+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
28697+ if (!dev_priv->ta_mem)
28698+ return -ENOMEM;
28699+
28700+ bin_pt_offset = ~0;
28701+ bin_param_offset = ~0;
28702+ } else {
28703+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
28704+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
28705+ }
28706+
28707+ pool->w = w;
28708+ pool->h = h;
28709+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
28710+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28711+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
28712+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28713+ DRM_ERROR("Trying to resize a dirty scene.\n");
28714+ return -EINVAL;
28715+ }
28716+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28717+ mutex_lock(&dev->struct_mutex);
28718+ psb_scene_unref_devlocked(&pool->scenes[pool->cur_scene]);
28719+ mutex_unlock(&dev->struct_mutex);
28720+ scene = NULL;
28721+ }
28722+
28723+ if (!scene) {
28724+ pool->scenes[pool->cur_scene] = scene =
28725+ psb_alloc_scene(pool->dev, pool->w, pool->h);
28726+
28727+ if (!scene)
28728+ return -ENOMEM;
28729+
28730+ scene->flags = PSB_SCENE_FLAG_CLEARED;
28731+ }
28732+
28733+ /*
28734+ * FIXME: We need atomic bit manipulation here for the
28735+ * scheduler. For now use the spinlock.
28736+ */
28737+
28738+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28739+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
28740+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28741+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
28742+ mutex_lock(&scene->hw_data->mutex);
28743+ ret = drm_bo_wait(scene->hw_data, 0, 0, 0);
28744+ mutex_unlock(&scene->hw_data->mutex);
28745+ if (ret)
28746+ return ret;
28747+
28748+ ret = psb_clear_scene(scene);
28749+
28750+ if (ret)
28751+ return ret;
28752+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28753+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
28754+ }
28755+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28756+
28757+ ret = drm_bo_do_validate(scene->hw_data, flags, mask, hint,
28758+ PSB_ENGINE_TA, 0, NULL);
28759+ if (ret)
28760+ return ret;
28761+ ret = drm_bo_do_validate(dev_priv->ta_mem->hw_data, 0, 0, 0,
28762+ PSB_ENGINE_TA, 0, NULL);
28763+ if (ret)
28764+ return ret;
28765+ ret = drm_bo_do_validate(dev_priv->ta_mem->ta_memory, 0, 0, 0,
28766+ PSB_ENGINE_TA, 0, NULL);
28767+ if (ret)
28768+ return ret;
28769+
28770+ if (unlikely(bin_param_offset !=
28771+ dev_priv->ta_mem->ta_memory->offset ||
28772+ bin_pt_offset !=
28773+ dev_priv->ta_mem->hw_data->offset ||
28774+ dev_priv->force_ta_mem_load)) {
28775+
28776+ struct psb_xhw_buf buf;
28777+
28778+ INIT_LIST_HEAD(&buf.head);
28779+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
28780+ PSB_TA_MEM_FLAG_TA |
28781+ PSB_TA_MEM_FLAG_RASTER |
28782+ PSB_TA_MEM_FLAG_HOSTA |
28783+ PSB_TA_MEM_FLAG_HOSTD |
28784+ PSB_TA_MEM_FLAG_INIT,
28785+ dev_priv->ta_mem->ta_memory->offset,
28786+ dev_priv->ta_mem->hw_data->offset,
28787+ dev_priv->ta_mem->hw_cookie);
28788+ if (ret)
28789+ return ret;
28790+
28791+ dev_priv->force_ta_mem_load = 0;
28792+ }
28793+
28794+ if (final_pass) {
28795+
28796+ /*
28797+ * Clear the scene on next use. Advance the scene counter.
28798+ */
28799+
28800+ spin_lock_irqsave(&scheduler->lock, irq_flags);
28801+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
28802+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
28803+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
28804+ }
28805+
28806+ *scene_p = psb_scene_ref(scene);
28807+ return 0;
28808+}
28809+
28810+static void psb_scene_pool_destroy_devlocked(struct psb_scene_pool *pool)
28811+{
28812+ int i;
28813+
28814+ if (!pool)
28815+ return;
28816+
28817+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
28818+ for (i = 0; i < pool->num_scenes; ++i) {
28819+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
28820+ (unsigned long)pool->scenes[i]);
28821+ if (pool->scenes[i])
28822+ psb_scene_unref_devlocked(&pool->scenes[i]);
28823+ }
28824+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
28825+}
28826+
28827+void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool)
28828+{
28829+ struct psb_scene_pool *tmp_pool = *pool;
28830+ struct drm_device *dev = tmp_pool->dev;
28831+
28832+ PSB_DEBUG_RENDER("Scene pool unref\n");
28833+ (void)dev;
28834+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
28835+ *pool = NULL;
28836+ if (--tmp_pool->ref_count == 0)
28837+ psb_scene_pool_destroy_devlocked(tmp_pool);
28838+}
28839+
28840+struct psb_scene_pool *psb_scene_pool_ref_devlocked(struct psb_scene_pool *src)
28841+{
28842+ ++src->ref_count;
28843+ return src;
28844+}
28845+
28846+/*
28847+ * Callback for user object manager.
28848+ */
28849+
28850+static void psb_scene_pool_destroy(struct drm_file *priv,
28851+ struct drm_user_object *base)
28852+{
28853+ struct psb_scene_pool *pool =
28854+ drm_user_object_entry(base, struct psb_scene_pool, user);
28855+
28856+ psb_scene_pool_unref_devlocked(&pool);
28857+}
28858+
28859+struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file *priv,
28860+ uint32_t handle,
28861+ int check_owner)
28862+{
28863+ struct drm_user_object *uo;
28864+ struct psb_scene_pool *pool;
28865+
28866+ uo = drm_lookup_user_object(priv, handle);
28867+ if (!uo || (uo->type != PSB_USER_OBJECT_SCENE_POOL)) {
28868+ DRM_ERROR("Could not find scene pool object 0x%08x\n", handle);
28869+ return NULL;
28870+ }
28871+
28872+ if (check_owner && priv != uo->owner) {
28873+ if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
28874+ return NULL;
28875+ }
28876+
28877+ pool = drm_user_object_entry(uo, struct psb_scene_pool, user);
28878+ return psb_scene_pool_ref_devlocked(pool);
28879+}
28880+
28881+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
28882+ int shareable,
28883+ uint32_t num_scenes,
28884+ uint32_t w, uint32_t h)
28885+{
28886+ struct drm_device *dev = priv->minor->dev;
28887+ struct psb_scene_pool *pool;
28888+ int ret;
28889+
28890+ PSB_DEBUG_RENDER("Scene pool alloc\n");
28891+ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
28892+ if (!pool) {
28893+ DRM_ERROR("Out of memory allocating scene pool object.\n");
28894+ return NULL;
28895+ }
28896+ pool->w = w;
28897+ pool->h = h;
28898+ pool->dev = dev;
28899+ pool->num_scenes = num_scenes;
28900+
28901+ mutex_lock(&dev->struct_mutex);
28902+ ret = drm_add_user_object(priv, &pool->user, shareable);
28903+ if (ret)
28904+ goto out_err;
28905+
28906+ pool->user.type = PSB_USER_OBJECT_SCENE_POOL;
28907+ pool->user.remove = &psb_scene_pool_destroy;
28908+ pool->ref_count = 2;
28909+ mutex_unlock(&dev->struct_mutex);
28910+ return pool;
28911+ out_err:
28912+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
28913+ return NULL;
28914+}
28915+
28916+/*
28917+ * Code to support multiple ta memory buffers.
28918+ */
28919+
28920+static void psb_destroy_ta_mem_devlocked(struct psb_ta_mem *ta_mem)
28921+{
28922+ if (!ta_mem)
28923+ return;
28924+
28925+ drm_bo_usage_deref_locked(&ta_mem->hw_data);
28926+ drm_bo_usage_deref_locked(&ta_mem->ta_memory);
28927+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
28928+}
28929+
28930+void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem)
28931+{
28932+ struct psb_ta_mem *tmp_ta_mem = *ta_mem;
28933+ struct drm_device *dev = tmp_ta_mem->dev;
28934+
28935+ (void)dev;
28936+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
28937+ *ta_mem = NULL;
28938+ if (--tmp_ta_mem->ref_count == 0)
28939+ psb_destroy_ta_mem_devlocked(tmp_ta_mem);
28940+}
28941+
28942+void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst, struct psb_ta_mem *src)
28943+{
28944+ struct drm_device *dev = src->dev;
28945+
28946+ (void)dev;
28947+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
28948+ *dst = src;
28949+ ++src->ref_count;
28950+}
28951+
28952+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
28953+{
28954+ struct drm_psb_private *dev_priv =
28955+ (struct drm_psb_private *)dev->dev_private;
28956+ int ret = -EINVAL;
28957+ struct psb_ta_mem *ta_mem;
28958+ uint32_t bo_size;
28959+ struct psb_xhw_buf buf;
28960+
28961+ INIT_LIST_HEAD(&buf.head);
28962+
28963+ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
28964+
28965+ if (!ta_mem) {
28966+ DRM_ERROR("Out of memory allocating parameter memory.\n");
28967+ return NULL;
28968+ }
28969+
28970+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
28971+ ta_mem->hw_cookie, &bo_size);
28972+ if (ret == -ENOMEM) {
28973+ DRM_ERROR("Parameter memory size is too small.\n");
28974+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
28975+ (unsigned int)(pages * (PAGE_SIZE / 1024)));
28976+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
28977+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
28978+ (unsigned int)(bo_size / 1024));
28979+ DRM_INFO("\"ta_mem_size\" parameter!\n");
28980+ }
28981+ if (ret)
28982+ goto out_err0;
28983+
28984+ bo_size = pages * PAGE_SIZE;
28985+ ta_mem->dev = dev;
28986+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
28987+ DRM_PSB_FLAG_MEM_MMU | DRM_BO_FLAG_READ |
28988+ DRM_BO_FLAG_WRITE |
28989+ PSB_BO_FLAG_SCENE,
28990+ DRM_BO_HINT_DONT_FENCE, 0, 0,
28991+ &ta_mem->hw_data);
28992+ if (ret)
28993+ goto out_err0;
28994+
28995+ ret =
28996+ drm_buffer_object_create(dev, pages << PAGE_SHIFT,
28997+ drm_bo_type_kernel,
28998+ DRM_PSB_FLAG_MEM_RASTGEOM |
28999+ DRM_BO_FLAG_READ |
29000+ DRM_BO_FLAG_WRITE |
29001+ PSB_BO_FLAG_SCENE,
29002+ DRM_BO_HINT_DONT_FENCE, 0,
29003+ 1024 * 1024 >> PAGE_SHIFT,
29004+ &ta_mem->ta_memory);
29005+ if (ret)
29006+ goto out_err1;
29007+
29008+ ta_mem->ref_count = 1;
29009+ return ta_mem;
29010+ out_err1:
29011+ drm_bo_usage_deref_unlocked(&ta_mem->hw_data);
29012+ out_err0:
29013+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
29014+ return NULL;
29015+}
29016+
29017+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
29018+ void *data, struct drm_file *file_priv)
29019+{
29020+ struct drm_psb_scene *scene = (struct drm_psb_scene *)data;
29021+ struct drm_user_object *uo;
29022+ struct drm_ref_object *ro;
29023+ int ret = 0;
29024+
29025+ mutex_lock(&dev->struct_mutex);
29026+ if (!scene->handle_valid)
29027+ goto out_unlock;
29028+
29029+ uo = drm_lookup_user_object(file_priv, scene->handle);
29030+ if (!uo) {
29031+ ret = -EINVAL;
29032+ goto out_unlock;
29033+ }
29034+ if (uo->type != PSB_USER_OBJECT_SCENE_POOL) {
29035+ DRM_ERROR("Not a scene pool object.\n");
29036+ ret = -EINVAL;
29037+ goto out_unlock;
29038+ }
29039+ if (uo->owner != file_priv) {
29040+ DRM_ERROR("Not owner of scene pool object.\n");
29041+ ret = -EPERM;
29042+ goto out_unlock;
29043+ }
29044+
29045+ scene->handle_valid = 0;
29046+ ro = drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE);
29047+ BUG_ON(!ro);
29048+ drm_remove_ref_object(file_priv, ro);
29049+
29050+ out_unlock:
29051+ mutex_unlock(&dev->struct_mutex);
29052+ return ret;
29053+}
29054Index: linux-2.6.27/drivers/gpu/drm/psb/psb_scene.h
29055===================================================================
29056--- /dev/null 1970-01-01 00:00:00.000000000 +0000
29057+++ linux-2.6.27/drivers/gpu/drm/psb/psb_scene.h 2009-01-14 11:58:01.000000000 +0000
29058@@ -0,0 +1,112 @@
29059+/**************************************************************************
29060+ * Copyright (c) 2007, Intel Corporation.
29061+ * All Rights Reserved.
29062+ *
29063+ * This program is free software; you can redistribute it and/or modify it
29064+ * under the terms and conditions of the GNU General Public License,
29065+ * version 2, as published by the Free Software Foundation.
29066+ *
29067+ * This program is distributed in the hope it will be useful, but WITHOUT
29068+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
29069+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
29070+ * more details.
29071+ *
29072+ * You should have received a copy of the GNU General Public License along with
29073+ * this program; if not, write to the Free Software Foundation, Inc.,
29074+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
29075+ *
29076+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
29077+ * develop this driver.
29078+ *
29079+ **************************************************************************/
29080+/*
29081+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
29082+ */
29083+
29084+#ifndef _PSB_SCENE_H_
29085+#define _PSB_SCENE_H_
29086+
29087+#define PSB_USER_OBJECT_SCENE_POOL drm_driver_type0
29088+#define PSB_USER_OBJECT_TA_MEM drm_driver_type1
29089+#define PSB_MAX_NUM_SCENES 8
29090+
29091+struct psb_hw_scene;
29092+struct psb_hw_ta_mem;
29093+
29094+struct psb_scene_pool {
29095+ struct drm_device *dev;
29096+ struct drm_user_object user;
29097+ uint32_t ref_count;
29098+ uint32_t w;
29099+ uint32_t h;
29100+ uint32_t cur_scene;
29101+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
29102+ uint32_t num_scenes;
29103+};
29104+
29105+struct psb_scene {
29106+ struct drm_device *dev;
29107+ atomic_t ref_count;
29108+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
29109+ uint32_t bo_size;
29110+ uint32_t w;
29111+ uint32_t h;
29112+ struct psb_ta_mem *ta_mem;
29113+ struct psb_hw_scene *hw_scene;
29114+ struct drm_buffer_object *hw_data;
29115+ uint32_t flags;
29116+ uint32_t clear_p_start;
29117+ uint32_t clear_num_pages;
29118+};
29119+
29120+struct psb_scene_entry {
29121+ struct list_head head;
29122+ struct psb_scene *scene;
29123+};
29124+
29125+struct psb_user_scene {
29126+ struct drm_device *dev;
29127+ struct drm_user_object user;
29128+};
29129+
29130+struct psb_ta_mem {
29131+ struct drm_device *dev;
29132+ struct drm_user_object user;
29133+ uint32_t ref_count;
29134+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
29135+ uint32_t bo_size;
29136+ struct drm_buffer_object *ta_memory;
29137+ struct drm_buffer_object *hw_data;
29138+ int is_deallocating;
29139+ int deallocating_scheduled;
29140+};
29141+
29142+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
29143+ int shareable,
29144+ uint32_t num_scenes,
29145+ uint32_t w, uint32_t h);
29146+extern void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool);
29147+extern struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file
29148+ *priv,
29149+ uint32_t handle,
29150+ int check_owner);
29151+extern int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
29152+ uint64_t mask, uint32_t hint, uint32_t w,
29153+ uint32_t h, int final_pass,
29154+ struct psb_scene **scene_p);
29155+extern void psb_scene_unref_devlocked(struct psb_scene **scene);
29156+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
29157+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
29158+ void *data, struct drm_file *file_priv);
29159+
29160+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
29161+{
29162+ return pool->user.hash.key;
29163+}
29164+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
29165+ uint32_t pages);
29166+extern void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst,
29167+ struct psb_ta_mem *src);
29168+extern void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem);
29169+
29170+#endif
29171Index: linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.c
29172===================================================================
29173--- /dev/null 1970-01-01 00:00:00.000000000 +0000
29174+++ linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.c 2009-01-14 11:58:01.000000000 +0000
29175@@ -0,0 +1,1445 @@
29176+/**************************************************************************
29177+ * Copyright (c) 2007, Intel Corporation.
29178+ * All Rights Reserved.
29179+ *
29180+ * This program is free software; you can redistribute it and/or modify it
29181+ * under the terms and conditions of the GNU General Public License,
29182+ * version 2, as published by the Free Software Foundation.
29183+ *
29184+ * This program is distributed in the hope it will be useful, but WITHOUT
29185+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
29186+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
29187+ * more details.
29188+ *
29189+ * You should have received a copy of the GNU General Public License along with
29190+ * this program; if not, write to the Free Software Foundation, Inc.,
29191+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
29192+ *
29193+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
29194+ * develop this driver.
29195+ *
29196+ **************************************************************************/
29197+/*
29198+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
29199+ */
29200+
29201+#include "drmP.h"
29202+#include "psb_drm.h"
29203+#include "psb_drv.h"
29204+#include "psb_reg.h"
29205+#include "psb_scene.h"
29206+
29207+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
29208+#define PSB_RASTER_TIMEOUT (DRM_HZ / 2)
29209+#define PSB_TA_TIMEOUT (DRM_HZ / 5)
29210+
29211+#undef PSB_SOFTWARE_WORKAHEAD
29212+
29213+#ifdef PSB_STABLE_SETTING
29214+
29215+/*
29216+ * Software blocks completely while the engines are working so there can be no
29217+ * overlap.
29218+ */
29219+
29220+#define PSB_WAIT_FOR_RASTER_COMPLETION
29221+#define PSB_WAIT_FOR_TA_COMPLETION
29222+
29223+#elif defined(PSB_PARANOID_SETTING)
29224+/*
29225+ * Software blocks "almost" while the engines are working so there can be no
29226+ * overlap.
29227+ */
29228+
29229+#define PSB_WAIT_FOR_RASTER_COMPLETION
29230+#define PSB_WAIT_FOR_TA_COMPLETION
29231+#define PSB_BE_PARANOID
29232+
29233+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
29234+/*
29235+ * Software leaps ahead while the rasterizer is running and prepares
29236+ * a new ta job that can be scheduled before the rasterizer has
29237+ * finished.
29238+ */
29239+
29240+#define PSB_WAIT_FOR_TA_COMPLETION
29241+
29242+#elif defined(PSB_SOFTWARE_WORKAHEAD)
29243+/*
29244+ * Don't sync, but allow software to work ahead. and queue a number of jobs.
29245+ * But block overlapping in the scheduler.
29246+ */
29247+
29248+#define PSB_BLOCK_OVERLAP
29249+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
29250+
29251+#endif
29252+
29253+/*
29254+ * Avoid pixelbe pagefaults on C0.
29255+ */
29256+#if 0
29257+#define PSB_BLOCK_OVERLAP
29258+#endif
29259+
29260+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
29261+ struct psb_scheduler *scheduler,
29262+ uint32_t reply_flag);
29263+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
29264+ struct psb_scheduler *scheduler,
29265+ uint32_t reply_flag);
29266+
29267+#ifdef FIX_TG_16
29268+
29269+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
29270+static int psb_2d_trylock(struct drm_psb_private *dev_priv);
29271+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
29272+
29273+#endif
29274+
29275+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
29276+ int *lockup, int *idle)
29277+{
29278+ unsigned long irq_flags;
29279+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29280+
29281+ *lockup = 0;
29282+ *idle = 1;
29283+
29284+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29285+
29286+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
29287+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
29288+ *lockup = 1;
29289+ }
29290+ if (!*lockup
29291+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
29292+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
29293+ *lockup = 1;
29294+ }
29295+ if (!*lockup)
29296+ *idle = scheduler->idle;
29297+
29298+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29299+}
29300+
29301+static inline void psb_set_idle(struct psb_scheduler *scheduler)
29302+{
29303+ scheduler->idle =
29304+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
29305+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
29306+ if (scheduler->idle)
29307+ wake_up(&scheduler->idle_queue);
29308+}
29309+
29310+/*
29311+ * Call with the scheduler spinlock held.
29312+ * Assigns a scene context to either the ta or the rasterizer,
29313+ * flushing out other scenes to memory if necessary.
29314+ */
29315+
29316+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
29317+ struct psb_scene *scene,
29318+ int engine, struct psb_task *task)
29319+{
29320+ uint32_t flags = 0;
29321+ struct psb_hw_scene *hw_scene;
29322+ struct drm_device *dev = scene->dev;
29323+ struct drm_psb_private *dev_priv =
29324+ (struct drm_psb_private *)dev->dev_private;
29325+
29326+ hw_scene = scene->hw_scene;
29327+ if (hw_scene && hw_scene->last_scene == scene) {
29328+
29329+ /*
29330+ * Reuse the last hw scene context and delete it from the
29331+ * free list.
29332+ */
29333+
29334+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
29335+ hw_scene->context_number);
29336+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
29337+
29338+ /*
29339+ * No hw context initialization to be done.
29340+ */
29341+
29342+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
29343+ }
29344+
29345+ list_del_init(&hw_scene->head);
29346+
29347+ } else {
29348+ struct list_head *list;
29349+ hw_scene = NULL;
29350+
29351+ /*
29352+ * Grab a new hw scene context.
29353+ */
29354+
29355+ list_for_each(list, &scheduler->hw_scenes) {
29356+ hw_scene = list_entry(list, struct psb_hw_scene, head);
29357+ break;
29358+ }
29359+ BUG_ON(!hw_scene);
29360+ PSB_DEBUG_RENDER("New hw scene %d.\n",
29361+ hw_scene->context_number);
29362+
29363+ list_del_init(list);
29364+ }
29365+ scene->hw_scene = hw_scene;
29366+ hw_scene->last_scene = scene;
29367+
29368+ flags |= PSB_SCENE_FLAG_SETUP;
29369+
29370+ /*
29371+ * Switch context and setup the engine.
29372+ */
29373+
29374+ return psb_xhw_scene_bind_fire(dev_priv,
29375+ &task->buf,
29376+ task->flags,
29377+ hw_scene->context_number,
29378+ scene->hw_cookie,
29379+ task->oom_cmds,
29380+ task->oom_cmd_size,
29381+ scene->hw_data->offset,
29382+ engine, flags | scene->flags);
29383+}
29384+
29385+static inline void psb_report_fence(struct psb_scheduler *scheduler,
29386+ uint32_t class,
29387+ uint32_t sequence,
29388+ uint32_t type, int call_handler)
29389+{
29390+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
29391+
29392+ seq->sequence = sequence;
29393+ seq->reported = 0;
29394+ if (call_handler)
29395+ psb_fence_handler(scheduler->dev, class);
29396+}
29397+
29398+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
29399+ struct psb_scheduler *scheduler);
29400+
29401+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
29402+ struct psb_scheduler *scheduler)
29403+{
29404+ struct psb_task *task = NULL;
29405+ struct list_head *list, *next;
29406+ int pushed_raster_task = 0;
29407+
29408+ PSB_DEBUG_RENDER("schedule ta\n");
29409+
29410+ if (scheduler->idle_count != 0)
29411+ return;
29412+
29413+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
29414+ return;
29415+
29416+ if (scheduler->ta_state)
29417+ return;
29418+
29419+ /*
29420+ * Skip the ta stage for rasterization-only
29421+ * tasks. They arrive here to make sure we're rasterizing
29422+ * tasks in the correct order.
29423+ */
29424+
29425+ list_for_each_safe(list, next, &scheduler->ta_queue) {
29426+ task = list_entry(list, struct psb_task, head);
29427+ if (task->task_type != psb_raster_task)
29428+ break;
29429+
29430+ list_del_init(list);
29431+ list_add_tail(list, &scheduler->raster_queue);
29432+ psb_report_fence(scheduler, task->engine, task->sequence,
29433+ _PSB_FENCE_TA_DONE_SHIFT, 1);
29434+ task = NULL;
29435+ pushed_raster_task = 1;
29436+ }
29437+
29438+ if (pushed_raster_task)
29439+ psb_schedule_raster(dev_priv, scheduler);
29440+
29441+ if (!task)
29442+ return;
29443+
29444+ /*
29445+ * Still waiting for a vistest?
29446+ */
29447+
29448+ if (scheduler->feedback_task == task)
29449+ return;
29450+
29451+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
29452+
29453+ /*
29454+ * Block ta from trying to use both hardware contexts
29455+ * without the rasterizer starting to render from one of them.
29456+ */
29457+
29458+ if (!list_empty(&scheduler->raster_queue)) {
29459+ return;
29460+ }
29461+#endif
29462+
29463+#ifdef PSB_BLOCK_OVERLAP
29464+ /*
29465+ * Make sure rasterizer isn't doing anything.
29466+ */
29467+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
29468+ return;
29469+#endif
29470+ if (list_empty(&scheduler->hw_scenes))
29471+ return;
29472+
29473+#ifdef FIX_TG_16
29474+ if (psb_check_2d_idle(dev_priv))
29475+ return;
29476+#endif
29477+
29478+ list_del_init(&task->head);
29479+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
29480+ scheduler->ta_state = 1;
29481+
29482+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
29483+ scheduler->idle = 0;
29484+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
29485+
29486+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
29487+ 0x00000000 : PSB_RF_FIRE_TA;
29488+
29489+ (void)psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
29490+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA, task);
29491+ psb_schedule_watchdog(dev_priv);
29492+}
29493+
29494+static int psb_fire_raster(struct psb_scheduler *scheduler,
29495+ struct psb_task *task)
29496+{
29497+ struct drm_device *dev = scheduler->dev;
29498+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
29499+ dev->dev_private;
29500+
29501+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
29502+
29503+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
29504+}
29505+
29506+/*
29507+ * Take the first rasterization task from the hp raster queue or from the
29508+ * raster queue and fire the rasterizer.
29509+ */
29510+
29511+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
29512+ struct psb_scheduler *scheduler)
29513+{
29514+ struct psb_task *task;
29515+ struct list_head *list;
29516+
29517+ if (scheduler->idle_count != 0)
29518+ return;
29519+
29520+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
29521+ PSB_DEBUG_RENDER("Raster busy.\n");
29522+ return;
29523+ }
29524+#ifdef PSB_BLOCK_OVERLAP
29525+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
29526+ PSB_DEBUG_RENDER("TA busy.\n");
29527+ return;
29528+ }
29529+#endif
29530+
29531+ if (!list_empty(&scheduler->hp_raster_queue))
29532+ list = scheduler->hp_raster_queue.next;
29533+ else if (!list_empty(&scheduler->raster_queue))
29534+ list = scheduler->raster_queue.next;
29535+ else {
29536+ PSB_DEBUG_RENDER("Nothing in list\n");
29537+ return;
29538+ }
29539+
29540+ task = list_entry(list, struct psb_task, head);
29541+
29542+ /*
29543+ * Sometimes changing ZLS format requires an ISP reset.
29544+ * Doesn't seem to consume too much time.
29545+ */
29546+
29547+ if (task->scene)
29548+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
29549+
29550+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
29551+
29552+ list_del_init(list);
29553+ scheduler->idle = 0;
29554+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
29555+ scheduler->total_raster_jiffies = 0;
29556+
29557+ if (task->scene)
29558+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
29559+
29560+ (void)psb_reg_submit(dev_priv, task->raster_cmds,
29561+ task->raster_cmd_size);
29562+
29563+ if (task->scene) {
29564+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
29565+ 0x00000000 : PSB_RF_FIRE_RASTER;
29566+ psb_set_scene_fire(scheduler,
29567+ task->scene, PSB_SCENE_ENGINE_RASTER, task);
29568+ } else {
29569+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
29570+ psb_fire_raster(scheduler, task);
29571+ }
29572+ psb_schedule_watchdog(dev_priv);
29573+}
29574+
29575+int psb_extend_raster_timeout(struct drm_psb_private *dev_priv)
29576+{
29577+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29578+ unsigned long irq_flags;
29579+ int ret;
29580+
29581+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29582+ scheduler->total_raster_jiffies +=
29583+ jiffies - scheduler->raster_end_jiffies + PSB_RASTER_TIMEOUT;
29584+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
29585+ ret = (scheduler->total_raster_jiffies > PSB_ALLOWED_RASTER_RUNTIME) ?
29586+ -EBUSY : 0;
29587+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29588+ return ret;
29589+}
29590+
29591+/*
29592+ * TA done handler.
29593+ */
29594+
29595+static void psb_ta_done(struct drm_psb_private *dev_priv,
29596+ struct psb_scheduler *scheduler)
29597+{
29598+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29599+ struct psb_scene *scene = task->scene;
29600+
29601+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
29602+
29603+ switch (task->ta_complete_action) {
29604+ case PSB_RASTER_BLOCK:
29605+ scheduler->ta_state = 1;
29606+ scene->flags |=
29607+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29608+ list_add_tail(&task->head, &scheduler->raster_queue);
29609+ break;
29610+ case PSB_RASTER:
29611+ scene->flags |=
29612+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29613+ list_add_tail(&task->head, &scheduler->raster_queue);
29614+ break;
29615+ case PSB_RETURN:
29616+ scheduler->ta_state = 0;
29617+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
29618+ list_add_tail(&scene->hw_scene->head, &scheduler->hw_scenes);
29619+
29620+ break;
29621+ }
29622+
29623+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
29624+
29625+#ifdef FIX_TG_16
29626+ psb_2d_atomic_unlock(dev_priv);
29627+#endif
29628+
29629+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
29630+ psb_report_fence(scheduler, task->engine, task->sequence,
29631+ _PSB_FENCE_TA_DONE_SHIFT, 1);
29632+
29633+ psb_schedule_raster(dev_priv, scheduler);
29634+ psb_schedule_ta(dev_priv, scheduler);
29635+ psb_set_idle(scheduler);
29636+
29637+ if (task->ta_complete_action != PSB_RETURN)
29638+ return;
29639+
29640+ list_add_tail(&task->head, &scheduler->task_done_queue);
29641+ schedule_delayed_work(&scheduler->wq, 1);
29642+}
29643+
29644+/*
29645+ * Rasterizer done handler.
29646+ */
29647+
29648+static void psb_raster_done(struct drm_psb_private *dev_priv,
29649+ struct psb_scheduler *scheduler)
29650+{
29651+ struct psb_task *task =
29652+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29653+ struct psb_scene *scene = task->scene;
29654+ uint32_t complete_action = task->raster_complete_action;
29655+
29656+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
29657+
29658+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
29659+
29660+ if (complete_action != PSB_RASTER)
29661+ psb_schedule_raster(dev_priv, scheduler);
29662+
29663+ if (scene) {
29664+ if (task->feedback.page) {
29665+ if (unlikely(scheduler->feedback_task)) {
29666+ /*
29667+ * This should never happen, since the previous
29668+ * feedback query will return before the next
29669+ * raster task is fired.
29670+ */
29671+ DRM_ERROR("Feedback task busy.\n");
29672+ }
29673+ scheduler->feedback_task = task;
29674+ psb_xhw_vistest(dev_priv, &task->buf);
29675+ }
29676+ switch (complete_action) {
29677+ case PSB_RETURN:
29678+ scene->flags &=
29679+ ~(PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
29680+ list_add_tail(&scene->hw_scene->head,
29681+ &scheduler->hw_scenes);
29682+ psb_report_fence(scheduler, task->engine,
29683+ task->sequence,
29684+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
29685+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM) {
29686+ scheduler->ta_state = 0;
29687+ }
29688+ break;
29689+ case PSB_RASTER:
29690+ list_add(&task->head, &scheduler->raster_queue);
29691+ task->raster_complete_action = PSB_RETURN;
29692+ psb_schedule_raster(dev_priv, scheduler);
29693+ break;
29694+ case PSB_TA:
29695+ list_add(&task->head, &scheduler->ta_queue);
29696+ scheduler->ta_state = 0;
29697+ task->raster_complete_action = PSB_RETURN;
29698+ task->ta_complete_action = PSB_RASTER;
29699+ break;
29700+
29701+ }
29702+ }
29703+ psb_schedule_ta(dev_priv, scheduler);
29704+ psb_set_idle(scheduler);
29705+
29706+ if (complete_action == PSB_RETURN) {
29707+ if (task->scene == NULL) {
29708+ psb_report_fence(scheduler, task->engine,
29709+ task->sequence,
29710+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
29711+ }
29712+ if (!task->feedback.page) {
29713+ list_add_tail(&task->head, &scheduler->task_done_queue);
29714+ schedule_delayed_work(&scheduler->wq, 1);
29715+ }
29716+ }
29717+}
29718+
29719+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
29720+{
29721+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29722+ unsigned long irq_flags;
29723+
29724+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29725+ scheduler->idle_count++;
29726+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29727+}
29728+
29729+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
29730+{
29731+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29732+ unsigned long irq_flags;
29733+
29734+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29735+ if (--scheduler->idle_count == 0) {
29736+ psb_schedule_ta(dev_priv, scheduler);
29737+ psb_schedule_raster(dev_priv, scheduler);
29738+ }
29739+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29740+}
29741+
29742+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
29743+{
29744+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29745+ unsigned long irq_flags;
29746+ int ret;
29747+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29748+ ret = scheduler->idle_count != 0 && scheduler->idle;
29749+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29750+ return ret;
29751+}
29752+
29753+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
29754+{
29755+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29756+ unsigned long irq_flags;
29757+ int ret;
29758+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29759+ ret = (scheduler->idle &&
29760+ list_empty(&scheduler->raster_queue) &&
29761+ list_empty(&scheduler->ta_queue) &&
29762+ list_empty(&scheduler->hp_raster_queue));
29763+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29764+ return ret;
29765+}
29766+
29767+static void psb_ta_oom(struct drm_psb_private *dev_priv,
29768+ struct psb_scheduler *scheduler)
29769+{
29770+
29771+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29772+ if (!task)
29773+ return;
29774+
29775+ if (task->aborting)
29776+ return;
29777+ task->aborting = 1;
29778+
29779+ DRM_INFO("Info: TA out of parameter memory.\n");
29780+
29781+ (void)psb_xhw_ta_oom(dev_priv, &task->buf, task->scene->hw_cookie);
29782+}
29783+
29784+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
29785+ struct psb_scheduler *scheduler)
29786+{
29787+
29788+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29789+ uint32_t flags;
29790+ if (!task)
29791+ return;
29792+
29793+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
29794+ task->scene->hw_cookie,
29795+ &task->ta_complete_action,
29796+ &task->raster_complete_action, &flags);
29797+ task->flags |= flags;
29798+ task->aborting = 0;
29799+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
29800+}
29801+
29802+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
29803+ struct psb_scheduler *scheduler)
29804+{
29805+ DRM_ERROR("TA hw scene freed.\n");
29806+}
29807+
29808+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
29809+ struct psb_scheduler *scheduler)
29810+{
29811+ struct psb_task *task = scheduler->feedback_task;
29812+ uint8_t *feedback_map;
29813+ uint32_t add;
29814+ uint32_t cur;
29815+ struct drm_psb_vistest *vistest;
29816+ int i;
29817+
29818+ scheduler->feedback_task = NULL;
29819+ if (!task) {
29820+ DRM_ERROR("No Poulsbo feedback task.\n");
29821+ return;
29822+ }
29823+ if (!task->feedback.page) {
29824+ DRM_ERROR("No Poulsbo feedback page.\n");
29825+ goto out;
29826+ }
29827+
29828+ if (in_irq())
29829+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
29830+ else
29831+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
29832+
29833+ /*
29834+ * Loop over all requested vistest components here.
29835+ * Only one (vistest) currently.
29836+ */
29837+
29838+ vistest = (struct drm_psb_vistest *)
29839+ (feedback_map + task->feedback.offset);
29840+
29841+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
29842+ add = task->buf.arg.arg.feedback[i];
29843+ cur = vistest->vt[i];
29844+
29845+ /*
29846+ * Vistest saturates.
29847+ */
29848+
29849+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
29850+ }
29851+ if (in_irq())
29852+ kunmap_atomic(feedback_map, KM_IRQ0);
29853+ else
29854+ kunmap_atomic(feedback_map, KM_USER0);
29855+ out:
29856+ psb_report_fence(scheduler, task->engine, task->sequence,
29857+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
29858+
29859+ if (list_empty(&task->head)) {
29860+ list_add_tail(&task->head, &scheduler->task_done_queue);
29861+ schedule_delayed_work(&scheduler->wq, 1);
29862+ } else
29863+ psb_schedule_ta(dev_priv, scheduler);
29864+}
29865+
29866+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
29867+ struct psb_scheduler *scheduler)
29868+{
29869+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29870+
29871+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
29872+
29873+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
29874+}
29875+
29876+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
29877+ struct psb_scheduler *scheduler)
29878+{
29879+ struct psb_task *task =
29880+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29881+ uint32_t reply_flags;
29882+
29883+ if (!task) {
29884+ DRM_ERROR("Null task.\n");
29885+ return;
29886+ }
29887+
29888+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
29889+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
29890+
29891+ reply_flags = PSB_RF_FIRE_RASTER;
29892+ if (task->raster_complete_action == PSB_RASTER)
29893+ reply_flags |= PSB_RF_DEALLOC;
29894+
29895+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
29896+}
29897+
29898+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
29899+ struct psb_scheduler *scheduler)
29900+{
29901+ uint32_t type;
29902+ int ret;
29903+ unsigned long irq_flags;
29904+
29905+ /*
29906+ * Xhw cannot write directly to the comm page, so
29907+ * do it here. Firmware would have written directly.
29908+ */
29909+
29910+ ret = psb_xhw_handler(dev_priv);
29911+ if (unlikely(ret))
29912+ return ret;
29913+
29914+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
29915+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
29916+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
29917+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
29918+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
29919+ DRM_ERROR("Lost Poulsbo hardware event.\n");
29920+ }
29921+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
29922+
29923+ if (type == 0)
29924+ return 0;
29925+
29926+ switch (type) {
29927+ case PSB_UIRQ_VISTEST:
29928+ psb_vistest_reply(dev_priv, scheduler);
29929+ break;
29930+ case PSB_UIRQ_OOM_REPLY:
29931+ psb_ta_oom_reply(dev_priv, scheduler);
29932+ break;
29933+ case PSB_UIRQ_FIRE_TA_REPLY:
29934+ psb_ta_fire_reply(dev_priv, scheduler);
29935+ break;
29936+ case PSB_UIRQ_FIRE_RASTER_REPLY:
29937+ psb_raster_fire_reply(dev_priv, scheduler);
29938+ break;
29939+ default:
29940+ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
29941+ }
29942+ return 0;
29943+}
29944+
29945+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
29946+{
29947+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
29948+ unsigned long irq_flags;
29949+ int ret;
29950+
29951+ spin_lock_irqsave(&scheduler->lock, irq_flags);
29952+ ret = psb_user_interrupt(dev_priv, scheduler);
29953+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
29954+ return ret;
29955+}
29956+
29957+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
29958+ struct psb_scheduler *scheduler,
29959+ uint32_t reply_flag)
29960+{
29961+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
29962+ uint32_t flags;
29963+ uint32_t mask;
29964+
29965+ task->reply_flags |= reply_flag;
29966+ flags = task->reply_flags;
29967+ mask = PSB_RF_FIRE_TA;
29968+
29969+ if (!(flags & mask))
29970+ return;
29971+
29972+ mask = PSB_RF_TA_DONE;
29973+ if ((flags & mask) == mask) {
29974+ task->reply_flags &= ~mask;
29975+ psb_ta_done(dev_priv, scheduler);
29976+ }
29977+
29978+ mask = PSB_RF_OOM;
29979+ if ((flags & mask) == mask) {
29980+ task->reply_flags &= ~mask;
29981+ psb_ta_oom(dev_priv, scheduler);
29982+ }
29983+
29984+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
29985+ if ((flags & mask) == mask) {
29986+ task->reply_flags &= ~mask;
29987+ psb_ta_done(dev_priv, scheduler);
29988+ }
29989+}
29990+
29991+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
29992+ struct psb_scheduler *scheduler,
29993+ uint32_t reply_flag)
29994+{
29995+ struct psb_task *task =
29996+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
29997+ uint32_t flags;
29998+ uint32_t mask;
29999+
30000+ task->reply_flags |= reply_flag;
30001+ flags = task->reply_flags;
30002+ mask = PSB_RF_FIRE_RASTER;
30003+
30004+ if (!(flags & mask))
30005+ return;
30006+
30007+ /*
30008+ * For rasterizer-only tasks, don't report fence done here,
30009+ * as this is time consuming and the rasterizer wants a new
30010+ * task immediately. For other tasks, the hardware is probably
30011+ * still busy deallocating TA memory, so we can report
30012+ * fence done in parallel.
30013+ */
30014+
30015+ if (task->raster_complete_action == PSB_RETURN &&
30016+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
30017+ psb_report_fence(scheduler, task->engine, task->sequence,
30018+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
30019+ }
30020+
30021+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
30022+ if ((flags & mask) == mask) {
30023+ task->reply_flags &= ~mask;
30024+ psb_raster_done(dev_priv, scheduler);
30025+ }
30026+}
30027+
30028+void psb_scheduler_handler(struct drm_psb_private *dev_priv, uint32_t status)
30029+{
30030+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30031+
30032+ spin_lock(&scheduler->lock);
30033+
30034+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
30035+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_RASTER_DONE);
30036+ }
30037+ if (status & _PSB_CE_DPM_3D_MEM_FREE) {
30038+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
30039+ }
30040+ if (status & _PSB_CE_TA_FINISHED) {
30041+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
30042+ }
30043+ if (status & _PSB_CE_TA_TERMINATE) {
30044+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
30045+ }
30046+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
30047+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
30048+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
30049+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
30050+ }
30051+ if (status & _PSB_CE_DPM_TA_MEM_FREE) {
30052+ psb_ta_hw_scene_freed(dev_priv, scheduler);
30053+ }
30054+ if (status & _PSB_CE_SW_EVENT) {
30055+ psb_user_interrupt(dev_priv, scheduler);
30056+ }
30057+ spin_unlock(&scheduler->lock);
30058+}
30059+
30060+static void psb_free_task_wq(struct work_struct *work)
30061+{
30062+ struct psb_scheduler *scheduler =
30063+ container_of(work, struct psb_scheduler, wq.work);
30064+
30065+ struct drm_device *dev = scheduler->dev;
30066+ struct list_head *list, *next;
30067+ unsigned long irq_flags;
30068+ struct psb_task *task;
30069+
30070+ if (!mutex_trylock(&scheduler->task_wq_mutex))
30071+ return;
30072+
30073+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30074+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
30075+ task = list_entry(list, struct psb_task, head);
30076+ list_del_init(list);
30077+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30078+
30079+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
30080+ "Feedback bo 0x%08lx, done %d\n",
30081+ task->sequence, (unsigned long)task->scene,
30082+ (unsigned long)task->feedback.bo,
30083+ atomic_read(&task->buf.done));
30084+
30085+ if (task->scene) {
30086+ mutex_lock(&dev->struct_mutex);
30087+ PSB_DEBUG_RENDER("Unref scene %d\n", task->sequence);
30088+ psb_scene_unref_devlocked(&task->scene);
30089+ if (task->feedback.bo) {
30090+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
30091+ task->sequence);
30092+ drm_bo_usage_deref_locked(&task->feedback.bo);
30093+ }
30094+ mutex_unlock(&dev->struct_mutex);
30095+ }
30096+
30097+ if (atomic_read(&task->buf.done)) {
30098+ PSB_DEBUG_RENDER("Deleting task %d\n", task->sequence);
30099+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
30100+ task = NULL;
30101+ }
30102+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30103+ if (task != NULL)
30104+ list_add(list, &scheduler->task_done_queue);
30105+ }
30106+ if (!list_empty(&scheduler->task_done_queue)) {
30107+ PSB_DEBUG_RENDER("Rescheduling wq\n");
30108+ schedule_delayed_work(&scheduler->wq, 1);
30109+ }
30110+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30111+
30112+ mutex_unlock(&scheduler->task_wq_mutex);
30113+}
30114+
30115+/*
30116+ * Check if any of the tasks in the queues is using a scene.
30117+ * In that case we know the TA memory buffer objects are
30118+ * fenced and will not be evicted until that fence is signaled.
30119+ */
30120+
30121+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
30122+{
30123+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30124+ unsigned long irq_flags;
30125+ struct psb_task *task;
30126+ struct psb_task *next_task;
30127+
30128+ dev_priv->force_ta_mem_load = 1;
30129+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30130+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue, head) {
30131+ if (task->scene) {
30132+ dev_priv->force_ta_mem_load = 0;
30133+ break;
30134+ }
30135+ }
30136+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
30137+ head) {
30138+ if (task->scene) {
30139+ dev_priv->force_ta_mem_load = 0;
30140+ break;
30141+ }
30142+ }
30143+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30144+}
30145+
30146+void psb_scheduler_reset(struct drm_psb_private *dev_priv, int error_condition)
30147+{
30148+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30149+ unsigned long wait_jiffies;
30150+ unsigned long cur_jiffies;
30151+ struct psb_task *task;
30152+ struct psb_task *next_task;
30153+ unsigned long irq_flags;
30154+
30155+ psb_scheduler_pause(dev_priv);
30156+ if (!psb_scheduler_idle(dev_priv)) {
30157+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30158+
30159+ cur_jiffies = jiffies;
30160+ wait_jiffies = cur_jiffies;
30161+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
30162+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
30163+ wait_jiffies = scheduler->ta_end_jiffies;
30164+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
30165+ time_after_eq(scheduler->raster_end_jiffies, wait_jiffies))
30166+ wait_jiffies = scheduler->raster_end_jiffies;
30167+
30168+ wait_jiffies -= cur_jiffies;
30169+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30170+
30171+ (void)wait_event_timeout(scheduler->idle_queue,
30172+ psb_scheduler_idle(dev_priv),
30173+ wait_jiffies);
30174+ }
30175+
30176+ if (!psb_scheduler_idle(dev_priv)) {
30177+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30178+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
30179+ if (task) {
30180+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
30181+ if (task->engine == PSB_ENGINE_HPRAST) {
30182+ psb_fence_error(scheduler->dev,
30183+ PSB_ENGINE_HPRAST,
30184+ task->sequence,
30185+ _PSB_FENCE_TYPE_RASTER_DONE,
30186+ error_condition);
30187+
30188+ list_del(&task->head);
30189+ psb_xhw_clean_buf(dev_priv, &task->buf);
30190+ list_add_tail(&task->head,
30191+ &scheduler->task_done_queue);
30192+ } else {
30193+ list_add(&task->head, &scheduler->raster_queue);
30194+ }
30195+ }
30196+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
30197+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
30198+ if (task) {
30199+ DRM_ERROR("Detected Poulsbo ta lockup.\n");
30200+ list_add_tail(&task->head, &scheduler->raster_queue);
30201+#ifdef FIX_TG_16
30202+ psb_2d_atomic_unlock(dev_priv);
30203+#endif
30204+ }
30205+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
30206+ scheduler->ta_state = 0;
30207+
30208+#ifdef FIX_TG_16
30209+ atomic_set(&dev_priv->ta_wait_2d, 0);
30210+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
30211+ wake_up(&dev_priv->queue_2d);
30212+#endif
30213+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30214+ }
30215+
30216+ /*
30217+ * Empty raster queue.
30218+ */
30219+
30220+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30221+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
30222+ head) {
30223+ struct psb_scene *scene = task->scene;
30224+
30225+ psb_fence_error(scheduler->dev,
30226+ task->engine,
30227+ task->sequence,
30228+ _PSB_FENCE_TYPE_TA_DONE |
30229+ _PSB_FENCE_TYPE_RASTER_DONE |
30230+ _PSB_FENCE_TYPE_SCENE_DONE |
30231+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
30232+ if (scene) {
30233+ scene->flags = 0;
30234+ if (scene->hw_scene) {
30235+ list_add_tail(&scene->hw_scene->head,
30236+ &scheduler->hw_scenes);
30237+ scene->hw_scene = NULL;
30238+ }
30239+ }
30240+
30241+ psb_xhw_clean_buf(dev_priv, &task->buf);
30242+ list_del(&task->head);
30243+ list_add_tail(&task->head, &scheduler->task_done_queue);
30244+ }
30245+
30246+ schedule_delayed_work(&scheduler->wq, 1);
30247+ scheduler->idle = 1;
30248+ wake_up(&scheduler->idle_queue);
30249+
30250+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30251+ psb_scheduler_restart(dev_priv);
30252+
30253+}
30254+
30255+int psb_scheduler_init(struct drm_device *dev, struct psb_scheduler *scheduler)
30256+{
30257+ struct psb_hw_scene *hw_scene;
30258+ int i;
30259+
30260+ memset(scheduler, 0, sizeof(*scheduler));
30261+ scheduler->dev = dev;
30262+ mutex_init(&scheduler->task_wq_mutex);
30263+	spin_lock_init(&scheduler->lock);
30264+ scheduler->idle = 1;
30265+
30266+ INIT_LIST_HEAD(&scheduler->ta_queue);
30267+ INIT_LIST_HEAD(&scheduler->raster_queue);
30268+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
30269+ INIT_LIST_HEAD(&scheduler->hw_scenes);
30270+ INIT_LIST_HEAD(&scheduler->task_done_queue);
30271+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
30272+ init_waitqueue_head(&scheduler->idle_queue);
30273+
30274+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
30275+ hw_scene = &scheduler->hs[i];
30276+ hw_scene->context_number = i;
30277+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
30278+ }
30279+
30280+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
30281+ scheduler->seq[i].reported = 0;
30282+ }
30283+
30284+ return 0;
30285+}
30286+
30287+/*
30288+ * Scene references maintained by the scheduler are not refcounted.
30289+ * Remove all references to a particular scene here.
30290+ */
30291+
30292+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
30293+{
30294+ struct drm_psb_private *dev_priv =
30295+ (struct drm_psb_private *)scene->dev->dev_private;
30296+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30297+ struct psb_hw_scene *hw_scene;
30298+ unsigned long irq_flags;
30299+ unsigned int i;
30300+
30301+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30302+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
30303+ hw_scene = &scheduler->hs[i];
30304+ if (hw_scene->last_scene == scene) {
30305+ BUG_ON(list_empty(&hw_scene->head));
30306+ hw_scene->last_scene = NULL;
30307+ }
30308+ }
30309+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30310+}
30311+
30312+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
30313+{
30314+ flush_scheduled_work();
30315+}
30316+
30317+static int psb_setup_task_devlocked(struct drm_device *dev,
30318+ struct drm_psb_cmdbuf_arg *arg,
30319+ struct drm_buffer_object *raster_cmd_buffer,
30320+ struct drm_buffer_object *ta_cmd_buffer,
30321+ struct drm_buffer_object *oom_cmd_buffer,
30322+ struct psb_scene *scene,
30323+ enum psb_task_type task_type,
30324+ uint32_t engine,
30325+ uint32_t flags, struct psb_task **task_p)
30326+{
30327+ struct psb_task *task;
30328+ int ret;
30329+
30330+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
30331+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
30332+ return -EINVAL;
30333+ }
30334+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
30335+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
30336+ return -EINVAL;
30337+ }
30338+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
30339+ DRM_ERROR("Too many raster cmds %d.\n", arg->oom_size);
30340+ return -EINVAL;
30341+ }
30342+
30343+ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
30344+ if (!task)
30345+ return -ENOMEM;
30346+
30347+ atomic_set(&task->buf.done, 1);
30348+ task->engine = engine;
30349+ INIT_LIST_HEAD(&task->head);
30350+ INIT_LIST_HEAD(&task->buf.head);
30351+ if (ta_cmd_buffer && arg->ta_size != 0) {
30352+ task->ta_cmd_size = arg->ta_size;
30353+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
30354+ arg->ta_offset,
30355+ arg->ta_size,
30356+ PSB_ENGINE_TA, task->ta_cmds);
30357+ if (ret)
30358+ goto out_err;
30359+ }
30360+ if (raster_cmd_buffer) {
30361+ task->raster_cmd_size = arg->cmdbuf_size;
30362+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
30363+ arg->cmdbuf_offset,
30364+ arg->cmdbuf_size,
30365+ PSB_ENGINE_TA, task->raster_cmds);
30366+ if (ret)
30367+ goto out_err;
30368+ }
30369+ if (oom_cmd_buffer && arg->oom_size != 0) {
30370+ task->oom_cmd_size = arg->oom_size;
30371+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
30372+ arg->oom_offset,
30373+ arg->oom_size,
30374+ PSB_ENGINE_TA, task->oom_cmds);
30375+ if (ret)
30376+ goto out_err;
30377+ }
30378+ task->task_type = task_type;
30379+ task->flags = flags;
30380+ if (scene)
30381+ task->scene = psb_scene_ref(scene);
30382+
30383+ *task_p = task;
30384+ return 0;
30385+ out_err:
30386+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
30387+ *task_p = NULL;
30388+ return ret;
30389+}
30390+
30391+int psb_cmdbuf_ta(struct drm_file *priv,
30392+ struct drm_psb_cmdbuf_arg *arg,
30393+ struct drm_buffer_object *cmd_buffer,
30394+ struct drm_buffer_object *ta_buffer,
30395+ struct drm_buffer_object *oom_buffer,
30396+ struct psb_scene *scene,
30397+ struct psb_feedback_info *feedback,
30398+ struct drm_fence_arg *fence_arg)
30399+{
30400+ struct drm_device *dev = priv->minor->dev;
30401+ struct drm_psb_private *dev_priv = dev->dev_private;
30402+ struct drm_fence_object *fence = NULL;
30403+ struct psb_task *task = NULL;
30404+ int ret;
30405+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30406+ unsigned long irq_flags;
30407+
30408+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
30409+
30410+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
30411+ if (ret)
30412+ return -EAGAIN;
30413+
30414+ mutex_lock(&dev->struct_mutex);
30415+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, ta_buffer,
30416+ oom_buffer, scene,
30417+ psb_ta_task, PSB_ENGINE_TA,
30418+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
30419+ mutex_unlock(&dev->struct_mutex);
30420+
30421+ if (ret)
30422+ goto out_err;
30423+
30424+ task->feedback = *feedback;
30425+
30426+ /*
30427+ * Hand the task over to the scheduler.
30428+ */
30429+
30430+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30431+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
30432+
30433+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
30434+
30435+ task->ta_complete_action = PSB_RASTER;
30436+ task->raster_complete_action = PSB_RETURN;
30437+
30438+ list_add_tail(&task->head, &scheduler->ta_queue);
30439+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
30440+
30441+ psb_schedule_ta(dev_priv, scheduler);
30442+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30443+
30444+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
30445+ drm_regs_fence(&dev_priv->use_manager, fence);
30446+ if (fence)
30447+ fence_arg->signaled |= 0x1;
30448+
30449+ out_err:
30450+ if (ret && ret != -EAGAIN)
30451+ DRM_ERROR("TA task queue job failed.\n");
30452+
30453+ if (fence) {
30454+#ifdef PSB_WAIT_FOR_TA_COMPLETION
30455+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
30456+ _PSB_FENCE_TYPE_TA_DONE);
30457+#ifdef PSB_BE_PARANOID
30458+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
30459+ _PSB_FENCE_TYPE_SCENE_DONE);
30460+#endif
30461+#endif
30462+ drm_fence_usage_deref_unlocked(&fence);
30463+ }
30464+ mutex_unlock(&dev_priv->reset_mutex);
30465+
30466+ return ret;
30467+}
30468+
30469+int psb_cmdbuf_raster(struct drm_file *priv,
30470+ struct drm_psb_cmdbuf_arg *arg,
30471+ struct drm_buffer_object *cmd_buffer,
30472+ struct drm_fence_arg *fence_arg)
30473+{
30474+ struct drm_device *dev = priv->minor->dev;
30475+ struct drm_psb_private *dev_priv = dev->dev_private;
30476+ struct drm_fence_object *fence = NULL;
30477+ struct psb_task *task = NULL;
30478+ int ret;
30479+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30480+ unsigned long irq_flags;
30481+
30482+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
30483+
30484+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
30485+ if (ret)
30486+ return -EAGAIN;
30487+
30488+ mutex_lock(&dev->struct_mutex);
30489+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, NULL, NULL,
30490+ NULL, psb_raster_task,
30491+ PSB_ENGINE_TA, 0, &task);
30492+ mutex_unlock(&dev->struct_mutex);
30493+
30494+ if (ret)
30495+ goto out_err;
30496+
30497+ /*
30498+ * Hand the task over to the scheduler.
30499+ */
30500+
30501+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30502+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
30503+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
30504+ task->ta_complete_action = PSB_RASTER;
30505+ task->raster_complete_action = PSB_RETURN;
30506+
30507+ list_add_tail(&task->head, &scheduler->ta_queue);
30508+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
30509+ psb_schedule_ta(dev_priv, scheduler);
30510+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30511+
30512+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
30513+ drm_regs_fence(&dev_priv->use_manager, fence);
30514+ if (fence)
30515+ fence_arg->signaled |= 0x1;
30516+ out_err:
30517+ if (ret && ret != -EAGAIN)
30518+ DRM_ERROR("Raster task queue job failed.\n");
30519+
30520+ if (fence) {
30521+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
30522+ drm_fence_object_wait(fence, 1, 1, fence->type);
30523+#endif
30524+ drm_fence_usage_deref_unlocked(&fence);
30525+ }
30526+
30527+ mutex_unlock(&dev_priv->reset_mutex);
30528+
30529+ return ret;
30530+}
30531+
30532+#ifdef FIX_TG_16
30533+
30534+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
30535+{
30536+ if (psb_2d_trylock(dev_priv)) {
30537+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
30538+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
30539+ _PSB_C2B_STATUS_BUSY))) {
30540+ return 0;
30541+ }
30542+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
30543+ psb_2D_irq_on(dev_priv);
30544+
30545+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
30546+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
30547+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
30548+
30549+ psb_2d_atomic_unlock(dev_priv);
30550+ }
30551+
30552+ atomic_set(&dev_priv->ta_wait_2d, 1);
30553+ return -EBUSY;
30554+}
30555+
30556+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
30557+{
30558+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30559+
30560+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
30561+ psb_schedule_ta(dev_priv, scheduler);
30562+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30563+ wake_up(&dev_priv->queue_2d);
30564+ }
30565+}
30566+
30567+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
30568+{
30569+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30570+ unsigned long irq_flags;
30571+
30572+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30573+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
30574+ atomic_set(&dev_priv->ta_wait_2d, 0);
30575+ psb_2D_irq_off(dev_priv);
30576+ psb_schedule_ta(dev_priv, scheduler);
30577+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30578+ wake_up(&dev_priv->queue_2d);
30579+ }
30580+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30581+}
30582+
30583+/*
30584+ * 2D locking functions. Can't use a mutex since the trylock() and
30585+ * unlock() methods need to be accessible from interrupt context.
30586+ */
30587+
30588+static int psb_2d_trylock(struct drm_psb_private *dev_priv)
30589+{
30590+ return (atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0);
30591+}
30592+
30593+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
30594+{
30595+ atomic_set(&dev_priv->lock_2d, 0);
30596+ if (atomic_read(&dev_priv->waiters_2d) != 0)
30597+ wake_up(&dev_priv->queue_2d);
30598+}
30599+
30600+void psb_2d_unlock(struct drm_psb_private *dev_priv)
30601+{
30602+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
30603+ unsigned long irq_flags;
30604+
30605+ spin_lock_irqsave(&scheduler->lock, irq_flags);
30606+ psb_2d_atomic_unlock(dev_priv);
30607+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
30608+ psb_atomic_resume_ta_2d_idle(dev_priv);
30609+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
30610+}
30611+
30612+void psb_2d_lock(struct drm_psb_private *dev_priv)
30613+{
30614+ atomic_inc(&dev_priv->waiters_2d);
30615+ wait_event(dev_priv->queue_2d, atomic_read(&dev_priv->ta_wait_2d) == 0);
30616+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
30617+ atomic_dec(&dev_priv->waiters_2d);
30618+}
30619+
30620+#endif
30621Index: linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.h
30622===================================================================
30623--- /dev/null 1970-01-01 00:00:00.000000000 +0000
30624+++ linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.h 2009-01-14 11:58:01.000000000 +0000
30625@@ -0,0 +1,170 @@
30626+/**************************************************************************
30627+ * Copyright (c) 2007, Intel Corporation.
30628+ * All Rights Reserved.
30629+ *
30630+ * This program is free software; you can redistribute it and/or modify it
30631+ * under the terms and conditions of the GNU General Public License,
30632+ * version 2, as published by the Free Software Foundation.
30633+ *
30634+ * This program is distributed in the hope it will be useful, but WITHOUT
30635+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
30636+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
30637+ * more details.
30638+ *
30639+ * You should have received a copy of the GNU General Public License along with
30640+ * this program; if not, write to the Free Software Foundation, Inc.,
30641+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
30642+ *
30643+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
30644+ * develop this driver.
30645+ *
30646+ **************************************************************************/
30647+/*
30648+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
30649+ */
30650+
30651+#ifndef _PSB_SCHEDULE_H_
30652+#define _PSB_SCHEDULE_H_
30653+
30654+#include "drmP.h"
30655+
30656+enum psb_task_type {
30657+ psb_ta_midscene_task,
30658+ psb_ta_task,
30659+ psb_raster_task,
30660+ psb_freescene_task
30661+};
30662+
30663+#define PSB_MAX_TA_CMDS 60
30664+#define PSB_MAX_RASTER_CMDS 60
30665+#define PSB_MAX_OOM_CMDS 6
30666+
30667+struct psb_xhw_buf {
30668+ struct list_head head;
30669+ int copy_back;
30670+ atomic_t done;
30671+ struct drm_psb_xhw_arg arg;
30672+
30673+};
30674+
30675+struct psb_feedback_info {
30676+ struct drm_buffer_object *bo;
30677+ struct page *page;
30678+ uint32_t offset;
30679+};
30680+
30681+struct psb_task {
30682+ struct list_head head;
30683+ struct psb_scene *scene;
30684+ struct psb_feedback_info feedback;
30685+ enum psb_task_type task_type;
30686+ uint32_t engine;
30687+ uint32_t sequence;
30688+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
30689+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
30690+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
30691+ uint32_t ta_cmd_size;
30692+ uint32_t raster_cmd_size;
30693+ uint32_t oom_cmd_size;
30694+ uint32_t feedback_offset;
30695+ uint32_t ta_complete_action;
30696+ uint32_t raster_complete_action;
30697+ uint32_t hw_cookie;
30698+ uint32_t flags;
30699+ uint32_t reply_flags;
30700+ uint32_t aborting;
30701+ struct psb_xhw_buf buf;
30702+};
30703+
30704+struct psb_hw_scene {
30705+ struct list_head head;
30706+ uint32_t context_number;
30707+
30708+ /*
30709+ * This pointer does not refcount the last_scene_buffer,
30710+ * so we must make sure it is set to NULL before destroying
30711+ * the corresponding task.
30712+ */
30713+
30714+ struct psb_scene *last_scene;
30715+};
30716+
30717+struct psb_scene;
30718+struct drm_psb_private;
30719+
30720+struct psb_scheduler_seq {
30721+ uint32_t sequence;
30722+ int reported;
30723+};
30724+
30725+struct psb_scheduler {
30726+ struct drm_device *dev;
30727+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
30728+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
30729+ struct mutex task_wq_mutex;
30730+ spinlock_t lock;
30731+ struct list_head hw_scenes;
30732+ struct list_head ta_queue;
30733+ struct list_head raster_queue;
30734+ struct list_head hp_raster_queue;
30735+ struct list_head task_done_queue;
30736+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
30737+ struct psb_task *feedback_task;
30738+ int ta_state;
30739+ struct psb_hw_scene *pending_hw_scene;
30740+ uint32_t pending_hw_scene_seq;
30741+ struct delayed_work wq;
30742+ struct psb_scene_pool *pool;
30743+ uint32_t idle_count;
30744+ int idle;
30745+ wait_queue_head_t idle_queue;
30746+ unsigned long ta_end_jiffies;
30747+ unsigned long raster_end_jiffies;
30748+ unsigned long total_raster_jiffies;
30749+};
30750+
30751+#define PSB_RF_FIRE_TA (1 << 0)
30752+#define PSB_RF_OOM (1 << 1)
30753+#define PSB_RF_OOM_REPLY (1 << 2)
30754+#define PSB_RF_TERMINATE (1 << 3)
30755+#define PSB_RF_TA_DONE (1 << 4)
30756+#define PSB_RF_FIRE_RASTER (1 << 5)
30757+#define PSB_RF_RASTER_DONE (1 << 6)
30758+#define PSB_RF_DEALLOC (1 << 7)
30759+
30760+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
30761+ int shareable, uint32_t w,
30762+ uint32_t h);
30763+extern uint32_t psb_scene_handle(struct psb_scene *scene);
30764+extern int psb_scheduler_init(struct drm_device *dev,
30765+ struct psb_scheduler *scheduler);
30766+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
30767+extern int psb_cmdbuf_ta(struct drm_file *priv,
30768+ struct drm_psb_cmdbuf_arg *arg,
30769+ struct drm_buffer_object *cmd_buffer,
30770+ struct drm_buffer_object *ta_buffer,
30771+ struct drm_buffer_object *oom_buffer,
30772+ struct psb_scene *scene,
30773+ struct psb_feedback_info *feedback,
30774+ struct drm_fence_arg *fence_arg);
30775+extern int psb_cmdbuf_raster(struct drm_file *priv,
30776+ struct drm_psb_cmdbuf_arg *arg,
30777+ struct drm_buffer_object *cmd_buffer,
30778+ struct drm_fence_arg *fence_arg);
30779+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
30780+ uint32_t status);
30781+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
30782+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
30783+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
30784+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
30785+
30786+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
30787+ int *lockup, int *idle);
30788+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
30789+ int error_condition);
30790+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
30791+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
30792+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
30793+extern int psb_extend_raster_timeout(struct drm_psb_private *dev_priv);
30794+
30795+#endif
30796Index: linux-2.6.27/drivers/gpu/drm/psb/psb_setup.c
30797===================================================================
30798--- /dev/null 1970-01-01 00:00:00.000000000 +0000
30799+++ linux-2.6.27/drivers/gpu/drm/psb/psb_setup.c 2009-01-14 11:58:01.000000000 +0000
30800@@ -0,0 +1,17 @@
30801+#include "drmP.h"
30802+#include "drm.h"
30803+#include "drm_crtc.h"
30804+#include "drm_edid.h"
30805+#include "intel_drv.h"
30806+#include "psb_drv.h"
30807+#include "i915_reg.h"
30808+#include "intel_crt.c"
30809+
30810+/* Fixed name */
30811+#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
30812+#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
30813+
30814+#include "intel_lvds.c"
30815+#include "intel_sdvo.c"
30816+#include "intel_display.c"
30817+#include "intel_modes.c"
30818Index: linux-2.6.27/drivers/gpu/drm/psb/psb_sgx.c
30819===================================================================
30820--- /dev/null 1970-01-01 00:00:00.000000000 +0000
30821+++ linux-2.6.27/drivers/gpu/drm/psb/psb_sgx.c 2009-01-14 11:58:01.000000000 +0000
30822@@ -0,0 +1,1422 @@
30823+/**************************************************************************
30824+ * Copyright (c) 2007, Intel Corporation.
30825+ * All Rights Reserved.
30826+ *
30827+ * This program is free software; you can redistribute it and/or modify it
30828+ * under the terms and conditions of the GNU General Public License,
30829+ * version 2, as published by the Free Software Foundation.
30830+ *
30831+ * This program is distributed in the hope it will be useful, but WITHOUT
30832+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
30833+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
30834+ * more details.
30835+ *
30836+ * You should have received a copy of the GNU General Public License along with
30837+ * this program; if not, write to the Free Software Foundation, Inc.,
30838+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
30839+ *
30840+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
30841+ * develop this driver.
30842+ *
30843+ **************************************************************************/
30844+/*
30845+ */
30846+
30847+#include "drmP.h"
30848+#include "psb_drv.h"
30849+#include "psb_drm.h"
30850+#include "psb_reg.h"
30851+#include "psb_scene.h"
30852+
30853+#include "psb_msvdx.h"
30854+
30855+int psb_submit_video_cmdbuf(struct drm_device *dev,
30856+ struct drm_buffer_object *cmd_buffer,
30857+ unsigned long cmd_offset, unsigned long cmd_size,
30858+ struct drm_fence_object *fence);
30859+
30860+struct psb_dstbuf_cache {
30861+ unsigned int dst;
30862+ uint32_t *use_page;
30863+ unsigned int use_index;
30864+ uint32_t use_background;
30865+ struct drm_buffer_object *dst_buf;
30866+ unsigned long dst_offset;
30867+ uint32_t *dst_page;
30868+ unsigned int dst_page_offset;
30869+ struct drm_bo_kmap_obj dst_kmap;
30870+ int dst_is_iomem;
30871+};
30872+
30873+struct psb_buflist_item {
30874+ struct drm_buffer_object *bo;
30875+ void __user *data;
30876+ int ret;
30877+ int presumed_offset_correct;
30878+};
30879+
30880+
30881+#define PSB_REG_GRAN_SHIFT 2
30882+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
30883+#define PSB_MAX_REG 0x1000
30884+
30885+static const uint32_t disallowed_ranges[][2] = {
30886+ {0x0000, 0x0200},
30887+ {0x0208, 0x0214},
30888+ {0x021C, 0x0224},
30889+ {0x0230, 0x0234},
30890+ {0x0248, 0x024C},
30891+ {0x0254, 0x0358},
30892+ {0x0428, 0x0428},
30893+ {0x0430, 0x043C},
30894+ {0x0498, 0x04B4},
30895+ {0x04CC, 0x04D8},
30896+ {0x04E0, 0x07FC},
30897+ {0x0804, 0x0A58},
30898+ {0x0A68, 0x0A80},
30899+ {0x0AA0, 0x0B1C},
30900+ {0x0B2C, 0x0CAC},
30901+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
30902+};
30903+
30904+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
30905+ (PSB_REG_GRANULARITY *
30906+ (sizeof(uint32_t) << 3))];
30907+
30908+static inline int psb_disallowed(uint32_t reg)
30909+{
30910+ reg >>= PSB_REG_GRAN_SHIFT;
30911+ return ((psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0);
30912+}
30913+
30914+void psb_init_disallowed(void)
30915+{
30916+ int i;
30917+ uint32_t reg, tmp;
30918+ static int initialized = 0;
30919+
30920+ if (initialized)
30921+ return;
30922+
30923+ initialized = 1;
30924+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
30925+
30926+ for (i = 0; i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
30927+ ++i) {
30928+ for (reg = disallowed_ranges[i][0];
30929+ reg <= disallowed_ranges[i][1]; reg += 4) {
30930+ tmp = reg >> 2;
30931+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
30932+ }
30933+ }
30934+}
30935+
30936+static int psb_memcpy_check(uint32_t * dst, const uint32_t * src, uint32_t size)
30937+{
30938+ size >>= 3;
30939+ while (size--) {
30940+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
30941+ DRM_ERROR("Forbidden SGX register access: "
30942+ "0x%04x.\n", *src);
30943+ return -EPERM;
30944+ }
30945+ *dst++ = *src++;
30946+ *dst++ = *src++;
30947+ }
30948+ return 0;
30949+}
30950+
30951+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
30952+ unsigned size)
30953+{
30954+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
30955+ int ret = 0;
30956+
30957+ retry:
30958+ if (avail < size) {
30959+#if 0
30960+ /* We'd ideally
30961+ * like to have an IRQ-driven event here.
30962+ */
30963+
30964+ psb_2D_irq_on(dev_priv);
30965+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
30966+ ((avail = PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
30967+ psb_2D_irq_off(dev_priv);
30968+ if (ret == 0)
30969+ return 0;
30970+ if (ret == -EINTR) {
30971+ ret = 0;
30972+ goto retry;
30973+ }
30974+#else
30975+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
30976+ goto retry;
30977+#endif
30978+ }
30979+ return ret;
30980+}
30981+
30982+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t * cmdbuf,
30983+ unsigned size)
30984+{
30985+ int ret = 0;
30986+ int i;
30987+ unsigned submit_size;
30988+
30989+ while (size > 0) {
30990+ submit_size = (size < 0x60) ? size : 0x60;
30991+ size -= submit_size;
30992+ ret = psb_2d_wait_available(dev_priv, submit_size);
30993+ if (ret)
30994+ return ret;
30995+
30996+ submit_size <<= 2;
30997+
30998+ for (i = 0; i < submit_size; i += 4) {
30999+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
31000+ }
31001+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
31002+ }
31003+ return 0;
31004+}
31005+
31006+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
31007+{
31008+ uint32_t buffer[8];
31009+ uint32_t *bufp = buffer;
31010+ int ret;
31011+
31012+ *bufp++ = PSB_2D_FENCE_BH;
31013+
31014+ *bufp++ = PSB_2D_DST_SURF_BH |
31015+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
31016+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
31017+
31018+ *bufp++ = PSB_2D_BLIT_BH |
31019+ PSB_2D_ROT_NONE |
31020+ PSB_2D_COPYORDER_TL2BR |
31021+ PSB_2D_DSTCK_DISABLE |
31022+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
31023+
31024+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
31025+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
31026+ (0 << PSB_2D_DST_YSTART_SHIFT);
31027+ *bufp++ = (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
31028+
31029+ *bufp++ = PSB_2D_FLUSH_BH;
31030+
31031+ psb_2d_lock(dev_priv);
31032+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
31033+ psb_2d_unlock(dev_priv);
31034+
31035+ if (!ret)
31036+ psb_schedule_watchdog(dev_priv);
31037+ return ret;
31038+}
31039+
31040+int psb_emit_2d_copy_blit(struct drm_device *dev,
31041+ uint32_t src_offset,
31042+ uint32_t dst_offset, uint32_t pages, int direction)
31043+{
31044+ uint32_t cur_pages;
31045+ struct drm_psb_private *dev_priv = dev->dev_private;
31046+ uint32_t buf[10];
31047+ uint32_t *bufp;
31048+ uint32_t xstart;
31049+ uint32_t ystart;
31050+ uint32_t blit_cmd;
31051+ uint32_t pg_add;
31052+ int ret = 0;
31053+
31054+ if (!dev_priv)
31055+ return 0;
31056+
31057+ if (direction) {
31058+ pg_add = (pages - 1) << PAGE_SHIFT;
31059+ src_offset += pg_add;
31060+ dst_offset += pg_add;
31061+ }
31062+
31063+ blit_cmd = PSB_2D_BLIT_BH |
31064+ PSB_2D_ROT_NONE |
31065+ PSB_2D_DSTCK_DISABLE |
31066+ PSB_2D_SRCCK_DISABLE |
31067+ PSB_2D_USE_PAT |
31068+ PSB_2D_ROP3_SRCCOPY |
31069+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
31070+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
31071+
31072+ psb_2d_lock(dev_priv);
31073+ while (pages > 0) {
31074+ cur_pages = pages;
31075+ if (cur_pages > 2048)
31076+ cur_pages = 2048;
31077+ pages -= cur_pages;
31078+ ystart = (direction) ? cur_pages - 1 : 0;
31079+
31080+ bufp = buf;
31081+ *bufp++ = PSB_2D_FENCE_BH;
31082+
31083+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
31084+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
31085+ *bufp++ = dst_offset;
31086+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
31087+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
31088+ *bufp++ = src_offset;
31089+ *bufp++ =
31090+ PSB_2D_SRC_OFF_BH | (xstart << PSB_2D_SRCOFF_XSTART_SHIFT) |
31091+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
31092+ *bufp++ = blit_cmd;
31093+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
31094+ (ystart << PSB_2D_DST_YSTART_SHIFT);
31095+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
31096+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
31097+
31098+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
31099+ if (ret)
31100+ goto out;
31101+ pg_add = (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
31102+ src_offset += pg_add;
31103+ dst_offset += pg_add;
31104+ }
31105+ out:
31106+ psb_2d_unlock(dev_priv);
31107+ return ret;
31108+}
31109+
31110+void psb_init_2d(struct drm_psb_private *dev_priv)
31111+{
31112+ dev_priv->sequence_lock = SPIN_LOCK_UNLOCKED;
31113+ psb_reset(dev_priv, 1);
31114+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
31115+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
31116+ (void)PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
31117+}
31118+
31119+int psb_idle_2d(struct drm_device *dev)
31120+{
31121+ struct drm_psb_private *dev_priv = dev->dev_private;
31122+ unsigned long _end = jiffies + DRM_HZ;
31123+ int busy = 0;
31124+
31125+ /*
31126+ * First idle the 2D engine.
31127+ */
31128+
31129+ if (dev_priv->engine_lockup_2d)
31130+ return -EBUSY;
31131+
31132+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
31133+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
31134+ goto out;
31135+
31136+ do {
31137+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
31138+ } while (busy && !time_after_eq(jiffies, _end));
31139+
31140+ if (busy)
31141+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
31142+ if (busy)
31143+ goto out;
31144+
31145+ do {
31146+ busy =
31147+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
31148+ != 0);
31149+ } while (busy && !time_after_eq(jiffies, _end));
31150+ if (busy)
31151+ busy =
31152+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
31153+ != 0);
31154+
31155+ out:
31156+ if (busy)
31157+ dev_priv->engine_lockup_2d = 1;
31158+
31159+ return (busy) ? -EBUSY : 0;
31160+}
31161+
31162+int psb_idle_3d(struct drm_device *dev)
31163+{
31164+ struct drm_psb_private *dev_priv = dev->dev_private;
31165+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
31166+ int ret;
31167+
31168+ ret = wait_event_timeout(scheduler->idle_queue,
31169+ psb_scheduler_finished(dev_priv), DRM_HZ * 10);
31170+
31171+ return (ret < 1) ? -EBUSY : 0;
31172+}
31173+
31174+static void psb_dereference_buffers_locked(struct psb_buflist_item *buffers,
31175+ unsigned num_buffers)
31176+{
31177+ while (num_buffers--)
31178+ drm_bo_usage_deref_locked(&((buffers++)->bo));
31179+
31180+}
31181+
31182+static int psb_check_presumed(struct drm_bo_op_arg *arg,
31183+ struct drm_buffer_object *bo,
31184+ uint32_t __user * data, int *presumed_ok)
31185+{
31186+ struct drm_bo_op_req *req = &arg->d.req;
31187+ uint32_t hint_offset;
31188+ uint32_t hint = req->bo_req.hint;
31189+
31190+ *presumed_ok = 0;
31191+
31192+ if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
31193+ return 0;
31194+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
31195+ *presumed_ok = 1;
31196+ return 0;
31197+ }
31198+ if (bo->offset == req->bo_req.presumed_offset) {
31199+ *presumed_ok = 1;
31200+ return 0;
31201+ }
31202+
31203+ /*
31204+ * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
31205+ * the user-space IOCTL argument list, since the buffer has moved,
31206+ * we're about to apply relocations and we might subsequently
31207+ * hit an -EAGAIN. In that case the argument list will be reused by
31208+ * user-space, but the presumed offset is no longer valid.
31209+ *
31210+ * Needless to say, this is a bit ugly.
31211+ */
31212+
31213+ hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
31214+ hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
31215+ return __put_user(hint, data + hint_offset);
31216+}
31217+
31218+static int psb_validate_buffer_list(struct drm_file *file_priv,
31219+ unsigned fence_class,
31220+ unsigned long data,
31221+ struct psb_buflist_item *buffers,
31222+ unsigned *num_buffers)
31223+{
31224+ struct drm_bo_op_arg arg;
31225+ struct drm_bo_op_req *req = &arg.d.req;
31226+ int ret = 0;
31227+ unsigned buf_count = 0;
31228+ struct psb_buflist_item *item = buffers;
31229+
31230+ do {
31231+ if (buf_count >= *num_buffers) {
31232+ DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
31233+ ret = -EINVAL;
31234+ goto out_err;
31235+ }
31236+ item = buffers + buf_count;
31237+ item->bo = NULL;
31238+
31239+ if (copy_from_user(&arg, (void __user *)data, sizeof(arg))) {
31240+ ret = -EFAULT;
31241+ DRM_ERROR("Error copying validate list.\n"
31242+ "\tbuffer %d, user addr 0x%08lx %d\n",
31243+ buf_count, (unsigned long)data, sizeof(arg));
31244+ goto out_err;
31245+ }
31246+
31247+ ret = 0;
31248+ if (req->op != drm_bo_validate) {
31249+ DRM_ERROR
31250+ ("Buffer object operation wasn't \"validate\".\n");
31251+ ret = -EINVAL;
31252+ goto out_err;
31253+ }
31254+
31255+ item->ret = 0;
31256+ item->data = (void *)__user data;
31257+ ret = drm_bo_handle_validate(file_priv,
31258+ req->bo_req.handle,
31259+ fence_class,
31260+ req->bo_req.flags,
31261+ req->bo_req.mask,
31262+ req->bo_req.hint,
31263+ 0, NULL, &item->bo);
31264+ if (ret)
31265+ goto out_err;
31266+
31267+ PSB_DEBUG_GENERAL("Validated buffer at 0x%08lx\n",
31268+ buffers[buf_count].bo->offset);
31269+
31270+ buf_count++;
31271+
31272+
31273+ ret = psb_check_presumed(&arg, item->bo,
31274+ (uint32_t __user *)
31275+ (unsigned long) data,
31276+ &item->presumed_offset_correct);
31277+
31278+ if (ret)
31279+ goto out_err;
31280+
31281+ data = arg.next;
31282+ } while (data);
31283+
31284+ *num_buffers = buf_count;
31285+
31286+ return 0;
31287+ out_err:
31288+
31289+ *num_buffers = buf_count;
31290+ item->ret = (ret != -EAGAIN) ? ret : 0;
31291+ return ret;
31292+}
31293+
31294+int
31295+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
31296+ unsigned int cmds)
31297+{
31298+ int i;
31299+
31300+ /*
31301+ * cmds is 32-bit words.
31302+ */
31303+
31304+ cmds >>= 1;
31305+ for (i = 0; i < cmds; ++i) {
31306+ PSB_WSGX32(regs[1], regs[0]);
31307+ regs += 2;
31308+ }
31309+ wmb();
31310+ return 0;
31311+}
31312+
31313+/*
31314+ * Security: Block user-space writing to MMU mapping registers.
31315+ * This is important for security and brings Poulsbo DRM
31316+ * up to par with the other DRM drivers. Using this,
31317+ * user-space should not be able to map arbitrary memory
31318+ * pages to graphics memory, but all user-space processes
31319+ * basically have access to all buffer objects mapped to
31320+ * graphics memory.
31321+ */
31322+
31323+int
31324+psb_submit_copy_cmdbuf(struct drm_device *dev,
31325+ struct drm_buffer_object *cmd_buffer,
31326+ unsigned long cmd_offset,
31327+ unsigned long cmd_size,
31328+ int engine, uint32_t * copy_buffer)
31329+{
31330+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
31331+ struct drm_psb_private *dev_priv = dev->dev_private;
31332+ unsigned long cmd_page_offset = cmd_offset - (cmd_offset & PAGE_MASK);
31333+ unsigned long cmd_next;
31334+ struct drm_bo_kmap_obj cmd_kmap;
31335+ uint32_t *cmd_page;
31336+ unsigned cmds;
31337+ int is_iomem;
31338+ int ret = 0;
31339+
31340+ if (cmd_size == 0)
31341+ return 0;
31342+
31343+ if (engine == PSB_ENGINE_2D)
31344+ psb_2d_lock(dev_priv);
31345+
31346+ do {
31347+ cmd_next = drm_bo_offset_end(cmd_offset, cmd_end);
31348+ ret = drm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
31349+ 1, &cmd_kmap);
31350+
31351+ if (ret)
31352+ return ret;
31353+ cmd_page = drm_bmo_virtual(&cmd_kmap, &is_iomem);
31354+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
31355+ cmds = (cmd_next - cmd_offset) >> 2;
31356+
31357+ switch (engine) {
31358+ case PSB_ENGINE_2D:
31359+ ret =
31360+ psb_2d_submit(dev_priv, cmd_page + cmd_page_offset,
31361+ cmds);
31362+ break;
31363+ case PSB_ENGINE_RASTERIZER:
31364+ case PSB_ENGINE_TA:
31365+ case PSB_ENGINE_HPRAST:
31366+ PSB_DEBUG_GENERAL("Reg copy.\n");
31367+ ret = psb_memcpy_check(copy_buffer,
31368+ cmd_page + cmd_page_offset,
31369+ cmds * sizeof(uint32_t));
31370+ copy_buffer += cmds;
31371+ break;
31372+ default:
31373+ ret = -EINVAL;
31374+ }
31375+ drm_bo_kunmap(&cmd_kmap);
31376+ if (ret)
31377+ break;
31378+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
31379+
31380+ if (engine == PSB_ENGINE_2D)
31381+ psb_2d_unlock(dev_priv);
31382+
31383+ return ret;
31384+}
31385+
31386+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
31387+{
31388+ if (dst_cache->dst_page) {
31389+ drm_bo_kunmap(&dst_cache->dst_kmap);
31390+ dst_cache->dst_page = NULL;
31391+ }
31392+ dst_cache->dst_buf = NULL;
31393+ dst_cache->dst = ~0;
31394+ dst_cache->use_page = NULL;
31395+}
31396+
31397+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
31398+ struct psb_buflist_item *buffers,
31399+ unsigned int dst, unsigned long dst_offset)
31400+{
31401+ int ret;
31402+
31403+ PSB_DEBUG_RELOC("Destination buffer is %d.\n", dst);
31404+
31405+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
31406+ psb_clear_dstbuf_cache(dst_cache);
31407+ dst_cache->dst = dst;
31408+ dst_cache->dst_buf = buffers[dst].bo;
31409+ }
31410+
31411+ if (unlikely(dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
31412+ DRM_ERROR("Relocation destination out of bounds.\n");
31413+ return -EINVAL;
31414+ }
31415+
31416+ if (!drm_bo_same_page(dst_cache->dst_offset, dst_offset) ||
31417+ NULL == dst_cache->dst_page) {
31418+ if (NULL != dst_cache->dst_page) {
31419+ drm_bo_kunmap(&dst_cache->dst_kmap);
31420+ dst_cache->dst_page = NULL;
31421+ }
31422+
31423+ ret = drm_bo_kmap(dst_cache->dst_buf, dst_offset >> PAGE_SHIFT,
31424+ 1, &dst_cache->dst_kmap);
31425+ if (ret) {
31426+ DRM_ERROR("Could not map destination buffer for "
31427+ "relocation.\n");
31428+ return ret;
31429+ }
31430+
31431+ dst_cache->dst_page = drm_bmo_virtual(&dst_cache->dst_kmap,
31432+ &dst_cache->dst_is_iomem);
31433+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
31434+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
31435+ }
31436+ return 0;
31437+}
31438+
31439+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
31440+ uint32_t fence_class,
31441+ const struct drm_psb_reloc *reloc,
31442+ struct psb_buflist_item *buffers,
31443+ int num_buffers,
31444+ struct psb_dstbuf_cache *dst_cache,
31445+ int no_wait, int interruptible)
31446+{
31447+ int reg;
31448+ uint32_t val;
31449+ uint32_t background;
31450+ unsigned int index;
31451+ int ret;
31452+ unsigned int shift;
31453+ unsigned int align_shift;
31454+ uint32_t fence_type;
31455+ struct drm_buffer_object *reloc_bo;
31456+
31457+ PSB_DEBUG_RELOC("Reloc type %d\n"
31458+ "\t where 0x%04x\n"
31459+ "\t buffer 0x%04x\n"
31460+ "\t mask 0x%08x\n"
31461+ "\t shift 0x%08x\n"
31462+ "\t pre_add 0x%08x\n"
31463+ "\t background 0x%08x\n"
31464+ "\t dst_buffer 0x%08x\n"
31465+ "\t arg0 0x%08x\n"
31466+ "\t arg1 0x%08x\n",
31467+ reloc->reloc_op,
31468+ reloc->where,
31469+ reloc->buffer,
31470+ reloc->mask,
31471+ reloc->shift,
31472+ reloc->pre_add,
31473+ reloc->background,
31474+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
31475+
31476+ if (unlikely(reloc->buffer >= num_buffers)) {
31477+ DRM_ERROR("Illegal relocation buffer %d.\n", reloc->buffer);
31478+ return -EINVAL;
31479+ }
31480+
31481+ if (buffers[reloc->buffer].presumed_offset_correct)
31482+ return 0;
31483+
31484+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
31485+ DRM_ERROR("Illegal destination buffer for relocation %d.\n",
31486+ reloc->dst_buffer);
31487+ return -EINVAL;
31488+ }
31489+
31490+ ret = psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
31491+ reloc->where << 2);
31492+ if (ret)
31493+ return ret;
31494+
31495+ reloc_bo = buffers[reloc->buffer].bo;
31496+
31497+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
31498+ DRM_ERROR("Illegal relocation offset add.\n");
31499+ return -EINVAL;
31500+ }
31501+
31502+ switch (reloc->reloc_op) {
31503+ case PSB_RELOC_OP_OFFSET:
31504+ val = reloc_bo->offset + reloc->pre_add;
31505+ break;
31506+ case PSB_RELOC_OP_2D_OFFSET:
31507+ val = reloc_bo->offset + reloc->pre_add -
31508+ dev_priv->mmu_2d_offset;
31509+ if (unlikely(val >= PSB_2D_SIZE)) {
31510+ DRM_ERROR("2D relocation out of bounds\n");
31511+ return -EINVAL;
31512+ }
31513+ break;
31514+ case PSB_RELOC_OP_PDS_OFFSET:
31515+ val = reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
31516+ if (unlikely(val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
31517+ DRM_ERROR("PDS relocation out of bounds\n");
31518+ return -EINVAL;
31519+ }
31520+ break;
31521+ case PSB_RELOC_OP_USE_OFFSET:
31522+ case PSB_RELOC_OP_USE_REG:
31523+
31524+ /*
31525+ * Security:
31526+ * Only allow VERTEX or PIXEL data masters, as
31527+ * shaders run under other data masters may in theory
31528+ * alter MMU mappings.
31529+ */
31530+
31531+ if (unlikely(reloc->arg1 != _PSB_CUC_DM_PIXEL &&
31532+ reloc->arg1 != _PSB_CUC_DM_VERTEX)) {
31533+ DRM_ERROR("Invalid data master in relocation. %d\n",
31534+ reloc->arg1);
31535+ return -EPERM;
31536+ }
31537+
31538+ fence_type = reloc_bo->fence_type;
31539+ ret = psb_grab_use_base(dev_priv,
31540+ reloc_bo->offset +
31541+ reloc->pre_add, reloc->arg0,
31542+ reloc->arg1, fence_class,
31543+ fence_type, no_wait,
31544+ interruptible, &reg, &val);
31545+ if (ret)
31546+ return ret;
31547+
31548+ val = (reloc->reloc_op == PSB_RELOC_OP_USE_REG) ? reg : val;
31549+ break;
31550+ default:
31551+ DRM_ERROR("Unimplemented relocation.\n");
31552+ return -EINVAL;
31553+ }
31554+
31555+ shift = (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
31556+ align_shift = (reloc->shift & PSB_RELOC_ALSHIFT_MASK) >>
31557+ PSB_RELOC_ALSHIFT_SHIFT;
31558+
31559+ val = ((val >> align_shift) << shift);
31560+ index = reloc->where - dst_cache->dst_page_offset;
31561+
31562+ background = reloc->background;
31563+
31564+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET) {
31565+ if (dst_cache->use_page == dst_cache->dst_page &&
31566+ dst_cache->use_index == index)
31567+ background = dst_cache->use_background;
31568+ else
31569+ background = dst_cache->dst_page[index];
31570+ }
31571+#if 0
31572+ if (dst_cache->dst_page[index] != PSB_RELOC_MAGIC &&
31573+ reloc->reloc_op != PSB_RELOC_OP_USE_OFFSET)
31574+ DRM_ERROR("Inconsistent relocation 0x%08lx.\n",
31575+ (unsigned long)dst_cache->dst_page[index]);
31576+#endif
31577+
31578+ val = (background & ~reloc->mask) | (val & reloc->mask);
31579+ dst_cache->dst_page[index] = val;
31580+
31581+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET ||
31582+ reloc->reloc_op == PSB_RELOC_OP_USE_REG) {
31583+ dst_cache->use_page = dst_cache->dst_page;
31584+ dst_cache->use_index = index;
31585+ dst_cache->use_background = val;
31586+ }
31587+
31588+ PSB_DEBUG_RELOC("Reloc buffer %d index 0x%08x, value 0x%08x\n",
31589+ reloc->dst_buffer, index, dst_cache->dst_page[index]);
31590+
31591+ return 0;
31592+}
31593+
31594+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
31595+ unsigned int num_pages)
31596+{
31597+ int ret = 0;
31598+
31599+ spin_lock(&dev_priv->reloc_lock);
31600+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
31601+ dev_priv->rel_mapped_pages += num_pages;
31602+ ret = 1;
31603+ }
31604+ spin_unlock(&dev_priv->reloc_lock);
31605+ return ret;
31606+}
31607+
31608+static int psb_fixup_relocs(struct drm_file *file_priv,
31609+ uint32_t fence_class,
31610+ unsigned int num_relocs,
31611+ unsigned int reloc_offset,
31612+ uint32_t reloc_handle,
31613+ struct psb_buflist_item *buffers,
31614+ unsigned int num_buffers,
31615+ int no_wait, int interruptible)
31616+{
31617+ struct drm_device *dev = file_priv->minor->dev;
31618+ struct drm_psb_private *dev_priv =
31619+ (struct drm_psb_private *)dev->dev_private;
31620+ struct drm_buffer_object *reloc_buffer = NULL;
31621+ unsigned int reloc_num_pages;
31622+ unsigned int reloc_first_page;
31623+ unsigned int reloc_last_page;
31624+ struct psb_dstbuf_cache dst_cache;
31625+ struct drm_psb_reloc *reloc;
31626+ struct drm_bo_kmap_obj reloc_kmap;
31627+ int reloc_is_iomem;
31628+ int count;
31629+ int ret = 0;
31630+ int registered = 0;
31631+ int short_circuit = 1;
31632+ int i;
31633+
31634+ if (num_relocs == 0)
31635+ return 0;
31636+
31637+ for (i=0; i<num_buffers; ++i) {
31638+ if (!buffers[i].presumed_offset_correct) {
31639+ short_circuit = 0;
31640+ break;
31641+ }
31642+ }
31643+
31644+ if (short_circuit)
31645+ return 0;
31646+
31647+ memset(&dst_cache, 0, sizeof(dst_cache));
31648+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
31649+
31650+ mutex_lock(&dev->struct_mutex);
31651+ reloc_buffer = drm_lookup_buffer_object(file_priv, reloc_handle, 1);
31652+ mutex_unlock(&dev->struct_mutex);
31653+ if (!reloc_buffer)
31654+ goto out;
31655+
31656+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
31657+ reloc_last_page =
31658+ (reloc_offset +
31659+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
31660+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
31661+ reloc_offset &= ~PAGE_MASK;
31662+
31663+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
31664+ DRM_ERROR("Relocation buffer is too large\n");
31665+ ret = -EINVAL;
31666+ goto out;
31667+ }
31668+
31669+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
31670+ (registered =
31671+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
31672+
31673+ if (ret == -EINTR) {
31674+ ret = -EAGAIN;
31675+ goto out;
31676+ }
31677+ if (ret) {
31678+ DRM_ERROR("Error waiting for space to map "
31679+ "relocation buffer.\n");
31680+ goto out;
31681+ }
31682+
31683+ ret = drm_bo_kmap(reloc_buffer, reloc_first_page,
31684+ reloc_num_pages, &reloc_kmap);
31685+
31686+ if (ret) {
31687+ DRM_ERROR("Could not map relocation buffer.\n"
31688+ "\tReloc buffer id 0x%08x.\n"
31689+ "\tReloc first page %d.\n"
31690+ "\tReloc num pages %d.\n",
31691+ reloc_handle, reloc_first_page, reloc_num_pages);
31692+ goto out;
31693+ }
31694+
31695+ reloc = (struct drm_psb_reloc *)
31696+ ((unsigned long)drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem) +
31697+ reloc_offset);
31698+
31699+ for (count = 0; count < num_relocs; ++count) {
31700+ ret = psb_apply_reloc(dev_priv, fence_class,
31701+ reloc, buffers,
31702+ num_buffers, &dst_cache,
31703+ no_wait, interruptible);
31704+ if (ret)
31705+ goto out1;
31706+ reloc++;
31707+ }
31708+
31709+ out1:
31710+ drm_bo_kunmap(&reloc_kmap);
31711+ out:
31712+ if (registered) {
31713+ spin_lock(&dev_priv->reloc_lock);
31714+ dev_priv->rel_mapped_pages -= reloc_num_pages;
31715+ spin_unlock(&dev_priv->reloc_lock);
31716+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
31717+ }
31718+
31719+ psb_clear_dstbuf_cache(&dst_cache);
31720+ if (reloc_buffer)
31721+ drm_bo_usage_deref_unlocked(&reloc_buffer);
31722+ return ret;
31723+}
31724+
31725+static int psb_cmdbuf_2d(struct drm_file *priv,
31726+ struct drm_psb_cmdbuf_arg *arg,
31727+ struct drm_buffer_object *cmd_buffer,
31728+ struct drm_fence_arg *fence_arg)
31729+{
31730+ struct drm_device *dev = priv->minor->dev;
31731+ struct drm_psb_private *dev_priv =
31732+ (struct drm_psb_private *)dev->dev_private;
31733+ int ret;
31734+
31735+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
31736+ if (ret)
31737+ return -EAGAIN;
31738+
31739+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
31740+ arg->cmdbuf_size, PSB_ENGINE_2D, NULL);
31741+ if (ret)
31742+ goto out_unlock;
31743+
31744+ psb_fence_or_sync(priv, PSB_ENGINE_2D, arg, fence_arg, NULL);
31745+
31746+ mutex_lock(&cmd_buffer->mutex);
31747+ if (cmd_buffer->fence != NULL)
31748+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
31749+ mutex_unlock(&cmd_buffer->mutex);
31750+ out_unlock:
31751+ mutex_unlock(&dev_priv->reset_mutex);
31752+ return ret;
31753+}
31754+
31755+#if 0
31756+static int psb_dump_page(struct drm_buffer_object *bo,
31757+ unsigned int page_offset, unsigned int num)
31758+{
31759+ struct drm_bo_kmap_obj kmobj;
31760+ int is_iomem;
31761+ uint32_t *p;
31762+ int ret;
31763+ unsigned int i;
31764+
31765+ ret = drm_bo_kmap(bo, page_offset, 1, &kmobj);
31766+ if (ret)
31767+ return ret;
31768+
31769+ p = drm_bmo_virtual(&kmobj, &is_iomem);
31770+ for (i = 0; i < num; ++i)
31771+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
31772+
31773+ drm_bo_kunmap(&kmobj);
31774+ return 0;
31775+}
31776+#endif
31777+
31778+static void psb_idle_engine(struct drm_device *dev, int engine)
31779+{
31780+ struct drm_psb_private *dev_priv =
31781+ (struct drm_psb_private *)dev->dev_private;
31782+ uint32_t dummy;
31783+
31784+ switch (engine) {
31785+ case PSB_ENGINE_2D:
31786+
31787+ /*
31788+ * Make sure we flush 2D properly using a dummy
31789+ * fence sequence emit.
31790+ */
31791+
31792+ (void)psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
31793+ &dummy, &dummy);
31794+ psb_2d_lock(dev_priv);
31795+ (void)psb_idle_2d(dev);
31796+ psb_2d_unlock(dev_priv);
31797+ break;
31798+ case PSB_ENGINE_TA:
31799+ case PSB_ENGINE_RASTERIZER:
31800+ case PSB_ENGINE_HPRAST:
31801+ (void)psb_idle_3d(dev);
31802+ break;
31803+ default:
31804+
31805+ /*
31806+ * FIXME: Insert video engine idle command here.
31807+ */
31808+
31809+ break;
31810+ }
31811+}
31812+
31813+void psb_fence_or_sync(struct drm_file *priv,
31814+ int engine,
31815+ struct drm_psb_cmdbuf_arg *arg,
31816+ struct drm_fence_arg *fence_arg,
31817+ struct drm_fence_object **fence_p)
31818+{
31819+ struct drm_device *dev = priv->minor->dev;
31820+ int ret;
31821+ struct drm_fence_object *fence;
31822+
31823+ ret = drm_fence_buffer_objects(dev, NULL, arg->fence_flags,
31824+ NULL, &fence);
31825+
31826+ if (ret) {
31827+
31828+ /*
31829+ * Fence creation failed.
31830+ * Fall back to synchronous operation and idle the engine.
31831+ */
31832+
31833+ psb_idle_engine(dev, engine);
31834+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
31835+
31836+ /*
31837+ * Communicate to user-space that
31838+ * fence creation has failed and that
31839+ * the engine is idle.
31840+ */
31841+
31842+ fence_arg->handle = ~0;
31843+ fence_arg->error = ret;
31844+ }
31845+
31846+ drm_putback_buffer_objects(dev);
31847+ if (fence_p)
31848+ *fence_p = NULL;
31849+ return;
31850+ }
31851+
31852+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
31853+
31854+ ret = drm_fence_add_user_object(priv, fence,
31855+ arg->fence_flags &
31856+ DRM_FENCE_FLAG_SHAREABLE);
31857+ if (!ret)
31858+ drm_fence_fill_arg(fence, fence_arg);
31859+ else {
31860+ /*
31861+ * Fence user object creation failed.
31862+ * We must idle the engine here as well, as user-
31863+ * space expects a fence object to wait on. Since we
31864+ * have a fence object we wait for it to signal
31865+ * to indicate engine "sufficiently" idle.
31866+ */
31867+
31868+ (void)drm_fence_object_wait(fence, 0, 1, fence->type);
31869+ drm_fence_usage_deref_unlocked(&fence);
31870+ fence_arg->handle = ~0;
31871+ fence_arg->error = ret;
31872+ }
31873+ }
31874+
31875+ if (fence_p)
31876+ *fence_p = fence;
31877+ else if (fence)
31878+ drm_fence_usage_deref_unlocked(&fence);
31879+}
31880+
31881+int psb_handle_copyback(struct drm_device *dev,
31882+ struct psb_buflist_item *buffers,
31883+ unsigned int num_buffers, int ret, void *data)
31884+{
31885+ struct drm_psb_private *dev_priv =
31886+ (struct drm_psb_private *)dev->dev_private;
31887+ struct drm_bo_op_arg arg;
31888+ struct psb_buflist_item *item = buffers;
31889+ struct drm_buffer_object *bo;
31890+ int err = ret;
31891+ int i;
31892+
31893+ /*
31894+ * Clear the unfenced use base register lists and buffer lists.
31895+ */
31896+
31897+ if (ret) {
31898+ drm_regs_fence(&dev_priv->use_manager, NULL);
31899+ drm_putback_buffer_objects(dev);
31900+ }
31901+
31902+ if (ret != -EAGAIN) {
31903+ for (i = 0; i < num_buffers; ++i) {
31904+ arg.handled = 1;
31905+ arg.d.rep.ret = item->ret;
31906+ bo = item->bo;
31907+ mutex_lock(&bo->mutex);
31908+ drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
31909+ mutex_unlock(&bo->mutex);
31910+ if (copy_to_user(item->data, &arg, sizeof(arg)))
31911+ err = -EFAULT;
31912+ ++item;
31913+ }
31914+ }
31915+
31916+ return err;
31917+}
31918+
31919+static int psb_cmdbuf_video(struct drm_file *priv,
31920+ struct drm_psb_cmdbuf_arg *arg,
31921+ unsigned int num_buffers,
31922+ struct drm_buffer_object *cmd_buffer,
31923+ struct drm_fence_arg *fence_arg)
31924+{
31925+ struct drm_device *dev = priv->minor->dev;
31926+ struct drm_fence_object *fence;
31927+ int ret;
31928+
31929+ /*
31930+ * Check this. Doesn't seem right. Have fencing done AFTER command
31931+ * submission and make sure drm_psb_idle idles the MSVDX completely.
31932+ */
31933+
31934+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, arg, fence_arg, &fence);
31935+ ret = psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
31936+ arg->cmdbuf_size, fence);
31937+
31938+ if (ret)
31939+ return ret;
31940+
31941+ drm_fence_usage_deref_unlocked(&fence);
31942+ mutex_lock(&cmd_buffer->mutex);
31943+ if (cmd_buffer->fence != NULL)
31944+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
31945+ mutex_unlock(&cmd_buffer->mutex);
31946+ return 0;
31947+}
31948+
31949+int psb_feedback_buf(struct drm_file *file_priv,
31950+ uint32_t feedback_ops,
31951+ uint32_t handle,
31952+ uint32_t offset,
31953+ uint32_t feedback_breakpoints,
31954+ uint32_t feedback_size, struct psb_feedback_info *feedback)
31955+{
31956+ struct drm_buffer_object *bo;
31957+ struct page *page;
31958+ uint32_t page_no;
31959+ uint32_t page_offset;
31960+ int ret;
31961+
31962+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
31963+ DRM_ERROR("Illegal feedback op.\n");
31964+ return -EINVAL;
31965+ }
31966+
31967+ if (feedback_breakpoints != 0) {
31968+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
31969+ return -EINVAL;
31970+ }
31971+
31972+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
31973+ DRM_ERROR("Feedback buffer size too small.\n");
31974+ return -EINVAL;
31975+ }
31976+
31977+ page_offset = offset & ~PAGE_MASK;
31978+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
31979+ < page_offset) {
31980+ DRM_ERROR("Illegal feedback buffer alignment.\n");
31981+ return -EINVAL;
31982+ }
31983+
31984+ ret = drm_bo_handle_validate(file_priv,
31985+ handle,
31986+ PSB_ENGINE_TA,
31987+ DRM_BO_FLAG_MEM_LOCAL |
31988+ DRM_BO_FLAG_CACHED |
31989+ DRM_BO_FLAG_WRITE |
31990+ PSB_BO_FLAG_FEEDBACK,
31991+ DRM_BO_MASK_MEM |
31992+ DRM_BO_FLAG_CACHED |
31993+ DRM_BO_FLAG_WRITE |
31994+ PSB_BO_FLAG_FEEDBACK, 0, 0, NULL, &bo);
31995+ if (ret)
31996+ return ret;
31997+
31998+ page_no = offset >> PAGE_SHIFT;
31999+ if (page_no >= bo->num_pages) {
32000+ ret = -EINVAL;
32001+ DRM_ERROR("Illegal feedback buffer offset.\n");
32002+ goto out_unref;
32003+ }
32004+
32005+ if (bo->ttm == NULL) {
32006+ ret = -EINVAL;
32007+ DRM_ERROR("Vistest buffer without TTM.\n");
32008+ goto out_unref;
32009+ }
32010+
32011+ page = drm_ttm_get_page(bo->ttm, page_no);
32012+ if (!page) {
32013+ ret = -ENOMEM;
32014+ goto out_unref;
32015+ }
32016+
32017+ feedback->page = page;
32018+ feedback->bo = bo;
32019+ feedback->offset = page_offset;
32020+ return 0;
32021+
32022+ out_unref:
32023+ drm_bo_usage_deref_unlocked(&bo);
32024+ return ret;
32025+}
32026+
32027+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
32028+ struct drm_file *file_priv)
32029+{
32030+ drm_psb_cmdbuf_arg_t *arg = data;
32031+ int ret = 0;
32032+ unsigned num_buffers;
32033+ struct drm_buffer_object *cmd_buffer = NULL;
32034+ struct drm_buffer_object *ta_buffer = NULL;
32035+ struct drm_buffer_object *oom_buffer = NULL;
32036+ struct drm_fence_arg fence_arg;
32037+ struct drm_psb_scene user_scene;
32038+ struct psb_scene_pool *pool = NULL;
32039+ struct psb_scene *scene = NULL;
32040+ struct drm_psb_private *dev_priv =
32041+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
32042+ int engine;
32043+ struct psb_feedback_info feedback;
32044+
32045+ if (!dev_priv)
32046+ return -EINVAL;
32047+
32048+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
32049+ if (ret)
32050+ return ret;
32051+
32052+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
32053+
32054+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
32055+ if (ret) {
32056+ drm_bo_read_unlock(&dev->bm.bm_lock);
32057+ return -EAGAIN;
32058+ }
32059+ if (unlikely(dev_priv->buffers == NULL)) {
32060+ dev_priv->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
32061+ sizeof(*dev_priv->buffers));
32062+ if (dev_priv->buffers == NULL) {
32063+ drm_bo_read_unlock(&dev->bm.bm_lock);
32064+ return -ENOMEM;
32065+ }
32066+ }
32067+
32068+
32069+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
32070+ PSB_ENGINE_TA : arg->engine;
32071+
32072+ ret =
32073+ psb_validate_buffer_list(file_priv, engine,
32074+ (unsigned long)arg->buffer_list,
32075+ dev_priv->buffers, &num_buffers);
32076+ if (ret)
32077+ goto out_err0;
32078+
32079+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
32080+ arg->reloc_offset, arg->reloc_handle,
32081+ dev_priv->buffers, num_buffers, 0, 1);
32082+ if (ret)
32083+ goto out_err0;
32084+
32085+ mutex_lock(&dev->struct_mutex);
32086+ cmd_buffer = drm_lookup_buffer_object(file_priv, arg->cmdbuf_handle, 1);
32087+ mutex_unlock(&dev->struct_mutex);
32088+ if (!cmd_buffer) {
32089+ ret = -EINVAL;
32090+ goto out_err0;
32091+ }
32092+
32093+ switch (arg->engine) {
32094+ case PSB_ENGINE_2D:
32095+ ret = psb_cmdbuf_2d(file_priv, arg, cmd_buffer, &fence_arg);
32096+ if (ret)
32097+ goto out_err0;
32098+ break;
32099+ case PSB_ENGINE_VIDEO:
32100+ ret =
32101+ psb_cmdbuf_video(file_priv, arg, num_buffers, cmd_buffer,
32102+ &fence_arg);
32103+ if (ret)
32104+ goto out_err0;
32105+ break;
32106+ case PSB_ENGINE_RASTERIZER:
32107+ ret = psb_cmdbuf_raster(file_priv, arg, cmd_buffer, &fence_arg);
32108+ if (ret)
32109+ goto out_err0;
32110+ break;
32111+ case PSB_ENGINE_TA:
32112+ if (arg->ta_handle == arg->cmdbuf_handle) {
32113+ mutex_lock(&dev->struct_mutex);
32114+ atomic_inc(&cmd_buffer->usage);
32115+ ta_buffer = cmd_buffer;
32116+ mutex_unlock(&dev->struct_mutex);
32117+ } else {
32118+ mutex_lock(&dev->struct_mutex);
32119+ ta_buffer =
32120+ drm_lookup_buffer_object(file_priv,
32121+ arg->ta_handle, 1);
32122+ mutex_unlock(&dev->struct_mutex);
32123+ if (!ta_buffer) {
32124+ ret = -EINVAL;
32125+ goto out_err0;
32126+ }
32127+ }
32128+ if (arg->oom_size != 0) {
32129+ if (arg->oom_handle == arg->cmdbuf_handle) {
32130+ mutex_lock(&dev->struct_mutex);
32131+ atomic_inc(&cmd_buffer->usage);
32132+ oom_buffer = cmd_buffer;
32133+ mutex_unlock(&dev->struct_mutex);
32134+ } else {
32135+ mutex_lock(&dev->struct_mutex);
32136+ oom_buffer =
32137+ drm_lookup_buffer_object(file_priv,
32138+ arg->oom_handle,
32139+ 1);
32140+ mutex_unlock(&dev->struct_mutex);
32141+ if (!oom_buffer) {
32142+ ret = -EINVAL;
32143+ goto out_err0;
32144+ }
32145+ }
32146+ }
32147+
32148+ ret = copy_from_user(&user_scene, (void __user *)
32149+ ((unsigned long)arg->scene_arg),
32150+ sizeof(user_scene));
32151+ if (ret)
32152+ goto out_err0;
32153+
32154+ if (!user_scene.handle_valid) {
32155+ pool = psb_scene_pool_alloc(file_priv, 0,
32156+ user_scene.num_buffers,
32157+ user_scene.w, user_scene.h);
32158+ if (!pool) {
32159+ ret = -ENOMEM;
32160+ goto out_err0;
32161+ }
32162+
32163+ user_scene.handle = psb_scene_pool_handle(pool);
32164+ user_scene.handle_valid = 1;
32165+ ret = copy_to_user((void __user *)
32166+ ((unsigned long)arg->scene_arg),
32167+ &user_scene, sizeof(user_scene));
32168+
32169+ if (ret)
32170+ goto out_err0;
32171+ } else {
32172+ mutex_lock(&dev->struct_mutex);
32173+ pool = psb_scene_pool_lookup_devlocked(file_priv,
32174+ user_scene.
32175+ handle, 1);
32176+ mutex_unlock(&dev->struct_mutex);
32177+ if (!pool) {
32178+ ret = -EINVAL;
32179+ goto out_err0;
32180+ }
32181+ }
32182+
32183+ mutex_lock(&dev_priv->reset_mutex);
32184+ ret = psb_validate_scene_pool(pool, 0, 0, 0,
32185+ user_scene.w,
32186+ user_scene.h,
32187+ arg->ta_flags &
32188+ PSB_TA_FLAG_LASTPASS, &scene);
32189+ mutex_unlock(&dev_priv->reset_mutex);
32190+
32191+ if (ret)
32192+ goto out_err0;
32193+
32194+ memset(&feedback, 0, sizeof(feedback));
32195+ if (arg->feedback_ops) {
32196+ ret = psb_feedback_buf(file_priv,
32197+ arg->feedback_ops,
32198+ arg->feedback_handle,
32199+ arg->feedback_offset,
32200+ arg->feedback_breakpoints,
32201+ arg->feedback_size, &feedback);
32202+ if (ret)
32203+ goto out_err0;
32204+ }
32205+ ret = psb_cmdbuf_ta(file_priv, arg, cmd_buffer, ta_buffer,
32206+ oom_buffer, scene, &feedback, &fence_arg);
32207+ if (ret)
32208+ goto out_err0;
32209+ break;
32210+ default:
32211+ DRM_ERROR("Unimplemented command submission mechanism (%x).\n",
32212+ arg->engine);
32213+ ret = -EINVAL;
32214+ goto out_err0;
32215+ }
32216+
32217+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
32218+ ret = copy_to_user((void __user *)
32219+ ((unsigned long)arg->fence_arg),
32220+ &fence_arg, sizeof(fence_arg));
32221+ }
32222+
32223+ out_err0:
32224+ ret =
32225+ psb_handle_copyback(dev, dev_priv->buffers, num_buffers, ret, data);
32226+ mutex_lock(&dev->struct_mutex);
32227+ if (scene)
32228+ psb_scene_unref_devlocked(&scene);
32229+ if (pool)
32230+ psb_scene_pool_unref_devlocked(&pool);
32231+ if (cmd_buffer)
32232+ drm_bo_usage_deref_locked(&cmd_buffer);
32233+ if (ta_buffer)
32234+ drm_bo_usage_deref_locked(&ta_buffer);
32235+ if (oom_buffer)
32236+ drm_bo_usage_deref_locked(&oom_buffer);
32237+
32238+ psb_dereference_buffers_locked(dev_priv->buffers, num_buffers);
32239+ mutex_unlock(&dev->struct_mutex);
32240+ mutex_unlock(&dev_priv->cmdbuf_mutex);
32241+
32242+ drm_bo_read_unlock(&dev->bm.bm_lock);
32243+ return ret;
32244+}
32245Index: linux-2.6.27/drivers/gpu/drm/psb/psb_xhw.c
32246===================================================================
32247--- /dev/null 1970-01-01 00:00:00.000000000 +0000
32248+++ linux-2.6.27/drivers/gpu/drm/psb/psb_xhw.c 2009-01-14 11:58:01.000000000 +0000
32249@@ -0,0 +1,614 @@
32250+/**************************************************************************
32251+ * Copyright (c) 2007, Intel Corporation.
32252+ * All Rights Reserved.
32253+ *
32254+ * This program is free software; you can redistribute it and/or modify it
32255+ * under the terms and conditions of the GNU General Public License,
32256+ * version 2, as published by the Free Software Foundation.
32257+ *
32258+ * This program is distributed in the hope it will be useful, but WITHOUT
32259+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
32260+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
32261+ * more details.
32262+ *
32263+ * You should have received a copy of the GNU General Public License along with
32264+ * this program; if not, write to the Free Software Foundation, Inc.,
32265+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
32266+ *
32267+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
32268+ * develop this driver.
32269+ *
32270+ **************************************************************************/
32271+/*
32272+ * Make calls into closed source X server code.
32273+ */
32274+
32275+#include "drmP.h"
32276+#include "psb_drv.h"
32277+
32278+void
32279+psb_xhw_clean_buf(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
32280+{
32281+ unsigned long irq_flags;
32282+
32283+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32284+ list_del_init(&buf->head);
32285+ if (dev_priv->xhw_cur_buf == buf)
32286+ dev_priv->xhw_cur_buf = NULL;
32287+ atomic_set(&buf->done, 1);
32288+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32289+}
32290+
32291+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
32292+ struct psb_xhw_buf *buf)
32293+{
32294+ unsigned long irq_flags;
32295+
32296+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32297+ atomic_set(&buf->done, 0);
32298+ if (unlikely(!dev_priv->xhw_submit_ok)) {
32299+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32300+ DRM_ERROR("No Xpsb 3D extension available.\n");
32301+ return -EINVAL;
32302+ }
32303+ if (!list_empty(&buf->head)) {
32304+ DRM_ERROR("Recursive list adding.\n");
32305+ goto out;
32306+ }
32307+ list_add_tail(&buf->head, &dev_priv->xhw_in);
32308+ wake_up_interruptible(&dev_priv->xhw_queue);
32309+ out:
32310+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32311+ return 0;
32312+}
32313+
32314+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
32315+ struct psb_xhw_buf *buf,
32316+ uint32_t w,
32317+ uint32_t h,
32318+ uint32_t * hw_cookie,
32319+ uint32_t * bo_size,
32320+ uint32_t * clear_p_start, uint32_t * clear_num_pages)
32321+{
32322+ struct drm_psb_xhw_arg *xa = &buf->arg;
32323+ int ret;
32324+
32325+ buf->copy_back = 1;
32326+ xa->op = PSB_XHW_SCENE_INFO;
32327+ xa->irq_op = 0;
32328+ xa->issue_irq = 0;
32329+ xa->arg.si.w = w;
32330+ xa->arg.si.h = h;
32331+
32332+ ret = psb_xhw_add(dev_priv, buf);
32333+ if (ret)
32334+ return ret;
32335+
32336+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32337+ atomic_read(&buf->done), DRM_HZ);
32338+
32339+ if (!atomic_read(&buf->done)) {
32340+ psb_xhw_clean_buf(dev_priv, buf);
32341+ return -EBUSY;
32342+ }
32343+
32344+ if (!xa->ret) {
32345+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
32346+ *bo_size = xa->arg.si.size;
32347+ *clear_p_start = xa->arg.si.clear_p_start;
32348+ *clear_num_pages = xa->arg.si.clear_num_pages;
32349+ }
32350+ return xa->ret;
32351+}
32352+
32353+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
32354+ struct psb_xhw_buf *buf, uint32_t fire_flags)
32355+{
32356+ struct drm_psb_xhw_arg *xa = &buf->arg;
32357+
32358+ buf->copy_back = 0;
32359+ xa->op = PSB_XHW_FIRE_RASTER;
32360+ xa->issue_irq = 0;
32361+ xa->arg.sb.fire_flags = 0;
32362+
32363+ return psb_xhw_add(dev_priv, buf);
32364+}
32365+
32366+int psb_xhw_vistest(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
32367+{
32368+ struct drm_psb_xhw_arg *xa = &buf->arg;
32369+
32370+ buf->copy_back = 1;
32371+ xa->op = PSB_XHW_VISTEST;
32372+ /*
32373+ * Could perhaps decrease latency somewhat by
32374+ * issuing an irq in this case.
32375+ */
32376+ xa->issue_irq = 0;
32377+ xa->irq_op = PSB_UIRQ_VISTEST;
32378+ return psb_xhw_add(dev_priv, buf);
32379+}
32380+
32381+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
32382+ struct psb_xhw_buf *buf,
32383+ uint32_t fire_flags,
32384+ uint32_t hw_context,
32385+ uint32_t * cookie,
32386+ uint32_t * oom_cmds,
32387+ uint32_t num_oom_cmds,
32388+ uint32_t offset, uint32_t engine, uint32_t flags)
32389+{
32390+ struct drm_psb_xhw_arg *xa = &buf->arg;
32391+
32392+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
32393+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
32394+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
32395+ if (unlikely(buf->copy_back))
32396+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
32397+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
32398+ else
32399+ xa->irq_op = 0;
32400+ xa->arg.sb.fire_flags = fire_flags;
32401+ xa->arg.sb.hw_context = hw_context;
32402+ xa->arg.sb.offset = offset;
32403+ xa->arg.sb.engine = engine;
32404+ xa->arg.sb.flags = flags;
32405+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
32406+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
32407+ if (num_oom_cmds)
32408+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
32409+ sizeof(uint32_t) * num_oom_cmds);
32410+ return psb_xhw_add(dev_priv, buf);
32411+}
32412+
32413+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
32414+{
32415+ struct drm_psb_xhw_arg *xa = &buf->arg;
32416+ int ret;
32417+
32418+ buf->copy_back = 1;
32419+ xa->op = PSB_XHW_RESET_DPM;
32420+ xa->issue_irq = 0;
32421+ xa->irq_op = 0;
32422+
32423+ ret = psb_xhw_add(dev_priv, buf);
32424+ if (ret)
32425+ return ret;
32426+
32427+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32428+ atomic_read(&buf->done), 3 * DRM_HZ);
32429+
32430+ if (!atomic_read(&buf->done)) {
32431+ psb_xhw_clean_buf(dev_priv, buf);
32432+ return -EBUSY;
32433+ }
32434+
32435+ return xa->ret;
32436+}
32437+
32438+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
32439+ struct psb_xhw_buf *buf, uint32_t * value)
32440+{
32441+ struct drm_psb_xhw_arg *xa = &buf->arg;
32442+ int ret;
32443+
32444+ *value = 0;
32445+
32446+ buf->copy_back = 1;
32447+ xa->op = PSB_XHW_CHECK_LOCKUP;
32448+ xa->issue_irq = 0;
32449+ xa->irq_op = 0;
32450+
32451+ ret = psb_xhw_add(dev_priv, buf);
32452+ if (ret)
32453+ return ret;
32454+
32455+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32456+ atomic_read(&buf->done), DRM_HZ * 3);
32457+
32458+ if (!atomic_read(&buf->done)) {
32459+ psb_xhw_clean_buf(dev_priv, buf);
32460+ return -EBUSY;
32461+ }
32462+
32463+ if (!xa->ret)
32464+ *value = xa->arg.cl.value;
32465+
32466+ return xa->ret;
32467+}
32468+
32469+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
32470+ struct psb_xhw_buf *buf)
32471+{
32472+ struct drm_psb_xhw_arg *xa = &buf->arg;
32473+ unsigned long irq_flags;
32474+
32475+ buf->copy_back = 0;
32476+ xa->op = PSB_XHW_TERMINATE;
32477+ xa->issue_irq = 0;
32478+
32479+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32480+ dev_priv->xhw_submit_ok = 0;
32481+ atomic_set(&buf->done, 0);
32482+ if (!list_empty(&buf->head)) {
32483+ DRM_ERROR("Recursive list adding.\n");
32484+ goto out;
32485+ }
32486+ list_add_tail(&buf->head, &dev_priv->xhw_in);
32487+ out:
32488+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32489+ wake_up_interruptible(&dev_priv->xhw_queue);
32490+
32491+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32492+ atomic_read(&buf->done), DRM_HZ / 10);
32493+
32494+ if (!atomic_read(&buf->done)) {
32495+ DRM_ERROR("Xpsb terminate timeout.\n");
32496+ psb_xhw_clean_buf(dev_priv, buf);
32497+ return -EBUSY;
32498+ }
32499+
32500+ return 0;
32501+}
32502+
32503+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
32504+ struct psb_xhw_buf *buf,
32505+ uint32_t pages, uint32_t * hw_cookie, uint32_t * size)
32506+{
32507+ struct drm_psb_xhw_arg *xa = &buf->arg;
32508+ int ret;
32509+
32510+ buf->copy_back = 1;
32511+ xa->op = PSB_XHW_TA_MEM_INFO;
32512+ xa->issue_irq = 0;
32513+ xa->irq_op = 0;
32514+ xa->arg.bi.pages = pages;
32515+
32516+ ret = psb_xhw_add(dev_priv, buf);
32517+ if (ret)
32518+ return ret;
32519+
32520+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32521+ atomic_read(&buf->done), DRM_HZ);
32522+
32523+ if (!atomic_read(&buf->done)) {
32524+ psb_xhw_clean_buf(dev_priv, buf);
32525+ return -EBUSY;
32526+ }
32527+
32528+ if (!xa->ret)
32529+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
32530+
32531+ *size = xa->arg.bi.size;
32532+ return xa->ret;
32533+}
32534+
32535+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
32536+ struct psb_xhw_buf *buf,
32537+ uint32_t flags,
32538+ uint32_t param_offset,
32539+ uint32_t pt_offset, uint32_t * hw_cookie)
32540+{
32541+ struct drm_psb_xhw_arg *xa = &buf->arg;
32542+ int ret;
32543+
32544+ buf->copy_back = 1;
32545+ xa->op = PSB_XHW_TA_MEM_LOAD;
32546+ xa->issue_irq = 0;
32547+ xa->irq_op = 0;
32548+ xa->arg.bl.flags = flags;
32549+ xa->arg.bl.param_offset = param_offset;
32550+ xa->arg.bl.pt_offset = pt_offset;
32551+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
32552+
32553+ ret = psb_xhw_add(dev_priv, buf);
32554+ if (ret)
32555+ return ret;
32556+
32557+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
32558+ atomic_read(&buf->done), 3 * DRM_HZ);
32559+
32560+ if (!atomic_read(&buf->done)) {
32561+ psb_xhw_clean_buf(dev_priv, buf);
32562+ return -EBUSY;
32563+ }
32564+
32565+ if (!xa->ret)
32566+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
32567+
32568+ return xa->ret;
32569+}
32570+
32571+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
32572+ struct psb_xhw_buf *buf, uint32_t * cookie)
32573+{
32574+ struct drm_psb_xhw_arg *xa = &buf->arg;
32575+
32576+ /*
32577+ * This calls the extensive closed source
32578+ * OOM handler, which resolves the condition and
32579+ * sends a reply telling the scheduler what to do
32580+ * with the task.
32581+ */
32582+
32583+ buf->copy_back = 1;
32584+ xa->op = PSB_XHW_OOM;
32585+ xa->issue_irq = 1;
32586+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
32587+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
32588+
32589+ return psb_xhw_add(dev_priv, buf);
32590+}
32591+
32592+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
32593+ struct psb_xhw_buf *buf,
32594+ uint32_t * cookie,
32595+ uint32_t * bca, uint32_t * rca, uint32_t * flags)
32596+{
32597+ struct drm_psb_xhw_arg *xa = &buf->arg;
32598+
32599+ /*
32600+ * Get info about how to schedule an OOM task.
32601+ */
32602+
32603+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
32604+ *bca = xa->arg.oom.bca;
32605+ *rca = xa->arg.oom.rca;
32606+ *flags = xa->arg.oom.flags;
32607+}
32608+
32609+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
32610+ struct psb_xhw_buf *buf, uint32_t * cookie)
32611+{
32612+ struct drm_psb_xhw_arg *xa = &buf->arg;
32613+
32614+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
32615+}
32616+
32617+int psb_xhw_resume(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
32618+{
32619+ struct drm_psb_xhw_arg *xa = &buf->arg;
32620+
32621+ buf->copy_back = 0;
32622+ xa->op = PSB_XHW_RESUME;
32623+ xa->issue_irq = 0;
32624+ xa->irq_op = 0;
32625+ return psb_xhw_add(dev_priv, buf);
32626+}
32627+
32628+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
32629+{
32630+}
32631+
32632+int psb_xhw_init(struct drm_device *dev)
32633+{
32634+ struct drm_psb_private *dev_priv =
32635+ (struct drm_psb_private *)dev->dev_private;
32636+ unsigned long irq_flags;
32637+
32638+ INIT_LIST_HEAD(&dev_priv->xhw_in);
32639+ dev_priv->xhw_lock = SPIN_LOCK_UNLOCKED;
32640+ atomic_set(&dev_priv->xhw_client, 0);
32641+ init_waitqueue_head(&dev_priv->xhw_queue);
32642+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
32643+ mutex_init(&dev_priv->xhw_mutex);
32644+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32645+ dev_priv->xhw_on = 0;
32646+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32647+
32648+ return 0;
32649+}
32650+
32651+static int psb_xhw_init_init(struct drm_device *dev,
32652+ struct drm_file *file_priv,
32653+ struct drm_psb_xhw_init_arg *arg)
32654+{
32655+ struct drm_psb_private *dev_priv =
32656+ (struct drm_psb_private *)dev->dev_private;
32657+ int ret;
32658+ int is_iomem;
32659+
32660+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
32661+ unsigned long irq_flags;
32662+
32663+ mutex_lock(&dev->struct_mutex);
32664+ dev_priv->xhw_bo =
32665+ drm_lookup_buffer_object(file_priv, arg->buffer_handle, 1);
32666+ mutex_unlock(&dev->struct_mutex);
32667+ if (!dev_priv->xhw_bo) {
32668+ ret = -EINVAL;
32669+ goto out_err;
32670+ }
32671+ ret = drm_bo_kmap(dev_priv->xhw_bo, 0,
32672+ dev_priv->xhw_bo->num_pages,
32673+ &dev_priv->xhw_kmap);
32674+ if (ret) {
32675+ DRM_ERROR("Failed mapping X server "
32676+ "communications buffer.\n");
32677+ goto out_err0;
32678+ }
32679+ dev_priv->xhw = drm_bmo_virtual(&dev_priv->xhw_kmap, &is_iomem);
32680+ if (is_iomem) {
32681+ DRM_ERROR("X server communications buffer"
32682+ "is in device memory.\n");
32683+ ret = -EINVAL;
32684+ goto out_err1;
32685+ }
32686+ dev_priv->xhw_file = file_priv;
32687+
32688+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32689+ dev_priv->xhw_on = 1;
32690+ dev_priv->xhw_submit_ok = 1;
32691+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32692+ return 0;
32693+ } else {
32694+ DRM_ERROR("Xhw is already initialized.\n");
32695+ return -EBUSY;
32696+ }
32697+ out_err1:
32698+ dev_priv->xhw = NULL;
32699+ drm_bo_kunmap(&dev_priv->xhw_kmap);
32700+ out_err0:
32701+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
32702+ out_err:
32703+ atomic_dec(&dev_priv->xhw_client);
32704+ return ret;
32705+}
32706+
32707+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
32708+{
32709+ struct psb_xhw_buf *cur_buf, *next;
32710+ unsigned long irq_flags;
32711+
32712+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32713+ dev_priv->xhw_submit_ok = 0;
32714+
32715+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
32716+ list_del_init(&cur_buf->head);
32717+ if (cur_buf->copy_back) {
32718+ cur_buf->arg.ret = -EINVAL;
32719+ }
32720+ atomic_set(&cur_buf->done, 1);
32721+ }
32722+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32723+ wake_up(&dev_priv->xhw_caller_queue);
32724+}
32725+
32726+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
32727+ struct drm_file *file_priv, int closing)
32728+{
32729+
32730+ if (dev_priv->xhw_file == file_priv &&
32731+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
32732+
32733+ if (closing)
32734+ psb_xhw_queue_empty(dev_priv);
32735+ else {
32736+ struct psb_xhw_buf buf;
32737+ INIT_LIST_HEAD(&buf.head);
32738+
32739+ psb_xhw_terminate(dev_priv, &buf);
32740+ psb_xhw_queue_empty(dev_priv);
32741+ }
32742+
32743+ dev_priv->xhw = NULL;
32744+ drm_bo_kunmap(&dev_priv->xhw_kmap);
32745+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
32746+ dev_priv->xhw_file = NULL;
32747+ }
32748+}
32749+
32750+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
32751+ struct drm_file *file_priv)
32752+{
32753+ struct drm_psb_xhw_init_arg *arg = (struct drm_psb_xhw_init_arg *)data;
32754+ struct drm_psb_private *dev_priv =
32755+ (struct drm_psb_private *)dev->dev_private;
32756+
32757+ switch (arg->operation) {
32758+ case PSB_XHW_INIT:
32759+ return psb_xhw_init_init(dev, file_priv, arg);
32760+ case PSB_XHW_TAKEDOWN:
32761+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
32762+ }
32763+ return 0;
32764+}
32765+
32766+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
32767+{
32768+ int empty;
32769+ unsigned long irq_flags;
32770+
32771+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32772+ empty = list_empty(&dev_priv->xhw_in);
32773+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32774+ return empty;
32775+}
32776+
32777+int psb_xhw_handler(struct drm_psb_private *dev_priv)
32778+{
32779+ unsigned long irq_flags;
32780+ struct drm_psb_xhw_arg *xa;
32781+ struct psb_xhw_buf *buf;
32782+
32783+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32784+
32785+ if (!dev_priv->xhw_on) {
32786+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32787+ return -EINVAL;
32788+ }
32789+
32790+ buf = dev_priv->xhw_cur_buf;
32791+ if (buf && buf->copy_back) {
32792+ xa = &buf->arg;
32793+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
32794+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
32795+ atomic_set(&buf->done, 1);
32796+ wake_up(&dev_priv->xhw_caller_queue);
32797+ } else
32798+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
32799+
32800+ dev_priv->xhw_cur_buf = 0;
32801+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32802+ return 0;
32803+}
32804+
32805+int psb_xhw_ioctl(struct drm_device *dev, void *data,
32806+ struct drm_file *file_priv)
32807+{
32808+ struct drm_psb_private *dev_priv =
32809+ (struct drm_psb_private *)dev->dev_private;
32810+ unsigned long irq_flags;
32811+ struct drm_psb_xhw_arg *xa;
32812+ int ret;
32813+ struct list_head *list;
32814+ struct psb_xhw_buf *buf;
32815+
32816+ if (!dev_priv)
32817+ return -EINVAL;
32818+
32819+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
32820+ return -EAGAIN;
32821+
32822+ if (psb_forced_user_interrupt(dev_priv)) {
32823+ mutex_unlock(&dev_priv->xhw_mutex);
32824+ return -EINVAL;
32825+ }
32826+
32827+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32828+ while (list_empty(&dev_priv->xhw_in)) {
32829+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32830+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
32831+ !psb_xhw_in_empty
32832+ (dev_priv), DRM_HZ);
32833+ if (ret == -ERESTARTSYS || ret == 0) {
32834+ mutex_unlock(&dev_priv->xhw_mutex);
32835+ return -EAGAIN;
32836+ }
32837+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
32838+ }
32839+
32840+ list = dev_priv->xhw_in.next;
32841+ list_del_init(list);
32842+
32843+ buf = list_entry(list, struct psb_xhw_buf, head);
32844+ xa = &buf->arg;
32845+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
32846+
32847+ if (unlikely(buf->copy_back))
32848+ dev_priv->xhw_cur_buf = buf;
32849+ else {
32850+ atomic_set(&buf->done, 1);
32851+ dev_priv->xhw_cur_buf = NULL;
32852+ }
32853+
32854+ if (xa->op == PSB_XHW_TERMINATE) {
32855+ dev_priv->xhw_on = 0;
32856+ wake_up(&dev_priv->xhw_caller_queue);
32857+ }
32858+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
32859+
32860+ mutex_unlock(&dev_priv->xhw_mutex);
32861+
32862+ return 0;
32863+}
32864Index: linux-2.6.27/drivers/gpu/drm/Kconfig
32865===================================================================
32866--- linux-2.6.27.orig/drivers/gpu/drm/Kconfig 2009-01-14 11:54:35.000000000 +0000
32867+++ linux-2.6.27/drivers/gpu/drm/Kconfig 2009-01-14 11:58:01.000000000 +0000
32868@@ -105,3 +105,9 @@
32869 help
32870 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
32871 chipset. If M is selected the module will be called savage.
32872+
32873+config DRM_PSB
32874+ tristate "Intel Poulsbo"
32875+ depends on DRM && PCI && I2C_ALGOBIT
32876+ help
32877+ Choose this option if you have an Intel Poulsbo chipset.
32878Index: linux-2.6.27/include/drm/drm_crtc.h
32879===================================================================
32880--- /dev/null 1970-01-01 00:00:00.000000000 +0000
32881+++ linux-2.6.27/include/drm/drm_crtc.h 2009-01-14 12:01:13.000000000 +0000
32882@@ -0,0 +1,592 @@
32883+/*
32884+ * Copyright © 2006 Keith Packard
32885+ * Copyright © 2007 Intel Corporation
32886+ * Jesse Barnes <jesse.barnes@intel.com>
32887+ */
32888+#ifndef __DRM_CRTC_H__
32889+#define __DRM_CRTC_H__
32890+
32891+#include <linux/i2c.h>
32892+#include <linux/spinlock.h>
32893+#include <linux/types.h>
32894+#include <linux/idr.h>
32895+
32896+#include <linux/fb.h>
32897+
32898+struct drm_device;
32899+
32900+/*
32901+ * Note on terminology: here, for brevity and convenience, we refer to output
32902+ * control chips as 'CRTCs'. They can control any type of output, VGA, LVDS,
32903+ * DVI, etc. And 'screen' refers to the whole of the visible display, which
32904+ * may span multiple monitors (and therefore multiple CRTC and output
32905+ * structures).
32906+ */
32907+
32908+enum drm_mode_status {
32909+ MODE_OK = 0, /* Mode OK */
32910+ MODE_HSYNC, /* hsync out of range */
32911+ MODE_VSYNC, /* vsync out of range */
32912+ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
32913+ MODE_V_ILLEGAL, /* mode has illegal vertical timings */
32914+ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
32915+ MODE_NOMODE, /* no mode with a matching name */
32916+ MODE_NO_INTERLACE, /* interlaced mode not supported */
32917+ MODE_NO_DBLESCAN, /* doublescan mode not supported */
32918+ MODE_NO_VSCAN, /* multiscan mode not supported */
32919+ MODE_MEM, /* insufficient video memory */
32920+ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
32921+ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
32922+ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
32923+ MODE_NOCLOCK, /* no fixed clock available */
32924+ MODE_CLOCK_HIGH, /* clock required is too high */
32925+ MODE_CLOCK_LOW, /* clock required is too low */
32926+ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
32927+ MODE_BAD_HVALUE, /* horizontal timing was out of range */
32928+ MODE_BAD_VVALUE, /* vertical timing was out of range */
32929+ MODE_BAD_VSCAN, /* VScan value out of range */
32930+ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
32931+ MODE_HSYNC_WIDE, /* horizontal sync too wide */
32932+ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
32933+ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
32934+ MODE_VSYNC_NARROW, /* vertical sync too narrow */
32935+ MODE_VSYNC_WIDE, /* vertical sync too wide */
32936+ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
32937+ MODE_VBLANK_WIDE, /* vertical blanking too wide */
32938+ MODE_PANEL, /* exceeds panel dimensions */
32939+ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
32940+ MODE_ONE_WIDTH, /* only one width is supported */
32941+ MODE_ONE_HEIGHT, /* only one height is supported */
32942+ MODE_ONE_SIZE, /* only one resolution is supported */
32943+ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
32944+ MODE_UNVERIFIED = -3, /* mode needs to be reverified */
32945+ MODE_BAD = -2, /* unspecified reason */
32946+ MODE_ERROR = -1 /* error condition */
32947+};
32948+
32949+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
32950+ DRM_MODE_TYPE_CRTC_C)
32951+
32952+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
32953+ .name = nm, .status = 0, .type = (t), .clock = (c), \
32954+ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
32955+ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
32956+ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
32957+ .vscan = (vs), .flags = (f), .vrefresh = 0
32958+
32959+struct drm_display_mode {
32960+ /* Header */
32961+ struct list_head head;
32962+ char name[DRM_DISPLAY_MODE_LEN];
32963+ int mode_id;
32964+ int output_count;
32965+ enum drm_mode_status status;
32966+ int type;
32967+
32968+ /* Proposed mode values */
32969+ int clock;
32970+ int hdisplay;
32971+ int hsync_start;
32972+ int hsync_end;
32973+ int htotal;
32974+ int hskew;
32975+ int vdisplay;
32976+ int vsync_start;
32977+ int vsync_end;
32978+ int vtotal;
32979+ int vscan;
32980+ unsigned int flags;
32981+
32982+ /* Actual mode we give to hw */
32983+ int clock_index;
32984+ int synth_clock;
32985+ int crtc_hdisplay;
32986+ int crtc_hblank_start;
32987+ int crtc_hblank_end;
32988+ int crtc_hsync_start;
32989+ int crtc_hsync_end;
32990+ int crtc_htotal;
32991+ int crtc_hskew;
32992+ int crtc_vdisplay;
32993+ int crtc_vblank_start;
32994+ int crtc_vblank_end;
32995+ int crtc_vsync_start;
32996+ int crtc_vsync_end;
32997+ int crtc_vtotal;
32998+ int crtc_hadjusted;
32999+ int crtc_vadjusted;
33000+
33001+ /* Driver private mode info */
33002+ int private_size;
33003+ int *private;
33004+ int private_flags;
33005+
33006+ int vrefresh;
33007+ float hsync;
33008+};
33009+
33010+/* Video mode flags */
33011+#define V_PHSYNC (1<<0)
33012+#define V_NHSYNC (1<<1)
33013+#define V_PVSYNC (1<<2)
33014+#define V_NVSYNC (1<<3)
33015+#define V_INTERLACE (1<<4)
33016+#define V_DBLSCAN (1<<5)
33017+#define V_CSYNC (1<<6)
33018+#define V_PCSYNC (1<<7)
33019+#define V_NCSYNC (1<<8)
33020+#define V_HSKEW (1<<9) /* hskew provided */
33021+#define V_BCAST (1<<10)
33022+#define V_PIXMUX (1<<11)
33023+#define V_DBLCLK (1<<12)
33024+#define V_CLKDIV2 (1<<13)
33025+
33026+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
33027+#define DPMSModeOn 0
33028+#define DPMSModeStandby 1
33029+#define DPMSModeSuspend 2
33030+#define DPMSModeOff 3
33031+
33032+enum drm_output_status {
33033+ output_status_connected = 1,
33034+ output_status_disconnected = 2,
33035+ output_status_unknown = 3,
33036+};
33037+
33038+enum subpixel_order {
33039+ SubPixelUnknown = 0,
33040+ SubPixelHorizontalRGB,
33041+ SubPixelHorizontalBGR,
33042+ SubPixelVerticalRGB,
33043+ SubPixelVerticalBGR,
33044+ SubPixelNone,
33045+};
33046+
33047+/*
33048+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
33049+ */
33050+struct drm_display_info {
33051+ char name[DRM_DISPLAY_INFO_LEN];
33052+ /* Input info */
33053+ bool serration_vsync;
33054+ bool sync_on_green;
33055+ bool composite_sync;
33056+ bool separate_syncs;
33057+ bool blank_to_black;
33058+ unsigned char video_level;
33059+ bool digital;
33060+ /* Physical size */
33061+ unsigned int width_mm;
33062+ unsigned int height_mm;
33063+
33064+ /* Display parameters */
33065+ unsigned char gamma; /* FIXME: storage format */
33066+ bool gtf_supported;
33067+ bool standard_color;
33068+ enum {
33069+ monochrome,
33070+ rgb,
33071+ other,
33072+ unknown,
33073+ } display_type;
33074+ bool active_off_supported;
33075+ bool suspend_supported;
33076+ bool standby_supported;
33077+
33078+ /* Color info FIXME: storage format */
33079+ unsigned short redx, redy;
33080+ unsigned short greenx, greeny;
33081+ unsigned short bluex, bluey;
33082+ unsigned short whitex, whitey;
33083+
33084+ /* Clock limits FIXME: storage format */
33085+ unsigned int min_vfreq, max_vfreq;
33086+ unsigned int min_hfreq, max_hfreq;
33087+ unsigned int pixel_clock;
33088+
33089+ /* White point indices FIXME: storage format */
33090+ unsigned int wpx1, wpy1;
33091+ unsigned int wpgamma1;
33092+ unsigned int wpx2, wpy2;
33093+ unsigned int wpgamma2;
33094+
33095+ /* Preferred mode (if any) */
33096+ struct drm_display_mode *preferred_mode;
33097+ char *raw_edid; /* if any */
33098+};
33099+
33100+struct drm_framebuffer {
33101+ struct drm_device *dev;
33102+ struct list_head head;
33103+ int id; /* idr assigned */
33104+ unsigned int pitch;
33105+ unsigned long offset;
33106+ unsigned int width;
33107+ unsigned int height;
33108+ /* depth can be 15 or 16 */
33109+ unsigned int depth;
33110+ int bits_per_pixel;
33111+ int flags;
33112+ struct drm_buffer_object *bo;
33113+ void *fbdev;
33114+ u32 pseudo_palette[16];
33115+ struct drm_bo_kmap_obj kmap;
33116+ struct list_head filp_head;
33117+};
33118+
33119+struct drm_property_enum {
33120+ struct list_head head;
33121+ uint32_t value;
33122+ unsigned char name[DRM_PROP_NAME_LEN];
33123+};
33124+
33125+struct drm_property {
33126+ struct list_head head;
33127+ int id; /* idr assigned */
33128+ uint32_t flags;
33129+ char name[DRM_PROP_NAME_LEN];
33130+ uint32_t num_values;
33131+ uint32_t *values;
33132+
33133+ struct list_head enum_list;
33134+};
33135+
33136+struct drm_crtc;
33137+struct drm_output;
33138+
33139+/**
33140+ * drm_crtc_funcs - control CRTCs for a given device
33141+ * @dpms: control display power levels
33142+ * @save: save CRTC state
33143+ * @restore: restore CRTC state
33144+ * @lock: lock the CRTC
33145+ * @unlock: unlock the CRTC
33146+ * @shadow_allocate: allocate shadow pixmap
33147+ * @shadow_create: create shadow pixmap for rotation support
33148+ * @shadow_destroy: free shadow pixmap
33149+ * @mode_fixup: fixup proposed mode
33150+ * @mode_set: set the desired mode on the CRTC
33151+ * @gamma_set: specify color ramp for CRTC
33152+ * @cleanup: cleanup driver private state prior to close
33153+ *
33154+ * The drm_crtc_funcs structure is the central CRTC management structure
33155+ * in the DRM. Each CRTC controls one or more outputs (note that the name
33156+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
33157+ * outputs, not just CRTs).
33158+ *
33159+ * Each driver is responsible for filling out this structure at startup time,
33160+ * in addition to providing other modesetting features, like i2c and DDC
33161+ * bus accessors.
33162+ */
33163+struct drm_crtc_funcs {
33164+ /*
33165+ * Control power levels on the CRTC. If the mode passed in is
33166+ * unsupported, the provider must use the next lowest power level.
33167+ */
33168+ void (*dpms)(struct drm_crtc *crtc, int mode);
33169+
33170+ /* JJJ: Are these needed? */
33171+ /* Save CRTC state */
33172+ void (*save)(struct drm_crtc *crtc); /* suspend? */
33173+ /* Restore CRTC state */
33174+ void (*restore)(struct drm_crtc *crtc); /* resume? */
33175+ bool (*lock)(struct drm_crtc *crtc);
33176+ void (*unlock)(struct drm_crtc *crtc);
33177+
33178+ void (*prepare)(struct drm_crtc *crtc);
33179+ void (*commit)(struct drm_crtc *crtc);
33180+
33181+ /* Provider can fixup or change mode timings before modeset occurs */
33182+ bool (*mode_fixup)(struct drm_crtc *crtc,
33183+ struct drm_display_mode *mode,
33184+ struct drm_display_mode *adjusted_mode);
33185+ /* Actually set the mode */
33186+ void (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
33187+ struct drm_display_mode *adjusted_mode, int x, int y);
33188+ /* Set gamma on the CRTC */
33189+ void (*gamma_set)(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
33190+ int regno);
33191+ /* Driver cleanup routine */
33192+ void (*cleanup)(struct drm_crtc *crtc);
33193+};
33194+
33195+/**
33196+ * drm_crtc - central CRTC control structure
33197+ * @enabled: is this CRTC enabled?
33198+ * @x: x position on screen
33199+ * @y: y position on screen
33200+ * @desired_mode: new desired mode
33201+ * @desired_x: desired x for desired_mode
33202+ * @desired_y: desired y for desired_mode
33203+ * @funcs: CRTC control functions
33204+ * @driver_private: arbitrary driver data
33205+ *
33206+ * Each CRTC may have one or more outputs associated with it. This structure
33207+ * allows the CRTC to be controlled.
33208+ */
33209+struct drm_crtc {
33210+ struct drm_device *dev;
33211+ struct list_head head;
33212+
33213+ int id; /* idr assigned */
33214+
33215+ /* framebuffer the output is currently bound to */
33216+ struct drm_framebuffer *fb;
33217+
33218+ bool enabled;
33219+
33220+ /* JJJ: are these needed? */
33221+ bool cursor_in_range;
33222+ bool cursor_shown;
33223+
33224+ struct drm_display_mode mode;
33225+
33226+ int x, y;
33227+ struct drm_display_mode *desired_mode;
33228+ int desired_x, desired_y;
33229+ const struct drm_crtc_funcs *funcs;
33230+ void *driver_private;
33231+
33232+ /* RRCrtcPtr randr_crtc? */
33233+};
33234+
33235+extern struct drm_crtc *drm_crtc_create(struct drm_device *dev,
33236+ const struct drm_crtc_funcs *funcs);
33237+
33238+/**
33239+ * drm_output_funcs - control outputs on a given device
33240+ * @init: setup this output
33241+ * @dpms: set power state (see drm_crtc_funcs above)
33242+ * @save: save output state
33243+ * @restore: restore output state
33244+ * @mode_valid: is this mode valid on the given output?
33245+ * @mode_fixup: try to fixup proposed mode for this output
33246+ * @mode_set: set this mode
33247+ * @detect: is this output active?
33248+ * @get_modes: get mode list for this output
33249+ * @set_property: property for this output may need update
33250+ * @cleanup: output is going away, cleanup
33251+ *
33252+ * Each CRTC may have one or more outputs attached to it. The functions
33253+ * below allow the core DRM code to control outputs, enumerate available modes,
33254+ * etc.
33255+ */
33256+struct drm_output_funcs {
33257+ void (*init)(struct drm_output *output);
33258+ void (*dpms)(struct drm_output *output, int mode);
33259+ void (*save)(struct drm_output *output);
33260+ void (*restore)(struct drm_output *output);
33261+ int (*mode_valid)(struct drm_output *output,
33262+ struct drm_display_mode *mode);
33263+ bool (*mode_fixup)(struct drm_output *output,
33264+ struct drm_display_mode *mode,
33265+ struct drm_display_mode *adjusted_mode);
33266+ void (*prepare)(struct drm_output *output);
33267+ void (*commit)(struct drm_output *output);
33268+ void (*mode_set)(struct drm_output *output,
33269+ struct drm_display_mode *mode,
33270+ struct drm_display_mode *adjusted_mode);
33271+ enum drm_output_status (*detect)(struct drm_output *output);
33272+ int (*get_modes)(struct drm_output *output);
33273+ /* JJJ: type checking for properties via property value type */
33274+ bool (*set_property)(struct drm_output *output, int prop, void *val);
33275+ void (*cleanup)(struct drm_output *output);
33276+};
33277+
33278+#define DRM_OUTPUT_MAX_UMODES 16
33279+#define DRM_OUTPUT_MAX_PROPERTY 16
33280+#define DRM_OUTPUT_LEN 32
33281+/**
33282+ * drm_output - central DRM output control structure
33283+ * @crtc: CRTC this output is currently connected to, NULL if none
33284+ * @possible_crtcs: bitmap of CRTCS this output could be attached to
33285+ * @possible_clones: bitmap of possible outputs this output could clone
33286+ * @interlace_allowed: can this output handle interlaced modes?
33287+ * @doublescan_allowed: can this output handle doublescan?
33288+ * @available_modes: modes available on this output (from get_modes() + user)
33289+ * @initial_x: initial x position for this output
33290+ * @initial_y: initial y position for this output
33291+ * @status: output connected?
33292+ * @subpixel_order: for this output
33293+ * @mm_width: displayable width of output in mm
33294+ * @mm_height: displayable height of output in mm
33295+ * @name: name of output (should be one of a few standard names)
33296+ * @funcs: output control functions
33297+ * @driver_private: private driver data
33298+ *
33299+ * Each output may be connected to one or more CRTCs, or may be clonable by
33300+ * another output if they can share a CRTC. Each output also has a specific
33301+ * position in the broader display (referred to as a 'screen' though it could
33302+ * span multiple monitors).
33303+ */
33304+struct drm_output {
33305+ struct drm_device *dev;
33306+ struct list_head head;
33307+ struct drm_crtc *crtc;
33308+ int id; /* idr assigned */
33309+ unsigned long possible_crtcs;
33310+ unsigned long possible_clones;
33311+ bool interlace_allowed;
33312+ bool doublescan_allowed;
33313+ struct list_head modes; /* list of modes on this output */
33314+
33315+ /*
33316+ OptionInfoPtr options;
33317+ XF86ConfMonitorPtr conf_monitor;
33318+ */
33319+ int initial_x, initial_y;
33320+ enum drm_output_status status;
33321+
33322+ /* these are modes added by probing with DDC or the BIOS */
33323+ struct list_head probed_modes;
33324+
33325+ /* xf86MonPtr MonInfo; */
33326+ enum subpixel_order subpixel_order;
33327+ int mm_width, mm_height;
33328+ struct drm_display_info *monitor_info; /* if any */
33329+ char name[DRM_OUTPUT_LEN];
33330+ const struct drm_output_funcs *funcs;
33331+ void *driver_private;
33332+
33333+ u32 user_mode_ids[DRM_OUTPUT_MAX_UMODES];
33334+
33335+ u32 property_ids[DRM_OUTPUT_MAX_PROPERTY];
33336+ u32 property_values[DRM_OUTPUT_MAX_PROPERTY];
33337+};
33338+
33339+/**
33340+ * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
33341+ * @resize: adjust CRTCs as necessary for the proposed layout
33342+ *
33343+ * Currently only a resize hook is available. DRM will call back into the
33344+ * driver with a new screen width and height. If the driver can't support
33345+ * the proposed size, it can return false. Otherwise it should adjust
33346+ * the CRTC<->output mappings as needed and update its view of the screen.
33347+ */
33348+struct drm_mode_config_funcs {
33349+ bool (*resize)(struct drm_device *dev, int width, int height);
33350+};
33351+
33352+/**
33353+ * drm_mode_config - Mode configuration control structure
33354+ *
33355+ */
33356+struct drm_mode_config {
33357+ struct mutex mutex; /* protects configuration and IDR */
33358+ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, output, modes - just makes life easier */
33359+ /* this is limited to one for now */
33360+ int num_fb;
33361+ struct list_head fb_list;
33362+ int num_output;
33363+ struct list_head output_list;
33364+
33365+ /* int compat_output? */
33366+ int num_crtc;
33367+ struct list_head crtc_list;
33368+
33369+ struct list_head usermode_list;
33370+
33371+ struct list_head property_list;
33372+
33373+ int min_width, min_height;
33374+ int max_width, max_height;
33375+ /* DamagePtr rotationDamage? */
33376+ /* DGA stuff? */
33377+ struct drm_mode_config_funcs *funcs;
33378+ unsigned long fb_base;
33379+};
33380+
33381+struct drm_output *drm_output_create(struct drm_device *dev,
33382+ const struct drm_output_funcs *funcs,
33383+ const char *name);
33384+extern void drm_output_destroy(struct drm_output *output);
33385+extern bool drm_output_rename(struct drm_output *output, const char *name);
33386+extern void drm_fb_release(struct file *filp);
33387+
33388+extern struct edid *drm_get_edid(struct drm_output *output,
33389+ struct i2c_adapter *adapter);
33390+extern int drm_add_edid_modes(struct drm_output *output, struct edid *edid);
33391+extern void drm_mode_probed_add(struct drm_output *output, struct drm_display_mode *mode);
33392+extern void drm_mode_remove(struct drm_output *output, struct drm_display_mode *mode);
33393+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
33394+ struct drm_display_mode *mode);
33395+extern void drm_mode_debug_printmodeline(struct drm_device *dev,
33396+ struct drm_display_mode *mode);
33397+extern void drm_mode_config_init(struct drm_device *dev);
33398+extern void drm_mode_config_cleanup(struct drm_device *dev);
33399+extern void drm_mode_set_name(struct drm_display_mode *mode);
33400+extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
33401+extern void drm_disable_unused_functions(struct drm_device *dev);
33402+
33403+extern void drm_mode_addmode(struct drm_device *dev, struct drm_display_mode *user_mode);
33404+extern int drm_mode_rmmode(struct drm_device *dev, struct drm_display_mode *mode);
33405+
33406+/* for us by fb module */
33407+extern int drm_mode_attachmode_crtc(struct drm_device *dev,
33408+ struct drm_crtc *crtc,
33409+ struct drm_display_mode *mode);
33410+extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
33411+
33412+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
33413+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
33414+extern void drm_mode_list_concat(struct list_head *head,
33415+ struct list_head *new);
33416+extern void drm_mode_validate_size(struct drm_device *dev,
33417+ struct list_head *mode_list,
33418+ int maxX, int maxY, int maxPitch);
33419+extern void drm_mode_prune_invalid(struct drm_device *dev,
33420+ struct list_head *mode_list, bool verbose);
33421+extern void drm_mode_sort(struct list_head *mode_list);
33422+extern int drm_mode_vrefresh(struct drm_display_mode *mode);
33423+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
33424+ int adjust_flags);
33425+extern void drm_mode_output_list_update(struct drm_output *output);
33426+
33427+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
33428+extern bool drm_initial_config(struct drm_device *dev, bool cangrow);
33429+extern void drm_framebuffer_set_object(struct drm_device *dev,
33430+ unsigned long handle);
33431+extern struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev);
33432+extern void drm_framebuffer_destroy(struct drm_framebuffer *fb);
33433+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
33434+extern int drmfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
33435+extern bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
33436+ int x, int y);
33437+
33438+extern int drm_output_attach_property(struct drm_output *output,
33439+ struct drm_property *property, int init_val);
33440+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
33441+ const char *name, int num_values);
33442+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
33443+extern int drm_property_add_enum(struct drm_property *property, int index,
33444+ uint32_t value, const char *name);
33445+
33446+/* IOCTLs */
33447+extern int drm_mode_getresources(struct drm_device *dev,
33448+ void *data, struct drm_file *file_priv);
33449+
33450+extern int drm_mode_getcrtc(struct drm_device *dev,
33451+ void *data, struct drm_file *file_priv);
33452+extern int drm_mode_getoutput(struct drm_device *dev,
33453+ void *data, struct drm_file *file_priv);
33454+extern int drm_mode_setcrtc(struct drm_device *dev,
33455+ void *data, struct drm_file *file_priv);
33456+extern int drm_mode_addfb(struct drm_device *dev,
33457+ void *data, struct drm_file *file_priv);
33458+extern int drm_mode_rmfb(struct drm_device *dev,
33459+ void *data, struct drm_file *file_priv);
33460+extern int drm_mode_getfb(struct drm_device *dev,
33461+ void *data, struct drm_file *file_priv);
33462+extern int drm_mode_addmode_ioctl(struct drm_device *dev,
33463+ void *data, struct drm_file *file_priv);
33464+extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
33465+ void *data, struct drm_file *file_priv);
33466+extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
33467+ void *data, struct drm_file *file_priv);
33468+extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
33469+ void *data, struct drm_file *file_priv);
33470+
33471+extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
33472+ void *data, struct drm_file *file_priv);
33473+#endif /* __DRM_CRTC_H__ */
33474+
33475Index: linux-2.6.27/include/drm/drm_edid.h
33476===================================================================
33477--- /dev/null 1970-01-01 00:00:00.000000000 +0000
33478+++ linux-2.6.27/include/drm/drm_edid.h 2009-01-14 11:58:01.000000000 +0000
33479@@ -0,0 +1,179 @@
33480+#ifndef __DRM_EDID_H__
33481+#define __DRM_EDID_H__
33482+
33483+#include <linux/types.h>
33484+
33485+#define EDID_LENGTH 128
33486+#define DDC_ADDR 0x50
33487+
33488+#ifdef BIG_ENDIAN
33489+#error "EDID structure is little endian, need big endian versions"
33490+#endif
33491+
33492+struct est_timings {
33493+ u8 t1;
33494+ u8 t2;
33495+ u8 mfg_rsvd;
33496+} __attribute__((packed));
33497+
33498+struct std_timing {
33499+ u8 hsize; /* need to multiply by 8 then add 248 */
33500+ u8 vfreq:6; /* need to add 60 */
33501+ u8 aspect_ratio:2; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
33502+} __attribute__((packed));
33503+
33504+/* If detailed data is pixel timing */
33505+struct detailed_pixel_timing {
33506+ u8 hactive_lo;
33507+ u8 hblank_lo;
33508+ u8 hblank_hi:4;
33509+ u8 hactive_hi:4;
33510+ u8 vactive_lo;
33511+ u8 vblank_lo;
33512+ u8 vblank_hi:4;
33513+ u8 vactive_hi:4;
33514+ u8 hsync_offset_lo;
33515+ u8 hsync_pulse_width_lo;
33516+ u8 vsync_pulse_width_lo:4;
33517+ u8 vsync_offset_lo:4;
33518+ u8 hsync_pulse_width_hi:2;
33519+ u8 hsync_offset_hi:2;
33520+ u8 vsync_pulse_width_hi:2;
33521+ u8 vsync_offset_hi:2;
33522+ u8 width_mm_lo;
33523+ u8 height_mm_lo;
33524+ u8 height_mm_hi:4;
33525+ u8 width_mm_hi:4;
33526+ u8 hborder;
33527+ u8 vborder;
33528+ u8 unknown0:1;
33529+ u8 vsync_positive:1;
33530+ u8 hsync_positive:1;
33531+ u8 separate_sync:2;
33532+ u8 stereo:1;
33533+ u8 unknown6:1;
33534+ u8 interlaced:1;
33535+} __attribute__((packed));
33536+
33537+/* If it's not pixel timing, it'll be one of the below */
33538+struct detailed_data_string {
33539+ u8 str[13];
33540+} __attribute__((packed));
33541+
33542+struct detailed_data_monitor_range {
33543+ u8 min_vfreq;
33544+ u8 max_vfreq;
33545+ u8 min_hfreq_khz;
33546+ u8 max_hfreq_khz;
33547+ u8 pixel_clock_mhz; /* need to multiply by 10 */
33548+ u16 sec_gtf_toggle; /* A000=use above, 20=use below */ /* FIXME: byte order */
33549+ u8 hfreq_start_khz; /* need to multiply by 2 */
33550+ u8 c; /* need to divide by 2 */
33551+ u16 m; /* FIXME: byte order */
33552+ u8 k;
33553+ u8 j; /* need to divide by 2 */
33554+} __attribute__((packed));
33555+
33556+struct detailed_data_wpindex {
33557+ u8 white_y_lo:2;
33558+ u8 white_x_lo:2;
33559+ u8 pad:4;
33560+ u8 white_x_hi;
33561+ u8 white_y_hi;
33562+ u8 gamma; /* need to divide by 100 then add 1 */
33563+} __attribute__((packed));
33564+
33565+struct detailed_data_color_point {
33566+ u8 windex1;
33567+ u8 wpindex1[3];
33568+ u8 windex2;
33569+ u8 wpindex2[3];
33570+} __attribute__((packed));
33571+
33572+struct detailed_non_pixel {
33573+ u8 pad1;
33574+ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
33575+ fb=color point data, fa=standard timing data,
33576+ f9=undefined, f8=mfg. reserved */
33577+ u8 pad2;
33578+ union {
33579+ struct detailed_data_string str;
33580+ struct detailed_data_monitor_range range;
33581+ struct detailed_data_wpindex color;
33582+ struct std_timing timings[5];
33583+ } data;
33584+} __attribute__((packed));
33585+
33586+#define EDID_DETAIL_STD_MODES 0xfa
33587+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
33588+#define EDID_DETAIL_MONITOR_NAME 0xfc
33589+#define EDID_DETAIL_MONITOR_RANGE 0xfd
33590+#define EDID_DETAIL_MONITOR_STRING 0xfe
33591+#define EDID_DETAIL_MONITOR_SERIAL 0xff
33592+
33593+struct detailed_timing {
33594+ u16 pixel_clock; /* need to multiply by 10 KHz */ /* FIXME: byte order */
33595+ union {
33596+ struct detailed_pixel_timing pixel_data;
33597+ struct detailed_non_pixel other_data;
33598+ } data;
33599+} __attribute__((packed));
33600+
33601+struct edid {
33602+ u8 header[8];
33603+ /* Vendor & product info */
33604+ u16 mfg_id; /* FIXME: byte order */
33605+ u16 prod_code; /* FIXME: byte order */
33606+ u32 serial; /* FIXME: byte order */
33607+ u8 mfg_week;
33608+ u8 mfg_year;
33609+ /* EDID version */
33610+ u8 version;
33611+ u8 revision;
33612+ /* Display info: */
33613+ /* input definition */
33614+ u8 serration_vsync:1;
33615+ u8 sync_on_green:1;
33616+ u8 composite_sync:1;
33617+ u8 separate_syncs:1;
33618+ u8 blank_to_black:1;
33619+ u8 video_level:2;
33620+ u8 digital:1; /* bits below must be zero if set */
33621+ u8 width_cm;
33622+ u8 height_cm;
33623+ u8 gamma;
33624+ /* feature support */
33625+ u8 default_gtf:1;
33626+ u8 preferred_timing:1;
33627+ u8 standard_color:1;
33628+ u8 display_type:2; /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
33629+ u8 pm_active_off:1;
33630+ u8 pm_suspend:1;
33631+ u8 pm_standby:1;
33632+ /* Color characteristics */
33633+ u8 red_green_lo;
33634+ u8 black_white_lo;
33635+ u8 red_x;
33636+ u8 red_y;
33637+ u8 green_x;
33638+ u8 green_y;
33639+ u8 blue_x;
33640+ u8 blue_y;
33641+ u8 white_x;
33642+ u8 white_y;
33643+ /* Est. timings and mfg rsvd timings*/
33644+ struct est_timings established_timings;
33645+ /* Standard timings 1-8*/
33646+ struct std_timing standard_timings[8];
33647+ /* Detailing timings 1-4 */
33648+ struct detailed_timing detailed_timings[4];
33649+ /* Number of 128 byte ext. blocks */
33650+ u8 extensions;
33651+ /* Checksum */
33652+ u8 checksum;
33653+} __attribute__((packed));
33654+
33655+extern unsigned char *drm_ddc_read(struct i2c_adapter *adapter);
33656+extern int drm_get_acpi_edid(char *method, char *edid, ssize_t length);
33657+
33658+#endif /* __DRM_EDID_H__ */
33659Index: linux-2.6.27/include/drm/drm_objects.h
33660===================================================================
33661--- /dev/null 1970-01-01 00:00:00.000000000 +0000
33662+++ linux-2.6.27/include/drm/drm_objects.h 2009-01-14 11:58:01.000000000 +0000
33663@@ -0,0 +1,717 @@
33664+/**************************************************************************
33665+ *
33666+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
33667+ * All Rights Reserved.
33668+ *
33669+ * Permission is hereby granted, free of charge, to any person obtaining a
33670+ * copy of this software and associated documentation files (the
33671+ * "Software"), to deal in the Software without restriction, including
33672+ * without limitation the rights to use, copy, modify, merge, publish,
33673+ * distribute, sub license, and/or sell copies of the Software, and to
33674+ * permit persons to whom the Software is furnished to do so, subject to
33675+ * the following conditions:
33676+ *
33677+ * The above copyright notice and this permission notice (including the
33678+ * next paragraph) shall be included in all copies or substantial portions
33679+ * of the Software.
33680+ *
33681+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33682+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33683+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
33684+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
33685+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
33686+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
33687+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
33688+ *
33689+ **************************************************************************/
33690+/*
33691+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33692+ */
33693+
33694+#ifndef _DRM_OBJECTS_H
33695+#define _DRM_OBJECTS_H
33696+
33697+struct drm_device;
33698+struct drm_bo_mem_reg;
33699+
33700+/***************************************************
33701+ * User space objects. (drm_object.c)
33702+ */
33703+
33704+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
33705+
33706+enum drm_object_type {
33707+ drm_fence_type,
33708+ drm_buffer_type,
33709+ drm_lock_type,
33710+ /*
33711+ * Add other user space object types here.
33712+ */
33713+ drm_driver_type0 = 256,
33714+ drm_driver_type1,
33715+ drm_driver_type2,
33716+ drm_driver_type3,
33717+ drm_driver_type4
33718+};
33719+
33720+/*
33721+ * A user object is a structure that helps the drm give out user handles
33722+ * to kernel internal objects and to keep track of these objects so that
33723+ * they can be destroyed, for example when the user space process exits.
33724+ * Designed to be accessible using a user space 32-bit handle.
33725+ */
33726+
33727+struct drm_user_object {
33728+ struct drm_hash_item hash;
33729+ struct list_head list;
33730+ enum drm_object_type type;
33731+ atomic_t refcount;
33732+ int shareable;
33733+ struct drm_file *owner;
33734+ void (*ref_struct_locked) (struct drm_file *priv,
33735+ struct drm_user_object *obj,
33736+ enum drm_ref_type ref_action);
33737+ void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
33738+ enum drm_ref_type unref_action);
33739+ void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
33740+};
33741+
33742+/*
33743+ * A ref object is a structure which is used to
33744+ * keep track of references to user objects and to keep track of these
33745+ * references so that they can be destroyed for example when the user space
33746+ * process exits. Designed to be accessible using a pointer to the _user_ object.
33747+ */
33748+
33749+struct drm_ref_object {
33750+ struct drm_hash_item hash;
33751+ struct list_head list;
33752+ atomic_t refcount;
33753+ enum drm_ref_type unref_action;
33754+};
33755+
33756+/**
33757+ * Must be called with the struct_mutex held.
33758+ */
33759+
33760+extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
33761+ int shareable);
33762+/**
33763+ * Must be called with the struct_mutex held.
33764+ */
33765+
33766+extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
33767+ uint32_t key);
33768+
33769+/*
33770+ * Must be called with the struct_mutex held. May temporarily release it.
33771+ */
33772+
33773+extern int drm_add_ref_object(struct drm_file *priv,
33774+ struct drm_user_object *referenced_object,
33775+ enum drm_ref_type ref_action);
33776+
33777+/*
33778+ * Must be called with the struct_mutex held.
33779+ */
33780+
33781+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
33782+ struct drm_user_object *referenced_object,
33783+ enum drm_ref_type ref_action);
33784+/*
33785+ * Must be called with the struct_mutex held.
33786+ * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
33787+ * release the struct_mutex before calling drm_remove_ref_object.
33788+ * This function may temporarily release the struct_mutex.
33789+ */
33790+
33791+extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
33792+extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
33793+ enum drm_object_type type,
33794+ struct drm_user_object **object);
33795+extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
33796+ enum drm_object_type type);
33797+
33798+/***************************************************
33799+ * Fence objects. (drm_fence.c)
33800+ */
33801+
33802+struct drm_fence_object {
33803+ struct drm_user_object base;
33804+ struct drm_device *dev;
33805+ atomic_t usage;
33806+
33807+ /*
33808+ * The below three fields are protected by the fence manager spinlock.
33809+ */
33810+
33811+ struct list_head ring;
33812+ int fence_class;
33813+ uint32_t native_types;
33814+ uint32_t type;
33815+ uint32_t signaled_types;
33816+ uint32_t sequence;
33817+ uint32_t waiting_types;
33818+ uint32_t error;
33819+};
33820+
33821+#define _DRM_FENCE_CLASSES 8
33822+
33823+struct drm_fence_class_manager {
33824+ struct list_head ring;
33825+ uint32_t pending_flush;
33826+ uint32_t waiting_types;
33827+ wait_queue_head_t fence_queue;
33828+ uint32_t highest_waiting_sequence;
33829+ uint32_t latest_queued_sequence;
33830+};
33831+
33832+struct drm_fence_manager {
33833+ int initialized;
33834+ rwlock_t lock;
33835+ struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
33836+ uint32_t num_classes;
33837+ atomic_t count;
33838+};
33839+
33840+struct drm_fence_driver {
33841+ unsigned long *waiting_jiffies;
33842+ uint32_t num_classes;
33843+ uint32_t wrap_diff;
33844+ uint32_t flush_diff;
33845+ uint32_t sequence_mask;
33846+
33847+ /*
33848+ * Driver implemented functions:
33849+ * has_irq() : 1 if the hardware can update the indicated type_flags using an
33850+ * irq handler. 0 if polling is required.
33851+ *
33852+ * emit() : Emit a sequence number to the command stream.
33853+ * Return the sequence number.
33854+ *
33855+ * flush() : Make sure the flags indicated in fc->pending_flush will eventually
33856+ * signal for fc->highest_received_sequence and all preceding sequences.
33857+ * Acknowledge by clearing the flags fc->pending_flush.
33858+ *
33859+ * poll() : Call drm_fence_handler with any new information.
33860+ *
33861+ * needed_flush() : Given the current state of the fence->type flags and previusly
33862+ * executed or queued flushes, return the type_flags that need flushing.
33863+ *
33864+ * wait(): Wait for the "mask" flags to signal on a given fence, performing
33865+ * whatever's necessary to make this happen.
33866+ */
33867+
33868+ int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
33869+ uint32_t flags);
33870+ int (*emit) (struct drm_device *dev, uint32_t fence_class,
33871+ uint32_t flags, uint32_t *breadcrumb,
33872+ uint32_t *native_type);
33873+ void (*flush) (struct drm_device *dev, uint32_t fence_class);
33874+ void (*poll) (struct drm_device *dev, uint32_t fence_class,
33875+ uint32_t types);
33876+ uint32_t (*needed_flush) (struct drm_fence_object *fence);
33877+ int (*wait) (struct drm_fence_object *fence, int lazy,
33878+ int interruptible, uint32_t mask);
33879+};
33880+
33881+extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
33882+ int interruptible, uint32_t mask,
33883+ unsigned long end_jiffies);
33884+extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
33885+ uint32_t sequence, uint32_t type,
33886+ uint32_t error);
33887+extern void drm_fence_manager_init(struct drm_device *dev);
33888+extern void drm_fence_manager_takedown(struct drm_device *dev);
33889+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
33890+ uint32_t sequence);
33891+extern int drm_fence_object_flush(struct drm_fence_object *fence,
33892+ uint32_t type);
33893+extern int drm_fence_object_signaled(struct drm_fence_object *fence,
33894+ uint32_t type);
33895+extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
33896+extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
33897+extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
33898+extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
33899+ struct drm_fence_object *src);
33900+extern int drm_fence_object_wait(struct drm_fence_object *fence,
33901+ int lazy, int ignore_signals, uint32_t mask);
33902+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
33903+ uint32_t fence_flags, uint32_t fence_class,
33904+ struct drm_fence_object **c_fence);
33905+extern int drm_fence_object_emit(struct drm_fence_object *fence,
33906+ uint32_t fence_flags, uint32_t class,
33907+ uint32_t type);
33908+extern void drm_fence_fill_arg(struct drm_fence_object *fence,
33909+ struct drm_fence_arg *arg);
33910+
33911+extern int drm_fence_add_user_object(struct drm_file *priv,
33912+ struct drm_fence_object *fence,
33913+ int shareable);
33914+
33915+extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
33916+ struct drm_file *file_priv);
33917+extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
33918+ struct drm_file *file_priv);
33919+extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
33920+ struct drm_file *file_priv);
33921+extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
33922+ struct drm_file *file_priv);
33923+extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
33924+ struct drm_file *file_priv);
33925+extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
33926+ struct drm_file *file_priv);
33927+extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
33928+ struct drm_file *file_priv);
33929+extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
33930+ struct drm_file *file_priv);
33931+extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
33932+ struct drm_file *file_priv);
33933+/**************************************************
33934+ *TTMs
33935+ */
33936+
33937+/*
33938+ * The ttm backend GTT interface. (In our case AGP).
33939+ * Any similar type of device (PCIE?)
33940+ * needs only to implement these functions to be usable with the TTM interface.
33941+ * The AGP backend implementation lives in drm_agpsupport.c
33942+ * basically maps these calls to available functions in agpgart.
33943+ * Each drm device driver gets an
33944+ * additional function pointer that creates these types,
33945+ * so that the device can choose the correct aperture.
33946+ * (Multiple AGP apertures, etc.)
33947+ * Most device drivers will let this point to the standard AGP implementation.
33948+ */
33949+
33950+#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
33951+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
33952+
33953+struct drm_ttm_backend;
33954+struct drm_ttm_backend_func {
33955+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
33956+ int (*populate) (struct drm_ttm_backend *backend,
33957+ unsigned long num_pages, struct page **pages);
33958+ void (*clear) (struct drm_ttm_backend *backend);
33959+ int (*bind) (struct drm_ttm_backend *backend,
33960+ struct drm_bo_mem_reg *bo_mem);
33961+ int (*unbind) (struct drm_ttm_backend *backend);
33962+ void (*destroy) (struct drm_ttm_backend *backend);
33963+};
33964+
33965+
33966+struct drm_ttm_backend {
33967+ struct drm_device *dev;
33968+ uint32_t flags;
33969+ struct drm_ttm_backend_func *func;
33970+};
33971+
33972+struct drm_ttm {
33973+ struct page *dummy_read_page;
33974+ struct page **pages;
33975+ uint32_t page_flags;
33976+ unsigned long num_pages;
33977+ atomic_t vma_count;
33978+ struct drm_device *dev;
33979+ int destroy;
33980+ uint32_t mapping_offset;
33981+ struct drm_ttm_backend *be;
33982+ enum {
33983+ ttm_bound,
33984+ ttm_evicted,
33985+ ttm_unbound,
33986+ ttm_unpopulated,
33987+ } state;
33988+
33989+};
33990+
33991+extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
33992+extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
33993+extern void drm_ttm_unbind(struct drm_ttm *ttm);
33994+extern void drm_ttm_evict(struct drm_ttm *ttm);
33995+extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
33996+extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
33997+extern void drm_ttm_cache_flush(void);
33998+extern int drm_ttm_populate(struct drm_ttm *ttm);
33999+extern int drm_ttm_set_user(struct drm_ttm *ttm,
34000+ struct task_struct *tsk,
34001+ int write,
34002+ unsigned long start,
34003+ unsigned long num_pages,
34004+ struct page *dummy_read_page);
34005+unsigned long drm_ttm_size(struct drm_device *dev,
34006+ unsigned long num_pages,
34007+ int user_bo);
34008+
34009+
34010+/*
34011+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
34012+ * this which calls this function iff there are no vmas referencing it anymore.
34013+ * Otherwise it is called when the last vma exits.
34014+ */
34015+
34016+extern int drm_destroy_ttm(struct drm_ttm *ttm);
34017+
34018+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
34019+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
34020+}
34021+
34022+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
34023+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
34024+
34025+/*
34026+ * Page flags.
34027+ */
34028+
34029+#define DRM_TTM_PAGE_UNCACHED (1 << 0)
34030+#define DRM_TTM_PAGE_USED (1 << 1)
34031+#define DRM_TTM_PAGE_BOUND (1 << 2)
34032+#define DRM_TTM_PAGE_PRESENT (1 << 3)
34033+#define DRM_TTM_PAGE_VMALLOC (1 << 4)
34034+#define DRM_TTM_PAGE_USER (1 << 5)
34035+#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
34036+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
34037+#define DRM_TTM_PAGE_USER_DMA (1 << 8)
34038+
34039+/***************************************************
34040+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
34041+ */
34042+
34043+struct drm_bo_mem_reg {
34044+ struct drm_mm_node *mm_node;
34045+ unsigned long size;
34046+ unsigned long num_pages;
34047+ uint32_t page_alignment;
34048+ uint32_t mem_type;
34049+ uint64_t flags;
34050+ uint64_t mask;
34051+ uint32_t desired_tile_stride;
34052+ uint32_t hw_tile_stride;
34053+};
34054+
34055+enum drm_bo_type {
34056+ drm_bo_type_dc,
34057+ drm_bo_type_user,
34058+ drm_bo_type_kernel, /* for initial kernel allocations */
34059+};
34060+
34061+struct drm_buffer_object {
34062+ struct drm_device *dev;
34063+ struct drm_user_object base;
34064+
34065+ /*
34066+ * If there is a possibility that the usage variable is zero,
34067+ * then dev->struct_mutext should be locked before incrementing it.
34068+ */
34069+
34070+ atomic_t usage;
34071+ unsigned long buffer_start;
34072+ enum drm_bo_type type;
34073+ unsigned long offset;
34074+ atomic_t mapped;
34075+ struct drm_bo_mem_reg mem;
34076+
34077+ struct list_head lru;
34078+ struct list_head ddestroy;
34079+
34080+ uint32_t fence_type;
34081+ uint32_t fence_class;
34082+ uint32_t new_fence_type;
34083+ uint32_t new_fence_class;
34084+ struct drm_fence_object *fence;
34085+ uint32_t priv_flags;
34086+ wait_queue_head_t event_queue;
34087+ struct mutex mutex;
34088+ unsigned long num_pages;
34089+ unsigned long reserved_size;
34090+
34091+ /* For pinned buffers */
34092+ struct drm_mm_node *pinned_node;
34093+ uint32_t pinned_mem_type;
34094+ struct list_head pinned_lru;
34095+
34096+ /* For vm */
34097+ struct drm_ttm *ttm;
34098+ struct drm_map_list map_list;
34099+ uint32_t memory_type;
34100+ unsigned long bus_offset;
34101+ uint32_t vm_flags;
34102+ void *iomap;
34103+
34104+#ifdef DRM_ODD_MM_COMPAT
34105+ /* dev->struct_mutex only protected. */
34106+ struct list_head vma_list;
34107+ struct list_head p_mm_list;
34108+#endif
34109+
34110+};
34111+
34112+#define _DRM_BO_FLAG_UNFENCED 0x00000001
34113+#define _DRM_BO_FLAG_EVICTED 0x00000002
34114+
34115+struct drm_mem_type_manager {
34116+ int has_type;
34117+ int use_type;
34118+ struct drm_mm manager;
34119+ struct list_head lru;
34120+ struct list_head pinned;
34121+ uint32_t flags;
34122+ uint32_t drm_bus_maptype;
34123+ unsigned long gpu_offset;
34124+ unsigned long io_offset;
34125+ unsigned long io_size;
34126+ void *io_addr;
34127+};
34128+
34129+struct drm_bo_lock {
34130+ struct drm_user_object base;
34131+ wait_queue_head_t queue;
34132+ atomic_t write_lock_pending;
34133+ atomic_t readers;
34134+};
34135+
34136+#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
34137+#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
34138+#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
34139+#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
34140+ before kernel access. */
34141+#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
34142+#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
34143+
34144+struct drm_buffer_manager {
34145+ struct drm_bo_lock bm_lock;
34146+ struct mutex evict_mutex;
34147+ int nice_mode;
34148+ int initialized;
34149+ struct drm_file *last_to_validate;
34150+ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
34151+ struct list_head unfenced;
34152+ struct list_head ddestroy;
34153+ struct delayed_work wq;
34154+ uint32_t fence_type;
34155+ unsigned long cur_pages;
34156+ atomic_t count;
34157+ struct page *dummy_read_page;
34158+};
34159+
34160+struct drm_bo_driver {
34161+ const uint32_t *mem_type_prio;
34162+ const uint32_t *mem_busy_prio;
34163+ uint32_t num_mem_type_prio;
34164+ uint32_t num_mem_busy_prio;
34165+ struct drm_ttm_backend *(*create_ttm_backend_entry)
34166+ (struct drm_device *dev);
34167+ int (*backend_size) (struct drm_device *dev,
34168+ unsigned long num_pages);
34169+ int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
34170+ uint32_t *type);
34171+ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
34172+ int (*init_mem_type) (struct drm_device *dev, uint32_t type,
34173+ struct drm_mem_type_manager *man);
34174+ uint32_t(*evict_mask) (struct drm_buffer_object *bo);
34175+ int (*move) (struct drm_buffer_object *bo,
34176+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
34177+ void (*ttm_cache_flush)(struct drm_ttm *ttm);
34178+
34179+ /*
34180+ * command_stream_barrier
34181+ *
34182+ * @dev: The drm device.
34183+ *
34184+ * @bo: The buffer object to validate.
34185+ *
34186+ * @new_fence_class: The new fence class for the buffer object.
34187+ *
34188+ * @new_fence_type: The new fence type for the buffer object.
34189+ *
34190+ * @no_wait: whether this should give up and return -EBUSY
34191+ * if this operation would require sleeping
34192+ *
34193+ * Insert a command stream barrier that makes sure that the
34194+ * buffer is idle once the commands associated with the
34195+ * current validation are starting to execute. If an error
34196+ * condition is returned, or the function pointer is NULL,
34197+ * the drm core will force buffer idle
34198+ * during validation.
34199+ */
34200+
34201+ int (*command_stream_barrier) (struct drm_buffer_object *bo,
34202+ uint32_t new_fence_class,
34203+ uint32_t new_fence_type,
34204+ int no_wait);
34205+};
34206+
34207+/*
34208+ * buffer objects (drm_bo.c)
34209+ */
34210+extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34211+extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34212+extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34213+extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34214+extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34215+extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
34216+extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34217+extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34218+extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34219+extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34220+extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34221+extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34222+extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34223+extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34224+extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
34225+extern int drm_bo_driver_finish(struct drm_device *dev);
34226+extern int drm_bo_driver_init(struct drm_device *dev);
34227+extern int drm_bo_pci_offset(struct drm_device *dev,
34228+ struct drm_bo_mem_reg *mem,
34229+ unsigned long *bus_base,
34230+ unsigned long *bus_offset,
34231+ unsigned long *bus_size);
34232+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
34233+
34234+extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
34235+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
34236+extern void drm_putback_buffer_objects(struct drm_device *dev);
34237+extern int drm_fence_buffer_objects(struct drm_device *dev,
34238+ struct list_head *list,
34239+ uint32_t fence_flags,
34240+ struct drm_fence_object *fence,
34241+ struct drm_fence_object **used_fence);
34242+extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
34243+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
34244+ enum drm_bo_type type, uint64_t mask,
34245+ uint32_t hint, uint32_t page_alignment,
34246+ unsigned long buffer_start,
34247+ struct drm_buffer_object **bo);
34248+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
34249+ int no_wait);
34250+extern int drm_bo_mem_space(struct drm_buffer_object *bo,
34251+ struct drm_bo_mem_reg *mem, int no_wait);
34252+extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
34253+ uint64_t new_mem_flags,
34254+ int no_wait, int move_unfenced);
34255+extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
34256+extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
34257+ unsigned long p_offset, unsigned long p_size);
34258+extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
34259+ uint32_t fence_class, uint64_t flags,
34260+ uint64_t mask, uint32_t hint,
34261+ int use_old_fence_class,
34262+ struct drm_bo_info_rep *rep,
34263+ struct drm_buffer_object **bo_rep);
34264+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
34265+ uint32_t handle,
34266+ int check_owner);
34267+extern int drm_bo_do_validate(struct drm_buffer_object *bo,
34268+ uint64_t flags, uint64_t mask, uint32_t hint,
34269+ uint32_t fence_class,
34270+ int no_wait,
34271+ struct drm_bo_info_rep *rep);
34272+extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
34273+ struct drm_bo_info_rep *rep);
34274+/*
34275+ * Buffer object memory move- and map helpers.
34276+ * drm_bo_move.c
34277+ */
34278+
34279+extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
34280+ int evict, int no_wait,
34281+ struct drm_bo_mem_reg *new_mem);
34282+extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
34283+ int evict,
34284+ int no_wait, struct drm_bo_mem_reg *new_mem);
34285+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
34286+ int evict, int no_wait,
34287+ uint32_t fence_class, uint32_t fence_type,
34288+ uint32_t fence_flags,
34289+ struct drm_bo_mem_reg *new_mem);
34290+extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
34291+extern unsigned long drm_bo_offset_end(unsigned long offset,
34292+ unsigned long end);
34293+
34294+struct drm_bo_kmap_obj {
34295+ void *virtual;
34296+ struct page *page;
34297+ enum {
34298+ bo_map_iomap,
34299+ bo_map_vmap,
34300+ bo_map_kmap,
34301+ bo_map_premapped,
34302+ } bo_kmap_type;
34303+};
34304+
34305+static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
34306+{
34307+ *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
34308+ map->bo_kmap_type == bo_map_premapped);
34309+ return map->virtual;
34310+}
34311+extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
34312+extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
34313+ unsigned long num_pages, struct drm_bo_kmap_obj *map);
34314+
34315+
34316+/*
34317+ * drm_regman.c
34318+ */
34319+
34320+struct drm_reg {
34321+ struct list_head head;
34322+ struct drm_fence_object *fence;
34323+ uint32_t fence_type;
34324+ uint32_t new_fence_type;
34325+};
34326+
34327+struct drm_reg_manager {
34328+ struct list_head free;
34329+ struct list_head lru;
34330+ struct list_head unfenced;
34331+
34332+ int (*reg_reusable)(const struct drm_reg *reg, const void *data);
34333+ void (*reg_destroy)(struct drm_reg *reg);
34334+};
34335+
34336+extern int drm_regs_alloc(struct drm_reg_manager *manager,
34337+ const void *data,
34338+ uint32_t fence_class,
34339+ uint32_t fence_type,
34340+ int interruptible,
34341+ int no_wait,
34342+ struct drm_reg **reg);
34343+
34344+extern void drm_regs_fence(struct drm_reg_manager *regs,
34345+ struct drm_fence_object *fence);
34346+
34347+extern void drm_regs_free(struct drm_reg_manager *manager);
34348+extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
34349+extern void drm_regs_init(struct drm_reg_manager *manager,
34350+ int (*reg_reusable)(const struct drm_reg *,
34351+ const void *),
34352+ void (*reg_destroy)(struct drm_reg *));
34353+
34354+extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
34355+ void **virtual);
34356+extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
34357+ void *virtual);
34358+/*
34359+ * drm_bo_lock.c
34360+ * Simple replacement for the hardware lock on buffer manager init and clean.
34361+ */
34362+
34363+
34364+extern void drm_bo_init_lock(struct drm_bo_lock *lock);
34365+extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
34366+extern int drm_bo_read_lock(struct drm_bo_lock *lock);
34367+extern int drm_bo_write_lock(struct drm_bo_lock *lock,
34368+ struct drm_file *file_priv);
34369+
34370+extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
34371+ struct drm_file *file_priv);
34372+
34373+#ifdef CONFIG_DEBUG_MUTEXES
34374+#define DRM_ASSERT_LOCKED(_mutex) \
34375+ BUG_ON(!mutex_is_locked(_mutex) || \
34376+ ((_mutex)->owner != current_thread_info()))
34377+#else
34378+#define DRM_ASSERT_LOCKED(_mutex)
34379+#endif
34380+#endif
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.27.bb b/meta-moblin/packages/linux/linux-moblin_2.6.27.bb
index 420974372d..685956a706 100644
--- a/meta-moblin/packages/linux/linux-moblin_2.6.27.bb
+++ b/meta-moblin/packages/linux/linux-moblin_2.6.27.bb
@@ -1,6 +1,6 @@
1require linux-moblin.inc 1require linux-moblin.inc
2 2
3PR = "r4" 3PR = "r6"
4PE = "1" 4PE = "1"
5 5
6DEFAULT_PREFERENCE = "-1" 6DEFAULT_PREFERENCE = "-1"
@@ -54,4 +54,6 @@ SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.27.tar.bz2 \
54 file://defconfig-menlow \ 54 file://defconfig-menlow \
55 file://defconfig-netbook" 55 file://defconfig-netbook"
56 56
57SRC_URI_append_menlow = "psb-driver.patch;patch=1"
58
57S = "${WORKDIR}/linux-2.6.27" 59S = "${WORKDIR}/linux-2.6.27"