path: root/meta/recipes-kernel/linux/linux-netbook-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch
Diffstat (limited to 'meta/recipes-kernel/linux/linux-netbook-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch')
-rw-r--r--  meta/recipes-kernel/linux/linux-netbook-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch  13580
1 file changed, 13580 insertions, 0 deletions
diff --git a/meta/recipes-kernel/linux/linux-netbook-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch b/meta/recipes-kernel/linux/linux-netbook-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch
new file mode 100644
index 0000000000..7f81eb82fe
--- /dev/null
+++ b/meta/recipes-kernel/linux/linux-netbook-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch
@@ -0,0 +1,13580 @@
1Index: linux-2.6.33/drivers/pci/pci.c
2===================================================================
3--- linux-2.6.33.orig/drivers/pci/pci.c
4+++ linux-2.6.33/drivers/pci/pci.c
5@@ -297,6 +297,49 @@ int pci_find_ext_capability(struct pci_d
6 }
7 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
8
9+/**
10+ * pci_bus_find_ext_capability - find an extended capability
11+ * @bus: the PCI bus to query
12+ * @devfn: PCI device to query
13+ * @cap: capability code
14+ *
15+ * Like pci_find_ext_capability() but works for pci devices that do not have a
16+ * pci_dev structure set up yet.
17+ *
18+ * Returns the address of the requested capability structure within the
19+ * device's PCI configuration space or 0 in case the device does not
20+ * support it.
21+ */
22+int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
23+ int cap)
24+{
25+ u32 header;
26+ int ttl;
27+ int pos = PCI_CFG_SPACE_SIZE;
28+
29+ /* minimum 8 bytes per capability */
30+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
31+
32+ if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
33+ return 0;
34+ if (header == 0xffffffff || header == 0)
35+ return 0;
36+
37+ while (ttl-- > 0) {
38+ if (PCI_EXT_CAP_ID(header) == cap)
39+ return pos;
40+
41+ pos = PCI_EXT_CAP_NEXT(header);
42+ if (pos < PCI_CFG_SPACE_SIZE)
43+ break;
44+
45+ if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
46+ break;
47+ }
48+
49+ return 0;
50+}
51+
52 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
53 {
54 int rc, ttl = PCI_FIND_CAP_TTL;
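For context (not part of the patch): a minimal sketch of how the new pci_bus_find_ext_capability() helper defined above might be called from early fixup code, where only the bus and devfn are known and no pci_dev exists yet. The function name early_has_sriov and the capability ID 0x0010 (SR-IOV) are illustrative assumptions, not something this patch adds.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/init.h>
#include <linux/pci.h>

static int __init early_has_sriov(struct pci_bus *bus)
{
        int pos;

        /* 0x0010 is the SR-IOV extended capability ID */
        pos = pci_bus_find_ext_capability(bus, PCI_DEVFN(0, 0), 0x0010);

        return pos != 0;    /* non-zero offset means the capability exists */
}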
55Index: linux-2.6.33/include/linux/pci.h
56===================================================================
57--- linux-2.6.33.orig/include/linux/pci.h
58+++ linux-2.6.33/include/linux/pci.h
59@@ -631,6 +631,8 @@ enum pci_lost_interrupt_reason pci_lost_
60 int pci_find_capability(struct pci_dev *dev, int cap);
61 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
62 int pci_find_ext_capability(struct pci_dev *dev, int cap);
63+int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
64+ int cap);
65 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
66 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
67 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
68Index: linux-2.6.33/arch/x86/include/asm/numaq.h
69===================================================================
70--- linux-2.6.33.orig/arch/x86/include/asm/numaq.h
71+++ linux-2.6.33/arch/x86/include/asm/numaq.h
72@@ -30,6 +30,7 @@
73
74 extern int found_numaq;
75 extern int get_memcfg_numaq(void);
76+extern int pci_numaq_init(void);
77
78 extern void *xquad_portio;
79
80Index: linux-2.6.33/arch/x86/include/asm/pci.h
81===================================================================
82--- linux-2.6.33.orig/arch/x86/include/asm/pci.h
83+++ linux-2.6.33/arch/x86/include/asm/pci.h
84@@ -45,8 +45,15 @@ static inline int pci_proc_domain(struct
85
86 #ifdef CONFIG_PCI
87 extern unsigned int pcibios_assign_all_busses(void);
88+extern int pci_legacy_init(void);
89+# ifdef CONFIG_ACPI
90+# define x86_default_pci_init pci_acpi_init
91+# else
92+# define x86_default_pci_init pci_legacy_init
93+# endif
94 #else
95-#define pcibios_assign_all_busses() 0
96+# define pcibios_assign_all_busses() 0
97+# define x86_default_pci_init NULL
98 #endif
99
100 extern unsigned long pci_mem_start;
101Index: linux-2.6.33/arch/x86/include/asm/pci_x86.h
102===================================================================
103--- linux-2.6.33.orig/arch/x86/include/asm/pci_x86.h
104+++ linux-2.6.33/arch/x86/include/asm/pci_x86.h
105@@ -82,7 +82,6 @@ struct irq_routing_table {
106
107 extern unsigned int pcibios_irq_mask;
108
109-extern int pcibios_scanned;
110 extern spinlock_t pci_config_lock;
111
112 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
113@@ -111,10 +110,10 @@ extern void __init dmi_check_skip_isa_al
114
115 /* some common used subsys_initcalls */
116 extern int __init pci_acpi_init(void);
117-extern int __init pcibios_irq_init(void);
118-extern int __init pci_visws_init(void);
119-extern int __init pci_numaq_init(void);
120+extern void __init pcibios_irq_init(void);
121 extern int __init pcibios_init(void);
122+extern int pci_legacy_init(void);
123+extern void pcibios_fixup_irqs(void);
124
125 /* pci-mmconfig.c */
126
127@@ -182,3 +181,17 @@ static inline void mmio_config_writel(vo
128 {
129 asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory");
130 }
131+
132+#ifdef CONFIG_PCI
133+# ifdef CONFIG_ACPI
134+# define x86_default_pci_init pci_acpi_init
135+# else
136+# define x86_default_pci_init pci_legacy_init
137+# endif
138+# define x86_default_pci_init_irq pcibios_irq_init
139+# define x86_default_pci_fixup_irqs pcibios_fixup_irqs
140+#else
141+# define x86_default_pci_init NULL
142+# define x86_default_pci_init_irq NULL
143+# define x86_default_pci_fixup_irqs NULL
144+#endif
145Index: linux-2.6.33/arch/x86/include/asm/setup.h
146===================================================================
147--- linux-2.6.33.orig/arch/x86/include/asm/setup.h
148+++ linux-2.6.33/arch/x86/include/asm/setup.h
149@@ -37,10 +37,8 @@ void setup_bios_corruption_check(void);
150
151 #ifdef CONFIG_X86_VISWS
152 extern void visws_early_detect(void);
153-extern int is_visws_box(void);
154 #else
155 static inline void visws_early_detect(void) { }
156-static inline int is_visws_box(void) { return 0; }
157 #endif
158
159 extern unsigned long saved_video_mode;
160Index: linux-2.6.33/arch/x86/include/asm/visws/cobalt.h
161===================================================================
162--- linux-2.6.33.orig/arch/x86/include/asm/visws/cobalt.h
163+++ linux-2.6.33/arch/x86/include/asm/visws/cobalt.h
164@@ -122,4 +122,6 @@ extern char visws_board_type;
165
166 extern char visws_board_rev;
167
168+extern int pci_visws_init(void);
169+
170 #endif /* _ASM_X86_VISWS_COBALT_H */
171Index: linux-2.6.33/arch/x86/include/asm/x86_init.h
172===================================================================
173--- linux-2.6.33.orig/arch/x86/include/asm/x86_init.h
174+++ linux-2.6.33/arch/x86/include/asm/x86_init.h
175@@ -99,6 +99,18 @@ struct x86_init_iommu {
176 };
177
178 /**
179+ * struct x86_init_pci - platform specific pci init functions
180+ * @init: platform specific pci init
181+ * @init_irq: platform specific pci irq init
182+ * @fixup_irqs: platform specific pci irq fixup
183+ */
184+struct x86_init_pci {
185+ int (*init)(void);
186+ void (*init_irq)(void);
187+ void (*fixup_irqs)(void);
188+};
189+
190+/**
191 * struct x86_init_ops - functions for platform specific setup
192 *
193 */
194@@ -110,6 +122,7 @@ struct x86_init_ops {
195 struct x86_init_paging paging;
196 struct x86_init_timers timers;
197 struct x86_init_iommu iommu;
198+ struct x86_init_pci pci;
199 };
200
201 /**
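For context (not part of the patch): with the new x86_init.pci hooks, a platform selects its PCI init path by overwriting the defaults during early setup, exactly as the acpi/boot.c, numaq_32.c and visws_quirks.c hunks below do. The sketch here uses a hypothetical platform; the my_platform_* names are invented for illustration. Per the comment added to pci_subsys_init() further down, the init hook returns 0 when it has handled the bus scan itself and a non-zero value to request the generic pci_legacy_init() scan.

/* Hypothetical platform override -- illustrative only. */
#include <linux/init.h>
#include <asm/x86_init.h>

static int __init my_platform_pci_init(void)
{
        /* platform specific root bus scan would go here */
        return 0;   /* 0: handled here, non-zero: fall back to pci_legacy_init() */
}

static void __init my_platform_early_setup(void)
{
        x86_init.pci.init       = my_platform_pci_init;
        x86_init.pci.init_irq   = x86_init_noop;   /* no legacy PIRQ routing */
        x86_init.pci.fixup_irqs = x86_init_noop;
}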
202Index: linux-2.6.33/arch/x86/kernel/acpi/boot.c
203===================================================================
204--- linux-2.6.33.orig/arch/x86/kernel/acpi/boot.c
205+++ linux-2.6.33/arch/x86/kernel/acpi/boot.c
206@@ -35,6 +35,7 @@
207 #include <linux/ioport.h>
208 #include <linux/pci.h>
209
210+#include <asm/pci_x86.h>
211 #include <asm/pgtable.h>
212 #include <asm/io_apic.h>
213 #include <asm/apic.h>
214@@ -1603,6 +1604,9 @@ int __init acpi_boot_init(void)
215
216 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
217
218+ if (!acpi_noirq)
219+ x86_init.pci.init = pci_acpi_init;
220+
221 return 0;
222 }
223
224Index: linux-2.6.33/arch/x86/kernel/apic/numaq_32.c
225===================================================================
226--- linux-2.6.33.orig/arch/x86/kernel/apic/numaq_32.c
227+++ linux-2.6.33/arch/x86/kernel/apic/numaq_32.c
228@@ -277,6 +277,7 @@ static __init void early_check_numaq(voi
229 x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
230 x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
231 x86_init.timers.tsc_pre_init = numaq_tsc_init;
232+ x86_init.pci.init = pci_numaq_init;
233 }
234 }
235
236Index: linux-2.6.33/arch/x86/kernel/visws_quirks.c
237===================================================================
238--- linux-2.6.33.orig/arch/x86/kernel/visws_quirks.c
239+++ linux-2.6.33/arch/x86/kernel/visws_quirks.c
240@@ -49,11 +49,6 @@ extern int no_broadcast;
241 char visws_board_type = -1;
242 char visws_board_rev = -1;
243
244-int is_visws_box(void)
245-{
246- return visws_board_type >= 0;
247-}
248-
249 static void __init visws_time_init(void)
250 {
251 printk(KERN_INFO "Starting Cobalt Timer system clock\n");
252@@ -242,6 +237,8 @@ void __init visws_early_detect(void)
253 x86_init.irqs.pre_vector_init = visws_pre_intr_init;
254 x86_init.irqs.trap_init = visws_trap_init;
255 x86_init.timers.timer_init = visws_time_init;
256+ x86_init.pci.init = pci_visws_init;
257+ x86_init.pci.init_irq = x86_init_noop;
258
259 /*
260 * Install reboot quirks:
261@@ -508,7 +505,7 @@ static struct irq_chip cobalt_irq_type =
262 */
263 static unsigned int startup_piix4_master_irq(unsigned int irq)
264 {
265- init_8259A(0);
266+ legacy_pic->init(0);
267
268 return startup_cobalt_irq(irq);
269 }
270@@ -531,10 +528,7 @@ static struct irq_chip piix4_master_irq_
271
272
273 static struct irq_chip piix4_virtual_irq_type = {
274- .name = "PIIX4-virtual",
275- .shutdown = disable_8259A_irq,
276- .enable = enable_8259A_irq,
277- .disable = disable_8259A_irq,
278+ .typename = "PIIX4-virtual",
279 };
280
281
282@@ -609,7 +603,7 @@ static irqreturn_t piix4_master_intr(int
283 handle_IRQ_event(realirq, desc->action);
284
285 if (!(desc->status & IRQ_DISABLED))
286- enable_8259A_irq(realirq);
287+ legacy_pic->chip->unmask(realirq);
288
289 return IRQ_HANDLED;
290
291@@ -628,6 +622,12 @@ static struct irqaction cascade_action =
292 .name = "cascade",
293 };
294
295+static inline void set_piix4_virtual_irq_type(void)
296+{
297+ piix4_virtual_irq_type.shutdown = i8259A_chip.mask;
298+ piix4_virtual_irq_type.enable = i8259A_chip.unmask;
299+ piix4_virtual_irq_type.disable = i8259A_chip.mask;
300+}
301
302 void init_VISWS_APIC_irqs(void)
303 {
304@@ -653,6 +653,7 @@ void init_VISWS_APIC_irqs(void)
305 desc->chip = &piix4_master_irq_type;
306 }
307 else if (i < CO_IRQ_APIC0) {
308+ set_piix4_virtual_irq_type();
309 desc->chip = &piix4_virtual_irq_type;
310 }
311 else if (IS_CO_APIC(i)) {
312Index: linux-2.6.33/arch/x86/kernel/x86_init.c
313===================================================================
314--- linux-2.6.33.orig/arch/x86/kernel/x86_init.c
315+++ linux-2.6.33/arch/x86/kernel/x86_init.c
316@@ -4,9 +4,11 @@
317 * For licencing details see kernel-base/COPYING
318 */
319 #include <linux/init.h>
320+#include <linux/ioport.h>
321
322 #include <asm/bios_ebda.h>
323 #include <asm/paravirt.h>
324+#include <asm/pci_x86.h>
325 #include <asm/mpspec.h>
326 #include <asm/setup.h>
327 #include <asm/apic.h>
328@@ -70,6 +72,12 @@ struct x86_init_ops x86_init __initdata
329 .iommu = {
330 .iommu_init = iommu_init_noop,
331 },
332+
333+ .pci = {
334+ .init = x86_default_pci_init,
335+ .init_irq = x86_default_pci_init_irq,
336+ .fixup_irqs = x86_default_pci_fixup_irqs,
337+ },
338 };
339
340 struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
341Index: linux-2.6.33/arch/x86/pci/acpi.c
342===================================================================
343--- linux-2.6.33.orig/arch/x86/pci/acpi.c
344+++ linux-2.6.33/arch/x86/pci/acpi.c
345@@ -282,17 +282,14 @@ int __init pci_acpi_init(void)
346 {
347 struct pci_dev *dev = NULL;
348
349- if (pcibios_scanned)
350- return 0;
351-
352 if (acpi_noirq)
353- return 0;
354+ return -ENODEV;
355
356 printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
357 acpi_irq_penalty_init();
358- pcibios_scanned++;
359 pcibios_enable_irq = acpi_pci_irq_enable;
360 pcibios_disable_irq = acpi_pci_irq_disable;
361+ x86_init.pci.init_irq = x86_init_noop;
362
363 if (pci_routeirq) {
364 /*
365Index: linux-2.6.33/arch/x86/pci/common.c
366===================================================================
367--- linux-2.6.33.orig/arch/x86/pci/common.c
368+++ linux-2.6.33/arch/x86/pci/common.c
369@@ -72,12 +72,6 @@ struct pci_ops pci_root_ops = {
370 };
371
372 /*
373- * legacy, numa, and acpi all want to call pcibios_scan_root
374- * from their initcalls. This flag prevents that.
375- */
376-int pcibios_scanned;
377-
378-/*
379 * This interrupt-safe spinlock protects all accesses to PCI
380 * configuration space.
381 */
382Index: linux-2.6.33/arch/x86/pci/legacy.c
383===================================================================
384--- linux-2.6.33.orig/arch/x86/pci/legacy.c
385+++ linux-2.6.33/arch/x86/pci/legacy.c
386@@ -35,16 +35,13 @@ static void __devinit pcibios_fixup_peer
387 }
388 }
389
390-static int __init pci_legacy_init(void)
391+int __init pci_legacy_init(void)
392 {
393 if (!raw_pci_ops) {
394 printk("PCI: System does not support PCI\n");
395 return 0;
396 }
397
398- if (pcibios_scanned++)
399- return 0;
400-
401 printk("PCI: Probing PCI hardware\n");
402 pci_root_bus = pcibios_scan_root(0);
403 if (pci_root_bus)
404@@ -55,18 +52,15 @@ static int __init pci_legacy_init(void)
405
406 int __init pci_subsys_init(void)
407 {
408-#ifdef CONFIG_X86_NUMAQ
409- pci_numaq_init();
410-#endif
411-#ifdef CONFIG_ACPI
412- pci_acpi_init();
413-#endif
414-#ifdef CONFIG_X86_VISWS
415- pci_visws_init();
416-#endif
417- pci_legacy_init();
418+ /*
419+ * The init function returns a non-zero value when
420+ * pci_legacy_init should be invoked.
421+ */
422+ if (x86_init.pci.init())
423+ pci_legacy_init();
424+
425 pcibios_fixup_peer_bridges();
426- pcibios_irq_init();
427+ x86_init.pci.init_irq();
428 pcibios_init();
429
430 return 0;
431Index: linux-2.6.33/arch/x86/pci/numaq_32.c
432===================================================================
433--- linux-2.6.33.orig/arch/x86/pci/numaq_32.c
434+++ linux-2.6.33/arch/x86/pci/numaq_32.c
435@@ -152,14 +152,8 @@ int __init pci_numaq_init(void)
436 {
437 int quad;
438
439- if (!found_numaq)
440- return 0;
441-
442 raw_pci_ops = &pci_direct_conf1_mq;
443
444- if (pcibios_scanned++)
445- return 0;
446-
447 pci_root_bus = pcibios_scan_root(0);
448 if (pci_root_bus)
449 pci_bus_add_devices(pci_root_bus);
450Index: linux-2.6.33/arch/x86/pci/visws.c
451===================================================================
452--- linux-2.6.33.orig/arch/x86/pci/visws.c
453+++ linux-2.6.33/arch/x86/pci/visws.c
454@@ -69,9 +69,6 @@ void __init pcibios_update_irq(struct pc
455
456 int __init pci_visws_init(void)
457 {
458- if (!is_visws_box())
459- return -1;
460-
461 pcibios_enable_irq = &pci_visws_enable_irq;
462 pcibios_disable_irq = &pci_visws_disable_irq;
463
464@@ -90,5 +87,6 @@ int __init pci_visws_init(void)
465 pci_scan_bus_with_sysdata(pci_bus1);
466 pci_fixup_irqs(pci_common_swizzle, visws_map_irq);
467 pcibios_resource_survey();
468- return 0;
469+ /* Request bus scan */
470+ return 1;
471 }
472Index: linux-2.6.33/arch/x86/pci/irq.c
473===================================================================
474--- linux-2.6.33.orig/arch/x86/pci/irq.c
475+++ linux-2.6.33/arch/x86/pci/irq.c
476@@ -53,7 +53,7 @@ struct irq_router_handler {
477 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
478 };
479
480-int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
481+int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
482 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
483
484 /*
485@@ -1016,7 +1016,7 @@ static int pcibios_lookup_irq(struct pci
486 return 1;
487 }
488
489-static void __init pcibios_fixup_irqs(void)
490+void __init pcibios_fixup_irqs(void)
491 {
492 struct pci_dev *dev = NULL;
493 u8 pin;
494@@ -1110,12 +1110,12 @@ static struct dmi_system_id __initdata p
495 { }
496 };
497
498-int __init pcibios_irq_init(void)
499+void __init pcibios_irq_init(void)
500 {
501 DBG(KERN_DEBUG "PCI: IRQ init\n");
502
503- if (pcibios_enable_irq || raw_pci_ops == NULL)
504- return 0;
505+ if (raw_pci_ops == NULL)
506+ return;
507
508 dmi_check_system(pciirq_dmi_table);
509
510@@ -1142,9 +1142,7 @@ int __init pcibios_irq_init(void)
511 pirq_table = NULL;
512 }
513
514- pcibios_enable_irq = pirq_enable_irq;
515-
516- pcibios_fixup_irqs();
517+ x86_init.pci.fixup_irqs();
518
519 if (io_apic_assign_pci_irqs && pci_routeirq) {
520 struct pci_dev *dev = NULL;
521@@ -1157,8 +1155,6 @@ int __init pcibios_irq_init(void)
522 for_each_pci_dev(dev)
523 pirq_enable_irq(dev);
524 }
525-
526- return 0;
527 }
528
529 static void pirq_penalize_isa_irq(int irq, int active)
530Index: linux-2.6.33/arch/x86/kernel/apic/apic.c
531===================================================================
532--- linux-2.6.33.orig/arch/x86/kernel/apic/apic.c
533+++ linux-2.6.33/arch/x86/kernel/apic/apic.c
534@@ -718,6 +718,9 @@ static int __init calibrate_APIC_clock(v
535 */
536 void __init setup_boot_APIC_clock(void)
537 {
538+ /* we rely on global clockevent for calibration */
539+ if (global_clock_event == NULL)
540+ return;
541 /*
542 * The local apic timer can be disabled via the kernel
543 * commandline or from the CPU detection code. Register the lapic
544@@ -1390,7 +1393,7 @@ void __init enable_IR_x2apic(void)
545 }
546
547 local_irq_save(flags);
548- mask_8259A();
549+ legacy_pic->mask_all();
550 mask_IO_APIC_setup(ioapic_entries);
551
552 if (dmar_table_init_ret)
553@@ -1422,7 +1425,7 @@ void __init enable_IR_x2apic(void)
554 nox2apic:
555 if (!ret) /* IR enabling failed */
556 restore_IO_APIC_setup(ioapic_entries);
557- unmask_8259A();
558+ legacy_pic->restore_mask();
559 local_irq_restore(flags);
560
561 out:
562@@ -2018,7 +2021,7 @@ static int lapic_resume(struct sys_devic
563 }
564
565 mask_IO_APIC_setup(ioapic_entries);
566- mask_8259A();
567+ legacy_pic->mask_all();
568 }
569
570 if (x2apic_mode)
571@@ -2062,7 +2065,7 @@ static int lapic_resume(struct sys_devic
572
573 if (intr_remapping_enabled) {
574 reenable_intr_remapping(x2apic_mode);
575- unmask_8259A();
576+ legacy_pic->restore_mask();
577 restore_IO_APIC_setup(ioapic_entries);
578 free_ioapic_entries(ioapic_entries);
579 }
580Index: linux-2.6.33/arch/x86/kernel/apic/io_apic.c
581===================================================================
582--- linux-2.6.33.orig/arch/x86/kernel/apic/io_apic.c
583+++ linux-2.6.33/arch/x86/kernel/apic/io_apic.c
584@@ -94,10 +94,8 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCE
585 /* # of MP IRQ source entries */
586 int mp_irq_entries;
587
588-/* Number of legacy interrupts */
589-static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
590 /* GSI interrupts */
591-static int nr_irqs_gsi = NR_IRQS_LEGACY;
592+int nr_irqs_gsi = NR_IRQS_LEGACY;
593
594 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
595 int mp_bus_id_to_type[MAX_MP_BUSSES];
596@@ -140,33 +138,10 @@ static struct irq_pin_list *get_one_free
597
598 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
599 #ifdef CONFIG_SPARSE_IRQ
600-static struct irq_cfg irq_cfgx[] = {
601+static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
602 #else
603-static struct irq_cfg irq_cfgx[NR_IRQS] = {
604+static struct irq_cfg irq_cfgx[NR_IRQS];
605 #endif
606- [0] = { .vector = IRQ0_VECTOR, },
607- [1] = { .vector = IRQ1_VECTOR, },
608- [2] = { .vector = IRQ2_VECTOR, },
609- [3] = { .vector = IRQ3_VECTOR, },
610- [4] = { .vector = IRQ4_VECTOR, },
611- [5] = { .vector = IRQ5_VECTOR, },
612- [6] = { .vector = IRQ6_VECTOR, },
613- [7] = { .vector = IRQ7_VECTOR, },
614- [8] = { .vector = IRQ8_VECTOR, },
615- [9] = { .vector = IRQ9_VECTOR, },
616- [10] = { .vector = IRQ10_VECTOR, },
617- [11] = { .vector = IRQ11_VECTOR, },
618- [12] = { .vector = IRQ12_VECTOR, },
619- [13] = { .vector = IRQ13_VECTOR, },
620- [14] = { .vector = IRQ14_VECTOR, },
621- [15] = { .vector = IRQ15_VECTOR, },
622-};
623-
624-void __init io_apic_disable_legacy(void)
625-{
626- nr_legacy_irqs = 0;
627- nr_irqs_gsi = 0;
628-}
629
630 int __init arch_early_irq_init(void)
631 {
632@@ -176,16 +151,23 @@ int __init arch_early_irq_init(void)
633 int node;
634 int i;
635
636+ if (!legacy_pic->nr_legacy_irqs) {
637+ nr_irqs_gsi = 0;
638+ io_apic_irqs = ~0UL;
639+ }
640+
641 cfg = irq_cfgx;
642 count = ARRAY_SIZE(irq_cfgx);
643 node= cpu_to_node(boot_cpu_id);
644
645 for (i = 0; i < count; i++) {
646+ if (i < legacy_pic->nr_legacy_irqs)
647+ cfg[i].vector = IRQ0_VECTOR + i;
648 desc = irq_to_desc(i);
649 desc->chip_data = &cfg[i];
650 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
651 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
652- if (i < nr_legacy_irqs)
653+ if (i < legacy_pic->nr_legacy_irqs)
654 cpumask_setall(cfg[i].domain);
655 }
656
657@@ -865,7 +847,7 @@ static int __init find_isa_irq_apic(int
658 */
659 static int EISA_ELCR(unsigned int irq)
660 {
661- if (irq < nr_legacy_irqs) {
662+ if (irq < legacy_pic->nr_legacy_irqs) {
663 unsigned int port = 0x4d0 + (irq >> 3);
664 return (inb(port) >> (irq & 7)) & 1;
665 }
666@@ -1461,8 +1443,8 @@ static void setup_IO_APIC_irq(int apic_i
667 }
668
669 ioapic_register_intr(irq, desc, trigger);
670- if (irq < nr_legacy_irqs)
671- disable_8259A_irq(irq);
672+ if (irq < legacy_pic->nr_legacy_irqs)
673+ legacy_pic->chip->mask(irq);
674
675 ioapic_write_entry(apic_id, pin, entry);
676 }
677@@ -1875,7 +1857,7 @@ __apicdebuginit(void) print_PIC(void)
678 unsigned int v;
679 unsigned long flags;
680
681- if (!nr_legacy_irqs)
682+ if (!legacy_pic->nr_legacy_irqs)
683 return;
684
685 printk(KERN_DEBUG "\nprinting PIC contents\n");
686@@ -1959,7 +1941,7 @@ void __init enable_IO_APIC(void)
687 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
688 }
689
690- if (!nr_legacy_irqs)
691+ if (!legacy_pic->nr_legacy_irqs)
692 return;
693
694 for(apic = 0; apic < nr_ioapics; apic++) {
695@@ -2016,7 +1998,7 @@ void disable_IO_APIC(void)
696 */
697 clear_IO_APIC();
698
699- if (!nr_legacy_irqs)
700+ if (!legacy_pic->nr_legacy_irqs)
701 return;
702
703 /*
704@@ -2249,9 +2231,9 @@ static unsigned int startup_ioapic_irq(u
705 struct irq_cfg *cfg;
706
707 spin_lock_irqsave(&ioapic_lock, flags);
708- if (irq < nr_legacy_irqs) {
709- disable_8259A_irq(irq);
710- if (i8259A_irq_pending(irq))
711+ if (irq < legacy_pic->nr_legacy_irqs) {
712+ legacy_pic->chip->mask(irq);
713+ if (legacy_pic->irq_pending(irq))
714 was_pending = 1;
715 }
716 cfg = irq_cfg(irq);
717@@ -2784,8 +2766,8 @@ static inline void init_IO_APIC_traps(vo
718 * so default to an old-fashioned 8259
719 * interrupt if we can..
720 */
721- if (irq < nr_legacy_irqs)
722- make_8259A_irq(irq);
723+ if (irq < legacy_pic->nr_legacy_irqs)
724+ legacy_pic->make_irq(irq);
725 else
726 /* Strange. Oh, well.. */
727 desc->chip = &no_irq_chip;
728@@ -2942,7 +2924,7 @@ static inline void __init check_timer(vo
729 /*
730 * get/set the timer IRQ vector:
731 */
732- disable_8259A_irq(0);
733+ legacy_pic->chip->mask(0);
734 assign_irq_vector(0, cfg, apic->target_cpus());
735
736 /*
737@@ -2955,7 +2937,7 @@ static inline void __init check_timer(vo
738 * automatically.
739 */
740 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
741- init_8259A(1);
742+ legacy_pic->init(1);
743 #ifdef CONFIG_X86_32
744 {
745 unsigned int ver;
746@@ -3014,7 +2996,7 @@ static inline void __init check_timer(vo
747 if (timer_irq_works()) {
748 if (nmi_watchdog == NMI_IO_APIC) {
749 setup_nmi();
750- enable_8259A_irq(0);
751+ legacy_pic->chip->unmask(0);
752 }
753 if (disable_timer_pin_1 > 0)
754 clear_IO_APIC_pin(0, pin1);
755@@ -3037,14 +3019,14 @@ static inline void __init check_timer(vo
756 */
757 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
758 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
759- enable_8259A_irq(0);
760+ legacy_pic->chip->unmask(0);
761 if (timer_irq_works()) {
762 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
763 timer_through_8259 = 1;
764 if (nmi_watchdog == NMI_IO_APIC) {
765- disable_8259A_irq(0);
766+ legacy_pic->chip->mask(0);
767 setup_nmi();
768- enable_8259A_irq(0);
769+ legacy_pic->chip->unmask(0);
770 }
771 goto out;
772 }
773@@ -3052,7 +3034,7 @@ static inline void __init check_timer(vo
774 * Cleanup, just in case ...
775 */
776 local_irq_disable();
777- disable_8259A_irq(0);
778+ legacy_pic->chip->mask(0);
779 clear_IO_APIC_pin(apic2, pin2);
780 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
781 }
782@@ -3071,22 +3053,22 @@ static inline void __init check_timer(vo
783
784 lapic_register_intr(0, desc);
785 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
786- enable_8259A_irq(0);
787+ legacy_pic->chip->unmask(0);
788
789 if (timer_irq_works()) {
790 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
791 goto out;
792 }
793 local_irq_disable();
794- disable_8259A_irq(0);
795+ legacy_pic->chip->mask(0);
796 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
797 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
798
799 apic_printk(APIC_QUIET, KERN_INFO
800 "...trying to set up timer as ExtINT IRQ...\n");
801
802- init_8259A(0);
803- make_8259A_irq(0);
804+ legacy_pic->init(0);
805+ legacy_pic->make_irq(0);
806 apic_write(APIC_LVT0, APIC_DM_EXTINT);
807
808 unlock_ExtINT_logic();
809@@ -3128,7 +3110,7 @@ void __init setup_IO_APIC(void)
810 /*
811 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
812 */
813- io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
814+ io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
815
816 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
817 /*
818@@ -3139,7 +3121,7 @@ void __init setup_IO_APIC(void)
819 sync_Arb_IDs();
820 setup_IO_APIC_irqs();
821 init_IO_APIC_traps();
822- if (nr_legacy_irqs)
823+ if (legacy_pic->nr_legacy_irqs)
824 check_timer();
825 }
826
827@@ -3932,7 +3914,7 @@ static int __io_apic_set_pci_routing(str
828 /*
829 * IRQs < 16 are already in the irq_2_pin[] map
830 */
831- if (irq >= nr_legacy_irqs) {
832+ if (irq >= legacy_pic->nr_legacy_irqs) {
833 cfg = desc->chip_data;
834 if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
835 printk(KERN_INFO "can not add pin %d for irq %d\n",
836@@ -4310,3 +4292,25 @@ void __init mp_register_ioapic(int id, u
837
838 nr_ioapics++;
839 }
840+
841+/* Enable IOAPIC early just for system timer */
842+void __init pre_init_apic_IRQ0(void)
843+{
844+ struct irq_cfg *cfg;
845+ struct irq_desc *desc;
846+
847+ printk(KERN_INFO "Early APIC setup for system timer0\n");
848+#ifndef CONFIG_SMP
849+ phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
850+#endif
851+ desc = irq_to_desc_alloc_node(0, 0);
852+
853+ setup_local_APIC();
854+
855+ cfg = irq_cfg(0);
856+ add_pin_to_irq_node(cfg, 0, 0, 0);
857+ set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
858+
859+ /* FIXME: get trigger and polarity from mp_irqs[] */
860+ setup_IO_APIC_irq(0, 0, 0, desc, 0, 0);
861+}
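For context (not part of the patch excerpt shown here): the apic.c and io_apic.c hunks above replace direct i8259 calls with a legacy_pic indirection, so that platforms without an 8259 (such as Moorestown) can plug in a null PIC. Its definition is added elsewhere in this 13580-line patch, in the i8259 code not shown in this excerpt; the sketch below is only the shape implied by the call sites above and may differ in detail from the real structure.

/* Shape inferred from the call sites above -- may differ from the actual definition. */
struct legacy_pic {
        int nr_legacy_irqs;             /* 16 for a real i8259, 0 on Moorestown */
        struct irq_chip *chip;          /* provides ->mask(irq) / ->unmask(irq) */
        void (*mask_all)(void);
        void (*restore_mask)(void);
        void (*init)(int auto_eoi);
        int (*irq_pending)(unsigned int irq);
        void (*make_irq)(unsigned int irq);
};

extern struct legacy_pic *legacy_pic;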
862Index: linux-2.6.33/arch/x86/kernel/smpboot.c
863===================================================================
864--- linux-2.6.33.orig/arch/x86/kernel/smpboot.c
865+++ linux-2.6.33/arch/x86/kernel/smpboot.c
866@@ -48,6 +48,7 @@
867 #include <linux/err.h>
868 #include <linux/nmi.h>
869 #include <linux/tboot.h>
870+#include <linux/stackprotector.h>
871
872 #include <asm/acpi.h>
873 #include <asm/desc.h>
874@@ -67,6 +68,7 @@
875 #include <linux/mc146818rtc.h>
876
877 #include <asm/smpboot_hooks.h>
878+#include <asm/i8259.h>
879
880 #ifdef CONFIG_X86_32
881 u8 apicid_2_node[MAX_APICID];
882@@ -286,9 +288,9 @@ notrace static void __cpuinit start_seco
883 check_tsc_sync_target();
884
885 if (nmi_watchdog == NMI_IO_APIC) {
886- disable_8259A_irq(0);
887+ legacy_pic->chip->mask(0);
888 enable_NMI_through_LVT0();
889- enable_8259A_irq(0);
890+ legacy_pic->chip->unmask(0);
891 }
892
893 #ifdef CONFIG_X86_32
894@@ -324,6 +326,9 @@ notrace static void __cpuinit start_seco
895 /* enable local interrupts */
896 local_irq_enable();
897
898+ /* to prevent fake stack check failure in clock setup */
899+ boot_init_stack_canary();
900+
901 x86_cpuinit.setup_percpu_clockev();
902
903 wmb();
904Index: linux-2.6.33/Documentation/kernel-parameters.txt
905===================================================================
906--- linux-2.6.33.orig/Documentation/kernel-parameters.txt
907+++ linux-2.6.33/Documentation/kernel-parameters.txt
908@@ -1738,6 +1738,12 @@ and is between 256 and 4096 characters.
909 nomfgpt [X86-32] Disable Multi-Function General Purpose
910 Timer usage (for AMD Geode machines).
911
912+ x86_mrst_timer [X86-32,APBT]
913+ Choose the timer option for the x86 Moorestown MID platform.
914+ The two valid options are the APB timer only, or the local APIC
915+ timer plus one APB timer used as the broadcast timer.
916+ x86_mrst_timer=apbt_only | lapic_and_apbt
917+
918 norandmaps Don't use address space randomization. Equivalent to
919 echo 0 > /proc/sys/kernel/randomize_va_space
920
921Index: linux-2.6.33/arch/x86/Kconfig
922===================================================================
923--- linux-2.6.33.orig/arch/x86/Kconfig
924+++ linux-2.6.33/arch/x86/Kconfig
925@@ -390,6 +390,7 @@ config X86_MRST
926 bool "Moorestown MID platform"
927 depends on X86_32
928 depends on X86_EXTENDED_PLATFORM
929+ select APB_TIMER
930 ---help---
931 Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
932 Internet Device(MID) platform. Moorestown consists of two chips:
933@@ -398,6 +399,14 @@ config X86_MRST
934 nor standard legacy replacement devices/features. e.g. Moorestown does
935 not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
936
937+config MRST_SPI_UART_BOOT_MSG
938+ def_bool y
939+ prompt "Moorestown SPI UART boot message"
940+ depends on (X86_MRST && X86_32)
941+ help
942+ Enable this to see boot messages during the protected-mode boot phase,
943+ such as kernel decompression. The baud rate is set to 115200 8n1.
944+
945 config X86_RDC321X
946 bool "RDC R-321x SoC"
947 depends on X86_32
948@@ -612,6 +621,24 @@ config HPET_EMULATE_RTC
949 def_bool y
950 depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
951
952+config APB_TIMER
953+ def_bool y if X86_MRST
954+ prompt "Langwell APB Timer Support" if X86_MRST
955+ help
956+ APB timer is the replacement for 8254, HPET on X86 MID platforms.
957+ The APBT provides a stable time base on SMP
958+ systems, unlike the TSC, but it is more expensive to access,
959+ as it is off-chip. APB timers are always running regardless of CPU
960+ C states; they are used as the per-CPU clockevent device when possible.
961+
962+config LNW_IPC
963+ def_bool n
964+ prompt "Langwell IPC Support" if (X86_32 || X86_MRST)
965+ depends on X86_MRST
966+ help
967+ The IPC unit is used on Moorestown to bridge communications
968+ between the IA side and the SCU.
969+
970 # Mark as embedded because too many people got it wrong.
971 # The code disables itself when not needed.
972 config DMI
973Index: linux-2.6.33/arch/x86/include/asm/apb_timer.h
974===================================================================
975--- /dev/null
976+++ linux-2.6.33/arch/x86/include/asm/apb_timer.h
977@@ -0,0 +1,72 @@
978+/*
979+ * apb_timer.h: Driver for Langwell APB timer based on Synopsys DesignWare
980+ *
981+ * (C) Copyright 2009 Intel Corporation
982+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
983+ *
984+ * This program is free software; you can redistribute it and/or
985+ * modify it under the terms of the GNU General Public License
986+ * as published by the Free Software Foundation; version 2
987+ * of the License.
988+ *
989+ * Note:
990+ */
991+
992+#ifndef ASM_X86_APBT_H
993+#define ASM_X86_APBT_H
994+#include <linux/sfi.h>
995+
996+#ifdef CONFIG_APB_TIMER
997+
998+/* Langwell DW APB timer registers */
999+#define APBTMR_N_LOAD_COUNT 0x00
1000+#define APBTMR_N_CURRENT_VALUE 0x04
1001+#define APBTMR_N_CONTROL 0x08
1002+#define APBTMR_N_EOI 0x0c
1003+#define APBTMR_N_INT_STATUS 0x10
1004+
1005+#define APBTMRS_INT_STATUS 0xa0
1006+#define APBTMRS_EOI 0xa4
1007+#define APBTMRS_RAW_INT_STATUS 0xa8
1008+#define APBTMRS_COMP_VERSION 0xac
1009+#define APBTMRS_REG_SIZE 0x14
1010+
1011+/* register bits */
1012+#define APBTMR_CONTROL_ENABLE (1<<0)
1013+#define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */
1014+#define APBTMR_CONTROL_INT (1<<2)
1015+
1016+/* default memory mapped register base */
1017+#define LNW_SCU_ADDR 0xFF100000
1018+#define LNW_EXT_TIMER_OFFSET 0x1B800
1019+#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
1020+#define LNW_EXT_TIMER_PGOFFSET 0x800
1021+
1022+/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
1023+#define APBT_MAX_FREQ 50
1024+#define APBT_MIN_FREQ 1
1025+#define APBT_MMAP_SIZE 1024
1026+
1027+#define APBT_DEV_USED 1
1028+
1029+#define SFI_MTMR_MAX_NUM 8
1030+
1031+extern void apbt_time_init(void);
1032+extern struct clock_event_device *global_clock_event;
1033+extern unsigned long apbt_quick_calibrate(void);
1034+extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
1035+extern void apbt_setup_secondary_clock(void);
1036+extern unsigned int boot_cpu_id;
1037+extern int disable_apbt_percpu;
1038+
1039+extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
1040+extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
1041+extern int sfi_mtimer_num;
1042+
1043+#else /* CONFIG_APB_TIMER */
1044+
1045+static inline unsigned long apbt_quick_calibrate(void) {return 0; }
1046+static inline void apbt_time_init(void) { }
1047+
1048+#endif
1049+#endif /* ASM_X86_APBT_H */
1050Index: linux-2.6.33/arch/x86/kernel/Makefile
1051===================================================================
1052--- linux-2.6.33.orig/arch/x86/kernel/Makefile
1053+++ linux-2.6.33/arch/x86/kernel/Makefile
1054@@ -57,6 +57,12 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
1055 obj-y += cpu/
1056 obj-y += acpi/
1057 obj-$(CONFIG_SFI) += sfi.o
1058+sfi-processor-objs += sfi/sfi_processor_core.o
1059+sfi-processor-objs += sfi/sfi_processor_idle.o
1060+sfi-processor-objs += sfi/sfi_processor_perflib.o
1061+
1062+obj-$(CONFIG_SFI_PROCESSOR_PM) += sfi-processor.o
1063+
1064 obj-y += reboot.o
1065 obj-$(CONFIG_MCA) += mca_32.o
1066 obj-$(CONFIG_X86_MSR) += msr.o
1067@@ -85,8 +91,11 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefaul
1068 obj-$(CONFIG_KGDB) += kgdb.o
1069 obj-$(CONFIG_VM86) += vm86_32.o
1070 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
1071+obj-$(CONFIG_X86_MRST_EARLY_PRINTK) += mrst_earlyprintk.o
1072
1073 obj-$(CONFIG_HPET_TIMER) += hpet.o
1074+obj-$(CONFIG_APB_TIMER) += apb_timer.o
1075+obj-$(CONFIG_LNW_IPC) += ipc_mrst.o
1076
1077 obj-$(CONFIG_K8_NB) += k8.o
1078 obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
1079@@ -105,7 +114,7 @@ obj-$(CONFIG_SCx200) += scx200.o
1080 scx200-y += scx200_32.o
1081
1082 obj-$(CONFIG_OLPC) += olpc.o
1083-obj-$(CONFIG_X86_MRST) += mrst.o
1084+obj-$(CONFIG_X86_MRST) += mrst.o vrtc.o
1085
1086 microcode-y := microcode_core.o
1087 microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
1088Index: linux-2.6.33/arch/x86/kernel/apb_timer.c
1089===================================================================
1090--- /dev/null
1091+++ linux-2.6.33/arch/x86/kernel/apb_timer.c
1092@@ -0,0 +1,765 @@
1093+/*
1094+ * apb_timer.c: Driver for Langwell APB timers
1095+ *
1096+ * (C) Copyright 2009 Intel Corporation
1097+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
1098+ *
1099+ * This program is free software; you can redistribute it and/or
1100+ * modify it under the terms of the GNU General Public License
1101+ * as published by the Free Software Foundation; version 2
1102+ * of the License.
1103+ *
1104+ * Note:
1105+ * Langwell is the south complex of Intel Moorestown MID platform. There are
1106+ * eight external timers in total that can be used by the operating system.
1107+ * The timer information, such as frequency and addresses, is provided to the
1108+ * OS via SFI tables.
1109+ * Timer interrupts are routed via FW/HW emulated IOAPIC independently via
1110+ * individual redirection table entries (RTE).
1111+ * Unlike HPET, there is no master counter; therefore, one of the timers is
1112+ * used as clocksource. The overall allocation looks like:
1113+ * - timer 0 - NR_CPUs for per cpu timer
1114+ * - one timer for clocksource
1115+ * - one timer for watchdog driver.
1116+ * It is also worth noting that the APB timer does not support a true one-shot
1117+ * mode; free-running mode will be used here to emulate one-shot mode.
1118+ * APB timer can also be used as broadcast timer along with per cpu local APIC
1119+ * timer, but by default APB timer has higher rating than local APIC timers.
1120+ */
1121+
1122+#include <linux/clocksource.h>
1123+#include <linux/clockchips.h>
1124+#include <linux/delay.h>
1125+#include <linux/errno.h>
1126+#include <linux/init.h>
1127+#include <linux/sysdev.h>
1128+#include <linux/pm.h>
1129+#include <linux/pci.h>
1130+#include <linux/sfi.h>
1131+#include <linux/interrupt.h>
1132+#include <linux/cpu.h>
1133+#include <linux/irq.h>
1134+
1135+#include <asm/fixmap.h>
1136+#include <asm/apb_timer.h>
1137+
1138+#define APBT_MASK CLOCKSOURCE_MASK(32)
1139+#define APBT_SHIFT 22
1140+#define APBT_CLOCKEVENT_RATING 150
1141+#define APBT_CLOCKSOURCE_RATING 250
1142+#define APBT_MIN_DELTA_USEC 200
1143+
1144+#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
1145+#define APBT_CLOCKEVENT0_NUM (0)
1146+#define APBT_CLOCKEVENT1_NUM (1)
1147+#define APBT_CLOCKSOURCE_NUM (2)
1148+
1149+static unsigned long apbt_address;
1150+static int apb_timer_block_enabled;
1151+static void __iomem *apbt_virt_address;
1152+static int phy_cs_timer_id;
1153+
1154+/*
1155+ * Common DW APB timer info
1156+ */
1157+static uint64_t apbt_freq;
1158+
1159+static void apbt_set_mode(enum clock_event_mode mode,
1160+ struct clock_event_device *evt);
1161+static int apbt_next_event(unsigned long delta,
1162+ struct clock_event_device *evt);
1163+static cycle_t apbt_read_clocksource(struct clocksource *cs);
1164+static void apbt_restart_clocksource(void);
1165+
1166+struct apbt_dev {
1167+ struct clock_event_device evt;
1168+ unsigned int num;
1169+ int cpu;
1170+ unsigned int irq;
1171+ unsigned int tick;
1172+ unsigned int count;
1173+ unsigned int flags;
1174+ char name[10];
1175+};
1176+
1177+int disable_apbt_percpu __cpuinitdata;
1178+
1179+#ifdef CONFIG_SMP
1180+static unsigned int apbt_num_timers_used;
1181+static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
1182+static struct apbt_dev *apbt_devs;
1183+#endif
1184+
1185+static inline unsigned long apbt_readl_reg(unsigned long a)
1186+{
1187+ return readl(apbt_virt_address + a);
1188+}
1189+
1190+static inline void apbt_writel_reg(unsigned long d, unsigned long a)
1191+{
1192+ writel(d, apbt_virt_address + a);
1193+}
1194+
1195+static inline unsigned long apbt_readl(int n, unsigned long a)
1196+{
1197+ return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
1198+}
1199+
1200+static inline void apbt_writel(int n, unsigned long d, unsigned long a)
1201+{
1202+ writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE);
1203+}
1204+
1205+static inline void apbt_set_mapping(void)
1206+{
1207+ struct sfi_timer_table_entry *mtmr;
1208+
1209+ if (apbt_virt_address) {
1210+ pr_debug("APBT base already mapped\n");
1211+ return;
1212+ }
1213+ mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
1214+ if (mtmr == NULL) {
1215+ printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
1216+ APBT_CLOCKEVENT0_NUM);
1217+ return;
1218+ }
1219+ apbt_address = (unsigned long)mtmr->phys_addr;
1220+ if (!apbt_address) {
1221+ printk(KERN_WARNING "No timer base from SFI, use default\n");
1222+ apbt_address = APBT_DEFAULT_BASE;
1223+ }
1224+ apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
1225+ if (apbt_virt_address) {
1226+ pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\
1227+ (void *)apbt_address, (void *)apbt_virt_address);
1228+ } else {
1229+ pr_debug("Failed mapping APBT phy address at %p\n",\
1230+ (void *)apbt_address);
1231+ goto panic_noapbt;
1232+ }
1233+ apbt_freq = mtmr->freq_hz / USEC_PER_SEC;
1234+ sfi_free_mtmr(mtmr);
1235+
1236+ /* Now figure out the physical timer id for clocksource device */
1237+ mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
1238+ if (mtmr == NULL)
1239+ goto panic_noapbt;
1240+
1241+ /* Now figure out the physical timer id */
1242+ phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff)
1243+ / APBTMRS_REG_SIZE;
1244+ pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id);
1245+ return;
1246+
1247+panic_noapbt:
1248+ panic("Failed to setup APB system timer\n");
1249+
1250+}
1251+
1252+static inline void apbt_clear_mapping(void)
1253+{
1254+ iounmap(apbt_virt_address);
1255+ apbt_virt_address = NULL;
1256+}
1257+
1258+/*
1259+ * APBT timer interrupt enable / disable
1260+ */
1261+static inline int is_apbt_capable(void)
1262+{
1263+ return apbt_virt_address ? 1 : 0;
1264+}
1265+
1266+static struct clocksource clocksource_apbt = {
1267+ .name = "apbt",
1268+ .rating = APBT_CLOCKSOURCE_RATING,
1269+ .read = apbt_read_clocksource,
1270+ .mask = APBT_MASK,
1271+ .shift = APBT_SHIFT,
1272+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
1273+ .resume = apbt_restart_clocksource,
1274+};
1275+
1276+/* boot APB clock event device */
1277+static struct clock_event_device apbt_clockevent = {
1278+ .name = "apbt0",
1279+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
1280+ .set_mode = apbt_set_mode,
1281+ .set_next_event = apbt_next_event,
1282+ .shift = APBT_SHIFT,
1283+ .irq = 0,
1284+ .rating = APBT_CLOCKEVENT_RATING,
1285+};
1286+
1287+/*
1288+ * if user does not want to use per CPU apb timer, just give it a lower rating
1289+ * than local apic timer and skip the late per cpu timer init.
1290+ */
1291+static inline int __init setup_x86_mrst_timer(char *arg)
1292+{
1293+ if (!arg)
1294+ return -EINVAL;
1295+
1296+ if (strcmp("apbt_only", arg) == 0)
1297+ disable_apbt_percpu = 0;
1298+ else if (strcmp("lapic_and_apbt", arg) == 0)
1299+ disable_apbt_percpu = 1;
1300+ else {
1301+ pr_warning("X86 MRST timer option %s not recognised"
1302+ " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
1303+ arg);
1304+ return -EINVAL;
1305+ }
1306+ return 0;
1307+}
1308+__setup("x86_mrst_timer=", setup_x86_mrst_timer);
1309+
1310+/*
1311+ * start count down from 0xffff_ffff. this is done by toggling the enable bit
1312+ * then load initial load count to ~0.
1313+ */
1314+static void apbt_start_counter(int n)
1315+{
1316+ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
1317+
1318+ ctrl &= ~APBTMR_CONTROL_ENABLE;
1319+ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
1320+ apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT);
1321+ /* enable, mask interrupt */
1322+ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
1323+ ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
1324+ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
1325+ /* read it once to get cached counter value initialized */
1326+ apbt_read_clocksource(&clocksource_apbt);
1327+}
1328+
1329+static irqreturn_t apbt_interrupt_handler(int irq, void *data)
1330+{
1331+ struct apbt_dev *dev = (struct apbt_dev *)data;
1332+ struct clock_event_device *aevt = &dev->evt;
1333+
1334+ if (!aevt->event_handler) {
1335+ printk(KERN_INFO "Spurious APBT timer interrupt on %d\n",
1336+ dev->num);
1337+ return IRQ_NONE;
1338+ }
1339+ aevt->event_handler(aevt);
1340+ return IRQ_HANDLED;
1341+}
1342+
1343+static void apbt_restart_clocksource(void)
1344+{
1345+ apbt_start_counter(phy_cs_timer_id);
1346+}
1347+
1348+/* Setup IRQ routing via IOAPIC */
1349+#ifdef CONFIG_SMP
1350+static void apbt_setup_irq(struct apbt_dev *adev)
1351+{
1352+ struct irq_chip *chip;
1353+ struct irq_desc *desc;
1354+
1355+ /* timer0 irq has been setup early */
1356+ if (adev->irq == 0)
1357+ return;
1358+ desc = irq_to_desc(adev->irq);
1359+ chip = get_irq_chip(adev->irq);
1360+ disable_irq(adev->irq);
1361+ desc->status |= IRQ_MOVE_PCNTXT;
1362+ irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
1363+ /* APB timer irqs are set up as mp_irqs, timer is edge triggered */
1364+ set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
1365+ enable_irq(adev->irq);
1366+ if (system_state == SYSTEM_BOOTING)
1367+ if (request_irq(adev->irq, apbt_interrupt_handler,
1368+ IRQF_TIMER | IRQF_DISABLED|IRQF_NOBALANCING, adev->name, adev)) {
1369+ printk(KERN_ERR "Failed request IRQ for APBT%d\n", adev->num);
1370+ }
1371+}
1372+#endif
1373+
1374+static void apbt_enable_int(int n)
1375+{
1376+ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
1377+ /* clear pending intr */
1378+ apbt_readl(n, APBTMR_N_EOI);
1379+ ctrl &= ~APBTMR_CONTROL_INT;
1380+ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
1381+}
1382+
1383+static void apbt_disable_int(int n)
1384+{
1385+ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
1386+
1387+ ctrl |= APBTMR_CONTROL_INT;
1388+ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
1389+}
1390+
1391+
1392+static int apbt_clockevent_register(void)
1393+{
1394+ struct sfi_timer_table_entry *mtmr;
1395+
1396+ mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
1397+ if (mtmr == NULL) {
1398+ printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
1399+ APBT_CLOCKEVENT0_NUM);
1400+ return -ENODEV;
1401+ }
1402+
1403+ /*
1404+ * We need to calculate the scaled math multiplication factor for
1405+ * nanosecond to apbt tick conversion.
1406+ * mult = (nsec/cycle)*2^APBT_SHIFT
1407+ */
1408+ apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz
1409+ , NSEC_PER_SEC, APBT_SHIFT);
1410+
1411+ /* Calculate the min / max delta */
1412+ apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
1413+ &apbt_clockevent);
1414+ apbt_clockevent.min_delta_ns = clockevent_delta2ns(
1415+ APBT_MIN_DELTA_USEC*apbt_freq,
1416+ &apbt_clockevent);
1417+ /*
1418+ * Start apbt with the boot cpu mask and make it
1419+ * global if not used for per cpu timer.
1420+ */
1421+ apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
1422+
1423+ if (disable_apbt_percpu) {
1424+ apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
1425+ global_clock_event = &apbt_clockevent;
1426+ printk(KERN_DEBUG "%s clockevent registered as global\n",
1427+ global_clock_event->name);
1428+ }
1429+ if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
1430+ IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
1431+ apbt_clockevent.name, &apbt_clockevent)) {
1432+ printk(KERN_ERR "Failed request IRQ for APBT%d\n",
1433+ apbt_clockevent.irq);
1434+ }
1435+
1436+ clockevents_register_device(&apbt_clockevent);
1437+ /* Start APBT 0 interrupts */
1438+ apbt_enable_int(APBT_CLOCKEVENT0_NUM);
1439+
1440+ sfi_free_mtmr(mtmr);
1441+ return 0;
1442+}
1443+
1444+#ifdef CONFIG_SMP
1445+/* Should be called with per cpu */
1446+void apbt_setup_secondary_clock(void)
1447+{
1448+ struct apbt_dev *adev;
1449+ struct clock_event_device *aevt;
1450+ int cpu;
1451+
1452+ /* Don't register boot CPU clockevent */
1453+ cpu = smp_processor_id();
1454+ if (cpu == boot_cpu_id)
1455+ return;
1456+ /*
1457+ * We need to calculate the scaled math multiplication factor for
1458+ * nanosecond to apbt tick conversion.
1459+ * mult = (nsec/cycle)*2^APBT_SHIFT
1460+ */
1461+ printk(KERN_INFO "Init per CPU clockevent %d\n", cpu);
1462+ adev = &per_cpu(cpu_apbt_dev, cpu);
1463+ aevt = &adev->evt;
1464+
1465+ memcpy(aevt, &apbt_clockevent, sizeof(*aevt));
1466+ aevt->cpumask = cpumask_of(cpu);
1467+ aevt->name = adev->name;
1468+ aevt->mode = CLOCK_EVT_MODE_UNUSED;
1469+
1470+ printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n",
1471+ cpu, aevt->name, *(u32 *)aevt->cpumask);
1472+
1473+ apbt_setup_irq(adev);
1474+
1475+ clockevents_register_device(aevt);
1476+
1477+ apbt_enable_int(cpu);
1478+
1479+ return;
1480+}
1481+
1482+static int apbt_cpuhp_notify(struct notifier_block *n,
1483+ unsigned long action, void *hcpu)
1484+{
1485+ unsigned long cpu = (unsigned long)hcpu;
1486+ struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
1487+
1488+ switch (action & 0xf) {
1489+ case CPU_DEAD:
1490+ apbt_disable_int(cpu);
1491+ if (system_state == SYSTEM_RUNNING)
1492+ pr_debug("skipping APBT CPU %lu offline\n", cpu);
1493+ else if (adev) {
1494+ pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
1495+ free_irq(adev->irq, adev);
1496+ }
1497+ break;
1498+ }
1499+ return NOTIFY_OK;
1500+}
1501+
1502+static __init int apbt_late_init(void)
1503+{
1504+ if (disable_apbt_percpu)
1505+ return 0;
1506+ /* This notifier should be called after workqueue is ready */
1507+ hotcpu_notifier(apbt_cpuhp_notify, -20);
1508+ return 0;
1509+}
1510+fs_initcall(apbt_late_init);
1511+#else
1512+
1513+void apbt_setup_secondary_clock(void) {}
1514+
1515+#endif /* CONFIG_SMP */
1516+
1517+static void apbt_set_mode(enum clock_event_mode mode,
1518+ struct clock_event_device *evt)
1519+{
1520+ unsigned long ctrl;
1521+ uint64_t delta;
1522+ int timer_num;
1523+ struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
1524+
1525+ timer_num = adev->num;
1526+ pr_debug("%s CPU %d timer %d mode=%d\n",
1527+ __func__, first_cpu(*evt->cpumask), timer_num, mode);
1528+
1529+ switch (mode) {
1530+ case CLOCK_EVT_MODE_PERIODIC:
1531+ delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
1532+ delta >>= apbt_clockevent.shift;
1533+ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
1534+ ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
1535+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1536+ /*
1537+ * Per DW APB p. 46, the timer has to be disabled before loading the
1538+ * counter, or it may cause a sync problem.
1539+ */
1540+ ctrl &= ~APBTMR_CONTROL_ENABLE;
1541+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1542+ udelay(1);
1543+ pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
1544+ apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
1545+ ctrl |= APBTMR_CONTROL_ENABLE;
1546+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1547+ break;
1548+ /* APB timer does not have one-shot mode, use free running mode */
1549+ case CLOCK_EVT_MODE_ONESHOT:
1550+ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
1551+ /*
1552+ * Set free-running mode: this lets the timer reload the maximum
1553+ * timeout, which gives enough time (3 min on a 25 MHz clock) to rearm
1554+ * the next event, thereby emulating one-shot mode.
1555+ */
1556+ ctrl &= ~APBTMR_CONTROL_ENABLE;
1557+ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
1558+
1559+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1560+ /* write again to set free running mode */
1561+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1562+
1563+ /*
1564+ * DW APB p. 46, load counter with all 1s before starting free
1565+ * running mode.
1566+ */
1567+ apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
1568+ ctrl &= ~APBTMR_CONTROL_INT;
1569+ ctrl |= APBTMR_CONTROL_ENABLE;
1570+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1571+ break;
1572+
1573+ case CLOCK_EVT_MODE_UNUSED:
1574+ case CLOCK_EVT_MODE_SHUTDOWN:
1575+ apbt_disable_int(timer_num);
1576+ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
1577+ ctrl &= ~APBTMR_CONTROL_ENABLE;
1578+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1579+ break;
1580+
1581+ case CLOCK_EVT_MODE_RESUME:
1582+ apbt_enable_int(timer_num);
1583+ break;
1584+ }
1585+}
1586+
1587+static int apbt_next_event(unsigned long delta,
1588+ struct clock_event_device *evt)
1589+{
1590+ unsigned long ctrl;
1591+ int timer_num;
1592+
1593+ struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
1594+
1595+ timer_num = adev->num;
1596+ /* Disable timer */
1597+ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
1598+ ctrl &= ~APBTMR_CONTROL_ENABLE;
1599+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1600+ /* write new count */
1601+ apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
1602+ ctrl |= APBTMR_CONTROL_ENABLE;
1603+ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
1604+ return 0;
1605+}
1606+
1607+/*
1608+ * The APB timer clock is not in sync with pclk on Langwell, which translates
1609+ * to unreliable read values caused by sampling error. The error does not add
1610+ * up over time and only happens when a 0 is sampled as a 1 by mistake, so the
1611+ * time would appear to go backwards. The following code tries to prevent time
1612+ * from traveling backwards; it is a little bit paranoid.
1613+ */
1614+static cycle_t apbt_read_clocksource(struct clocksource *cs)
1615+{
1616+ unsigned long t0, t1, t2;
1617+ static unsigned long last_read;
1618+
1619+bad_count:
1620+ t1 = apbt_readl(phy_cs_timer_id,
1621+ APBTMR_N_CURRENT_VALUE);
1622+ t2 = apbt_readl(phy_cs_timer_id,
1623+ APBTMR_N_CURRENT_VALUE);
1624+ if (unlikely(t1 < t2)) {
1625+ pr_debug("APBT: read current count error %lx:%lx:%lx\n",
1626+ t1, t2, t2 - t1);
1627+ goto bad_count;
1628+ }
1629+ /*
1630+ * Check against the cached last read to make sure time does not go back;
1631+ * it could be a normal rollover, but we do a triple check anyway.
1632+ */
1633+ if (unlikely(t2 > last_read)) {
1634+ /* check if we have a normal rollover */
1635+ unsigned long raw_intr_status =
1636+ apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
1637+ /*
1638+ * cs timer interrupt is masked but raw intr bit is set if
1639+ * rollover occurs. then we read EOI reg to clear it.
1640+ */
1641+ if (raw_intr_status & (1 << phy_cs_timer_id)) {
1642+ apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
1643+ goto out;
1644+ }
1645+ pr_debug("APB CS going back %lx:%lx:%lx ",
1646+ t2, last_read, t2 - last_read);
1647+bad_count_x3:
1648+ pr_debug("triple check enforced\n");
1649+ t0 = apbt_readl(phy_cs_timer_id,
1650+ APBTMR_N_CURRENT_VALUE);
1651+ udelay(1);
1652+ t1 = apbt_readl(phy_cs_timer_id,
1653+ APBTMR_N_CURRENT_VALUE);
1654+ udelay(1);
1655+ t2 = apbt_readl(phy_cs_timer_id,
1656+ APBTMR_N_CURRENT_VALUE);
1657+ if ((t2 > t1) || (t1 > t0)) {
1658+ printk(KERN_ERR "Error: APB CS tripple check failed\n");
1659+ goto bad_count_x3;
1660+ }
1661+ }
1662+out:
1663+ last_read = t2;
1664+ return (cycle_t)~t2;
1665+}
1666+
1667+static int apbt_clocksource_register(void)
1668+{
1669+ u64 start, now;
1670+ cycle_t t1;
1671+
1672+ /* Start the counter, use timer 2 as source, timer 0/1 for event */
1673+ apbt_start_counter(phy_cs_timer_id);
1674+
1675+ /* Verify whether apbt counter works */
1676+ t1 = apbt_read_clocksource(&clocksource_apbt);
1677+ rdtscll(start);
1678+
1679+ /*
1680+ * We don't know the TSC frequency yet, but waiting for
1681+ * 200000 TSC cycles is safe:
1682+ * 4 GHz == 50us
1683+ * 1 GHz == 200us
1684+ */
1685+ do {
1686+ rep_nop();
1687+ rdtscll(now);
1688+ } while ((now - start) < 200000UL);
1689+
1690+ /* APBT is the only always on clocksource, it has to work! */
1691+ if (t1 == apbt_read_clocksource(&clocksource_apbt))
1692+ panic("APBT counter not counting. APBT disabled\n");
1693+
1694+ /*
1695+ * initialize and register APBT clocksource
1696+ * convert that to ns/clock cycle
1697+ * mult = (ns/c) * 2^APBT_SHIFT
1698+ */
1699+ clocksource_apbt.mult = div_sc(MSEC_PER_SEC,
1700+ (unsigned long) apbt_freq, APBT_SHIFT);
1701+ clocksource_register(&clocksource_apbt);
1702+
1703+ return 0;
1704+}
1705+
1706+/*
1707+ * Early setup the APBT timer, only use timer 0 for booting then switch to
1708+ * per CPU timer if possible.
1709+ * returns 1 if per cpu apbt is setup
1710+ * returns 0 if no per cpu apbt is chosen
1711+ * panic if set up failed, this is the only platform timer on Moorestown.
1712+ */
1713+void __init apbt_time_init(void)
1714+{
1715+#ifdef CONFIG_SMP
1716+ int i;
1717+ struct sfi_timer_table_entry *p_mtmr;
1718+ unsigned int percpu_timer;
1719+ struct apbt_dev *adev;
1720+#endif
1721+
1722+ if (apb_timer_block_enabled)
1723+ return;
1724+ apbt_set_mapping();
1725+ if (apbt_virt_address) {
1726+ pr_debug("Found APBT version 0x%lx\n",\
1727+ apbt_readl_reg(APBTMRS_COMP_VERSION));
1728+ } else
1729+ goto out_noapbt;
1730+ /*
1731+ * Read the frequency and check for a sane value, for ESL model
1732+ * we extend the possible clock range to allow time scaling.
1733+ */
1734+
1735+ if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
1736+ pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq);
1737+ goto out_noapbt;
1738+ }
1739+ if (apbt_clocksource_register()) {
1740+ pr_debug("APBT has failed to register clocksource\n");
1741+ goto out_noapbt;
1742+ }
1743+ if (!apbt_clockevent_register())
1744+ apb_timer_block_enabled = 1;
1745+ else {
1746+ pr_debug("APBT has failed to register clockevent\n");
1747+ goto out_noapbt;
1748+ }
1749+#ifdef CONFIG_SMP
1750+	/* the apb timer was disabled on the kernel cmdline, so use lapic timers */
1751+ if (disable_apbt_percpu) {
1752+ printk(KERN_INFO "apbt: disabled per cpu timer\n");
1753+ return;
1754+ }
1755+ pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
1756+ if (num_possible_cpus() <= sfi_mtimer_num) {
1757+ percpu_timer = 1;
1758+ apbt_num_timers_used = num_possible_cpus();
1759+ } else {
1760+ percpu_timer = 0;
1761+ apbt_num_timers_used = 1;
1762+ adev = &per_cpu(cpu_apbt_dev, 0);
1763+ adev->flags &= ~APBT_DEV_USED;
1764+ }
1765+ pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
1766+
1767+ /* here we set up per CPU timer data structure */
1768+ apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
1769+ GFP_KERNEL);
1770+ if (!apbt_devs) {
1771+ printk(KERN_ERR "Failed to allocate APB timer devices\n");
1772+ return;
1773+ }
1774+ for (i = 0; i < apbt_num_timers_used; i++) {
1775+ adev = &per_cpu(cpu_apbt_dev, i);
1776+ adev->num = i;
1777+ adev->cpu = i;
1778+ p_mtmr = sfi_get_mtmr(i);
1779+ if (p_mtmr) {
1780+ adev->tick = p_mtmr->freq_hz;
1781+ adev->irq = p_mtmr->irq;
1782+ } else
1783+ printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
1784+ adev->count = 0;
1785+ sprintf(adev->name, "apbt%d", i);
1786+ }
1787+#endif
1788+
1789+ return;
1790+
1791+out_noapbt:
1792+ apbt_clear_mapping();
1793+ apb_timer_block_enabled = 0;
1794+ panic("failed to enable APB timer\n");
1795+}
1796+
1797+static inline void apbt_disable(int n)
1798+{
1799+ if (is_apbt_capable()) {
1800+ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
1801+ ctrl &= ~APBTMR_CONTROL_ENABLE;
1802+ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
1803+ }
1804+}
1805+
1806+/* called before apb_timer_enable, use early map */
1807+unsigned long apbt_quick_calibrate(void)
1808+{
1809+ int i, scale;
1810+ u64 old, new;
1811+ cycle_t t1, t2;
1812+ unsigned long khz = 0;
1813+ u32 loop, shift;
1814+
1815+ apbt_set_mapping();
1816+ apbt_start_counter(phy_cs_timer_id);
1817+
1818+ /* check if the timer can count down, otherwise return */
1819+ old = apbt_read_clocksource(&clocksource_apbt);
1820+ i = 10000;
1821+ while (--i) {
1822+ if (old != apbt_read_clocksource(&clocksource_apbt))
1823+ break;
1824+ }
1825+ if (!i)
1826+ goto failed;
1827+
1828+ /* count 16 ms */
1829+ loop = (apbt_freq * 1000) << 4;
1830+
1831+ /* restart the timer to ensure it won't get to 0 in the calibration */
1832+ apbt_start_counter(phy_cs_timer_id);
1833+
1834+ old = apbt_read_clocksource(&clocksource_apbt);
1835+ old += loop;
1836+
1837+ t1 = __native_read_tsc();
1838+
1839+ do {
1840+ new = apbt_read_clocksource(&clocksource_apbt);
1841+ } while (new < old);
1842+
1843+ t2 = __native_read_tsc();
1844+
1845+ shift = 5;
1846+ if (unlikely(loop >> shift == 0)) {
1847+ printk(KERN_INFO
1848+ "APBT TSC calibration failed, not enough resolution\n");
1849+ return 0;
1850+ }
1851+ scale = (int)div_u64((t2 - t1), loop >> shift);
1852+ khz = (scale * apbt_freq * 1000) >> shift;
1853+ printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
1854+ return khz;
1855+failed:
1856+ return 0;
1857+}
1858Index: linux-2.6.33/arch/x86/include/asm/mrst.h
1859===================================================================
1860--- /dev/null
1861+++ linux-2.6.33/arch/x86/include/asm/mrst.h
1862@@ -0,0 +1,16 @@
1863+/*
1864+ * mrst.h: Intel Moorestown platform specific setup code
1865+ *
1866+ * (C) Copyright 2009 Intel Corporation
1867+ *
1868+ * This program is free software; you can redistribute it and/or
1869+ * modify it under the terms of the GNU General Public License
1870+ * as published by the Free Software Foundation; version 2
1871+ * of the License.
1872+ */
1873+#ifndef _ASM_X86_MRST_H
1874+#define _ASM_X86_MRST_H
1875+extern int pci_mrst_init(void);
1876+int __init sfi_parse_mrtc(struct sfi_table_header *table);
1877+
1878+#endif /* _ASM_X86_MRST_H */
1879Index: linux-2.6.33/arch/x86/kernel/mrst.c
1880===================================================================
1881--- linux-2.6.33.orig/arch/x86/kernel/mrst.c
1882+++ linux-2.6.33/arch/x86/kernel/mrst.c
1883@@ -2,16 +2,234 @@
1884 * mrst.c: Intel Moorestown platform specific setup code
1885 *
1886 * (C) Copyright 2008 Intel Corporation
1887- * Author: Jacob Pan (jacob.jun.pan@intel.com)
1888 *
1889 * This program is free software; you can redistribute it and/or
1890 * modify it under the terms of the GNU General Public License
1891 * as published by the Free Software Foundation; version 2
1892 * of the License.
1893 */
1894+
1895 #include <linux/init.h>
1896+#include <linux/kernel.h>
1897+#include <linux/sfi.h>
1898+#include <linux/bitmap.h>
1899+#include <linux/threads.h>
1900+#include <linux/spi/spi.h>
1901+#include <linux/spi/langwell_pmic_gpio.h>
1902+#include <linux/i2c.h>
1903+#include <linux/sfi.h>
1904+#include <linux/i2c/pca953x.h>
1905+#include <linux/gpio_keys.h>
1906+#include <linux/input.h>
1907+#include <linux/platform_device.h>
1908+#include <linux/irq.h>
1909
1910+#include <asm/string.h>
1911 #include <asm/setup.h>
1912+#include <asm/mpspec_def.h>
1913+#include <asm/hw_irq.h>
1914+#include <asm/apic.h>
1915+#include <asm/io_apic.h>
1916+#include <asm/apb_timer.h>
1917+#include <asm/io.h>
1918+#include <asm/mrst.h>
1919+#include <asm/vrtc.h>
1920+#include <asm/ipc_defs.h>
1921+#include <asm/reboot.h>
1922+#include <asm/i8259.h>
1923+
1924+#define LANGWELL_GPIO_ALT_ADDR 0xff12c038
1925+#define MRST_I2C_BUSNUM 3
1926+#define SFI_MRTC_MAX 8
1927+
1928+static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
1929+static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
1930+int sfi_mtimer_num;
1931+
1932+struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
1933+EXPORT_SYMBOL_GPL(sfi_mrtc_array);
1934+int sfi_mrtc_num;
1935+
1936+static inline void assign_to_mp_irq(struct mpc_intsrc *m,
1937+ struct mpc_intsrc *mp_irq)
1938+{
1939+ memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
1940+}
1941+
1942+static inline int mp_irq_cmp(struct mpc_intsrc *mp_irq,
1943+ struct mpc_intsrc *m)
1944+{
1945+ return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
1946+}
1947+
1948+static void save_mp_irq(struct mpc_intsrc *m)
1949+{
1950+ int i;
1951+
1952+ for (i = 0; i < mp_irq_entries; i++) {
1953+ if (!mp_irq_cmp(&mp_irqs[i], m))
1954+ return;
1955+ }
1956+
1957+ assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
1958+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
1959+ panic("Max # of irq sources exceeded!!\n");
1960+}
1961+
1962+/* parse all the mtimer info to a global mtimer array */
1963+static int __init sfi_parse_mtmr(struct sfi_table_header *table)
1964+{
1965+ struct sfi_table_simple *sb;
1966+ struct sfi_timer_table_entry *pentry;
1967+ struct mpc_intsrc mp_irq;
1968+ int totallen;
1969+
1970+ sb = (struct sfi_table_simple *)table;
1971+ if (!sfi_mtimer_num) {
1972+ sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
1973+ struct sfi_timer_table_entry);
1974+ pentry = (struct sfi_timer_table_entry *) sb->pentry;
1975+ totallen = sfi_mtimer_num * sizeof(*pentry);
1976+ memcpy(sfi_mtimer_array, pentry, totallen);
1977+ }
1978+
1979+ printk(KERN_INFO "SFI: MTIMER info (num = %d):\n", sfi_mtimer_num);
1980+ pentry = sfi_mtimer_array;
1981+ for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
1982+ printk(KERN_INFO "timer[%d]: paddr = 0x%08x, freq = %dHz,"
1983+ " irq = %d\n", totallen, (u32)pentry->phys_addr,
1984+ pentry->freq_hz, pentry->irq);
1985+ if (!pentry->irq)
1986+ continue;
1987+ mp_irq.type = MP_IOAPIC;
1988+ mp_irq.irqtype = mp_INT;
1989+ mp_irq.irqflag = 0;
1990+ mp_irq.srcbus = 0;
1991+ mp_irq.srcbusirq = pentry->irq; /* IRQ */
1992+ mp_irq.dstapic = MP_APIC_ALL;
1993+ mp_irq.dstirq = pentry->irq;
1994+ save_mp_irq(&mp_irq);
1995+ }
1996+
1997+ return 0;
1998+}
1999+
2000+struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
2001+{
2002+ int i;
2003+ if (hint < sfi_mtimer_num) {
2004+ if (!sfi_mtimer_usage[hint]) {
2005+			printk(KERN_DEBUG "hint taken for timer %d irq %d\n",
2006+ hint, sfi_mtimer_array[hint].irq);
2007+ sfi_mtimer_usage[hint] = 1;
2008+ return &sfi_mtimer_array[hint];
2009+ }
2010+ }
2011+ /* take the first timer available */
2012+ for (i = 0; i < sfi_mtimer_num;) {
2013+ if (!sfi_mtimer_usage[i]) {
2014+ sfi_mtimer_usage[i] = 1;
2015+ return &sfi_mtimer_array[i];
2016+ }
2017+ i++;
2018+ }
2019+ return NULL;
2020+}
2021+
2022+void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
2023+{
2024+ int i;
2025+ for (i = 0; i < sfi_mtimer_num;) {
2026+ if (mtmr->irq == sfi_mtimer_array[i].irq) {
2027+ sfi_mtimer_usage[i] = 0;
2028+ return;
2029+ }
2030+ i++;
2031+ }
2032+}
2033+
2034+/* parse all the mrtc info to a global mrtc array */
2035+int __init sfi_parse_mrtc(struct sfi_table_header *table)
2036+{
2037+ struct sfi_table_simple *sb;
2038+ struct sfi_rtc_table_entry *pentry;
2039+ struct mpc_intsrc mp_irq;
2040+
2041+ int totallen;
2042+
2043+ sb = (struct sfi_table_simple *)table;
2044+ if (!sfi_mrtc_num) {
2045+ sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
2046+ struct sfi_rtc_table_entry);
2047+ pentry = (struct sfi_rtc_table_entry *)sb->pentry;
2048+ totallen = sfi_mrtc_num * sizeof(*pentry);
2049+ memcpy(sfi_mrtc_array, pentry, totallen);
2050+ }
2051+
2052+ printk(KERN_INFO "SFI: RTC info (num = %d):\n", sfi_mrtc_num);
2053+ pentry = sfi_mrtc_array;
2054+ for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
2055+ printk(KERN_INFO "RTC[%d]: paddr = 0x%08x, irq = %d\n",
2056+ totallen, (u32)pentry->phys_addr, pentry->irq);
2057+ mp_irq.type = MP_IOAPIC;
2058+ mp_irq.irqtype = mp_INT;
2059+ mp_irq.irqflag = 0;
2060+ mp_irq.srcbus = 0;
2061+ mp_irq.srcbusirq = pentry->irq; /* IRQ */
2062+ mp_irq.dstapic = MP_APIC_ALL;
2063+ mp_irq.dstirq = pentry->irq;
2064+ save_mp_irq(&mp_irq);
2065+ }
2066+ return 0;
2067+}
2068+
2069+/*
2070+ * the secondary clock in Moorestown can be APBT or LAPIC clock, default to
2071+ * APBT but cmdline option can also override it.
2072+ */
2073+static void __cpuinit mrst_setup_secondary_clock(void)
2074+{
2075+	/* use the default lapic clock if the apb timer was disabled on the cmdline */
2076+ if (disable_apbt_percpu)
2077+ return setup_secondary_APIC_clock();
2078+ apbt_setup_secondary_clock();
2079+}
2080+
2081+static unsigned long __init mrst_calibrate_tsc(void)
2082+{
2083+ unsigned long flags, fast_calibrate;
2084+
2085+ local_irq_save(flags);
2086+ fast_calibrate = apbt_quick_calibrate();
2087+ local_irq_restore(flags);
2088+
2089+ if (fast_calibrate)
2090+ return fast_calibrate;
2091+
2092+ return 0;
2093+}
2094+
2095+void __init mrst_time_init(void)
2096+{
2097+ sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
2098+ pre_init_apic_IRQ0();
2099+ apbt_time_init();
2100+}
2101+
2102+void __init mrst_rtc_init(void)
2103+{
2104+ sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
2105+}
2106+
2107+static void mrst_power_off(void)
2108+{
2109+ lnw_ipc_single_cmd(0xf1, 1, 0, 0);
2110+}
2111+
2112+static void mrst_reboot(void)
2113+{
2114+ lnw_ipc_single_cmd(0xf1, 0, 0, 0);
2115+}
2116
2117 /*
2118 * Moorestown specific x86_init function overrides and early setup
2119@@ -21,4 +239,241 @@ void __init x86_mrst_early_setup(void)
2120 {
2121 x86_init.resources.probe_roms = x86_init_noop;
2122 x86_init.resources.reserve_resources = x86_init_noop;
2123+ x86_init.timers.timer_init = mrst_time_init;
2124+ x86_init.irqs.pre_vector_init = x86_init_noop;
2125+
2126+ x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock;
2127+
2128+ x86_platform.calibrate_tsc = mrst_calibrate_tsc;
2129+ x86_platform.get_wallclock = vrtc_get_time;
2130+ x86_platform.set_wallclock = vrtc_set_mmss;
2131+
2132+ x86_init.pci.init = pci_mrst_init;
2133+ x86_init.pci.fixup_irqs = x86_init_noop;
2134+
2135+ x86_init.oem.banner = mrst_rtc_init;
2136+ legacy_pic = &null_legacy_pic;
2137+
2138+ /* Moorestown specific power_off/restart method */
2139+ pm_power_off = mrst_power_off;
2140+ machine_ops.emergency_restart = mrst_reboot;
2141 }
2142+
2143+/*
2144+ * the dummy SPI2 slaves are in the SPIB table with host_num = 0, but their
2145+ * chip_selects begin at MRST_SPI2_CS_START; this saves us from adding a
2146+ * dummy SPI2 controller driver
2147+ */
2148+#define MRST_SPI2_CS_START 4
2149+static struct langwell_pmic_gpio_platform_data pmic_gpio_pdata;
2150+
2151+static int __init sfi_parse_spib(struct sfi_table_header *table)
2152+{
2153+ struct sfi_table_simple *sb;
2154+ struct sfi_spi_table_entry *pentry;
2155+ struct spi_board_info *info;
2156+ int num, i, j;
2157+ int ioapic;
2158+ struct io_apic_irq_attr irq_attr;
2159+
2160+ sb = (struct sfi_table_simple *)table;
2161+ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_spi_table_entry);
2162+ pentry = (struct sfi_spi_table_entry *) sb->pentry;
2163+
2164+ info = kzalloc(num * sizeof(*info), GFP_KERNEL);
2165+ if (!info) {
2166+ pr_info("%s(): Error in kzalloc\n", __func__);
2167+ return -ENOMEM;
2168+ }
2169+
2170+ if (num)
2171+ pr_info("Moorestown SPI devices info:\n");
2172+
2173+ for (i = 0, j = 0; i < num; i++, pentry++) {
2174+ strncpy(info[j].modalias, pentry->name, 16);
2175+ info[j].irq = pentry->irq_info;
2176+ info[j].bus_num = pentry->host_num;
2177+ info[j].chip_select = pentry->cs;
2178+ info[j].max_speed_hz = 3125000; /* hard coded */
2179+		if (info[j].chip_select >= MRST_SPI2_CS_START) {
2180+			/* these SPI2 devices are not exposed to the system as PCI
2181+			 * devices, but they have separate RTE entries in the IOAPIC,
2182+			 * so we have to enable them one by one here
2183+ */
2184+ ioapic = mp_find_ioapic(info[j].irq);
2185+ irq_attr.ioapic = ioapic;
2186+ irq_attr.ioapic_pin = info[j].irq;
2187+ irq_attr.trigger = 1;
2188+ irq_attr.polarity = 1;
2189+ io_apic_set_pci_routing(NULL, info[j].irq,
2190+ &irq_attr);
2191+ }
2192+ info[j].platform_data = pentry->dev_info;
2193+
2194+ if (!strcmp(pentry->name, "pmic_gpio")) {
2195+ memcpy(&pmic_gpio_pdata, pentry->dev_info, 8);
2196+ pmic_gpio_pdata.gpiointr = 0xffffeff8;
2197+ info[j].platform_data = &pmic_gpio_pdata;
2198+ }
2199+ pr_info("info[%d]: name = %16s, irq = 0x%04x, bus = %d, "
2200+ "cs = %d\n", j, info[j].modalias, info[j].irq,
2201+ info[j].bus_num, info[j].chip_select);
2202+ j++;
2203+ }
2204+ spi_register_board_info(info, j);
2205+ kfree(info);
2206+ return 0;
2207+}
2208+
2209+static struct pca953x_platform_data max7315_pdata;
2210+static struct pca953x_platform_data max7315_pdata_2;
2211+
2212+static int __init sfi_parse_i2cb(struct sfi_table_header *table)
2213+{
2214+ struct sfi_table_simple *sb;
2215+ struct sfi_i2c_table_entry *pentry;
2216+ struct i2c_board_info *info[MRST_I2C_BUSNUM];
2217+ int table_length[MRST_I2C_BUSNUM] = {0};
2218+ int num, i, j, busnum;
2219+
2220+ sb = (struct sfi_table_simple *)table;
2221+ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_i2c_table_entry);
2222+ pentry = (struct sfi_i2c_table_entry *) sb->pentry;
2223+
2224+ if (num <= 0)
2225+ return -ENODEV;
2226+
2227+ for (busnum = 0; busnum < MRST_I2C_BUSNUM; busnum++) {
2228+ info[busnum] = kzalloc(num * sizeof(**info), GFP_KERNEL);
2229+ if (!info[busnum]) {
2230+ pr_info("%s(): Error in kzalloc\n", __func__);
2231+ while (busnum--)
2232+ kfree(info[busnum]);
2233+ return -ENOMEM;
2234+ }
2235+ }
2236+
2237+ if (num)
2238+ pr_info("Moorestown I2C devices info:\n");
2239+
2240+ for (busnum = 0, j = 0; j < num; j++, pentry++) {
2241+ busnum = pentry->host_num;
2242+ if (busnum >= MRST_I2C_BUSNUM || busnum < 0)
2243+ continue;
2244+
2245+ i = table_length[busnum];
2246+ strncpy(info[busnum][i].type, pentry->name, 16);
2247+ info[busnum][i].irq = pentry->irq_info;
2248+ info[busnum][i].addr = pentry->addr;
2249+ info[busnum][i].platform_data = pentry->dev_info;
2250+ table_length[busnum]++;
2251+
2252+ if (!strcmp(pentry->name, "i2c_max7315")) {
2253+ strcpy(info[busnum][i].type, "max7315");
2254+ memcpy(&max7315_pdata, pentry->dev_info, 10);
2255+ info[busnum][i].platform_data = &max7315_pdata;
2256+ }
2257+ else if (!strcmp(pentry->name, "i2c_max7315_2")) {
2258+ strcpy(info[busnum][i].type, "max7315");
2259+ memcpy(&max7315_pdata_2, pentry->dev_info, 10);
2260+ info[busnum][i].platform_data = &max7315_pdata_2;
2261+ }
2262+
2263+ pr_info("info[%d]: bus = %d, name = %16s, irq = 0x%04x, addr = "
2264+ "0x%x\n", i, busnum, info[busnum][i].type,
2265+ info[busnum][i].irq, info[busnum][i].addr);
2266+ }
2267+
2268+ for (busnum = 0; busnum < MRST_I2C_BUSNUM; busnum++) {
2269+ i2c_register_board_info(busnum, info[busnum],
2270+ table_length[busnum]);
2271+ }
2272+
2273+ return 0;
2274+}
2275+
2276+/* set up the multi-function pins */
2277+static void set_alt_func(void)
2278+{
2279+ u32 __iomem *mem = ioremap_nocache(LANGWELL_GPIO_ALT_ADDR, 16);
2280+ u32 value;
2281+
2282+ if (!mem) {
2283+ pr_err("can not map GPIO controller address.\n");
2284+ return;
2285+ }
2286+ value = (readl(mem + 1) & 0x0000ffff) | 0x55550000;
2287+ writel(value, mem + 1);
2288+ value = (readl(mem + 2) & 0xf0000000) | 0x05555555;
2289+ writel(value, mem + 2);
2290+ value = (readl(mem + 3) & 0xfff000ff) | 0x00055500;
2291+ writel(value, mem + 3);
2292+
2293+ iounmap(mem);
2294+}
2295+
2296+static int __init mrst_platform_init(void)
2297+{
2298+ sfi_table_parse(SFI_SIG_SPIB, NULL, NULL, sfi_parse_spib);
2299+ sfi_table_parse(SFI_SIG_I2CB, NULL, NULL, sfi_parse_i2cb);
2300+ set_alt_func();
2301+ return 0;
2302+}
2303+
2304+arch_initcall(mrst_platform_init);
2305+
2306+static struct gpio_keys_button gpio_button[] = {
2307+ [0] = {
2308+ .desc = "power button1",
2309+ .code = KEY_POWER,
2310+ .type = EV_KEY,
2311+ .active_low = 1,
2312+ .debounce_interval = 3000, /*soft debounce*/
2313+ .gpio = 65,
2314+ },
2315+ [1] = {
2316+ .desc = "programmable button1",
2317+ .code = KEY_PROG1,
2318+ .type = EV_KEY,
2319+ .active_low = 1,
2320+ .debounce_interval = 20,
2321+ .gpio = 66,
2322+ },
2323+ [2] = {
2324+ .desc = "programmable button2",
2325+ .code = KEY_PROG2,
2326+ .type = EV_KEY,
2327+ .active_low = 1,
2328+ .debounce_interval = 20,
2329+ .gpio = 69
2330+ },
2331+ [3] = {
2332+ .desc = "lid switch",
2333+ .code = SW_LID,
2334+ .type = EV_SW,
2335+ .active_low = 1,
2336+ .debounce_interval = 20,
2337+ .gpio = 101
2338+ },
2339+};
2340+
2341+static struct gpio_keys_platform_data mrst_gpio_keys = {
2342+ .buttons = gpio_button,
2343+ .rep = 1,
2344+ .nbuttons = sizeof(gpio_button) / sizeof(struct gpio_keys_button),
2345+};
2346+
2347+static struct platform_device pb_device = {
2348+ .name = "gpio-keys",
2349+ .id = -1,
2350+ .dev = {
2351+ .platform_data = &mrst_gpio_keys,
2352+ },
2353+};
2354+
2355+static int __init pb_keys_init(void)
2356+{
2357+ return platform_device_register(&pb_device);
2358+}
2359+
2360+late_initcall(pb_keys_init);
2361Index: linux-2.6.33/arch/x86/include/asm/io_apic.h
2362===================================================================
2363--- linux-2.6.33.orig/arch/x86/include/asm/io_apic.h
2364+++ linux-2.6.33/arch/x86/include/asm/io_apic.h
2365@@ -143,8 +143,6 @@ extern int noioapicreroute;
2366 /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
2367 extern int timer_through_8259;
2368
2369-extern void io_apic_disable_legacy(void);
2370-
2371 /*
2372 * If we use the IO-APIC for IRQ routing, disable automatic
2373 * assignment of PCI IRQ's.
2374@@ -189,6 +187,7 @@ extern struct mp_ioapic_gsi mp_gsi_rout
2375 int mp_find_ioapic(int gsi);
2376 int mp_find_ioapic_pin(int ioapic, int gsi);
2377 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
2378+extern void __init pre_init_apic_IRQ0(void);
2379
2380 #else /* !CONFIG_X86_IO_APIC */
2381
2382Index: linux-2.6.33/arch/x86/pci/mmconfig-shared.c
2383===================================================================
2384--- linux-2.6.33.orig/arch/x86/pci/mmconfig-shared.c
2385+++ linux-2.6.33/arch/x86/pci/mmconfig-shared.c
2386@@ -601,7 +601,8 @@ static void __init __pci_mmcfg_init(int
2387 if (!known_bridge)
2388 acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
2389
2390- pci_mmcfg_reject_broken(early);
2391+ if (!acpi_disabled)
2392+ pci_mmcfg_reject_broken(early);
2393
2394 if (list_empty(&pci_mmcfg_list))
2395 return;
2396Index: linux-2.6.33/arch/x86/pci/Makefile
2397===================================================================
2398--- linux-2.6.33.orig/arch/x86/pci/Makefile
2399+++ linux-2.6.33/arch/x86/pci/Makefile
2400@@ -13,7 +13,7 @@ obj-$(CONFIG_X86_VISWS) += visws.o
2401
2402 obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
2403
2404-obj-y += common.o early.o
2405+obj-y += common.o early.o mrst.o
2406 obj-y += amd_bus.o
2407 obj-$(CONFIG_X86_64) += bus_numa.o
2408
2409Index: linux-2.6.33/arch/x86/pci/mrst.c
2410===================================================================
2411--- /dev/null
2412+++ linux-2.6.33/arch/x86/pci/mrst.c
2413@@ -0,0 +1,262 @@
2414+/*
2415+ * Moorestown PCI support
2416+ * Copyright (c) 2008 Intel Corporation
2417+ * Jesse Barnes <jesse.barnes@intel.com>
2418+ *
2419+ * Moorestown has an interesting PCI implementation:
2420+ * - configuration space is memory mapped (as defined by MCFG)
2421+ * - Lincroft devices also have a real, type 1 configuration space
2422+ * - Early Lincroft silicon has a type 1 access bug that will cause
2423+ * a hang if non-existent devices are accessed
2424+ * - some devices have the "fixed BAR" capability, which means
2425+ * they can't be relocated or modified; check for that during
2426+ * BAR sizing
2427+ *
2428+ * So, we use the MCFG space for all reads and writes, but also send
2429+ * Lincroft writes to type 1 space. But only read/write if the device
2430+ * actually exists, otherwise return all 1s for reads and bit bucket
2431+ * the writes.
2432+ */
2433+
2434+#include <linux/sched.h>
2435+#include <linux/pci.h>
2436+#include <linux/ioport.h>
2437+#include <linux/init.h>
2438+#include <linux/dmi.h>
2439+
2440+#include <asm/acpi.h>
2441+#include <asm/segment.h>
2442+#include <asm/io.h>
2443+#include <asm/smp.h>
2444+#include <asm/pci_x86.h>
2445+#include <asm/hw_irq.h>
2446+
2447+#define PCIE_CAP_OFFSET 0x100
2448+
2449+/* Fixed BAR fields */
2450+#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
2451+#define PCI_FIXED_BAR_0_SIZE 0x04
2452+#define PCI_FIXED_BAR_1_SIZE 0x08
2453+#define PCI_FIXED_BAR_2_SIZE 0x0c
2454+#define PCI_FIXED_BAR_3_SIZE 0x10
2455+#define PCI_FIXED_BAR_4_SIZE 0x14
2456+#define PCI_FIXED_BAR_5_SIZE 0x1c
2457+
2458+/**
2459+ * fixed_bar_cap - return the offset of the fixed BAR cap if found
2460+ * @bus: PCI bus
2461+ * @devfn: device in question
2462+ *
2463+ * Look for the fixed BAR cap on @bus and @devfn, returning its offset
2464+ * if found or 0 otherwise.
2465+ */
2466+static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
2467+{
2468+ int pos;
2469+ u32 pcie_cap = 0, cap_data;
2470+ if (!raw_pci_ext_ops) return 0;
2471+
2472+ pos = PCIE_CAP_OFFSET;
2473+ while (pos) {
2474+ if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
2475+ devfn, pos, 4, &pcie_cap))
2476+ return 0;
2477+
2478+ if (pcie_cap == 0xffffffff)
2479+ return 0;
2480+
2481+ if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
2482+ raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
2483+ devfn, pos + 4, 4, &cap_data);
2484+ if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
2485+ return pos;
2486+ }
2487+
2488+ pos = pcie_cap >> 20;
2489+ }
2490+
2491+ return 0;
2492+}
2493+
2494+static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
2495+ int reg, int len, u32 val, int offset)
2496+{
2497+ u32 size;
2498+ unsigned int domain, busnum;
2499+ int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;
2500+
2501+ domain = pci_domain_nr(bus);
2502+ busnum = bus->number;
2503+
2504+ if (val == ~0 && len == 4) {
2505+ unsigned long decode;
2506+
2507+ raw_pci_ext_ops->read(domain, busnum, devfn,
2508+ offset + 8 + (bar * 4), 4, &size);
2509+
2510+ /* Turn the size into a decode pattern for the sizing code */
2511+ if (size) {
2512+ decode = size - 1;
2513+ decode |= decode >> 1;
2514+ decode |= decode >> 2;
2515+ decode |= decode >> 4;
2516+ decode |= decode >> 8;
2517+ decode |= decode >> 16;
2518+ decode++;
2519+ decode = ~(decode - 1);
2520+ } else {
2521+ decode = ~0;
2522+ }
2523+
2524+ /*
2525+ * If val is all ones, the core code is trying to size the reg,
2526+ * so update the mmconfig space with the real size.
2527+ *
2528+ * Note: this assumes the fixed size we got is a power of two.
2529+ */
2530+ return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
2531+ decode);
2532+ }
2533+
2534+ /* This is some other kind of BAR write, so just do it. */
2535+ return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
2536+}
2537+
2538+/**
2539+ * type1_access_ok - check whether to use type 1
2540+ * @bus: bus number
2541+ * @devfn: device & function in question
2542+ *
2543+ * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
2544+ * all, the we can go ahead with any reads & writes. If it's on a Lincroft,
2545+ * but doesn't exist, avoid the access altogether to keep the chip from
2546+ * hanging.
2547+ */
2548+static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
2549+{
2550+	/* This is a workaround for an A0 LNC bug where the PCI status register
2551+	 * does not have the new CAP bit set; it cannot be written by SW either.
2552+	 *
2553+	 * The PCI header type in real LNC indicates a single function device;
2554+	 * this would prevent probing other devices behind the same function in
2555+	 * the PCI shim. Therefore, use the header type in the shim instead.
2556+ */
2557+ if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
2558+ return 0;
2559+ if (bus == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(0, 0)))
2560+ return 1;
2561+ return 0; /* langwell on others */
2562+}
2563+
2564+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
2565+ int size, u32 *value)
2566+{
2567+ if (type1_access_ok(bus->number, devfn, where))
2568+ return pci_direct_conf1.read(pci_domain_nr(bus), bus->number,
2569+ devfn, where, size, value);
2570+ return raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
2571+ devfn, where, size, value);
2572+}
2573+
2574+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
2575+ int size, u32 value)
2576+{
2577+ int offset;
2578+
2579+	/* On MRST there is no PCI ROM BAR; swallow the write so a subsequent
2580+	 * read of the ROM BAR returns 0 and is then ignored.
2581+ */
2582+ if (where == PCI_ROM_ADDRESS)
2583+ return 0;
2584+
2585+ /*
2586+ * Devices with fixed BARs need special handling:
2587+ * - BAR sizing code will save, write ~0, read size, restore
2588+ * - so writes to fixed BARs need special handling
2589+ * - other writes to fixed BAR devices should go through mmconfig
2590+ */
2591+ offset = fixed_bar_cap(bus, devfn);
2592+ if (offset &&
2593+ (where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)) {
2594+ return pci_device_update_fixed(bus, devfn, where, size, value,
2595+ offset);
2596+ }
2597+
2598+ /*
2599+ * On Moorestown update both real & mmconfig space
2600+ * Note: early Lincroft silicon can't handle type 1 accesses to
2601+ * non-existent devices, so just eat the write in that case.
2602+ */
2603+ if (type1_access_ok(bus->number, devfn, where))
2604+ return pci_direct_conf1.write(pci_domain_nr(bus), bus->number,
2605+ devfn, where, size, value);
2606+ return raw_pci_ext_ops->write(pci_domain_nr(bus), bus->number, devfn,
2607+ where, size, value);
2608+}
2609+
2610+static int mrst_pci_irq_enable(struct pci_dev *dev)
2611+{
2612+ u8 pin;
2613+ struct io_apic_irq_attr irq_attr;
2614+
2615+ if (!dev->irq)
2616+ return 0;
2617+
2618+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2619+
2620+	/* MRST only has an IOAPIC; the PCI irq lines are 1:1 mapped to
2621+ * IOAPIC RTE entries, so we just enable RTE for the device.
2622+ */
2623+ irq_attr.ioapic = mp_find_ioapic(dev->irq);
2624+ irq_attr.ioapic_pin = dev->irq;
2625+ irq_attr.trigger = 1; /* level */
2626+ irq_attr.polarity = 1; /* active low */
2627+ io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
2628+
2629+ return 0;
2630+}
2631+
2632+struct pci_ops pci_mrst_ops = {
2633+ .read = pci_read,
2634+ .write = pci_write,
2635+};
2636+
2637+/**
2638+ * pci_mrst_init - installs pci_mrst_ops
2639+ *
2640+ * Moorestown has an interesting PCI implementation (see above).
2641+ * Called when the early platform detection installs it.
2642+ */
2643+int __init pci_mrst_init(void)
2644+{
2645+ printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
2646+ pci_mmcfg_late_init();
2647+ pcibios_enable_irq = mrst_pci_irq_enable;
2648+ pci_root_ops = pci_mrst_ops;
2649+ /* Continue with standard init */
2650+ return 1;
2651+}
2652+
2653+/*
2654+ * Langwell devices reside at fixed offsets, don't try to move them.
2655+ */
2656+static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev)
2657+{
2658+ unsigned long offset;
2659+ u32 size;
2660+ int i;
2661+
2662+ /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
2663+ offset = fixed_bar_cap(dev->bus, dev->devfn);
2664+ if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
2665+ PCI_DEVFN(2, 2) == dev->devfn)
2666+ return;
2667+
2668+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
2669+ pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
2670+ dev->resource[i].end = dev->resource[i].start + size - 1;
2671+ dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
2672+ }
2673+}
2674+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);
2675+
2676Index: linux-2.6.33/include/linux/pci_regs.h
2677===================================================================
2678--- linux-2.6.33.orig/include/linux/pci_regs.h
2679+++ linux-2.6.33/include/linux/pci_regs.h
2680@@ -507,6 +507,7 @@
2681 #define PCI_EXT_CAP_ID_VC 2
2682 #define PCI_EXT_CAP_ID_DSN 3
2683 #define PCI_EXT_CAP_ID_PWR 4
2684+#define PCI_EXT_CAP_ID_VNDR 11
2685 #define PCI_EXT_CAP_ID_ACS 13
2686 #define PCI_EXT_CAP_ID_ARI 14
2687 #define PCI_EXT_CAP_ID_ATS 15
2688Index: linux-2.6.33/arch/x86/include/asm/fixmap.h
2689===================================================================
2690--- linux-2.6.33.orig/arch/x86/include/asm/fixmap.h
2691+++ linux-2.6.33/arch/x86/include/asm/fixmap.h
2692@@ -114,6 +114,10 @@ enum fixed_addresses {
2693 FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
2694 FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
2695 __end_of_permanent_fixed_addresses,
2696+
2697+#ifdef CONFIG_X86_MRST
2698+ FIX_LNW_VRTC,
2699+#endif
2700 /*
2701 * 256 temporary boot-time mappings, used by early_ioremap(),
2702 * before ioremap() is functional.
2703Index: linux-2.6.33/arch/x86/include/asm/vrtc.h
2704===================================================================
2705--- /dev/null
2706+++ linux-2.6.33/arch/x86/include/asm/vrtc.h
2707@@ -0,0 +1,30 @@
2708+#ifndef _MRST_VRTC_H
2709+#define _MRST_VRTC_H
2710+
2711+#ifdef CONFIG_X86_MRST
2712+extern unsigned char vrtc_cmos_read(unsigned char reg);
2713+extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
2714+
2715+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
2716+extern int sfi_mrtc_num;
2717+
2718+extern unsigned long vrtc_get_time(void);
2719+extern int vrtc_set_mmss(unsigned long nowtime);
2720+
2721+#define MRST_VRTC_PGOFFSET (0xc00)
2722+
2723+#else
2724+static inline unsigned char vrtc_cmos_read(unsigned char reg)
2725+{
2726+ return 0xff;
2727+}
2728+
2729+static inline void vrtc_cmos_write(unsigned char val, unsigned char reg)
2730+{
2731+ return;
2732+}
2733+#endif
2734+
2735+#define MRST_VRTC_MAP_SZ (1024)
2736+
2737+#endif
2738Index: linux-2.6.33/arch/x86/kernel/vrtc.c
2739===================================================================
2740--- /dev/null
2741+++ linux-2.6.33/arch/x86/kernel/vrtc.c
2742@@ -0,0 +1,116 @@
2743+/*
2744+ * vrtc.c: Driver for virtual RTC device on Intel MID platform
2745+ *
2746+ * (C) Copyright 2009 Intel Corporation
2747+ *
2748+ * This program is free software; you can redistribute it and/or
2749+ * modify it under the terms of the GNU General Public License
2750+ * as published by the Free Software Foundation; version 2
2751+ * of the License.
2752+ *
2753+ * Note:
2754+ * The vRTC is emulated by the system controller firmware; the real HW
2755+ * RTC is located in the PMIC device. The SCU FW shadows the PMIC RTC
2756+ * in a memory mapped IO space that is visible to the host IA
2757+ * processor. However, any update to the vRTC requires an IPI call
2758+ * to the SCU FW.
2759+ *
2760+ * This driver is based on RTC CMOS driver.
2761+ */
2762+
2763+#include <linux/kernel.h>
2764+#include <linux/module.h>
2765+#include <linux/sfi.h>
2766+
2767+#include <asm/vrtc.h>
2768+#include <asm/time.h>
2769+#include <asm/fixmap.h>
2770+
2771+static unsigned char *vrtc_va __read_mostly;
2772+
2773+static void vrtc_init_mmap(void)
2774+{
2775+ unsigned long rtc_paddr = sfi_mrtc_array[0].phys_addr;
2776+
2777+ BUG_ON(!rtc_paddr);
2778+
2779+ /* vRTC's register address may not be page aligned */
2780+ set_fixmap_nocache(FIX_LNW_VRTC, rtc_paddr);
2781+ vrtc_va = (unsigned char __iomem *)__fix_to_virt(FIX_LNW_VRTC);
2782+ vrtc_va += rtc_paddr & ~PAGE_MASK;
2783+}
2784+
2785+unsigned char vrtc_cmos_read(unsigned char reg)
2786+{
2787+ unsigned char retval;
2788+
2789+ /* vRTC's registers range from 0x0 to 0xD */
2790+ if (reg > 0xd)
2791+ return 0xff;
2792+
2793+ if (unlikely(!vrtc_va))
2794+ vrtc_init_mmap();
2795+
2796+ lock_cmos_prefix(reg);
2797+ retval = *(vrtc_va + (reg << 2));
2798+ lock_cmos_suffix(reg);
2799+ return retval;
2800+}
2801+EXPORT_SYMBOL(vrtc_cmos_read);
2802+
2803+void vrtc_cmos_write(unsigned char val, unsigned char reg)
2804+{
2805+ if (reg > 0xd)
2806+ return;
2807+
2808+ if (unlikely(!vrtc_va))
2809+ vrtc_init_mmap();
2810+
2811+ lock_cmos_prefix(reg);
2812+ *(vrtc_va + (reg << 2)) = val;
2813+ lock_cmos_suffix(reg);
2814+}
2815+EXPORT_SYMBOL(vrtc_cmos_write);
2816+
2817+unsigned long vrtc_get_time(void)
2818+{
2819+ u8 sec, min, hour, mday, mon;
2820+ u32 year;
2821+
2822+ while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
2823+ cpu_relax();
2824+
2825+ sec = vrtc_cmos_read(RTC_SECONDS);
2826+ min = vrtc_cmos_read(RTC_MINUTES);
2827+ hour = vrtc_cmos_read(RTC_HOURS);
2828+ mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
2829+ mon = vrtc_cmos_read(RTC_MONTH);
2830+ year = vrtc_cmos_read(RTC_YEAR);
2831+
2832+ /* vRTC YEAR reg contains the offset to 1970 */
2833+ year += 1970;
2834+
2835+ printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
2836+ "mon: %d year: %d\n", sec, min, hour, mday, mon, year);
2837+
2838+ return mktime(year, mon, mday, hour, min, sec);
2839+}
2840+
2841+/* Only care about the minutes and seconds */
2842+int vrtc_set_mmss(unsigned long nowtime)
2843+{
2844+ int real_sec, real_min;
2845+ int vrtc_min;
2846+
2847+ vrtc_min = vrtc_cmos_read(RTC_MINUTES);
2848+
2849+ real_sec = nowtime % 60;
2850+ real_min = nowtime / 60;
2851+ if (((abs(real_min - vrtc_min) + 15)/30) & 1)
2852+ real_min += 30;
2853+ real_min %= 60;
2854+
2855+ vrtc_cmos_write(real_sec, RTC_SECONDS);
2856+ vrtc_cmos_write(real_min, RTC_MINUTES);
2857+ return 0;
2858+}
2859Index: linux-2.6.33/drivers/rtc/Kconfig
2860===================================================================
2861--- linux-2.6.33.orig/drivers/rtc/Kconfig
2862+++ linux-2.6.33/drivers/rtc/Kconfig
2863@@ -423,6 +423,19 @@ config RTC_DRV_CMOS
2864 This driver can also be built as a module. If so, the module
2865 will be called rtc-cmos.
2866
2867+config RTC_DRV_VRTC
2868+ tristate "Virtual RTC for MRST"
2869+ depends on X86_MRST
2870+ default y if X86_MRST
2871+
2872+ help
2873+ Say "yes" here to get direct support for the real time clock
2874+	  found in the Moorestown platform. The vRTC is an emulated RTC
2875+	  that derives its clock source from the real RTC in the PMIC. The
2876+	  MC146818-style programming interface is mostly preserved, except
2877+	  that any updates are done via IPC calls to the system controller FW.
2878+
2879+
2880 config RTC_DRV_DS1216
2881 tristate "Dallas DS1216"
2882 depends on SNI_RM
2883Index: linux-2.6.33/drivers/rtc/Makefile
2884===================================================================
2885--- linux-2.6.33.orig/drivers/rtc/Makefile
2886+++ linux-2.6.33/drivers/rtc/Makefile
2887@@ -28,6 +28,7 @@ obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq48
2888 obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
2889 obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
2890 obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
2891+obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
2892 obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
2893 obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
2894 obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
2895Index: linux-2.6.33/drivers/rtc/rtc-mrst.c
2896===================================================================
2897--- /dev/null
2898+++ linux-2.6.33/drivers/rtc/rtc-mrst.c
2899@@ -0,0 +1,660 @@
2900+/*
2901+ * rtc-mrst.c: Driver for Moorestown virtual RTC
2902+ *
2903+ * (C) Copyright 2009 Intel Corporation
2904+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
2905+ * Feng Tang (feng.tang@intel.com)
2906+ *
2907+ * This program is free software; you can redistribute it and/or
2908+ * modify it under the terms of the GNU General Public License
2909+ * as published by the Free Software Foundation; version 2
2910+ * of the License.
2911+ *
2912+ * Note:
2913+ * The vRTC is emulated by the system controller firmware; the real HW
2914+ * RTC is located in the PMIC device. The SCU FW shadows the PMIC RTC
2915+ * in a memory mapped IO space that is visible to the host IA
2916+ * processor. However, any update to the vRTC requires an IPI call
2917+ * to the SCU FW.
2918+ *
2919+ * This driver is based on RTC CMOS driver.
2920+ */
2921+
2922+/*
2923+ * Note:
2924+ * * MRST vRTC only supports binary mode and 24H mode
2925+ * * MRST vRTC only supports PIE and AIE, no UIE
2926+ * * its alarm function is also limited to hr/min/sec.
2927+ * * so far it doesn't support the wake event function
2928+ */
2929+
2930+#include <linux/mod_devicetable.h>
2931+#include <linux/platform_device.h>
2932+#include <linux/interrupt.h>
2933+#include <linux/spinlock.h>
2934+#include <linux/kernel.h>
2935+#include <linux/module.h>
2936+#include <linux/init.h>
2937+#include <linux/sfi.h>
2938+
2939+#include <asm-generic/rtc.h>
2940+
2941+#include <asm/ipc_defs.h>
2942+#include <asm/vrtc.h>
2943+
2944+struct mrst_rtc {
2945+ struct rtc_device *rtc;
2946+ struct device *dev;
2947+ int irq;
2948+ struct resource *iomem;
2949+
2950+ void (*wake_on)(struct device *);
2951+ void (*wake_off)(struct device *);
2952+
2953+ u8 enabled_wake;
2954+ u8 suspend_ctrl;
2955+
2956+ /* Newer hardware extends the original register set */
2957+ u8 day_alrm;
2958+ u8 mon_alrm;
2959+ u8 century;
2960+};
2961+
2962+/* both platform and pnp busses use negative numbers for invalid irqs */
2963+#define is_valid_irq(n) ((n) >= 0)
2964+
2965+static const char driver_name[] = "rtc_mrst";
2966+
2967+#define RTC_IRQMASK (RTC_PF | RTC_AF)
2968+
2969+static inline int is_intr(u8 rtc_intr)
2970+{
2971+ if (!(rtc_intr & RTC_IRQF))
2972+ return 0;
2973+ return rtc_intr & RTC_IRQMASK;
2974+}
2975+
2976+/*
2977+ * rtc_time's year contains the increment over 1900, but vRTC's YEAR
2978+ * register can't be programmed to a value larger than 0x64, so the vRTC
2979+ * driver uses 1970 (the UNIX epoch) as the base and does the
2980+ * translation at read/write time
2981+ */
2982+static int mrst_read_time(struct device *dev, struct rtc_time *time)
2983+{
2984+ unsigned long flags;
2985+
2986+ if (rtc_is_updating())
2987+ mdelay(20);
2988+
2989+ spin_lock_irqsave(&rtc_lock, flags);
2990+ time->tm_sec = vrtc_cmos_read(RTC_SECONDS);
2991+ time->tm_min = vrtc_cmos_read(RTC_MINUTES);
2992+ time->tm_hour = vrtc_cmos_read(RTC_HOURS);
2993+ time->tm_mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
2994+ time->tm_mon = vrtc_cmos_read(RTC_MONTH);
2995+ time->tm_year = vrtc_cmos_read(RTC_YEAR);
2996+ spin_unlock_irqrestore(&rtc_lock, flags);
2997+
2998+ /* Adjust for the 1970/1900 */
2999+ time->tm_year += 70;
3000+ time->tm_mon--;
3001+	return 0;
3002+}
3003+
3004+static int mrst_set_time(struct device *dev, struct rtc_time *time)
3005+{
3006+ int ret;
3007+ unsigned long flags;
3008+ unsigned char mon, day, hrs, min, sec;
3009+ unsigned int yrs;
3010+
3011+ yrs = time->tm_year;
3012+ mon = time->tm_mon + 1; /* tm_mon starts at zero */
3013+ day = time->tm_mday;
3014+ hrs = time->tm_hour;
3015+ min = time->tm_min;
3016+ sec = time->tm_sec;
3017+
3018+ if (yrs < 70 || yrs > 138)
3019+ return -EINVAL;
3020+ yrs -= 70;
3021+
3022+ spin_lock_irqsave(&rtc_lock, flags);
3023+
3024+	/* Need to think about leap years */
3025+ vrtc_cmos_write(yrs, RTC_YEAR);
3026+ vrtc_cmos_write(mon, RTC_MONTH);
3027+ vrtc_cmos_write(day, RTC_DAY_OF_MONTH);
3028+ vrtc_cmos_write(hrs, RTC_HOURS);
3029+ vrtc_cmos_write(min, RTC_MINUTES);
3030+ vrtc_cmos_write(sec, RTC_SECONDS);
3031+
3032+ ret = lnw_ipc_single_cmd(IPC_VRTC_CMD, IPC_VRTC_SET_TIME, 0, 0);
3033+ spin_unlock_irqrestore(&rtc_lock, flags);
3034+ return ret;
3035+}
3036+
3037+static int mrst_read_alarm(struct device *dev, struct rtc_wkalrm *t)
3038+{
3039+ struct mrst_rtc *mrst = dev_get_drvdata(dev);
3040+ unsigned char rtc_control;
3041+
3042+ if (!is_valid_irq(mrst->irq))
3043+ return -EIO;
3044+
3045+ /* Basic alarms only support hour, minute, and seconds fields.
3046+ * Some also support day and month, for alarms up to a year in
3047+ * the future.
3048+ */
3049+ t->time.tm_mday = -1;
3050+ t->time.tm_mon = -1;
3051+ t->time.tm_year = -1;
3052+
3053+ /* vRTC only supports binary mode */
3054+ spin_lock_irq(&rtc_lock);
3055+ t->time.tm_sec = vrtc_cmos_read(RTC_SECONDS_ALARM);
3056+ t->time.tm_min = vrtc_cmos_read(RTC_MINUTES_ALARM);
3057+ t->time.tm_hour = vrtc_cmos_read(RTC_HOURS_ALARM);
3058+
3059+ rtc_control = vrtc_cmos_read(RTC_CONTROL);
3060+ spin_unlock_irq(&rtc_lock);
3061+
3062+ t->enabled = !!(rtc_control & RTC_AIE);
3063+ t->pending = 0;
3064+
3065+ return 0;
3066+}
3067+
3068+static void mrst_checkintr(struct mrst_rtc *mrst, unsigned char rtc_control)
3069+{
3070+ unsigned char rtc_intr;
3071+
3072+ /*
3073+ * NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
3074+ * allegedly some older rtcs need that to handle irqs properly
3075+ */
3076+ rtc_intr = vrtc_cmos_read(RTC_INTR_FLAGS);
3077+ rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
3078+ if (is_intr(rtc_intr))
3079+ rtc_update_irq(mrst->rtc, 1, rtc_intr);
3080+}
3081+
3082+static void mrst_irq_enable(struct mrst_rtc *mrst, unsigned char mask)
3083+{
3084+ unsigned char rtc_control;
3085+
3086+ /*
3087+ * Flush any pending IRQ status, notably for update irqs,
3088+ * before we enable new IRQs
3089+ */
3090+ rtc_control = vrtc_cmos_read(RTC_CONTROL);
3091+ mrst_checkintr(mrst, rtc_control);
3092+
3093+ rtc_control |= mask;
3094+ vrtc_cmos_write(rtc_control, RTC_CONTROL);
3095+
3096+ mrst_checkintr(mrst, rtc_control);
3097+}
3098+
3099+static void mrst_irq_disable(struct mrst_rtc *mrst, unsigned char mask)
3100+{
3101+ unsigned char rtc_control;
3102+
3103+ rtc_control = vrtc_cmos_read(RTC_CONTROL);
3104+ rtc_control &= ~mask;
3105+ vrtc_cmos_write(rtc_control, RTC_CONTROL);
3106+ mrst_checkintr(mrst, rtc_control);
3107+}
3108+
3109+static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
3110+{
3111+ struct mrst_rtc *mrst = dev_get_drvdata(dev);
3112+ unsigned char hrs, min, sec;
3113+ int ret = 0;
3114+
3115+ if (!is_valid_irq(mrst->irq))
3116+ return -EIO;
3117+
3118+ hrs = t->time.tm_hour;
3119+ min = t->time.tm_min;
3120+ sec = t->time.tm_sec;
3121+
3122+ spin_lock_irq(&rtc_lock);
3123+ /* Next rtc irq must not be from previous alarm setting */
3124+ mrst_irq_disable(mrst, RTC_AIE);
3125+
3126+ /* Update alarm */
3127+ vrtc_cmos_write(hrs, RTC_HOURS_ALARM);
3128+ vrtc_cmos_write(min, RTC_MINUTES_ALARM);
3129+ vrtc_cmos_write(sec, RTC_SECONDS_ALARM);
3130+
3131+ ret = lnw_ipc_single_cmd(IPC_VRTC_CMD, IPC_VRTC_SET_ALARM, 0, 0);
3132+ spin_unlock_irq(&rtc_lock);
3133+
3134+ if (ret)
3135+ return ret;
3136+
3137+ spin_lock_irq(&rtc_lock);
3138+ if (t->enabled)
3139+ mrst_irq_enable(mrst, RTC_AIE);
3140+
3141+ spin_unlock_irq(&rtc_lock);
3142+
3143+ return 0;
3144+}
3145+
3146+
3147+static int mrst_irq_set_state(struct device *dev, int enabled)
3148+{
3149+ struct mrst_rtc *mrst = dev_get_drvdata(dev);
3150+ unsigned long flags;
3151+
3152+ if (!is_valid_irq(mrst->irq))
3153+ return -ENXIO;
3154+
3155+ spin_lock_irqsave(&rtc_lock, flags);
3156+
3157+ if (enabled)
3158+ mrst_irq_enable(mrst, RTC_PIE);
3159+ else
3160+ mrst_irq_disable(mrst, RTC_PIE);
3161+
3162+ spin_unlock_irqrestore(&rtc_lock, flags);
3163+ return 0;
3164+}
3165+
3166+#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
3167+
3168+/* Currently, the vRTC doesn't support UIE ON/OFF */
3169+static int
3170+mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
3171+{
3172+ struct mrst_rtc *mrst = dev_get_drvdata(dev);
3173+ unsigned long flags;
3174+
3175+ switch (cmd) {
3176+ case RTC_AIE_OFF:
3177+ case RTC_AIE_ON:
3178+ if (!is_valid_irq(mrst->irq))
3179+ return -EINVAL;
3180+ break;
3181+ default:
3182+ /* PIE ON/OFF is handled by mrst_irq_set_state() */
3183+ return -ENOIOCTLCMD;
3184+ }
3185+
3186+ spin_lock_irqsave(&rtc_lock, flags);
3187+ switch (cmd) {
3188+ case RTC_AIE_OFF: /* alarm off */
3189+ mrst_irq_disable(mrst, RTC_AIE);
3190+ break;
3191+ case RTC_AIE_ON: /* alarm on */
3192+ mrst_irq_enable(mrst, RTC_AIE);
3193+ break;
3194+ }
3195+ spin_unlock_irqrestore(&rtc_lock, flags);
3196+ return 0;
3197+}
3198+
3199+#else
3200+#define mrst_rtc_ioctl NULL
3201+#endif
3202+
3203+#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
3204+
3205+static int mrst_procfs(struct device *dev, struct seq_file *seq)
3206+{
3207+ unsigned char rtc_control, valid;
3208+
3209+ spin_lock_irq(&rtc_lock);
3210+ rtc_control = vrtc_cmos_read(RTC_CONTROL);
3211+ valid = vrtc_cmos_read(RTC_VALID);
3212+ spin_unlock_irq(&rtc_lock);
3213+
3214+ return seq_printf(seq,
3215+ "periodic_IRQ\t: %s\n"
3216+ "square_wave\t: %s\n"
3217+ "BCD\t\t: %s\n"
3218+ "DST_enable\t: %s\n"
3219+ "periodic_freq\t: daily\n",
3220+ (rtc_control & RTC_PIE) ? "yes" : "no",
3221+ (rtc_control & RTC_SQWE) ? "yes" : "no",
3222+ (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
3223+ (rtc_control & RTC_DST_EN) ? "yes" : "no");
3224+}
3225+
3226+#else
3227+#define mrst_procfs NULL
3228+#endif
3229+
3230+static const struct rtc_class_ops mrst_rtc_ops = {
3231+ .ioctl = mrst_rtc_ioctl,
3232+ .read_time = mrst_read_time,
3233+ .set_time = mrst_set_time,
3234+ .read_alarm = mrst_read_alarm,
3235+ .set_alarm = mrst_set_alarm,
3236+ .proc = mrst_procfs,
3237+ .irq_set_freq = NULL,
3238+ .irq_set_state = mrst_irq_set_state,
3239+};
3240+
3241+static struct mrst_rtc mrst_rtc;
3242+
3243+/*
3244+ * When vRTC IRQ is captured by SCU FW, FW will clear the AIE bit in
3245+ * Reg B, so no need for this driver to clear it
3246+ */
3247+static irqreturn_t mrst_interrupt(int irq, void *p)
3248+{
3249+ u8 irqstat;
3250+
3251+ spin_lock(&rtc_lock);
3252+ /* This read will clear all IRQ flags inside Reg C */
3253+ irqstat = vrtc_cmos_read(RTC_INTR_FLAGS);
3254+ spin_unlock(&rtc_lock);
3255+
3256+ irqstat &= RTC_IRQMASK | RTC_IRQF;
3257+ if (is_intr(irqstat)) {
3258+ rtc_update_irq(p, 1, irqstat);
3259+ return IRQ_HANDLED;
3260+ } else {
3261+ printk(KERN_ERR "vRTC: error in IRQ handler\n");
3262+ return IRQ_NONE;
3263+ }
3264+}
3265+
3266+static int __init
3267+vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
3268+{
3269+ int retval = 0;
3270+ unsigned char rtc_control;
3271+
3272+ /* There can be only one ... */
3273+ if (mrst_rtc.dev)
3274+ return -EBUSY;
3275+
3276+ if (!iomem)
3277+ return -ENODEV;
3278+
3279+ iomem = request_mem_region(iomem->start,
3280+ iomem->end + 1 - iomem->start,
3281+ driver_name);
3282+ if (!iomem) {
3283+ dev_dbg(dev, "i/o mem already in use.\n");
3284+ return -EBUSY;
3285+ }
3286+
3287+ mrst_rtc.irq = rtc_irq;
3288+ mrst_rtc.iomem = iomem;
3289+
3290+ mrst_rtc.day_alrm = 0;
3291+ mrst_rtc.mon_alrm = 0;
3292+ mrst_rtc.century = 0;
3293+ mrst_rtc.wake_on = NULL;
3294+ mrst_rtc.wake_off = NULL;
3295+
3296+ mrst_rtc.rtc = rtc_device_register(driver_name, dev,
3297+ &mrst_rtc_ops, THIS_MODULE);
3298+ if (IS_ERR(mrst_rtc.rtc)) {
3299+ retval = PTR_ERR(mrst_rtc.rtc);
3300+ goto cleanup0;
3301+ }
3302+
3303+ mrst_rtc.dev = dev;
3304+ dev_set_drvdata(dev, &mrst_rtc);
3305+ rename_region(iomem, dev_name(&mrst_rtc.rtc->dev));
3306+
3307+ spin_lock_irq(&rtc_lock);
3308+ mrst_irq_disable(&mrst_rtc, RTC_PIE | RTC_AIE);
3309+ rtc_control = vrtc_cmos_read(RTC_CONTROL);
3310+ spin_unlock_irq(&rtc_lock);
3311+
3312+ if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
3313+ dev_dbg(dev, "TODO: support more than 24-hr BCD mode \n");
3314+
3315+ if (is_valid_irq(rtc_irq)) {
3316+ irq_handler_t rtc_mrst_int_handler;
3317+ rtc_mrst_int_handler = mrst_interrupt;
3318+
3319+ retval = request_irq(rtc_irq, rtc_mrst_int_handler,
3320+ IRQF_DISABLED, dev_name(&mrst_rtc.rtc->dev),
3321+ mrst_rtc.rtc);
3322+ if (retval < 0) {
3323+ dev_dbg(dev, "IRQ %d is already in use, err %d\n",
3324+ rtc_irq, retval);
3325+ goto cleanup1;
3326+ }
3327+ }
3328+
3329+	pr_info("vRTC driver for Moorestown is initialized\n");
3330+ return 0;
3331+
3332+cleanup1:
3333+ mrst_rtc.dev = NULL;
3334+ rtc_device_unregister(mrst_rtc.rtc);
3335+cleanup0:
3336+ release_region(iomem->start, iomem->end + 1 - iomem->start);
3337+	pr_warning("vRTC driver for Moorestown initialization failed\n");
3338+ return retval;
3339+}
3340+
3341+static void rtc_mrst_do_shutdown(void)
3342+{
3343+ spin_lock_irq(&rtc_lock);
3344+ mrst_irq_disable(&mrst_rtc, RTC_IRQMASK);
3345+ spin_unlock_irq(&rtc_lock);
3346+}
3347+
3348+static void __exit rtc_mrst_do_remove(struct device *dev)
3349+{
3350+ struct mrst_rtc *mrst = dev_get_drvdata(dev);
3351+ struct resource *iomem;
3352+
3353+ rtc_mrst_do_shutdown();
3354+
3355+ if (is_valid_irq(mrst->irq))
3356+ free_irq(mrst->irq, mrst->rtc);
3357+
3358+ rtc_device_unregister(mrst->rtc);
3359+ mrst->rtc = NULL;
3360+
3361+ iomem = mrst->iomem;
3362+ release_region(iomem->start, iomem->end + 1 - iomem->start);
3363+ mrst->iomem = NULL;
3364+
3365+ mrst->dev = NULL;
3366+ dev_set_drvdata(dev, NULL);
3367+}
3368+
3369+#ifdef CONFIG_PM
3370+
3371+static int mrst_suspend(struct device *dev, pm_message_t mesg)
3372+{
3373+ struct mrst_rtc *mrst = dev_get_drvdata(dev);
3374+ unsigned char tmp;
3375+
3376+ /* Only the alarm might be a wakeup event source */
3377+ spin_lock_irq(&rtc_lock);
3378+ mrst->suspend_ctrl = tmp = vrtc_cmos_read(RTC_CONTROL);
3379+ if (tmp & (RTC_PIE | RTC_AIE)) {
3380+ unsigned char mask;
3381+
3382+ if (device_may_wakeup(dev))
3383+ mask = RTC_IRQMASK & ~RTC_AIE;
3384+ else
3385+ mask = RTC_IRQMASK;
3386+ tmp &= ~mask;
3387+ vrtc_cmos_write(tmp, RTC_CONTROL);
3388+
3389+ mrst_checkintr(mrst, tmp);
3390+ }
3391+ spin_unlock_irq(&rtc_lock);
3392+
3393+ if (tmp & RTC_AIE) {
3394+ mrst->enabled_wake = 1;
3395+ if (mrst->wake_on)
3396+ mrst->wake_on(dev);
3397+ else
3398+ enable_irq_wake(mrst->irq);
3399+ }
3400+
3401+ pr_debug("%s: suspend%s, ctrl %02x\n",
3402+ dev_name(&mrst_rtc.rtc->dev),
3403+ (tmp & RTC_AIE) ? ", alarm may wake" : "",
3404+ tmp);
3405+
3406+ return 0;
3407+}
3408+
3409+/*
3410+ * We want RTC alarms to wake us from e.g. ACPI G2/S5 "soft off", even
3411+ * after a detour through G3 "mechanical off", although the ACPI spec
3412+ * says wakeup should only work from G1/S4 "hibernate". To most users,
3413+ * distinctions between S4 and S5 are pointless. So when the hardware
3414+ * allows, don't draw that distinction.
3415+ */
3416+static inline int mrst_poweroff(struct device *dev)
3417+{
3418+ return mrst_suspend(dev, PMSG_HIBERNATE);
3419+}
3420+
3421+static int mrst_resume(struct device *dev)
3422+{
3423+ struct mrst_rtc *mrst = dev_get_drvdata(dev);
3424+ unsigned char tmp = mrst->suspend_ctrl;
3425+
3426+ /* Re-enable any irqs previously active */
3427+ if (tmp & RTC_IRQMASK) {
3428+ unsigned char mask;
3429+
3430+ if (mrst->enabled_wake) {
3431+ if (mrst->wake_off)
3432+ mrst->wake_off(dev);
3433+ else
3434+ disable_irq_wake(mrst->irq);
3435+ mrst->enabled_wake = 0;
3436+ }
3437+
3438+ spin_lock_irq(&rtc_lock);
3439+ do {
3440+ vrtc_cmos_write(tmp, RTC_CONTROL);
3441+
3442+ mask = vrtc_cmos_read(RTC_INTR_FLAGS);
3443+ mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
3444+ if (!is_intr(mask))
3445+ break;
3446+
3447+ rtc_update_irq(mrst->rtc, 1, mask);
3448+ tmp &= ~RTC_AIE;
3449+ } while (mask & RTC_AIE);
3450+ spin_unlock_irq(&rtc_lock);
3451+ }
3452+
3453+ pr_debug("%s: resume, ctrl %02x\n",
3454+ dev_name(&mrst_rtc.rtc->dev),
3455+ tmp);
3456+
3457+ return 0;
3458+}
3459+
3460+#else
3461+#define mrst_suspend NULL
3462+#define mrst_resume NULL
3463+
3464+static inline int mrst_poweroff(struct device *dev)
3465+{
3466+ return -ENOSYS;
3467+}
3468+
3469+#endif
3470+
3471+
3472+/*----------------------------------------------------------------*/
3473+
3474+/* Platform setup should have set up an RTC device, when PNP is
3475+ * unavailable ... this could happen even on (older) PCs.
3476+ */
3477+
3478+static int __init vrtc_mrst_platform_probe(struct platform_device *pdev)
3479+{
3480+ return vrtc_mrst_do_probe(&pdev->dev,
3481+ platform_get_resource(pdev, IORESOURCE_MEM, 0),
3482+ platform_get_irq(pdev, 0));
3483+}
3484+
3485+static int __exit vrtc_mrst_platform_remove(struct platform_device *pdev)
3486+{
3487+ rtc_mrst_do_remove(&pdev->dev);
3488+ return 0;
3489+}
3490+
3491+static void vrtc_mrst_platform_shutdown(struct platform_device *pdev)
3492+{
3493+ if (system_state == SYSTEM_POWER_OFF && !mrst_poweroff(&pdev->dev))
3494+ return;
3495+
3496+ rtc_mrst_do_shutdown();
3497+}
3498+
3499+/* Work with hotplug and coldplug */
3500+MODULE_ALIAS("platform:vrtc_mrst");
3501+
3502+static struct platform_driver vrtc_mrst_platform_driver = {
3503+ .remove = __exit_p(vrtc_mrst_platform_remove),
3504+ .shutdown = vrtc_mrst_platform_shutdown,
3505+ .driver = {
3506+ .name = (char *) driver_name,
3507+ .suspend = mrst_suspend,
3508+ .resume = mrst_resume,
3509+ }
3510+};
3511+
3512+/*
3513+ * The Moorestown platform has a memory mapped virtual RTC device that
3514+ * emulates the programming interface of the RTC.
3515+ */
3516+
3517+static struct resource vrtc_resources[] = {
3518+ [0] = {
3519+ .flags = IORESOURCE_MEM,
3520+ },
3521+ [1] = {
3522+ .flags = IORESOURCE_IRQ,
3523+ }
3524+};
3525+
3526+static struct platform_device vrtc_device = {
3527+ .name = "rtc_mrst",
3528+ .id = -1,
3529+ .resource = vrtc_resources,
3530+ .num_resources = ARRAY_SIZE(vrtc_resources),
3531+};
3532+
3533+static int __init vrtc_mrst_init(void)
3534+{
3535+ /* iomem resource */
3536+ vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
3537+ vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
3538+ MRST_VRTC_MAP_SZ;
3539+ /* irq resource */
3540+ vrtc_resources[1].start = sfi_mrtc_array[0].irq;
3541+ vrtc_resources[1].end = sfi_mrtc_array[0].irq;
3542+
3543+ platform_device_register(&vrtc_device);
3544+ return platform_driver_probe(&vrtc_mrst_platform_driver,
3545+ vrtc_mrst_platform_probe);
3546+}
3547+
3548+static void __exit vrtc_mrst_exit(void)
3549+{
3550+ platform_driver_unregister(&vrtc_mrst_platform_driver);
3551+ platform_device_unregister(&vrtc_device);
3552+}
3553+
3554+module_init(vrtc_mrst_init);
3555+module_exit(vrtc_mrst_exit);
3556+
3557+MODULE_AUTHOR("Jacob Pan; Feng Tang");
3558+MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
3559+MODULE_LICENSE("GPL");
3560Index: linux-2.6.33/drivers/spi/Kconfig
3561===================================================================
3562--- linux-2.6.33.orig/drivers/spi/Kconfig
3563+++ linux-2.6.33/drivers/spi/Kconfig
3564@@ -302,6 +302,18 @@ config SPI_NUC900
3565 select SPI_BITBANG
3566 help
3567 SPI driver for Nuvoton NUC900 series ARM SoCs
3568+config SPI_MRST
3569+	tristate "SPI controller driver for Intel Moorestown platform"
3570+ depends on SPI_MASTER && PCI && X86_MRST
3571+ help
3572+	  This is the SPI master controller driver for the Intel Moorestown platform.
3573+
3574+config SPI_MRST_DMA
3575+ boolean "Enable DMA for MRST SPI0 controller"
3576+ default y
3577+ depends on SPI_MRST && INTEL_LNW_DMAC2
3578+ help
3579+	  This has to be enabled after the Moorestown DMAC2 driver is enabled.
3580
3581 #
3582 # Add new SPI master controllers in alphabetical order above this line
3583Index: linux-2.6.33/drivers/spi/Makefile
3584===================================================================
3585--- linux-2.6.33.orig/drivers/spi/Makefile
3586+++ linux-2.6.33/drivers/spi/Makefile
3587@@ -42,6 +42,7 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.
3588 obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
3589 obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
3590 obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
3591+obj-$(CONFIG_SPI_MRST) += mrst_spi.o
3592
3593 # special build for s3c24xx spi driver with fiq support
3594 spi_s3c24xx_hw-y := spi_s3c24xx.o
3595Index: linux-2.6.33/drivers/spi/mrst_spi.c
3596===================================================================
3597--- /dev/null
3598+++ linux-2.6.33/drivers/spi/mrst_spi.c
3599@@ -0,0 +1,1382 @@
3600+/*
3601+ * mrst_spi.c - Moorestown SPI controller driver (based on pxa2xx_spi.c)
3602+ *
3603+ * Copyright (C) Intel 2008 Feng Tang <feng.tang@intel.com>
3604+ *
3605+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3606+ *
3607+ * This program is free software; you can redistribute it and/or modify
3608+ * it under the terms of the GNU General Public License as published by
3609+ * the Free Software Foundation; either version 2 of the License, or
3610+ * (at your option) any later version.
3611+ *
3612+ * This program is distributed in the hope that it will be useful,
3613+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
3614+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3615+ * GNU General Public License for more details.
3616+ *
3617+ * You should have received a copy of the GNU General Public License
3618+ * along with this program; if not, write to the Free Software
3619+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
3620+ *
3621+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3622+ *
3623+ */
3624+
3625+/* Note:
3626+ *
3627+ * * FW creates a SPI device info block table; the driver needs to parse it
3628+ * and use spi_register_board_info() to register the devices with the kernel
3629+ */
3630+
3631+#include <linux/delay.h>
3632+#include <linux/highmem.h>
3633+#include <linux/pci.h>
3634+#include <linux/dma-mapping.h>
3635+#include <linux/interrupt.h>
3636+
3637+#include <linux/spi/spi.h>
3638+#include <linux/spi/mrst_spi.h>
3639+
3640+#define MRST_MAX_DMA_LEN 2047
3641+#ifdef CONFIG_SPI_MRST_DMA
3642+#include <linux/lnw_dma.h>
3643+#endif
3644+
3645+#ifdef CONFIG_DEBUG_FS
3646+#include <linux/debugfs.h>
3647+#endif
3648+
3649+#define DRIVER_NAME "mrst_spi"
3650+
3651+#define START_STATE ((void *)0)
3652+#define RUNNING_STATE ((void *)1)
3653+#define DONE_STATE ((void *)2)
3654+#define ERROR_STATE ((void *)-1)
3655+
3656+#define QUEUE_RUNNING 0
3657+#define QUEUE_STOPPED 1
3658+
3659+#define MRST_SPI_DEASSERT 0
3660+#define MRST_SPI_ASSERT 1
3661+
3662+/* HW info for MRST Clk Control Unit, one 32b reg */
3663+#define MRST_SPI_CLK_BASE 100000000 /* 100 MHz */
3664+#define MRST_CLK_SPI0_REG 0xff11d86c
3665+#define CLK_SPI_BDIV_OFFSET 0
3666+#define CLK_SPI_BDIV_MASK 0x00000007
3667+#define CLK_SPI_CDIV_OFFSET 9
3668+#define CLK_SPI_CDIV_MASK 0x00000e00
3669+#define CLK_SPI_CDIV_100M 0x0
3670+#define CLK_SPI_CDIV_50M 0x1
3671+#define CLK_SPI_CDIV_33M 0x2
3672+#define CLK_SPI_CDIV_25M 0x3
3673+#define CLK_SPI_DISABLE_OFFSET 8
3674+
3675+/* per controller struct */
3676+struct driver_data {
3677+ /* Driver model hookup */
3678+ struct pci_dev *pdev;
3679+ struct spi_master *master;
3680+
3681+ struct spi_device *devices;
3682+ struct spi_device *cur_dev;
3683+ enum mrst_ssi_type type;
3684+
3685+ /* phy and virtual register addresses */
3686+ void *paddr;
3687+ void *vaddr;
3688+ u32 iolen;
3689+ int irq;
3690+ dma_addr_t dma_addr;
3691+ u32 freq; /* controller core clk frequency in Hz */
3692+
3693+ /* Driver message queue */
3694+ struct workqueue_struct *workqueue;
3695+ struct work_struct pump_messages;
3696+ spinlock_t lock;
3697+ struct list_head queue;
3698+ int busy;
3699+ int run;
3700+
3701+ /* Message Transfer pump */
3702+ struct tasklet_struct pump_transfers;
3703+
3704+ /* Current message transfer state info */
3705+ struct spi_message *cur_msg;
3706+ struct spi_transfer *cur_transfer;
3707+ struct chip_data *cur_chip;
3708+ struct chip_data *prev_chip;
3709+ size_t len;
3710+ void *tx;
3711+ void *tx_end;
3712+ void *rx;
3713+ void *rx_end;
3714+ int dma_mapped;
3715+ dma_addr_t rx_dma;
3716+ dma_addr_t tx_dma;
3717+ size_t rx_map_len;
3718+ size_t tx_map_len;
3719+ u8 n_bytes; /* current is a 1/2 byte op */
3720+ u8 max_bits_per_word; /* SPI0's maximum width is 16 bits */
3721+ u32 dma_width;
3722+ int cs_change;
3723+ int (*write)(struct driver_data *drv_data);
3724+ int (*read)(struct driver_data *drv_data);
3725+ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
3726+ void (*cs_control)(u32 command);
3727+
3728+#ifdef CONFIG_DEBUG_FS
3729+ struct dentry *debugfs;
3730+#endif
3731+
3732+ int dma_inited;
3733+
3734+#ifdef CONFIG_SPI_MRST_DMA
3735+ struct lnw_dma_slave dmas_tx;
3736+ struct lnw_dma_slave dmas_rx;
3737+ struct dma_chan *txchan;
3738+ struct dma_chan *rxchan;
3739+ int txdma_done;
3740+ int rxdma_done;
3741+
3742+ u64 tx_param;
3743+ u64 rx_param;
3744+ struct pci_dev *dma_dev;
3745+#endif
3746+};
3747+
3748+/* slave spi_dev related */
3749+struct chip_data {
3750+ /* cr0 and cr1 are only 16b valid */
3751+ u16 cr0;
3752+ u16 cr1;
3753+
3754+ u8 cs; /* chip select pin */
3755+ u8 n_bytes; /* current is a 1/2/4 byte op */
3756+ u8 tmode; /* TR/TO/RO/EEPROM */
3757+ u8 type; /* SPI/SSP/MicroWire */
3758+
3759+ u8 poll_mode; /* 1 means use poll mode */
3760+
3761+ u32 dma_width;
3762+ u32 rx_threshold;
3763+ u32 tx_threshold;
3764+ u8 enable_dma;
3765+ u8 bits_per_word;
3766+ u16 clk_div; /* baud rate divider */
3767+ u32 speed_hz; /* baud rate */
3768+ int (*write)(struct driver_data *drv_data);
3769+ int (*read)(struct driver_data *drv_data);
3770+ void (*cs_control)(u32 command);
3771+};
3772+
3773+#ifdef CONFIG_SPI_MRST_DMA
3774+static bool chan_filter(struct dma_chan *chan, void *param)
3775+{
3776+ struct driver_data *drv_data = param;
3777+ bool ret = false;
3778+
3779+ if (chan->device->dev == &drv_data->dma_dev->dev)
3780+ ret = true;
3781+ return ret;
3782+}
3783+
3784+static void mrst_spi_dma_init(struct driver_data *drv_data)
3785+{
3786+ struct lnw_dma_slave *rxs, *txs;
3787+ dma_cap_mask_t mask;
3788+ struct pci_dev *dmac2;
3789+
3790+ drv_data->txchan = NULL;
3791+ drv_data->rxchan = NULL;
3792+
3793+ /* mrst spi0 controller only works with mrst dma controller 2 */
3794+ dmac2 = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
3795+ if (!dmac2) {
3796+ printk(KERN_WARNING
3797+ "MRST SPI0: can't find DMAC2, dma init failed\n");
3798+ return;
3799+ } else
3800+ drv_data->dma_dev = dmac2;
3801+
3802+ /* 1. init rx channel */
3803+ rxs = &drv_data->dmas_rx;
3804+
3805+ rxs->dirn = DMA_FROM_DEVICE;
3806+ rxs->hs_mode = LNW_DMA_HW_HS;
3807+ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
3808+ rxs->src_width = LNW_DMA_WIDTH_16BIT;
3809+ rxs->dst_width = LNW_DMA_WIDTH_32BIT;
3810+ rxs->src_msize = LNW_DMA_MSIZE_16;
3811+ rxs->dst_msize = LNW_DMA_MSIZE_16;
3812+
3813+ dma_cap_zero(mask);
3814+ dma_cap_set(DMA_MEMCPY, mask);
3815+ dma_cap_set(DMA_SLAVE, mask);
3816+
3817+ drv_data->rxchan = dma_request_channel(mask, chan_filter,
3818+ drv_data);
3819+ if (!drv_data->rxchan)
3820+ goto err_exit;
3821+ drv_data->rxchan->private = rxs;
3822+
3823+ /* 2. init tx channel */
3824+ txs = &drv_data->dmas_tx;
3825+
3826+ txs->dirn = DMA_TO_DEVICE;
3827+ txs->hs_mode = LNW_DMA_HW_HS;
3828+ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
3829+ txs->src_width = LNW_DMA_WIDTH_32BIT;
3830+ txs->dst_width = LNW_DMA_WIDTH_16BIT;
3831+ txs->src_msize = LNW_DMA_MSIZE_16;
3832+ txs->dst_msize = LNW_DMA_MSIZE_16;
3833+
3834+ dma_cap_set(DMA_SLAVE, mask);
3835+ dma_cap_set(DMA_MEMCPY, mask);
3836+
3837+ drv_data->txchan = dma_request_channel(mask, chan_filter,
3838+ drv_data);
3839+ if (!drv_data->txchan)
3840+ goto free_rxchan;
3841+ drv_data->txchan->private = txs;
3842+
3843+ /* set the dma done bit to 1 */
3844+ drv_data->dma_inited = 1;
3845+ drv_data->txdma_done = 1;
3846+ drv_data->rxdma_done = 1;
3847+
3848+ drv_data->tx_param = ((u64)(u32)drv_data << 32)
3849+ | (u32)(&drv_data->txdma_done);
3850+ drv_data->rx_param = ((u64)(u32)drv_data << 32)
3851+ | (u32)(&drv_data->rxdma_done);
3852+ return;
3853+
3854+free_rxchan:
3855+ dma_release_channel(drv_data->rxchan);
3856+err_exit:
3857+ pci_dev_put(dmac2);
3858+ return;
3859+}
3860+
3861+static void mrst_spi_dma_exit(struct driver_data *drv_data)
3862+{
3863+ dma_release_channel(drv_data->txchan);
3864+ dma_release_channel(drv_data->rxchan);
3865+ pci_dev_put(drv_data->dma_dev);
3866+}
3867+
3868+
3869+static inline void unmap_dma_buffers(struct driver_data *drv_data);
3870+static void transfer_complete(struct driver_data *drv_data);
3871+
3872+static void mrst_spi_dma_done(void *arg)
3873+{
3874+ u64 *param = arg;
3875+ struct driver_data *drv_data;
3876+ int *done;
3877+
3878+ drv_data = (struct driver_data *)(u32)(*param >> 32);
3879+ done = (int *)(u32)(*param & 0xffffffff);
3880+
3881+ *done = 1;
3882+ /* wait till both tx/rx channels are done */
3883+ if (!drv_data->txdma_done || !drv_data->rxdma_done)
3884+ return;
3885+
3886+ transfer_complete(drv_data);
3887+}
3888+#endif
3889+
3890+
3891+#ifdef CONFIG_DEBUG_FS
3892+static int spi_show_regs_open(struct inode *inode, struct file *file)
3893+{
3894+ file->private_data = inode->i_private;
3895+ return 0;
3896+}
3897+
3898+#define SPI_REGS_BUFSIZE 1024
3899+static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
3900+ size_t count, loff_t *ppos)
3901+{
3902+ char *buf;
3903+ u32 len = 0;
3904+ ssize_t ret;
3905+ struct driver_data *drv_data;
3906+ void *reg;
3907+
3908+ drv_data = (struct driver_data *)file->private_data;
3909+ reg = drv_data->vaddr;
3910+
3911+ buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
3912+ if (!buf)
3913+ return 0;
3914+
3915+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3916+ "MRST SPI0 registers:\n");
3917+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3918+ "=================================\n");
3919+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3920+ "CTRL0: \t\t0x%08x\n", read_ctrl0(reg));
3921+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3922+ "CTRL1: \t\t0x%08x\n", read_ctrl1(reg));
3923+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3924+ "SSIENR: \t0x%08x\n", read_ssienr(reg));
3925+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3926+ "SER: \t\t0x%08x\n", read_ser(reg));
3927+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3928+ "BAUDR: \t\t0x%08x\n", read_baudr(reg));
3929+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3930+ "TXFTLR: \t0x%08x\n", read_txftlr(reg));
3931+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3932+ "RXFTLR: \t0x%08x\n", read_rxftlr(reg));
3933+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3934+ "TXFLR: \t\t0x%08x\n", read_txflr(reg));
3935+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3936+ "RXFLR: \t\t0x%08x\n", read_rxflr(reg));
3937+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3938+ "SR: \t\t0x%08x\n", read_sr(reg));
3939+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3940+ "IMR: \t\t0x%08x\n", read_imr(reg));
3941+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3942+ "ISR: \t\t0x%08x\n", read_isr(reg));
3943+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3944+ "DMACR: \t\t0x%08x\n", read_dmacr(reg));
3945+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3946+ "DMATDLR: \t0x%08x\n", read_dmatdlr(reg));
3947+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3948+ "DMARDLR: \t0x%08x\n", read_dmardlr(reg));
3949+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
3950+ "=================================\n");
3951+
3952+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
3953+ kfree(buf);
3954+ return ret;
3955+}
3956+
3957+static const struct file_operations mrst_spi_regs_ops = {
3958+ .owner = THIS_MODULE,
3959+ .open = spi_show_regs_open,
3960+ .read = spi_show_regs,
3961+};
3962+
3963+static int mrst_spi_debugfs_init(struct driver_data *drv_data)
3964+{
3965+ drv_data->debugfs = debugfs_create_dir("mrst_spi", NULL);
3966+ if (!drv_data->debugfs)
3967+ return -ENOMEM;
3968+
3969+ debugfs_create_file("registers", S_IFREG | S_IRUGO,
3970+ drv_data->debugfs, (void *)drv_data, &mrst_spi_regs_ops);
3971+ return 0;
3972+}
3973+
3974+static void mrst_spi_debugfs_remove(struct driver_data *drv_data)
3975+{
3976+ if (drv_data->debugfs)
3977+ debugfs_remove_recursive(drv_data->debugfs);
3978+}
3979+
3980+#else
3981+static inline int mrst_spi_debugfs_init(struct driver_data *drv_data)
3982+{
3983+ return 0; }
3984+
3985+static inline void mrst_spi_debugfs_remove(struct driver_data *drv_data)
3986+{
3987+}
3988+#endif /* CONFIG_DEBUG_FS */
3989+
3990+static int flush(struct driver_data *drv_data)
3991+{
3992+ unsigned long limit = loops_per_jiffy << 1;
3993+ void *reg = drv_data->vaddr;
3994+
3995+ while (read_sr(reg) & SR_RF_NOT_EMPT) {
3996+ limit = loops_per_jiffy << 1;
3997+ while ((read_sr(reg) & SR_BUSY) && limit--)
3998+ ;
3999+ read_dr(reg);
4000+ }
4001+ return limit;
4002+}
4003+
4004+static void null_cs_control(u32 command)
4005+{
4006+}
4007+
4008+static int null_writer(struct driver_data *drv_data)
4009+{
4010+ void *reg = drv_data->vaddr;
4011+ u8 n_bytes = drv_data->n_bytes;
4012+
4013+ if (!(read_sr(reg) & SR_TF_NOT_FULL)
4014+ || (drv_data->tx == drv_data->tx_end))
4015+ return 0;
4016+
4017+ write_dr(0, reg);
4018+ drv_data->tx += n_bytes;
4019+ return 1;
4020+}
4021+
4022+static int null_reader(struct driver_data *drv_data)
4023+{
4024+ void *reg = drv_data->vaddr;
4025+ u8 n_bytes = drv_data->n_bytes;
4026+
4027+ while ((read_sr(reg) & SR_RF_NOT_EMPT)
4028+ && (drv_data->rx < drv_data->rx_end)) {
4029+ read_dr(reg);
4030+ drv_data->rx += n_bytes;
4031+ }
4032+ return drv_data->rx == drv_data->rx_end;
4033+}
4034+
4035+static int u8_writer(struct driver_data *drv_data)
4036+{
4037+ void *reg = drv_data->vaddr;
4038+
4039+ if (!(read_sr(reg) & SR_TF_NOT_FULL)
4040+ || (drv_data->tx == drv_data->tx_end))
4041+ return 0;
4042+
4043+ write_dr(*(u8 *)(drv_data->tx), reg);
4044+ ++drv_data->tx;
4045+
4046+ while (read_sr(reg) & SR_BUSY)
4047+ ;
4048+ return 1;
4049+}
4050+
4051+static int u8_reader(struct driver_data *drv_data)
4052+{
4053+ void *reg = drv_data->vaddr;
4054+
4055+ while ((read_sr(reg) & SR_RF_NOT_EMPT)
4056+ && (drv_data->rx < drv_data->rx_end)) {
4057+ *(u8 *)(drv_data->rx) = read_dr(reg);
4058+ ++drv_data->rx;
4059+ }
4060+
4061+ while (read_sr(reg) & SR_BUSY)
4062+ ;
4063+ return drv_data->rx == drv_data->rx_end;
4064+}
4065+
4066+static int u16_writer(struct driver_data *drv_data)
4067+{
4068+ void *reg = drv_data->vaddr;
4069+
4070+ if (!(read_sr(reg) & SR_TF_NOT_FULL)
4071+ || (drv_data->tx == drv_data->tx_end))
4072+ return 0;
4073+
4074+ write_dr(*(u16 *)(drv_data->tx), reg);
4075+ drv_data->tx += 2;
4076+ while (read_sr(reg) & SR_BUSY)
4077+ ;
4078+
4079+ return 1;
4080+}
4081+
4082+static int u16_reader(struct driver_data *drv_data)
4083+{
4084+ void *reg = drv_data->vaddr;
4085+ u16 temp;
4086+
4087+ while ((read_sr(reg) & SR_RF_NOT_EMPT)
4088+ && (drv_data->rx < drv_data->rx_end)) {
4089+ temp = read_dr(reg);
4090+ *(u16 *)(drv_data->rx) = temp;
4091+ drv_data->rx += 2;
4092+ }
4093+
4094+ while (read_sr(reg) & SR_BUSY)
4095+ ;
4096+
4097+ return drv_data->rx == drv_data->rx_end;
4098+}
4099+
4100+static void *next_transfer(struct driver_data *drv_data)
4101+{
4102+ struct spi_message *msg = drv_data->cur_msg;
4103+ struct spi_transfer *trans = drv_data->cur_transfer;
4104+
4105+ /* Move to next transfer */
4106+ if (trans->transfer_list.next != &msg->transfers) {
4107+ drv_data->cur_transfer =
4108+ list_entry(trans->transfer_list.next,
4109+ struct spi_transfer,
4110+ transfer_list);
4111+ return RUNNING_STATE;
4112+ } else
4113+ return DONE_STATE;
4114+}
4115+
4116+/*
4117+ * Note: the protocol driver first prepares dma-capable memory,
4118+ * and this func just needs to translate the virt addr to
4119+ * physical
4120+ */
4121+static int map_dma_buffers(struct driver_data *drv_data)
4122+{
4123+ if (!drv_data->cur_msg->is_dma_mapped || !drv_data->dma_inited
4124+ || !drv_data->cur_chip->enable_dma)
4125+ return 0;
4126+
4127+ if (drv_data->cur_transfer->tx_dma)
4128+ drv_data->tx_dma = drv_data->cur_transfer->tx_dma;
4129+
4130+ if (drv_data->cur_transfer->rx_dma)
4131+ drv_data->rx_dma = drv_data->cur_transfer->rx_dma;
4132+
4133+ return 1;
4134+}
4135+
4136+static inline void unmap_dma_buffers(struct driver_data *drv_data)
4137+{
4138+ if (!drv_data->dma_mapped)
4139+ return;
4140+ drv_data->dma_mapped = 0;
4141+}
4142+
4143+/* caller already set message->status; dma and pio irqs are blocked */
4144+static void giveback(struct driver_data *drv_data)
4145+{
4146+ struct spi_transfer *last_transfer;
4147+ unsigned long flags;
4148+ struct spi_message *msg;
4149+
4150+ spin_lock_irqsave(&drv_data->lock, flags);
4151+ msg = drv_data->cur_msg;
4152+ drv_data->cur_msg = NULL;
4153+ drv_data->cur_transfer = NULL;
4154+ drv_data->prev_chip = drv_data->cur_chip;
4155+ drv_data->cur_chip = NULL;
4156+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
4157+ spin_unlock_irqrestore(&drv_data->lock, flags);
4158+
4159+ last_transfer = list_entry(msg->transfers.prev,
4160+ struct spi_transfer,
4161+ transfer_list);
4162+
4163+ if (!last_transfer->cs_change)
4164+ drv_data->cs_control(MRST_SPI_DEASSERT);
4165+
4166+ msg->state = NULL;
4167+ if (msg->complete)
4168+ msg->complete(msg->context);
4169+}
4170+
4171+static void dma_transfer(struct driver_data *drv_data, int cs_change)
4172+{
4173+#ifdef CONFIG_SPI_MRST_DMA
4174+ void *reg = drv_data->vaddr;
4175+ struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
4176+ struct dma_chan *txchan, *rxchan;
4177+ enum dma_ctrl_flags flag;
4178+ u16 dmacr = 0;
4179+
4180+ /* 1. setup DMA related registers */
4181+ if (cs_change) {
4182+ mrst_spi_enable(reg, 0);
4183+
4184+ write_dmardlr(0xf, reg);
4185+ write_dmatdlr(0x10, reg);
4186+
4187+ if (drv_data->tx_dma)
4188+ dmacr |= 0x2;
4189+ if (drv_data->rx_dma)
4190+ dmacr |= 0x1;
4191+
4192+ write_dmacr(dmacr, reg);
4193+ mrst_spi_enable(reg, 1);
4194+ }
4195+
4196+ if (drv_data->tx_dma)
4197+ drv_data->txdma_done = 0;
4198+
4199+ if (drv_data->rx_dma)
4200+ drv_data->rxdma_done = 0;
4201+
4202+ /* 2. start the TX dma transfer */
4203+ txchan = drv_data->txchan;
4204+ rxchan = drv_data->rxchan;
4205+
4206+ flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
4207+
4208+ if (drv_data->tx_dma) {
4209+ txdesc = txchan->device->device_prep_dma_memcpy(txchan,
4210+ drv_data->dma_addr, drv_data->tx_dma,
4211+ drv_data->len, flag);
4212+
4213+ txdesc->callback = mrst_spi_dma_done;
4214+ txdesc->callback_param = &drv_data->tx_param;
4215+ }
4216+
4217+ /* 3. start the RX dma transfer */
4218+ if (drv_data->rx_dma) {
4219+ rxdesc = rxchan->device->device_prep_dma_memcpy(rxchan,
4220+ drv_data->rx_dma, drv_data->dma_addr,
4221+ drv_data->len, flag);
4222+
4223+ rxdesc->callback = mrst_spi_dma_done;
4224+ rxdesc->callback_param = &drv_data->rx_param;
4225+ }
4226+
4227+ /* rx must be started before tx due to spi's full-duplex nature */
4228+ if (rxdesc)
4229+ rxdesc->tx_submit(rxdesc);
4230+ if (txdesc)
4231+ txdesc->tx_submit(txdesc);
4232+#endif
4233+}
4234+
4235+static void int_error_stop(struct driver_data *drv_data, const char *msg)
4236+{
4237+ void *reg = drv_data->vaddr;
4238+
4239+ /* Stop and reset hw */
4240+ flush(drv_data);
4241+ write_ssienr(0, reg);
4242+
4243+ dev_err(&drv_data->pdev->dev, "%s\n", msg);
4244+
4245+ drv_data->cur_msg->state = ERROR_STATE;
4246+ tasklet_schedule(&drv_data->pump_transfers);
4247+}
4248+
4249+static void transfer_complete(struct driver_data *drv_data)
4250+{
4251+ /* Update total bytes transferred; return count is actual bytes read */
4252+ drv_data->cur_msg->actual_length += drv_data->len;
4253+
4254+ /* Move to next transfer */
4255+ drv_data->cur_msg->state = next_transfer(drv_data);
4256+
4257+ /* handle end of message */
4258+ if (drv_data->cur_msg->state == DONE_STATE) {
4259+ drv_data->cur_msg->status = 0;
4260+ giveback(drv_data);
4261+ } else
4262+ tasklet_schedule(&drv_data->pump_transfers);
4263+}
4264+
4265+static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
4266+{
4267+ void *reg = drv_data->vaddr;
4268+ u32 irq_status, irq_mask = 0x3f;
4269+
4270+ irq_status = read_isr(reg) & irq_mask;
4271+
4272+ /* error handling */
4273+ if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
4274+ read_txoicr(reg);
4275+ read_rxoicr(reg);
4276+ read_rxuicr(reg);
4277+ int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
4278+ return IRQ_HANDLED;
4279+ }
4280+
4281+ /* INT comes from tx */
4282+ if (drv_data->tx && (irq_status & SPI_INT_TXEI))
4283+ while (drv_data->tx < drv_data->tx_end) {
4284+ drv_data->write(drv_data);
4285+
4286+ if (drv_data->tx == drv_data->tx_end) {
4287+ spi_mask_intr(reg, SPI_INT_TXEI);
4288+ transfer_complete(drv_data);
4289+ }
4290+ }
4291+
4292+ /* INT comes from rx */
4293+ if (drv_data->rx && (irq_status & SPI_INT_RXFI)) {
4294+ if (drv_data->read(drv_data))
4295+ transfer_complete(drv_data);
4296+ }
4297+
4298+ return IRQ_HANDLED;
4299+}
4300+
4301+static irqreturn_t mrst_spi_irq(int irq, void *dev_id)
4302+{
4303+ struct driver_data *drv_data = dev_id;
4304+ void *reg = drv_data->vaddr;
4305+
4306+ if (!drv_data->cur_msg) {
4307+ spi_mask_intr(reg, SPI_INT_TXEI);
4308+ /* Never fail */
4309+ return IRQ_HANDLED;
4310+ }
4311+
4312+ return drv_data->transfer_handler(drv_data);
4313+}
4314+
4315+/* must be called inside pump_transfers() */
4316+static void poll_transfer(struct driver_data *drv_data)
4317+{
4318+ if (drv_data->tx)
4319+ while (drv_data->write(drv_data))
4320+ drv_data->read(drv_data);
4321+
4322+ drv_data->read(drv_data);
4323+ transfer_complete(drv_data);
4324+}
4325+
4326+static void pump_transfers(unsigned long data)
4327+{
4328+ struct driver_data *drv_data = (struct driver_data *)data;
4329+ struct spi_message *message = NULL;
4330+ struct spi_transfer *transfer = NULL;
4331+ struct spi_transfer *previous = NULL;
4332+ struct spi_device *spi = NULL;
4333+ struct chip_data *chip = NULL;
4334+ void *reg = drv_data->vaddr;
4335+ u8 bits = 0;
4336+ u8 imask = 0;
4337+ u8 cs_change = 0;
4338+ u16 rxint_level = 0;
4339+ u16 txint_level = 0;
4340+ u16 clk_div = 0;
4341+ u32 speed = 0;
4342+ u32 cr0 = 0;
4343+
4344+ /* get current state information */
4345+ message = drv_data->cur_msg;
4346+ transfer = drv_data->cur_transfer;
4347+ chip = drv_data->cur_chip;
4348+ spi = message->spi;
4349+
4350+ if (unlikely(!chip->clk_div)) {
4351+ /* default for 115200 UART device */
4352+ if (chip->speed_hz)
4353+ chip->clk_div = drv_data->freq / chip->speed_hz;
4354+ else
4355+ chip->clk_div = drv_data->freq / 115200;
4356+ }
4357+
4358+ /* handle for abort */
4359+ if (message->state == ERROR_STATE) {
4360+ message->status = -EIO;
4361+ goto early_exit;
4362+ }
4363+
4364+ /* handle end of message */
4365+ if (message->state == DONE_STATE) {
4366+ message->status = 0;
4367+ goto early_exit;
4368+ }
4369+
4370+ /* delay if requested at end of transfer */
4371+ if (message->state == RUNNING_STATE) {
4372+ previous = list_entry(transfer->transfer_list.prev,
4373+ struct spi_transfer,
4374+ transfer_list);
4375+ if (previous->delay_usecs)
4376+ udelay(previous->delay_usecs);
4377+ }
4378+
4379+ drv_data->n_bytes = chip->n_bytes;
4380+ drv_data->dma_width = chip->dma_width;
4381+ drv_data->cs_control = chip->cs_control;
4382+
4383+ drv_data->rx_dma = transfer->rx_dma;
4384+ drv_data->tx_dma = transfer->tx_dma;
4385+ drv_data->tx = (void *)transfer->tx_buf;
4386+ drv_data->tx_end = drv_data->tx + transfer->len;
4387+ drv_data->rx = transfer->rx_buf;
4388+ drv_data->rx_end = drv_data->rx + transfer->len;
4389+ drv_data->write = drv_data->tx ? chip->write : null_writer;
4390+ drv_data->read = drv_data->rx ? chip->read : null_reader;
4391+ drv_data->cs_change = transfer->cs_change;
4392+ drv_data->len = drv_data->cur_transfer->len;
4393+ if (chip != drv_data->prev_chip)
4394+ cs_change = 1;
4395+
4396+ /* handle per transfer options for bpw and speed */
4397+ cr0 = chip->cr0;
4398+ if (transfer->speed_hz) {
4399+ speed = chip->speed_hz;
4400+
4401+ if (transfer->speed_hz != speed) {
4402+ speed = transfer->speed_hz;
4403+ if (speed > drv_data->freq) {
4404+ printk(KERN_ERR "MRST SPI0: unsupported"
4405+ " freq: %dHz\n", speed);
4406+ message->status = -EIO;
4407+ goto early_exit;
4408+ }
4409+
4410+ /* clk_div doesn't support odd numbers */
4411+ clk_div = (drv_data->freq + speed - 1) / speed;
4412+ clk_div = ((clk_div + 1) >> 1) << 1;
4413+
4414+ chip->speed_hz = speed;
4415+ chip->clk_div = clk_div;
4416+ }
4417+ }
4418+
4419+ if (transfer->bits_per_word) {
4420+ bits = transfer->bits_per_word;
4421+
4422+ switch (bits) {
4423+ case 8:
4424+ drv_data->n_bytes = 1;
4425+ drv_data->dma_width = 1;
4426+ drv_data->read = drv_data->read != null_reader ?
4427+ u8_reader : null_reader;
4428+ drv_data->write = drv_data->write != null_writer ?
4429+ u8_writer : null_writer;
4430+ break;
4431+ case 16:
4432+ drv_data->n_bytes = 2;
4433+ drv_data->dma_width = 2;
4434+ drv_data->read = drv_data->read != null_reader ?
4435+ u16_reader : null_reader;
4436+ drv_data->write = drv_data->write != null_writer ?
4437+ u16_writer : null_writer;
4438+ break;
4439+ default:
4440+ printk(KERN_ERR "MRST SPI0: unsupported bits:"
4441+ "%db\n", bits);
4442+ message->status = -EIO;
4443+ goto early_exit;
4444+ }
4445+
4446+ cr0 = (bits - 1)
4447+ | (chip->type << SPI_FRF_OFFSET)
4448+ | (spi->mode << SPI_MODE_OFFSET)
4449+ | (chip->tmode << SPI_TMOD_OFFSET);
4450+ }
4451+
4452+ message->state = RUNNING_STATE;
4453+
4454+ /* try to map dma buffer and do a dma transfer if successful */
4455+ drv_data->dma_mapped = 0;
4456+ if (drv_data->len && (drv_data->len <= MRST_MAX_DMA_LEN))
4457+ drv_data->dma_mapped = map_dma_buffers(drv_data);
4458+
4459+ if (!drv_data->dma_mapped && !chip->poll_mode) {
4460+ if (drv_data->rx) {
4461+ if (drv_data->len >= SPI_INT_THRESHOLD)
4462+ rxint_level = SPI_INT_THRESHOLD;
4463+ else
4464+ rxint_level = drv_data->len;
4465+ imask |= SPI_INT_RXFI;
4466+ }
4467+
4468+ if (drv_data->tx)
4469+ imask |= SPI_INT_TXEI;
4470+ drv_data->transfer_handler = interrupt_transfer;
4471+ }
4472+
4473+ /*
4474+ * reprogram registers only if
4475+ * 1. chip select changes
4476+ * 2. clk_div changes
4477+ * 3. control value changes
4478+ */
4479+ if (read_ctrl0(reg) != cr0 || cs_change || clk_div) {
4480+ mrst_spi_enable(reg, 0);
4481+
4482+ if (read_ctrl0(reg) != cr0)
4483+ write_ctrl0(cr0, reg);
4484+
4485+ if (txint_level)
4486+ write_txftlr(txint_level, reg);
4487+
4488+ if (rxint_level)
4489+ write_rxftlr(rxint_level, reg);
4490+
4491+ /* set the interrupt mask, for poll mode just disable all int */
4492+ spi_mask_intr(reg, 0xff);
4493+ if (!chip->poll_mode)
4494+ spi_umask_intr(reg, imask);
4495+
4496+ spi_enable_clk(reg, clk_div ? clk_div : chip->clk_div);
4497+ spi_chip_sel(reg, spi->chip_select);
4498+ mrst_spi_enable(reg, 1);
4499+
4500+ if (cs_change)
4501+ drv_data->prev_chip = chip;
4502+ }
4503+
4504+ if (drv_data->dma_mapped)
4505+ dma_transfer(drv_data, cs_change);
4506+
4507+ if (chip->poll_mode)
4508+ poll_transfer(drv_data);
4509+
4510+ return;
4511+
4512+early_exit:
4513+ giveback(drv_data);
4514+ return;
4515+}
4516+
4517+static void pump_messages(struct work_struct *work)
4518+{
4519+ struct driver_data *drv_data =
4520+ container_of(work, struct driver_data, pump_messages);
4521+ unsigned long flags;
4522+
4523+ /* Lock queue and check for queue work */
4524+ spin_lock_irqsave(&drv_data->lock, flags);
4525+ if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
4526+ drv_data->busy = 0;
4527+ spin_unlock_irqrestore(&drv_data->lock, flags);
4528+ return;
4529+ }
4530+
4531+ /* Make sure we are not already running a message */
4532+ if (drv_data->cur_msg) {
4533+ spin_unlock_irqrestore(&drv_data->lock, flags);
4534+ return;
4535+ }
4536+
4537+ /* Extract head of queue */
4538+ drv_data->cur_msg = list_entry(drv_data->queue.next,
4539+ struct spi_message, queue);
4540+ list_del_init(&drv_data->cur_msg->queue);
4541+
4542+ /* Initial message state*/
4543+ drv_data->cur_msg->state = START_STATE;
4544+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
4545+ struct spi_transfer,
4546+ transfer_list);
4547+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
4548+
4549+ /* Mark as busy and launch transfers */
4550+ tasklet_schedule(&drv_data->pump_transfers);
4551+
4552+ drv_data->busy = 1;
4553+ spin_unlock_irqrestore(&drv_data->lock, flags);
4554+}
4555+
4556+/* spi_device uses this to queue its spi_msg */
4557+static int mrst_spi_transfer(struct spi_device *spi, struct spi_message *msg)
4558+{
4559+ struct driver_data *drv_data = spi_master_get_devdata(spi->master);
4560+ unsigned long flags;
4561+
4562+ spin_lock_irqsave(&drv_data->lock, flags);
4563+
4564+ if (drv_data->run == QUEUE_STOPPED) {
4565+ spin_unlock_irqrestore(&drv_data->lock, flags);
4566+ return -ESHUTDOWN;
4567+ }
4568+
4569+ msg->actual_length = 0;
4570+ msg->status = -EINPROGRESS;
4571+ msg->state = START_STATE;
4572+
4573+ list_add_tail(&msg->queue, &drv_data->queue);
4574+
4575+ if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) {
4576+
4577+ if (drv_data->cur_transfer || drv_data->cur_msg)
4578+ queue_work(drv_data->workqueue,
4579+ &drv_data->pump_messages);
4580+ else {
4581+ /* if no other data transaction in flight, just go */
4582+ spin_unlock_irqrestore(&drv_data->lock, flags);
4583+ pump_messages(&drv_data->pump_messages);
4584+ return 0;
4585+ }
4586+ }
4587+
4588+ spin_unlock_irqrestore(&drv_data->lock, flags);
4589+ return 0;
4590+}
4591+
4592+/* this may be called twice for each spi dev */
4593+static int mrst_spi_setup(struct spi_device *spi)
4594+{
4595+ struct mrst_spi_chip *chip_info = NULL;
4596+ struct chip_data *chip;
4597+
4598+ if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
4599+ return -EINVAL;
4600+
4601+ /* Only alloc on first setup */
4602+ chip = spi_get_ctldata(spi);
4603+ if (!chip) {
4604+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
4605+ if (!chip)
4606+ return -ENOMEM;
4607+
4608+ chip->cs_control = null_cs_control;
4609+ chip->enable_dma = 0;
4610+ }
4611+
4612+ /* protocol drivers may change the chip settings, so...
4613+ * if chip_info exists, use it */
4614+ chip_info = spi->controller_data;
4615+
4616+ /* chip_info doesn't always exist */
4617+ if (chip_info) {
4618+ if (chip_info->cs_control)
4619+ chip->cs_control = chip_info->cs_control;
4620+
4621+ chip->poll_mode = chip_info->poll_mode;
4622+ chip->type = chip_info->type;
4623+
4624+ chip->rx_threshold = 0;
4625+ chip->tx_threshold = 0;
4626+
4627+ chip->enable_dma = chip_info->enable_dma;
4628+ }
4629+
4630+ if (spi->bits_per_word <= 8) {
4631+ chip->n_bytes = 1;
4632+ chip->dma_width = 1;
4633+ chip->read = u8_reader;
4634+ chip->write = u8_writer;
4635+ } else if (spi->bits_per_word <= 16) {
4636+ chip->n_bytes = 2;
4637+ chip->dma_width = 2;
4638+ chip->read = u16_reader;
4639+ chip->write = u16_writer;
4640+ } else {
4641+ /* never take >16b case for MRST SPIC */
4642+ dev_err(&spi->dev, "invalid wordsize\n");
4643+ return -ENODEV;
4644+ }
4645+
4646+ chip->bits_per_word = spi->bits_per_word;
4647+ chip->speed_hz = spi->max_speed_hz;
4648+ chip->tmode = 0; /* Tx & Rx */
4649+ /* default SPI mode is SCPOL = 0, SCPH = 0 */
4650+ chip->cr0 = (chip->bits_per_word - 1)
4651+ | (chip->type << SPI_FRF_OFFSET)
4652+ | (spi->mode << SPI_MODE_OFFSET)
4653+ | (chip->tmode << SPI_TMOD_OFFSET);
4654+
4655+ spi_set_ctldata(spi, chip);
4656+ return 0;
4657+}
4658+
4659+static void mrst_spi_cleanup(struct spi_device *spi)
4660+{
4661+ struct chip_data *chip = spi_get_ctldata(spi);
4662+
4663+ kfree(chip);
4664+}
4665+
4666+static int __init init_queue(struct driver_data *drv_data)
4667+{
4668+ INIT_LIST_HEAD(&drv_data->queue);
4669+ spin_lock_init(&drv_data->lock);
4670+
4671+ drv_data->run = QUEUE_STOPPED;
4672+ drv_data->busy = 0;
4673+
4674+ tasklet_init(&drv_data->pump_transfers,
4675+ pump_transfers, (unsigned long)drv_data);
4676+
4677+ INIT_WORK(&drv_data->pump_messages, pump_messages);
4678+ drv_data->workqueue = create_singlethread_workqueue(
4679+ dev_name(drv_data->master->dev.parent));
4680+ if (drv_data->workqueue == NULL)
4681+ return -EBUSY;
4682+
4683+ return 0;
4684+}
4685+
4686+static int start_queue(struct driver_data *drv_data)
4687+{
4688+ unsigned long flags;
4689+
4690+ spin_lock_irqsave(&drv_data->lock, flags);
4691+
4692+ if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
4693+ spin_unlock_irqrestore(&drv_data->lock, flags);
4694+ return -EBUSY;
4695+ }
4696+
4697+ drv_data->run = QUEUE_RUNNING;
4698+ drv_data->cur_msg = NULL;
4699+ drv_data->cur_transfer = NULL;
4700+ drv_data->cur_chip = NULL;
4701+ drv_data->prev_chip = NULL;
4702+ spin_unlock_irqrestore(&drv_data->lock, flags);
4703+
4704+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
4705+
4706+ return 0;
4707+}
4708+
4709+static int stop_queue(struct driver_data *drv_data)
4710+{
4711+ unsigned long flags;
4712+ unsigned limit = 500;
4713+ int status = 0;
4714+
4715+ spin_lock_irqsave(&drv_data->lock, flags);
4716+ drv_data->run = QUEUE_STOPPED;
4717+ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
4718+ spin_unlock_irqrestore(&drv_data->lock, flags);
4719+ msleep(10);
4720+ spin_lock_irqsave(&drv_data->lock, flags);
4721+ }
4722+
4723+ if (!list_empty(&drv_data->queue) || drv_data->busy)
4724+ status = -EBUSY;
4725+ spin_unlock_irqrestore(&drv_data->lock, flags);
4726+
4727+ return status;
4728+}
4729+
4730+static int destroy_queue(struct driver_data *drv_data)
4731+{
4732+ int status;
4733+
4734+ status = stop_queue(drv_data);
4735+ if (status != 0)
4736+ return status;
4737+ destroy_workqueue(drv_data->workqueue);
4738+ return 0;
4739+}
4740+
4741+/* restart the spic, disable all interrupts, clean rx fifo */
4742+static void spi_hw_init(struct driver_data *drv_data)
4743+{
4744+ void *reg = drv_data->vaddr;
4745+
4746+ mrst_spi_enable(reg, 0x0);
4747+ spi_mask_intr(reg, 0xff);
4748+ mrst_spi_enable(reg, 0x1);
4749+
4750+ flush(drv_data);
4751+}
4752+
4753+static int __devinit mrst_spi_probe(struct pci_dev *pdev,
4754+ const struct pci_device_id *ent)
4755+{
4756+ int ret;
4757+ struct driver_data *drv_data;
4758+ struct spi_master *master;
4759+ struct device *dev = &pdev->dev;
4760+ u32 *clk_reg, clk_cdiv;
4761+ int pci_bar = 0;
4762+
4763+ BUG_ON(pdev == NULL);
4764+ BUG_ON(ent == NULL);
4765+
4766+ printk(KERN_INFO "MRST: found PCI SPI controller(ID: %04x:%04x)\n",
4767+ pdev->vendor, pdev->device);
4768+
4769+ ret = pci_enable_device(pdev);
4770+ if (ret)
4771+ return ret;
4772+
4773+ master = spi_alloc_master(dev, sizeof(struct driver_data));
4774+ if (!master) {
4775+ ret = -ENOMEM;
4776+ goto exit;
4777+ }
4778+
4779+ drv_data = spi_master_get_devdata(master);
4780+ drv_data->master = master;
4781+ drv_data->pdev = pdev;
4782+ drv_data->type = SSI_MOTO_SPI;
4783+ drv_data->prev_chip = NULL;
4784+
4785+ /* get basic io resource and map it */
4786+ drv_data->paddr = (void *)pci_resource_start(pdev, pci_bar);
4787+ drv_data->iolen = pci_resource_len(pdev, pci_bar);
4788+ drv_data->dma_addr = (dma_addr_t)(drv_data->paddr + 0x60);
4789+
4790+ ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
4791+ if (ret)
4792+ goto err_free_master;
4793+
4794+ drv_data->vaddr = ioremap_nocache((unsigned long)drv_data->paddr,
4795+ drv_data->iolen);
4796+ if (!drv_data->vaddr) {
4797+ ret = -ENOMEM;
4798+ goto err_free_pci;
4799+ }
4800+
4801+ clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
4802+ if (!clk_reg) {
4803+ ret = -ENOMEM;
4804+ goto err_iounmap;
4805+ }
4806+
4807+ /* get SPI controller operating freq info */
4808+ clk_cdiv = ((*clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
4809+ drv_data->freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
4810+ iounmap(clk_reg);
4811+
4812+ drv_data->irq = pdev->irq;
4813+ ret = request_irq(drv_data->irq, mrst_spi_irq, 0,
4814+ "mrst_spic0", drv_data);
4815+ if (ret < 0) {
4816+ dev_err(&pdev->dev, "can not get IRQ\n");
4817+ goto err_iounmap;
4818+ }
4819+
4820+ spin_lock_init(&drv_data->lock);
4821+
4822+ master->mode_bits = SPI_CPOL | SPI_CPHA;
4823+
4824+ master->bus_num = 0;
4825+ master->num_chipselect = 16;
4826+ master->cleanup = mrst_spi_cleanup;
4827+ master->setup = mrst_spi_setup;
4828+ master->transfer = mrst_spi_transfer;
4829+
4830+ drv_data->dma_inited = 0;
4831+#ifdef CONFIG_SPI_MRST_DMA
4832+ mrst_spi_dma_init(drv_data);
4833+#endif
4834+
4835+ /* basic HW init */
4836+ spi_hw_init(drv_data);
4837+
4838+ /* Initial and start queue */
4839+ ret = init_queue(drv_data);
4840+ if (ret) {
4841+ dev_err(&pdev->dev, "problem initializing queue\n");
4842+ goto err_disable_hw;
4843+ }
4844+ ret = start_queue(drv_data);
4845+ if (ret) {
4846+ dev_err(&pdev->dev, "problem starting queue\n");
4847+ goto err_disable_hw;
4848+ }
4849+
4850+ ret = spi_register_master(master);
4851+ if (ret) {
4852+ dev_err(&pdev->dev, "problem registering spi master\n");
4853+ goto err_queue_alloc;
4854+ }
4855+
4856+ /* PCI hook and SPI hook use the same drv data */
4857+ pci_set_drvdata(pdev, drv_data);
4858+ mrst_spi_debugfs_init(drv_data);
4859+
4860+ return 0;
4861+
4862+err_queue_alloc:
4863+ destroy_queue(drv_data);
4864+#ifdef CONFIG_SPI_MRST_DMA
4865+ mrst_spi_dma_exit(drv_data);
4866+#endif
4867+err_disable_hw:
4868+ mrst_spi_enable(drv_data->vaddr, 0);
4869+ free_irq(drv_data->irq, drv_data);
4870+err_iounmap:
4871+ iounmap(drv_data->vaddr);
4872+err_free_pci:
4873+ pci_release_region(pdev, pci_bar);
4874+err_free_master:
4875+ spi_master_put(master);
4876+exit:
4877+ pci_disable_device(pdev);
4878+ return ret;
4879+}
4880+
4881+static void __devexit mrst_spi_remove(struct pci_dev *pdev)
4882+{
4883+ struct driver_data *drv_data = pci_get_drvdata(pdev);
4884+ void *reg;
4885+ int status = 0;
4886+
4887+ if (!drv_data)
4888+ return;
4889+
4890+ mrst_spi_debugfs_remove(drv_data);
4891+ pci_set_drvdata(pdev, NULL);
4892+
4893+ /* remove the queue */
4894+ status = destroy_queue(drv_data);
4895+ if (status != 0)
4896+ dev_err(&pdev->dev, "mrst_spi_remove: workqueue will not "
4897+ "complete, message memory not freed\n");
4898+
4899+#ifdef CONFIG_SPI_MRST_DMA
4900+ mrst_spi_dma_exit(drv_data);
4901+#endif
4902+
4903+ reg = drv_data->vaddr;
4904+ mrst_spi_enable(reg, 0);
4905+ spi_disable_clk(reg);
4906+
4907+ /* release IRQ */
4908+ free_irq(drv_data->irq, drv_data);
4909+
4910+ iounmap(drv_data->vaddr);
4911+ pci_release_region(pdev, 0);
4912+
4913+ /* disconnect from the SPI framework */
4914+ spi_unregister_master(drv_data->master);
4915+ pci_disable_device(pdev);
4916+}
4917+
4918+#ifdef CONFIG_PM
4919+static int mrst_spi_suspend(struct pci_dev *pdev, pm_message_t state)
4920+{
4921+ struct driver_data *drv_data = pci_get_drvdata(pdev);
4922+ void *reg = drv_data->vaddr;
4923+ int status = 0;
4924+
4925+ status = stop_queue(drv_data);
4926+ if (status)
4927+ return status;
4928+
4929+ mrst_spi_enable(reg, 0);
4930+ spi_disable_clk(reg);
4931+ return status;
4932+}
4933+
4934+static int mrst_spi_resume(struct pci_dev *pdev)
4935+{
4936+ struct driver_data *drv_data = pci_get_drvdata(pdev);
4937+ int status = 0;
4938+
4939+ spi_hw_init(drv_data);
4940+
4941+ /* Start the queue running */
4942+ status = start_queue(drv_data);
4943+ if (status)
4944+ dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
4945+ return status;
4946+}
4947+#else
4948+#define mrst_spi_suspend NULL
4949+#define mrst_spi_resume NULL
4950+#endif
4951+
4952+static const struct pci_device_id pci_ids[] __devinitconst = {
4953+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
4954+ {},
4955+};
4956+
4957+static struct pci_driver mrst_spi_driver = {
4958+ .name = DRIVER_NAME,
4959+ .id_table = pci_ids,
4960+ .probe = mrst_spi_probe,
4961+ .remove = __devexit_p(mrst_spi_remove),
4962+ .suspend = mrst_spi_suspend,
4963+ .resume = mrst_spi_resume,
4964+};
4965+
4966+static int __init mrst_spi_init(void)
4967+{
4968+ return pci_register_driver(&mrst_spi_driver);
4969+}
4970+
4971+static void __exit mrst_spi_exit(void)
4972+{
4973+ pci_unregister_driver(&mrst_spi_driver);
4974+}
4975+
4976+module_init(mrst_spi_init);
4977+module_exit(mrst_spi_exit);
4978+
4979+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
4980+MODULE_DESCRIPTION("Intel Moorestown SPI controller driver");
4981+MODULE_LICENSE("GPL v2");
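
For context: the queued-transfer path above (mrst_spi_transfer -> pump_messages -> pump_transfers) is only ever reached through the generic spi_sync()/spi_async() API, so a slave protocol driver never calls this file directly. Below is a minimal sketch, not part of the patch, of a full-duplex transfer against this controller; it assumes "spi" is the struct spi_device handed to a slave driver's probe(), and the function name is invented for illustration.

#include <linux/spi/spi.h>

/* Illustrative sketch only: exchange len bytes with a slave on this bus. */
static int example_mrst_spi_xfer(struct spi_device *spi,
				 const u8 *tx, u8 *rx, unsigned len)
{
	struct spi_message m;
	struct spi_transfer t = {
		.tx_buf		= tx,
		.rx_buf		= rx,
		.len		= len,
		.speed_hz	= 0,	/* 0: keep the rate chosen in mrst_spi_setup() */
		.bits_per_word	= 0,	/* 0: keep the 8/16-bit width from setup */
	};

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	/* spi_sync() queues the message, which ends up in mrst_spi_transfer() */
	return spi_sync(spi, &m);
}
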
4982Index: linux-2.6.33/include/linux/spi/mrst_spi.h
4983===================================================================
4984--- /dev/null
4985+++ linux-2.6.33/include/linux/spi/mrst_spi.h
4986@@ -0,0 +1,162 @@
4987+#ifndef MRST_SPI_HEADER_H
4988+#define MRST_SPI_HEADER_H
4989+#include <linux/io.h>
4990+
4991+/* bit fields in CTRLR0 */
4992+#define SPI_DFS_OFFSET 0
4993+
4994+#define SPI_FRF_OFFSET 4
4995+#define SPI_FRF_SPI 0x0
4996+#define SPI_FRF_SSP 0x1
4997+#define SPI_FRF_MICROWIRE 0x2
4998+#define SPI_FRF_RESV 0x3
4999+
5000+#define SPI_MODE_OFFSET 6
5001+#define SPI_SCPH_OFFSET 6
5002+#define SPI_SCOL_OFFSET 7
5003+#define SPI_TMOD_OFFSET 8
5004+#define SPI_TMOD_TR 0x0 /* xmit & recv */
5005+#define SPI_TMOD_TO 0x1 /* xmit only */
5006+#define SPI_TMOD_RO 0x2 /* recv only */
5007+#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
5008+
5009+#define SPI_SLVOE_OFFSET 10
5010+#define SPI_SRL_OFFSET 11
5011+#define SPI_CFS_OFFSET 12
5012+
5013+/* bit fields in SR, 7 bits */
5014+#define SR_MASK 0x7f /* cover 7 bits */
5015+#define SR_BUSY (1 << 0)
5016+#define SR_TF_NOT_FULL (1 << 1)
5017+#define SR_TF_EMPT (1 << 2)
5018+#define SR_RF_NOT_EMPT (1 << 3)
5019+#define SR_RF_FULL (1 << 4)
5020+#define SR_TX_ERR (1 << 5)
5021+#define SR_DCOL (1 << 6)
5022+
5023+/* bit fields in ISR, IMR, RISR, 7 bits */
5024+#define SPI_INT_TXEI (1 << 0)
5025+#define SPI_INT_TXOI (1 << 1)
5026+#define SPI_INT_RXUI (1 << 2)
5027+#define SPI_INT_RXOI (1 << 3)
5028+#define SPI_INT_RXFI (1 << 4)
5029+#define SPI_INT_MSTI (1 << 5)
5030+
5031+/* TX RX interrupt level threshold, max can be 256 */
5032+#define SPI_INT_THRESHOLD 32
5033+
5034+#define DEFINE_MRST_SPI_RW_REG(reg, off) \
5035+static inline u32 read_##reg(void *p) \
5036+{ return readl(p + (off)); } \
5037+static inline void write_##reg(u32 v, void *p) \
5038+{ writel(v, p + (off)); }
5039+
5040+#define DEFINE_MRST_SPI_RO_REG(reg, off) \
5041+static inline u32 read_##reg(void *p) \
5042+{ return readl(p + (off)); } \
5043+
5044+DEFINE_MRST_SPI_RW_REG(ctrl0, 0x00)
5045+DEFINE_MRST_SPI_RW_REG(ctrl1, 0x04)
5046+DEFINE_MRST_SPI_RW_REG(ssienr, 0x08)
5047+DEFINE_MRST_SPI_RW_REG(mwcr, 0x0c)
5048+DEFINE_MRST_SPI_RW_REG(ser, 0x10)
5049+DEFINE_MRST_SPI_RW_REG(baudr, 0x14)
5050+DEFINE_MRST_SPI_RW_REG(txftlr, 0x18)
5051+DEFINE_MRST_SPI_RW_REG(rxftlr, 0x1c)
5052+DEFINE_MRST_SPI_RO_REG(txflr, 0x20)
5053+DEFINE_MRST_SPI_RO_REG(rxflr, 0x24)
5054+DEFINE_MRST_SPI_RO_REG(sr, 0x28)
5055+DEFINE_MRST_SPI_RW_REG(imr, 0x2c)
5056+DEFINE_MRST_SPI_RO_REG(isr, 0x30)
5057+DEFINE_MRST_SPI_RO_REG(risr, 0x34)
5058+DEFINE_MRST_SPI_RO_REG(txoicr, 0x38)
5059+DEFINE_MRST_SPI_RO_REG(rxoicr, 0x3c)
5060+DEFINE_MRST_SPI_RO_REG(rxuicr, 0x40)
5061+DEFINE_MRST_SPI_RO_REG(msticr, 0x44)
5062+DEFINE_MRST_SPI_RO_REG(icr, 0x48)
5063+DEFINE_MRST_SPI_RW_REG(dmacr, 0x4c)
5064+DEFINE_MRST_SPI_RW_REG(dmatdlr, 0x50)
5065+DEFINE_MRST_SPI_RW_REG(dmardlr, 0x54)
5066+DEFINE_MRST_SPI_RO_REG(idr, 0x58)
5067+DEFINE_MRST_SPI_RO_REG(version, 0x5c)
5068+DEFINE_MRST_SPI_RW_REG(dr, 0x60)
5069+
5070+static inline void mrst_spi_enable(void *reg, int enable)
5071+{
5072+ if (enable)
5073+ write_ssienr(0x1, reg);
5074+ else
5075+ write_ssienr(0x0, reg);
5076+}
5077+
5078+static inline void spi_enable_clk(void *reg, u16 div)
5079+{
5080+ write_baudr(div, reg);
5081+}
5082+
5083+static inline void spi_chip_sel(void *reg, u16 cs)
5084+{
5085+ if (cs > 4)
5086+ return;
5087+ write_ser((1 << cs), reg);
5088+}
5089+
5090+static inline void spi_disable_clk(void *reg)
5091+{
5092+ /* setting the divider to 0 will disable the clock */
5093+ write_baudr(0, reg);
5094+}
5095+
5096+/* disable some INT */
5097+static inline void spi_mask_intr(void *reg, u32 mask)
5098+{
5099+ u32 imr;
5100+ imr = read_imr(reg) & ~mask;
5101+ write_imr(imr, reg);
5102+}
5103+
5104+/* enable INT */
5105+static inline void spi_umask_intr(void *reg, u32 mask)
5106+{
5107+ u32 imr;
5108+ imr = read_imr(reg) | mask;
5109+ write_imr(imr, reg);
5110+}
5111+
5112+enum mrst_ssi_type {
5113+ SSI_MOTO_SPI = 0,
5114+ SSI_TI_SSP,
5115+ SSI_NS_MICROWIRE,
5116+};
5117+
5118+/* usually will be controller_data for SPI slave devices */
5119+struct mrst_spi_chip {
5120+ u8 poll_mode; /* 1 for controller polling mode */
5121+ u8 type; /* SPI/SSP/Microwire */
5122+ u8 enable_dma;
5123+ void (*cs_control)(u32 command);
5124+};
5125+
5126+#define SPI_DIB_NAME_LEN 16
5127+#define SPI_DIB_SPEC_INFO_LEN 10
5128+
5129+#define MRST_GPE_IRQ_VIA_GPIO_BIT (1 << 15)
5130+/* SPI device info block related */
5131+struct spi_dib_header {
5132+ u32 signature;
5133+ u32 length;
5134+ u8 rev;
5135+ u8 checksum;
5136+ u8 dib[0];
5137+} __attribute__((packed));
5138+
5139+struct spi_dib {
5140+ u16 host_num;
5141+ u16 cs;
5142+ u16 irq;
5143+ char name[SPI_DIB_NAME_LEN];
5144+ u8 dev_data[SPI_DIB_SPEC_INFO_LEN];
5145+} __attribute__((packed));
5146+
5147+extern struct console early_mrst_console;
5148+#endif /* #ifndef MRST_SPI_HEADER_H */
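
The mrst_spi_chip structure above is what platform code hands over as controller_data when it registers the slave devices parsed from the firmware's SPI device info blocks (see the note at the top of mrst_spi.c). A hedged sketch of such a registration follows; it is not part of the patch, and the modalias, chip select, IRQ and speed values are invented for illustration only.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/mrst_spi.h>

/* Stays resident (not __initdata): the SPI core keeps the controller_data
 * pointer and mrst_spi_setup() reads it when the slave driver binds. */
static struct mrst_spi_chip example_chip = {
	.poll_mode	= 0,		/* interrupt driven transfers */
	.type		= SPI_FRF_SPI,	/* Motorola SPI frame format */
	.enable_dma	= 0,
};

/* Illustrative only; a real table would be built from the firmware DIBs. */
static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	 = "spi_max3110",	/* hypothetical slave driver name */
		.controller_data = &example_chip,
		.max_speed_hz	 = 3125000,
		.bus_num	 = 0,	/* SPI0, the bus mrst_spi.c registers */
		.chip_select	 = 0,
		.irq		 = -1,	/* no IRQ wired in this sketch */
	},
};

static int __init example_register_spi_slaves(void)
{
	return spi_register_board_info(example_board_info,
				       ARRAY_SIZE(example_board_info));
}
/* board/SFI parsing code would typically run this early */
arch_initcall(example_register_spi_slaves);
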
5149Index: linux-2.6.33/drivers/serial/Kconfig
5150===================================================================
5151--- linux-2.6.33.orig/drivers/serial/Kconfig
5152+++ linux-2.6.33/drivers/serial/Kconfig
5153@@ -688,6 +688,27 @@ config SERIAL_SA1100_CONSOLE
5154 your boot loader (lilo or loadlin) about how to pass options to the
5155 kernel at boot time.)
5156
5157+config SERIAL_MAX3110
5158+ tristate "SPI UART driver for Max3110"
5159+ depends on SPI_MRST
5160+ select SERIAL_CORE
5161+ select SERIAL_CORE_CONSOLE
5162+ help
5163+ This is the UART protocol driver for the MAX3110 device on the
5164+ Intel Moorestown platform
5165+
5166+config MRST_MAX3110
5167+ boolean "Add Max3110 support for Moorestown platform"
5168+ default y
5169+ depends on SERIAL_MAX3110
5170+
5171+config MRST_MAX3110_IRQ
5172+ boolean "Enable GPIO IRQ for Max3110 over Moorestown"
5173+ default n
5174+ depends on MRST_MAX3110 && GPIO_LANGWELL
5175+ help
5176+ This has to be enabled after the Moorestown GPIO driver is loaded
5177+
5178 config SERIAL_BFIN
5179 tristate "Blackfin serial port support"
5180 depends on BLACKFIN
5181Index: linux-2.6.33/drivers/serial/Makefile
5182===================================================================
5183--- linux-2.6.33.orig/drivers/serial/Makefile
5184+++ linux-2.6.33/drivers/serial/Makefile
5185@@ -82,3 +82,4 @@ obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgd
5186 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
5187 obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
5188 obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
5189+obj-$(CONFIG_SERIAL_MAX3110) += max3110.o
5190Index: linux-2.6.33/drivers/serial/max3110.c
5191===================================================================
5192--- /dev/null
5193+++ linux-2.6.33/drivers/serial/max3110.c
5194@@ -0,0 +1,850 @@
5195+/*
5196+ * max3110.c - spi uart protocol driver for Maxim 3110 on Moorestown
5197+ *
5198+ * Copyright (C) Intel 2008 Feng Tang <feng.tang@intel.com>
5199+ *
5200+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5201+ *
5202+ * This program is free software; you can redistribute it and/or modify
5203+ * it under the terms of the GNU General Public License as published by
5204+ * the Free Software Foundation; either version 2 of the License, or
5205+ * (at your option) any later version.
5206+ *
5207+ * This program is distributed in the hope that it will be useful,
5208+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
5209+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5210+ * GNU General Public License for more details.
5211+ *
5212+ * You should have received a copy of the GNU General Public License
5213+ * along with this program; if not, write to the Free Software
5214+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
5215+ *
5216+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5217+ *
5218+ */
5219+
5220+/*
5221+ * Note:
5222+ * 1. From Max3110 spec, the Rx FIFO has 8 words, while the Tx FIFO only has
5223+ * 1 word. If SPI master controller doesn't support sclk frequency change,
5224+ * then the char need be sent out one by one with some delay
5225+ *
5226+ * 2. Currently only RX availabe interrrupt is used, no need for waiting TXE
5227+ * interrupt for a low speed UART device
5228+ */
5229+
5230+#include <linux/module.h>
5231+#include <linux/ioport.h>
5232+#include <linux/init.h>
5233+#include <linux/console.h>
5234+#include <linux/sysrq.h>
5235+#include <linux/platform_device.h>
5236+#include <linux/tty.h>
5237+#include <linux/tty_flip.h>
5238+#include <linux/serial_core.h>
5239+#include <linux/serial_reg.h>
5240+
5241+#include <linux/kthread.h>
5242+#include <linux/delay.h>
5243+#include <asm/atomic.h>
5244+#include <linux/spi/spi.h>
5245+#include <linux/spi/mrst_spi.h>
5246+
5247+#include "max3110.h"
5248+
5249+#define PR_FMT "max3110: "
5250+
5251+struct uart_max3110 {
5252+ struct uart_port port;
5253+ struct spi_device *spi;
5254+ char *name;
5255+
5256+ wait_queue_head_t wq;
5257+ struct task_struct *main_thread;
5258+ struct task_struct *read_thread;
5259+ int mthread_up;
5260+ spinlock_t lock;
5261+
5262+ u32 baud;
5263+ u16 cur_conf;
5264+ u8 clock;
5265+ u8 parity, word_7bits;
5266+
5267+ atomic_t uart_tx_need;
5268+
5269+ /* console related */
5270+ struct circ_buf con_xmit;
5271+ atomic_t con_tx_need;
5272+
5273+ /* irq related */
5274+ u16 irq;
5275+ atomic_t irq_pending;
5276+};
5277+
5278+/* global data structure, may need to be removed */
5279+struct uart_max3110 *pmax;
5280+static inline void receive_char(struct uart_max3110 *max, u8 ch);
5281+static void receive_chars(struct uart_max3110 *max,
5282+ unsigned char *str, int len);
5283+static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf);
5284+static void max3110_console_receive(struct uart_max3110 *max);
5285+
5286+int max3110_write_then_read(struct uart_max3110 *max,
5287+ const u8 *txbuf, u8 *rxbuf, unsigned len, int always_fast)
5288+{
5289+ struct spi_device *spi = max->spi;
5290+ struct spi_message message;
5291+ struct spi_transfer x;
5292+ int ret;
5293+
5294+ if (!txbuf || !rxbuf)
5295+ return -EINVAL;
5296+
5297+ spi_message_init(&message);
5298+ memset(&x, 0, sizeof x);
5299+ x.len = len;
5300+ x.tx_buf = txbuf;
5301+ x.rx_buf = rxbuf;
5302+ spi_message_add_tail(&x, &message);
5303+
5304+ if (always_fast)
5305+ x.speed_hz = 3125000;
5306+ else if (max->baud)
5307+ x.speed_hz = max->baud;
5308+
5309+ /* Do the i/o */
5310+ ret = spi_sync(spi, &message);
5311+ return ret;
5312+}
5313+
5314+/* Write a u16 to the device, and return one u16 read back */
5315+int max3110_out(struct uart_max3110 *max, const u16 out)
5316+{
5317+ u16 tmp;
5318+ int ret;
5319+
5320+ ret = max3110_write_then_read(max, (u8 *)&out, (u8 *)&tmp, 2, 1);
5321+ if (ret)
5322+ return ret;
5323+
5324+ /* If some valid data is read back */
5325+ if (tmp & MAX3110_READ_DATA_AVAILABLE)
5326+ receive_char(max, (tmp & 0xff));
5327+
5328+ return ret;
5329+}
5330+
5331+#define MAX_READ_LEN 20
5332+/*
5333+ * This is usually used to read data from SPIC RX FIFO, which doesn't
5334+ * need any delay like flushing character out. It returns how many
5335+ * valide bytes are read back
5336+ */
5337+static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf)
5338+{
5339+ u16 out[MAX_READ_LEN], in[MAX_READ_LEN];
5340+ u8 *pbuf, valid_str[MAX_READ_LEN];
5341+ int i, j, bytelen;
5342+
5343+ if (len > MAX_READ_LEN) {
5344+ pr_err(PR_FMT "read len %d is too large\n", len);
5345+ return 0;
5346+ }
5347+
5348+ bytelen = len * 2;
5349+ memset(out, 0, bytelen);
5350+ memset(in, 0, bytelen);
5351+
5352+ if (max3110_write_then_read(max, (u8 *)out, (u8 *)in, bytelen, 1))
5353+ return 0;
5354+
5355+ /* If the caller doesn't provide a buffer, then handle the received chars */
5356+ pbuf = buf ? buf : valid_str;
5357+
5358+ for (i = 0, j = 0; i < len; i++) {
5359+ if (in[i] & MAX3110_READ_DATA_AVAILABLE)
5360+ pbuf[j++] = (u8)(in[i] & 0xff);
5361+ }
5362+
5363+ if (j && (pbuf == valid_str))
5364+ receive_chars(max, valid_str, j);
5365+
5366+ return j;
5367+}
5368+
5369+static void serial_m3110_con_putchar(struct uart_port *port, int ch)
5370+{
5371+ struct uart_max3110 *max =
5372+ container_of(port, struct uart_max3110, port);
5373+ struct circ_buf *xmit = &max->con_xmit;
5374+
5375+ if (uart_circ_chars_free(xmit)) {
5376+ xmit->buf[xmit->head] = (char)ch;
5377+ xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1);
5378+ }
5379+
5380+ if (!atomic_read(&max->con_tx_need)) {
5381+ atomic_set(&max->con_tx_need, 1);
5382+ wake_up_process(max->main_thread);
5383+ }
5384+}
5385+
5386+/*
5387+ * Print a string to the serial port trying not to disturb
5388+ * any possible real use of the port...
5389+ *
5390+ * The console_lock must be held when we get here.
5391+ */
5392+static void serial_m3110_con_write(struct console *co,
5393+ const char *s, unsigned int count)
5394+{
5395+ if (!pmax)
5396+ return;
5397+
5398+ uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
5399+}
5400+
5401+static int __init
5402+serial_m3110_con_setup(struct console *co, char *options)
5403+{
5404+ struct uart_max3110 *max = pmax;
5405+ int baud = 115200;
5406+ int bits = 8;
5407+ int parity = 'n';
5408+ int flow = 'n';
5409+
5410+ pr_info(PR_FMT "setting up console\n");
5411+
5412+ if (!max) {
5413+ pr_err(PR_FMT "pmax is NULL, return");
5414+ return -ENODEV;
5415+ }
5416+
5417+ if (options)
5418+ uart_parse_options(options, &baud, &parity, &bits, &flow);
5419+
5420+ return uart_set_options(&max->port, co, baud, parity, bits, flow);
5421+}
5422+
5423+static struct tty_driver *serial_m3110_con_device(struct console *co,
5424+ int *index)
5425+{
5426+ struct uart_driver *p = co->data;
5427+ *index = co->index;
5428+ return p->tty_driver;
5429+}
5430+
5431+static struct uart_driver serial_m3110_reg;
5432+static struct console serial_m3110_console = {
5433+ .name = "ttyS",
5434+ .write = serial_m3110_con_write,
5435+ .device = serial_m3110_con_device,
5436+ .setup = serial_m3110_con_setup,
5437+ .flags = CON_PRINTBUFFER,
5438+ .index = -1,
5439+ .data = &serial_m3110_reg,
5440+};
5441+
5442+#define MRST_CONSOLE (&serial_m3110_console)
5443+
5444+static unsigned int serial_m3110_tx_empty(struct uart_port *port)
5445+{
5446+ return 1;
5447+}
5448+
5449+static void serial_m3110_stop_tx(struct uart_port *port)
5450+{
5451+ return;
5452+}
5453+
5454+/* stop_rx will be called in spin_lock env */
5455+static void serial_m3110_stop_rx(struct uart_port *port)
5456+{
5457+ return;
5458+}
5459+
5460+#define WORDS_PER_XFER 128
5461+static inline void send_circ_buf(struct uart_max3110 *max,
5462+ struct circ_buf *xmit)
5463+{
5464+ int len, left = 0;
5465+ u16 obuf[WORDS_PER_XFER], ibuf[WORDS_PER_XFER];
5466+ u8 valid_str[WORDS_PER_XFER];
5467+ int i, j;
5468+
5469+ while (!uart_circ_empty(xmit)) {
5470+ left = uart_circ_chars_pending(xmit);
5471+ while (left) {
5472+ len = (left >= WORDS_PER_XFER) ? WORDS_PER_XFER : left;
5473+
5474+ memset(obuf, 0, len * 2);
5475+ memset(ibuf, 0, len * 2);
5476+ for (i = 0; i < len; i++) {
5477+ obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG;
5478+ xmit->tail = (xmit->tail + 1) &
5479+ (UART_XMIT_SIZE - 1);
5480+ }
5481+ max3110_write_then_read(max, (u8 *)obuf,
5482+ (u8 *)ibuf, len * 2, 0);
5483+
5484+ for (i = 0, j = 0; i < len; i++) {
5485+ if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
5486+ valid_str[j++] = (u8)(ibuf[i] & 0xff);
5487+ }
5488+
5489+ if (j)
5490+ receive_chars(max, valid_str, j);
5491+
5492+ max->port.icount.tx += len;
5493+ left -= len;
5494+ }
5495+ }
5496+}
5497+
5498+static void transmit_char(struct uart_max3110 *max)
5499+{
5500+ struct uart_port *port = &max->port;
5501+ struct circ_buf *xmit = &port->state->xmit;
5502+
5503+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
5504+ return;
5505+
5506+ send_circ_buf(max, xmit);
5507+
5508+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
5509+ uart_write_wakeup(port);
5510+
5511+ if (uart_circ_empty(xmit))
5512+ serial_m3110_stop_tx(port);
5513+}
5514+
5515+/* This will be called by uart_write() and tty_write(); it
5516+ * must not go to sleep */
5517+static void serial_m3110_start_tx(struct uart_port *port)
5518+{
5519+ struct uart_max3110 *max =
5520+ container_of(port, struct uart_max3110, port);
5521+
5522+ if (!atomic_read(&max->uart_tx_need)) {
5523+ atomic_set(&max->uart_tx_need, 1);
5524+ wake_up_process(max->main_thread);
5525+ }
5526+}
5527+
5528+static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
5529+{
5530+ struct uart_port *port = &max->port;
5531+ struct tty_struct *tty;
5532+ int usable;
5533+
5534+ /* If uart is not opened, just return */
5535+ if (!port->state)
5536+ return;
5537+
5538+ tty = port->state->port.tty;
5539+ if (!tty)
5540+		return;	/* chars received before the tty was opened */
5541+
5542+ while (len) {
5543+ usable = tty_buffer_request_room(tty, len);
5544+ if (usable) {
5545+ tty_insert_flip_string(tty, str, usable);
5546+ str += usable;
5547+ port->icount.rx += usable;
5548+ tty_flip_buffer_push(tty);
5549+ }
5550+ len -= usable;
5551+ }
5552+}
5553+
5554+static inline void receive_char(struct uart_max3110 *max, u8 ch)
5555+{
5556+ receive_chars(max, &ch, 1);
5557+}
5558+
5559+static void max3110_console_receive(struct uart_max3110 *max)
5560+{
5561+ int loop = 1, num, total = 0;
5562+ u8 recv_buf[512], *pbuf;
5563+
5564+ pbuf = recv_buf;
5565+ do {
5566+ num = max3110_read_multi(max, 8, pbuf);
5567+
5568+ if (num) {
5569+ loop = 10;
5570+ pbuf += num;
5571+ total += num;
5572+
5573+ if (total >= 500) {
5574+ receive_chars(max, recv_buf, total);
5575+ pbuf = recv_buf;
5576+ total = 0;
5577+ }
5578+ }
5579+ } while (--loop);
5580+
5581+ if (total)
5582+ receive_chars(max, recv_buf, total);
5583+}
5584+
5585+static int max3110_main_thread(void *_max)
5586+{
5587+ struct uart_max3110 *max = _max;
5588+ wait_queue_head_t *wq = &max->wq;
5589+ int ret = 0;
5590+ struct circ_buf *xmit = &max->con_xmit;
5591+
5592+ init_waitqueue_head(wq);
5593+ pr_info(PR_FMT "start main thread\n");
5594+
5595+ do {
5596+ wait_event_interruptible(*wq, (atomic_read(&max->irq_pending) ||
5597+ atomic_read(&max->con_tx_need) ||
5598+ atomic_read(&max->uart_tx_need)) ||
5599+ kthread_should_stop());
5600+ max->mthread_up = 1;
5601+
5602+#ifdef CONFIG_MRST_MAX3110_IRQ
5603+ if (atomic_read(&max->irq_pending)) {
5604+ max3110_console_receive(max);
5605+ atomic_set(&max->irq_pending, 0);
5606+ }
5607+#endif
5608+
5609+ /* first handle console output */
5610+ if (atomic_read(&max->con_tx_need)) {
5611+ send_circ_buf(max, xmit);
5612+ atomic_set(&max->con_tx_need, 0);
5613+ }
5614+
5615+ /* handle uart output */
5616+ if (atomic_read(&max->uart_tx_need)) {
5617+ transmit_char(max);
5618+ atomic_set(&max->uart_tx_need, 0);
5619+ }
5620+ max->mthread_up = 0;
5621+ } while (!kthread_should_stop());
5622+
5623+ return ret;
5624+}
5625+
5626+#ifdef CONFIG_MRST_MAX3110_IRQ
5627+irqreturn_t static serial_m3110_irq(int irq, void *dev_id)
5628+{
5629+ struct uart_max3110 *max = dev_id;
5630+
5631+ /* max3110's irq is a falling edge, not level triggered,
5632+ * so no need to disable the irq */
5633+ if (!atomic_read(&max->irq_pending)) {
5634+ atomic_inc(&max->irq_pending);
5635+ wake_up_process(max->main_thread);
5636+ }
5637+ return IRQ_HANDLED;
5638+}
5639+#else
5640+/* if the RX IRQ is not used, a thread is needed to poll for input */
5641+static int max3110_read_thread(void *_max)
5642+{
5643+ struct uart_max3110 *max = _max;
5644+
5645+ pr_info(PR_FMT "start read thread\n");
5646+ do {
5647+ if (!max->mthread_up)
5648+ max3110_console_receive(max);
5649+
5650+ set_current_state(TASK_INTERRUPTIBLE);
5651+ schedule_timeout(HZ / 20);
5652+ } while (!kthread_should_stop());
5653+
5654+ return 0;
5655+}
5656+#endif
5657+
5658+static int serial_m3110_startup(struct uart_port *port)
5659+{
5660+ struct uart_max3110 *max =
5661+ container_of(port, struct uart_max3110, port);
5662+ u16 config = 0;
5663+ int ret = 0;
5664+
5665+ if (port->line != 0)
5666+ pr_err(PR_FMT "uart port startup failed\n");
5667+
5668+	/* first disable all IRQs and configure for 115200, 8n1 */
5669+ config = WC_TAG | WC_FIFO_ENABLE
5670+ | WC_1_STOPBITS
5671+ | WC_8BIT_WORD
5672+ | WC_BAUD_DR2;
5673+ ret = max3110_out(max, config);
5674+
5675+	/* as a thread handles tx/rx, low latency is needed */
5676+ port->state->port.tty->low_latency = 1;
5677+
5678+#ifdef CONFIG_MRST_MAX3110_IRQ
5679+ ret = request_irq(max->irq, serial_m3110_irq,
5680+ IRQ_TYPE_EDGE_FALLING, "max3110", max);
5681+ if (ret)
5682+ return ret;
5683+
5684+ /* enable RX IRQ only */
5685+ config |= WC_RXA_IRQ_ENABLE;
5686+ max3110_out(max, config);
5687+#else
5688+ /* if IRQ is disabled, start a read thread for input data */
5689+ max->read_thread =
5690+ kthread_run(max3110_read_thread, max, "max3110_read");
5691+#endif
5692+
5693+ max->cur_conf = config;
5694+ return 0;
5695+}
5696+
5697+static void serial_m3110_shutdown(struct uart_port *port)
5698+{
5699+ struct uart_max3110 *max =
5700+ container_of(port, struct uart_max3110, port);
5701+ u16 config;
5702+
5703+ if (max->read_thread) {
5704+ kthread_stop(max->read_thread);
5705+ max->read_thread = NULL;
5706+ }
5707+
5708+#ifdef CONFIG_MRST_MAX3110_IRQ
5709+ free_irq(max->irq, max);
5710+#endif
5711+
5712+ /* Disable interrupts from this port */
5713+ config = WC_TAG | WC_SW_SHDI;
5714+ max3110_out(max, config);
5715+}
5716+
5717+static void serial_m3110_release_port(struct uart_port *port)
5718+{
5719+}
5720+
5721+static int serial_m3110_request_port(struct uart_port *port)
5722+{
5723+ return 0;
5724+}
5725+
5726+static void serial_m3110_config_port(struct uart_port *port, int flags)
5727+{
5728+ /* give it fake type */
5729+ port->type = PORT_PXA;
5730+}
5731+
5732+static int
5733+serial_m3110_verify_port(struct uart_port *port, struct serial_struct *ser)
5734+{
5735+ /* we don't want the core code to modify any port params */
5736+ return -EINVAL;
5737+}
5738+
5739+
5740+static const char *serial_m3110_type(struct uart_port *port)
5741+{
5742+ struct uart_max3110 *max =
5743+ container_of(port, struct uart_max3110, port);
5744+ return max->name;
5745+}
5746+
5747+static void
5748+serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
5749+ struct ktermios *old)
5750+{
5751+ struct uart_max3110 *max =
5752+ container_of(port, struct uart_max3110, port);
5753+ unsigned char cval;
5754+ unsigned int baud, parity = 0;
5755+ int clk_div = -1;
5756+ u16 new_conf = max->cur_conf;
5757+
5758+ switch (termios->c_cflag & CSIZE) {
5759+ case CS7:
5760+ cval = UART_LCR_WLEN7;
5761+ new_conf |= WC_7BIT_WORD;
5762+ break;
5763+ default:
5764+ case CS8:
5765+ cval = UART_LCR_WLEN8;
5766+ new_conf |= WC_8BIT_WORD;
5767+ break;
5768+ }
5769+
5770+ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
5771+
5772+ /* first calc the div for 1.8MHZ clock case */
5773+ switch (baud) {
5774+ case 300:
5775+ clk_div = WC_BAUD_DR384;
5776+ break;
5777+ case 600:
5778+ clk_div = WC_BAUD_DR192;
5779+ break;
5780+ case 1200:
5781+ clk_div = WC_BAUD_DR96;
5782+ break;
5783+ case 2400:
5784+ clk_div = WC_BAUD_DR48;
5785+ break;
5786+ case 4800:
5787+ clk_div = WC_BAUD_DR24;
5788+ break;
5789+ case 9600:
5790+ clk_div = WC_BAUD_DR12;
5791+ break;
5792+ case 19200:
5793+ clk_div = WC_BAUD_DR6;
5794+ break;
5795+ case 38400:
5796+ clk_div = WC_BAUD_DR3;
5797+ break;
5798+ case 57600:
5799+ clk_div = WC_BAUD_DR2;
5800+ break;
5801+ case 115200:
5802+ clk_div = WC_BAUD_DR1;
5803+ break;
5804+ default:
5805+ /* pick the previous baud rate */
5806+ baud = max->baud;
5807+ clk_div = max->cur_conf & WC_BAUD_DIV_MASK;
5808+ tty_termios_encode_baud_rate(termios, baud, baud);
5809+ }
5810+
5811+ if (max->clock & MAX3110_HIGH_CLK) {
5812+ clk_div += 1;
5813+ /* high clk version max3110 doesn't support B300 */
5814+ if (baud == 300)
5815+ baud = 600;
5816+ if (baud == 230400)
5817+ clk_div = WC_BAUD_DR1;
5818+ tty_termios_encode_baud_rate(termios, baud, baud);
5819+ }
5820+
5821+ new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div;
5822+ if (termios->c_cflag & CSTOPB)
5823+ new_conf |= WC_2_STOPBITS;
5824+ else
5825+ new_conf &= ~WC_2_STOPBITS;
5826+
5827+ if (termios->c_cflag & PARENB) {
5828+ new_conf |= WC_PARITY_ENABLE;
5829+ parity |= UART_LCR_PARITY;
5830+ } else
5831+ new_conf &= ~WC_PARITY_ENABLE;
5832+
5833+ if (!(termios->c_cflag & PARODD))
5834+ parity |= UART_LCR_EPAR;
5835+ max->parity = parity;
5836+
5837+ uart_update_timeout(port, termios->c_cflag, baud);
5838+
5839+ new_conf |= WC_TAG;
5840+ if (new_conf != max->cur_conf) {
5841+ max3110_out(max, new_conf);
5842+ max->cur_conf = new_conf;
5843+ max->baud = baud;
5844+ }
5845+}
5846+
5847+/* don't handle hw handshaking */
5848+static unsigned int serial_m3110_get_mctrl(struct uart_port *port)
5849+{
5850+	return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
5851+}
5852+
5853+static void serial_m3110_set_mctrl(struct uart_port *port, unsigned int mctrl)
5854+{
5855+}
5856+
5857+static void serial_m3110_break_ctl(struct uart_port *port, int break_state)
5858+{
5859+}
5860+
5861+static void serial_m3110_pm(struct uart_port *port, unsigned int state,
5862+ unsigned int oldstate)
5863+{
5864+}
5865+
5866+static void serial_m3110_enable_ms(struct uart_port *port)
5867+{
5868+}
5869+
5870+struct uart_ops serial_m3110_ops = {
5871+ .tx_empty = serial_m3110_tx_empty,
5872+ .set_mctrl = serial_m3110_set_mctrl,
5873+ .get_mctrl = serial_m3110_get_mctrl,
5874+ .stop_tx = serial_m3110_stop_tx,
5875+ .start_tx = serial_m3110_start_tx,
5876+ .stop_rx = serial_m3110_stop_rx,
5877+ .enable_ms = serial_m3110_enable_ms,
5878+ .break_ctl = serial_m3110_break_ctl,
5879+ .startup = serial_m3110_startup,
5880+ .shutdown = serial_m3110_shutdown,
5881+ .set_termios = serial_m3110_set_termios, /* must have */
5882+ .pm = serial_m3110_pm,
5883+ .type = serial_m3110_type,
5884+ .release_port = serial_m3110_release_port,
5885+ .request_port = serial_m3110_request_port,
5886+ .config_port = serial_m3110_config_port,
5887+ .verify_port = serial_m3110_verify_port,
5888+};
5889+
5890+static struct uart_driver serial_m3110_reg = {
5891+ .owner = THIS_MODULE,
5892+ .driver_name = "MRST serial",
5893+ .dev_name = "ttyS",
5894+ .major = TTY_MAJOR,
5895+ .minor = 64,
5896+ .nr = 1,
5897+ .cons = MRST_CONSOLE,
5898+};
5899+
5900+static int serial_m3110_suspend(struct spi_device *spi, pm_message_t state)
5901+{
5902+ return 0;
5903+}
5904+
5905+static int serial_m3110_resume(struct spi_device *spi)
5906+{
5907+ return 0;
5908+}
5909+
5910+#ifdef CONFIG_MRST_MAX3110
5911+static struct mrst_spi_chip spi0_uart = {
5912+ .poll_mode = 1,
5913+ .enable_dma = 0,
5914+ .type = SPI_FRF_SPI,
5915+};
5916+#endif
5917+
5918+static int serial_m3110_probe(struct spi_device *spi)
5919+{
5920+ struct uart_max3110 *max;
5921+ int ret;
5922+ unsigned char *buffer;
5923+
5924+ max = kzalloc(sizeof(*max), GFP_KERNEL);
5925+ if (!max)
5926+ return -ENOMEM;
5927+
5928+ /* set spi info */
5929+ spi->mode = SPI_MODE_0;
5930+ spi->bits_per_word = 16;
5931+#ifdef CONFIG_MRST_MAX3110
5932+ max->clock = MAX3110_HIGH_CLK;
5933+ spi->controller_data = &spi0_uart;
5934+#endif
5935+ spi_setup(spi);
5936+
5937+	max->port.type = PORT_PXA; /* a dedicated max3110 port type should be requested */
5938+ max->port.fifosize = 2; /* only have 16b buffer */
5939+ max->port.ops = &serial_m3110_ops;
5940+ max->port.line = 0;
5941+ max->port.dev = &spi->dev;
5942+ max->port.uartclk = 115200;
5943+
5944+ max->spi = spi;
5945+ max->name = spi->modalias; /* use spi name as the name */
5946+ max->irq = (u16)spi->irq;
5947+
5948+ spin_lock_init(&max->lock);
5949+
5950+ max->word_7bits = 0;
5951+ max->parity = 0;
5952+ max->baud = 0;
5953+
5954+ max->cur_conf = 0;
5955+ atomic_set(&max->irq_pending, 0);
5956+
5957+ buffer = (unsigned char *)__get_free_page(GFP_KERNEL);
5958+ if (!buffer) {
5959+ ret = -ENOMEM;
5960+ goto err_get_page;
5961+ }
5962+ max->con_xmit.buf = (unsigned char *)buffer;
5963+ max->con_xmit.head = max->con_xmit.tail = 0;
5964+
5965+ max->main_thread = kthread_run(max3110_main_thread,
5966+ max, "max3110_main");
5967+ if (IS_ERR(max->main_thread)) {
5968+ ret = PTR_ERR(max->main_thread);
5969+ goto err_kthread;
5970+ }
5971+
5972+ pmax = max;
5973+	/* give membase a pseudo value to pass serial_core's check */
5974+ max->port.membase = (void *)0xff110000;
5975+ uart_add_one_port(&serial_m3110_reg, &max->port);
5976+
5977+ return 0;
5978+
5979+err_kthread:
5980+ free_page((unsigned long)buffer);
5981+err_get_page:
5982+ pmax = NULL;
5983+ kfree(max);
5984+ return ret;
5985+}
5986+
5987+static int max3110_remove(struct spi_device *dev)
5988+{
5989+ struct uart_max3110 *max = pmax;
5990+
5991+ if (!pmax)
5992+ return 0;
5993+
5994+ pmax = NULL;
5995+ uart_remove_one_port(&serial_m3110_reg, &max->port);
5996+
5997+ free_page((unsigned long)max->con_xmit.buf);
5998+
5999+ if (max->main_thread)
6000+ kthread_stop(max->main_thread);
6001+
6002+ kfree(max);
6003+ return 0;
6004+}
6005+
6006+static struct spi_driver uart_max3110_driver = {
6007+ .driver = {
6008+ .name = "spi_max3111",
6009+ .bus = &spi_bus_type,
6010+ .owner = THIS_MODULE,
6011+ },
6012+ .probe = serial_m3110_probe,
6013+ .remove = __devexit_p(max3110_remove),
6014+ .suspend = serial_m3110_suspend,
6015+ .resume = serial_m3110_resume,
6016+};
6017+
6018+
6019+int __init serial_m3110_init(void)
6020+{
6021+ int ret = 0;
6022+
6023+ ret = uart_register_driver(&serial_m3110_reg);
6024+ if (ret)
6025+ return ret;
6026+
6027+ ret = spi_register_driver(&uart_max3110_driver);
6028+ if (ret)
6029+ uart_unregister_driver(&serial_m3110_reg);
6030+
6031+ return ret;
6032+}
6033+
6034+void __exit serial_m3110_exit(void)
6035+{
6036+ spi_unregister_driver(&uart_max3110_driver);
6037+ uart_unregister_driver(&serial_m3110_reg);
6038+}
6039+
6040+module_init(serial_m3110_init);
6041+module_exit(serial_m3110_exit);
6042+
6043+MODULE_LICENSE("GPL");
6044+MODULE_ALIAS("max3110-uart");
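Aside (editorial, not part of the patch itself): the max3110 driver above defers every SPI transfer to a single kthread, max3110_main_thread(); serial_m3110_start_tx() and serial_m3110_irq() only set an atomic flag and wake that thread, because they run in contexts that must not sleep. The fragment below is a minimal, self-contained sketch of that same producer/worker pattern. All demo_* names are hypothetical; only kernel APIs the patch itself already uses (kthread_run(), kthread_should_stop(), wait_event_interruptible(), the atomic_* helpers and wake_up_process()) are assumed.

#include <linux/init.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static atomic_t demo_tx_need = ATOMIC_INIT(0);
static struct task_struct *demo_task;

static int demo_thread(void *unused)
{
	while (!kthread_should_stop()) {
		/* sleep until a producer flags work or we are asked to stop */
		wait_event_interruptible(demo_wq,
				atomic_read(&demo_tx_need) ||
				kthread_should_stop());
		if (atomic_read(&demo_tx_need)) {
			/* ... the (possibly sleeping) SPI transfer goes here ... */
			atomic_set(&demo_tx_need, 0);
		}
	}
	return 0;
}

/* producer side: safe from atomic context, never sleeps */
static void demo_kick(void)
{
	if (!atomic_read(&demo_tx_need)) {
		atomic_set(&demo_tx_need, 1);
		wake_up_process(demo_task);	/* same wake-up the patch uses */
	}
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demo_worker");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}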
6045Index: linux-2.6.33/drivers/serial/max3110.h
6046===================================================================
6047--- /dev/null
6048+++ linux-2.6.33/drivers/serial/max3110.h
6049@@ -0,0 +1,59 @@
6050+#ifndef _MAX3110_HEAD_FILE_
6051+#define _MAX3110_HEAD_FILE_
6052+
6053+#define MAX3110_HIGH_CLK 0x1 /* 3.6864 MHZ */
6054+#define MAX3110_LOW_CLK 0x0 /* 1.8432 MHZ */
6055+
6056+/* status bits for all 4 MAX3110 operate modes */
6057+#define MAX3110_READ_DATA_AVAILABLE (1 << 15)
6058+#define MAX3110_WRITE_BUF_EMPTY (1 << 14)
6059+
6060+#define WC_TAG (3 << 14)
6061+#define RC_TAG (1 << 14)
6062+#define WD_TAG (2 << 14)
6063+#define RD_TAG (0 << 14)
6064+
6065+/* bits def for write configuration */
6066+#define WC_FIFO_ENABLE_MASK (1 << 13)
6067+#define WC_FIFO_ENABLE (0 << 13)
6068+
6069+#define WC_SW_SHDI (1 << 12)
6070+
6071+#define WC_IRQ_MASK (0xF << 8)
6072+#define WC_TXE_IRQ_ENABLE (1 << 11) /* TX empty irq */
6073+#define WC_RXA_IRQ_ENABLE	(1 << 10)	/* RX available irq */
6074+#define WC_PAR_HIGH_IRQ_ENABLE (1 << 9)
6075+#define WC_REC_ACT_IRQ_ENABLE (1 << 8)
6076+
6077+#define WC_IRDA_ENABLE (1 << 7)
6078+
6079+#define WC_STOPBITS_MASK (1 << 6)
6080+#define WC_2_STOPBITS (1 << 6)
6081+#define WC_1_STOPBITS (0 << 6)
6082+
6083+#define WC_PARITY_ENABLE_MASK (1 << 5)
6084+#define WC_PARITY_ENABLE (1 << 5)
6085+
6086+#define WC_WORDLEN_MASK (1 << 4)
6087+#define WC_7BIT_WORD (1 << 4)
6088+#define WC_8BIT_WORD (0 << 4)
6089+
6090+#define WC_BAUD_DIV_MASK (0xF)
6091+#define WC_BAUD_DR1 (0x0)
6092+#define WC_BAUD_DR2 (0x1)
6093+#define WC_BAUD_DR4 (0x2)
6094+#define WC_BAUD_DR8 (0x3)
6095+#define WC_BAUD_DR16 (0x4)
6096+#define WC_BAUD_DR32 (0x5)
6097+#define WC_BAUD_DR64 (0x6)
6098+#define WC_BAUD_DR128 (0x7)
6099+#define WC_BAUD_DR3 (0x8)
6100+#define WC_BAUD_DR6 (0x9)
6101+#define WC_BAUD_DR12 (0xA)
6102+#define WC_BAUD_DR24 (0xB)
6103+#define WC_BAUD_DR48 (0xC)
6104+#define WC_BAUD_DR96 (0xD)
6105+#define WC_BAUD_DR192 (0xE)
6106+#define WC_BAUD_DR384 (0xF)
6107+
6108+#endif
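Aside (editorial, not part of the patch itself): every exchange with the MAX3110 is a 16-bit SPI word whose top two bits are a tag (the WC/RC/WD/RD values above); on the receive side, bit 15 (MAX3110_READ_DATA_AVAILABLE) says whether the low byte carries an incoming character, which is exactly what send_circ_buf() in the driver relies on. Below is a tiny stand-alone sketch of that encoding using plain C99 types so it compiles outside the kernel; the two macros are copied from the header above.

#include <stdint.h>

#define WD_TAG				(2 << 14)	/* write-data tag, as above */
#define MAX3110_READ_DATA_AVAILABLE	(1 << 15)	/* RX byte present, as above */

/* build the word that transmits one character */
static inline uint16_t max3110_tx_word(uint8_t ch)
{
	return (uint16_t)(WD_TAG | ch);
}

/* decode the word clocked back in; returns 1 if *ch was filled */
static inline int max3110_rx_word(uint16_t word, uint8_t *ch)
{
	if (!(word & MAX3110_READ_DATA_AVAILABLE))
		return 0;	/* no RX data carried by this word */
	*ch = (uint8_t)(word & 0xff);
	return 1;
}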
6109Index: linux-2.6.33/arch/x86/Kconfig.debug
6110===================================================================
6111--- linux-2.6.33.orig/arch/x86/Kconfig.debug
6112+++ linux-2.6.33/arch/x86/Kconfig.debug
6113@@ -43,6 +43,10 @@ config EARLY_PRINTK
6114 with klogd/syslogd or the X server. You should normally N here,
6115 unless you want to debug such a crash.
6116
6117+config X86_MRST_EARLY_PRINTK
6118+ bool "Early printk for MRST platform support"
6119+ depends on EARLY_PRINTK && X86_MRST
6120+
6121 config EARLY_PRINTK_DBGP
6122 bool "Early printk via EHCI debug port"
6123 default n
6124Index: linux-2.6.33/arch/x86/kernel/early_printk.c
6125===================================================================
6126--- linux-2.6.33.orig/arch/x86/kernel/early_printk.c
6127+++ linux-2.6.33/arch/x86/kernel/early_printk.c
6128@@ -14,6 +14,7 @@
6129 #include <xen/hvc-console.h>
6130 #include <asm/pci-direct.h>
6131 #include <asm/fixmap.h>
6132+#include <linux/spi/mrst_spi.h>
6133 #include <asm/pgtable.h>
6134 #include <linux/usb/ehci_def.h>
6135
6136@@ -231,6 +232,10 @@ static int __init setup_early_printk(cha
6137 if (!strncmp(buf, "xen", 3))
6138 early_console_register(&xenboot_console, keep);
6139 #endif
6140+#ifdef CONFIG_X86_MRST_EARLY_PRINTK
6141+ if (!strncmp(buf, "mrst", 4))
6142+ early_console_register(&early_mrst_console, keep);
6143+#endif
6144 buf++;
6145 }
6146 return 0;
6147Index: linux-2.6.33/arch/x86/kernel/mrst_earlyprintk.c
6148===================================================================
6149--- /dev/null
6150+++ linux-2.6.33/arch/x86/kernel/mrst_earlyprintk.c
6151@@ -0,0 +1,177 @@
6152+/*
6153+ * mrst_earlyprintk.c - spi-uart early printk for Intel Moorestown platform
6154+ *
6155+ * Copyright (c) 2008 Intel Corporation
6156+ * Author: Feng Tang(feng.tang@intel.com)
6157+ *
6158+ * This program is free software; you can redistribute it and/or
6159+ * modify it under the terms of the GNU General Public License
6160+ * as published by the Free Software Foundation; version 2
6161+ * of the License.
6162+ */
6163+
6164+#include <linux/console.h>
6165+#include <linux/kernel.h>
6166+#include <linux/init.h>
6167+#include <linux/spi/mrst_spi.h>
6168+
6169+#include <asm/fixmap.h>
6170+#include <asm/pgtable.h>
6171+
6172+#define MRST_SPI_TIMEOUT 0x200000
6173+#define MRST_REGBASE_SPI0 0xff128000
6174+#define MRST_CLK_SPI0_REG 0xff11d86c
6175+
6176+/* use SPI0 register for MRST x86 core */
6177+static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
6178+
6179+/* always contains an accessible address; starts out as NULL */
6180+static void *pspi;
6181+static u32 *pclk_spi0;
6182+static int mrst_spi_inited;
6183+
6184+/*
6185+ * One quirk of the early printk path is that it can be called both
6186+ * before and after the kernel's real page table is enabled, so the
6187+ * physical I/O registers have to be mapped twice. The flag
6188+ * "real_pgt_is_up" indicates which state we are in.
6189+ */
6190+static int real_pgt_is_up;
6191+
6192+static void early_mrst_spi_init(void)
6193+{
6194+ u32 ctrlr0 = 0;
6195+ u32 spi0_cdiv;
6196+	static u32 freq; /* freq only needs to be looked up once */
6197+
6198+ if (pspi && mrst_spi_inited)
6199+ return;
6200+
6201+ if (!freq) {
6202+ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, MRST_CLK_SPI0_REG);
6203+ pclk_spi0 = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
6204+ (MRST_CLK_SPI0_REG & (PAGE_SIZE - 1)));
6205+
6206+ spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
6207+ freq = 100000000 / (spi0_cdiv + 1);
6208+ }
6209+
6210+ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, mrst_spi_paddr);
6211+ pspi = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
6212+ (mrst_spi_paddr & (PAGE_SIZE - 1)));
6213+
6214+ /* disable SPI controller */
6215+ write_ssienr(0x0, pspi);
6216+
6217+ /* set control param, 8 bits, transmit only mode */
6218+ ctrlr0 = read_ctrl0(pspi);
6219+
6220+ ctrlr0 &= 0xfcc0;
6221+ ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
6222+ | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
6223+ write_ctrl0(ctrlr0, pspi);
6224+
6225+ /* change the spi0 clk to comply with 115200 bps */
6226+ write_baudr(freq/115200, pspi);
6227+
6228+ /* disable all INT for early phase */
6229+ write_imr(0x0, pspi);
6230+
6231+ /* set the cs to max3110 */
6232+ write_ser(0x2, pspi);
6233+
6234+ /* enable the HW, the last step for HW init */
6235+ write_ssienr(0x1, pspi);
6236+
6237+ mrst_spi_inited = 1;
6238+}
6239+
6240+/* set the baud rate and interrupt configuration */
6241+static void max3110_write_config(void)
6242+{
6243+ u16 config;
6244+
6245+ /* 115200, TM not set, no parity, 8bit word */
6246+ config = 0xc001;
6247+ write_dr(config, pspi);
6248+}
6249+
6250+/* wrap the char into a valid write-data word and send it to the max3110 */
6251+static void max3110_write_data(char c)
6252+{
6253+ u16 data;
6254+
6255+ data = 0x8000 | c;
6256+ write_dr(data, pspi);
6257+}
6258+
6259+/* slave select should be called in the read/write function */
6260+static int early_mrst_spi_putc(char c)
6261+{
6262+ unsigned int timeout;
6263+ u32 sr;
6264+
6265+ timeout = MRST_SPI_TIMEOUT;
6266+	/* early putc must make sure the TX FIFO is not full */
6267+ while (timeout--) {
6268+ sr = read_sr(pspi);
6269+ if (!(sr & SR_TF_NOT_FULL))
6270+ cpu_relax();
6271+ else
6272+ break;
6273+ }
6274+
6275+ if (timeout == 0xffffffff) {
6276+		printk(KERN_INFO "SPI: wait timed out\n");
6277+ return -1;
6278+ }
6279+
6280+ max3110_write_data(c);
6281+ return 0;
6282+}
6283+
6284+/* early SPI only use polling mode */
6285+static void early_mrst_spi_write(struct console *con,
6286+ const char *str, unsigned n)
6287+{
6288+ int i;
6289+
6290+ if ((read_cr3() == __pa(swapper_pg_dir)) && !real_pgt_is_up) {
6291+ mrst_spi_inited = 0;
6292+ real_pgt_is_up = 1;
6293+ }
6294+
6295+ if (!mrst_spi_inited) {
6296+ early_mrst_spi_init();
6297+ max3110_write_config();
6298+ }
6299+
6300+ for (i = 0; i < n && *str; i++) {
6301+ if (*str == '\n')
6302+ early_mrst_spi_putc('\r');
6303+ early_mrst_spi_putc(*str);
6304+
6305+ str++;
6306+ }
6307+}
6308+
6309+struct console early_mrst_console = {
6310+ .name = "earlymrst",
6311+ .write = early_mrst_spi_write,
6312+ .flags = CON_PRINTBUFFER,
6313+ .index = -1,
6314+};
6315+
6316+/* a debug function */
6317+void mrst_early_printk(const char *fmt, ...)
6318+{
6319+ char buf[512];
6320+ int n;
6321+ va_list ap;
6322+
6323+ va_start(ap, fmt);
6324+ n = vscnprintf(buf, 512, fmt, ap);
6325+ va_end(ap);
6326+
6327+ early_mrst_console.write(&early_mrst_console, buf, n);
6328+}
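Aside (editorial, not part of the patch itself): the magic constants in this early console map directly onto the bit definitions added in drivers/serial/max3110.h by this same patch: 0xc001 in max3110_write_config() is WC_TAG | WC_BAUD_DR2 (115200 baud on the 3.6864 MHz part), and 0x8000 | c in max3110_write_data() is WD_TAG | c. The sketch below merely expresses those literals symbolically; it assumes <linux/types.h> and the max3110.h macros are available. At boot, the console is then selected with earlyprintk=mrst (optionally with ",keep", as for the other earlyprintk backends).

#include <linux/types.h>
#include "max3110.h"	/* WC_TAG, WC_BAUD_DR2, WD_TAG from this patch */

/* same value as the literal 0xc001 written by max3110_write_config() */
static u16 mrst_early_config_word(void)
{
	return WC_TAG | WC_BAUD_DR2;
}

/* same value as the literal 0x8000 | c written by max3110_write_data() */
static u16 mrst_early_data_word(char c)
{
	return WD_TAG | (u8)c;
}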
6329Index: linux-2.6.33/arch/x86/include/asm/ipc_defs.h
6330===================================================================
6331--- /dev/null
6332+++ linux-2.6.33/arch/x86/include/asm/ipc_defs.h
6333@@ -0,0 +1,217 @@
6334+/*
6335+*ipc_defs.h - Header file defining data types and functions for ipc driver.
6336+*
6337+*Copyright (C) 2008 Intel Corp
6338+*Copyright (C) 2008 Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>
6339+*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
6340+*
6341+*This program is free software; you can redistribute it and/or modify
6342+*it under the terms of the GNU General Public License as published by
6343+*the Free Software Foundation; version 2 of the License.
6344+*
6345+*This program is distributed in the hope that it will be useful, but
6346+*WITHOUT ANY WARRANTY; without even the implied warranty of
6347+*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
6348+*General Public License for more details.
6349+ *
6350+*You should have received a copy of the GNU General Public License along
6351+*with this program; if not, write to the Free Software Foundation, Inc.,
6352+*59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
6353+*
6354+*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
6355+*
6356+*This driver implements core IPC kernel functions to read/write and execute
6357+*various commands supported by System controller firmware for Moorestown
6358+*platform.
6359+*/
6360+
6361+#ifndef __IPC_DEFS_H__
6362+#define __IPC_DEFS_H__
6363+
6364+#include <linux/init.h>
6365+#include <linux/module.h>
6366+
6367+#define E_INVALID_CMD -249
6368+#define E_READ_USER_CMD -250
6369+#define E_READ_USER_DATA -251
6370+#define E_WRITE_USER_DATA -252
6371+#define E_PMIC_MALLOC -253
6372+
6373+#define MAX_PMICREGS 5
6374+#define MAX_PMIC_MOD_REGS 4
6375+
6376+#ifndef FALSE
6377+#define FALSE 0
6378+#define TRUE 1
6379+#endif
6380+#define SUCCESS 0
6381+
6382+/*
6383+ * List of commands sent by calling host
6384+ * drivers to IPC_Driver
6385+*/
6386+
6387+/* CCA battery driver specific commands.
6388+ * These commands are shared between the IPC driver
6389+ * and the calling host driver.
6390+ */
6391+
6392+#define IPC_WATCHDOG 0xA0
6393+#define IPC_PROGRAM_BUS_MASTER 0xA1
6394+#define DEVICE_FW_UPGRADE 0xA2
6395+#define GET_FW_VERSION 0xA3
6396+
6397+#define IPC_BATT_CCA_READ 0xB0
6398+#define IPC_BATT_CCA_WRITE 0xB1
6399+#define IPC_BATT_GET_PROP 0xB2
6400+
6401+#define IPC_PMIC_REGISTER_READ_NON_BLOCKING 0xEB
6402+#define IPC_READ32 0xEC
6403+#define IPC_WRITE32 0xED
6404+#define IPC_LPE_READ 0xEE
6405+#define IPC_LPE_WRITE 0xEF
6406+#define IPC_SEND_COMMAND 0xFA
6407+#define IPC_PMIC_REGISTER_READ 0xFB
6408+#define IPC_PMIC_REGISTER_READ_MODIFY 0xFC
6409+#define IPC_PMIC_REGISTER_WRITE 0xFD
6410+#define IPC_CHECK_STATUS 0xFE
6411+#define GET_SCU_FIRMWARE_VERSION 0xFF
6412+
6413+#define MAX_PMICREGS 5
6414+#define MAX_PMIC_MOD_REGS 4
6415+
6416+/* Adding the error code*/
6417+#define E_INVALID_PARAM -0xA0
6418+#define E_NUM_ENTRIES_OUT_OF_RANGE -0xA1
6419+#define E_CMD_FAILED -0xA2
6420+#define E_NO_INTERRUPT_ON_IOC -0xA3
6421+#define E_QUEUE_IS_FULL -0xA4
6422+
6423+/* VRTC IPC CMD ID and sub id */
6424+#define IPC_VRTC_CMD 0xFA
6425+#define IPC_VRTC_SET_TIME 0x01
6426+#define IPC_VRTC_SET_ALARM 0x02
6427+
6428+struct ipc_cmd_val {
6429+ /*
6430+ *More fields to be added for
6431+ *future enhancements
6432+ */
6433+ u32 ipc_cmd_data;
6434+};
6435+
6436+struct ipc_cmd_type {
6437+ u8 cmd;
6438+ u32 data;
6439+ u8 value;
6440+ u8 ioc;
6441+};
6442+
6443+/*
6444+ * Structures defined for battery PMIC driver
6445+ * This structure is used by the following commands
6446+ * IPC_BATT_CCA_READ and IPC_BATT_CCA_WRITE
6447+ */
6448+struct ipc_batt_cca_data {
6449+ int cca_val;
6450+};
6451+
6452+/*
6453+ * Structures defined for battery PMIC driver
6454+ * This structure is used by IPC_BATT_GET_PROP
6455+ */
6456+struct ipc_batt_prop_data {
6457+ u32 batt_value1;
6458+ u8 batt_value2[5];
6459+};
6460+
6461+struct ipc_reg_data {
6462+ u8 ioc;
6463+ u32 address;
6464+ u32 data;
6465+};
6466+
6467+struct ipc_cmd {
6468+ u8 cmd;
6469+ u32 data;
6470+};
6471+
6472+struct pmicmodreg {
6473+ u16 register_address;
6474+ u8 value;
6475+ u8 bit_map;
6476+};
6477+
6478+struct pmicreg {
6479+ u16 register_address;
6480+ u8 value;
6481+};
6482+
6483+struct ipc_pmic_reg_data {
6484+ bool ioc;
6485+ struct pmicreg pmic_reg_data[MAX_PMICREGS];
6486+ u8 num_entries;
6487+};
6488+
6489+struct ipc_pmic_mod_reg_data {
6490+ bool ioc;
6491+ struct pmicmodreg pmic_mod_reg_data[MAX_PMIC_MOD_REGS];
6492+ u8 num_entries;
6493+};
6494+
6495+/* Firmware ingredient version information.
6496+ * fw_data[0] = scu_rt_minor;
6497+ * fw_data[1] = scu_rt_major;
6498+ * fw_data[2] = scu_bs_minor;
6499+ * fw_data[3] = scu_bs_major;
6500+ * fw_data[4] = punit_minor;
6501+ * fw_data[5] = punit_major;
6502+ * fw_data[6] = x86_minor;
6503+ * fw_data[7] = x86_major;
6504+ * fw_data[8] = spectra_minor;
6505+ * fw_data[9] = spectra_major;
6506+ * fw_data[10] = val_hook_minor;
6507+ * fw_data[11] = val_hook_major;
6508+ * fw_data[12] = ifw_minor;
6509+ * fw_data[13] = ifw_major;
6510+ * fw_data[14] = rfu1;
6511+ * fw_data[15] = rfu2;
6512+*/
6513+struct watchdog_reg_data {
6514+ int payload1;
6515+ int payload2;
6516+ bool ioc;
6517+};
6518+
6519+struct ipc_io_bus_master_regs {
6520+ u32 ctrl_reg_addr;
6521+ u32 ctrl_reg_data;
6522+};
6523+
6524+struct ipc_non_blocking_pmic_read{
6525+ struct ipc_pmic_reg_data pmic_nb_read;
6526+ void *context;
6527+ int (*callback_host)(struct ipc_pmic_reg_data pmic_read_data,
6528+ void *context);
6529+};
6530+
6531+int ipc_check_status(void);
6532+int mrst_get_firmware_version(unsigned char *mrst_fw_ver_info);
6533+int ipc_config_cmd(struct ipc_cmd_type ipc_cmd,
6534+ u32 ipc_cmd_len, void *cmd_data);
6535+int ipc_pmic_register_write(struct ipc_pmic_reg_data *p_write_reg_data,
6536+ u8 ipc_blocking_flag);
6537+int ipc_pmic_register_read(struct ipc_pmic_reg_data *p_read_reg_data);
6538+int ipc_pmic_register_read_modify(struct ipc_pmic_mod_reg_data
6539+ *p_read_mod_reg_data);
6540+int mrst_ipc_read32(struct ipc_reg_data *p_reg_data);
6541+int mrst_ipc_write32(struct ipc_reg_data *p_reg_data);
6542+int ipc_set_watchdog(struct watchdog_reg_data *p_watchdog_data);
6543+int ipc_program_io_bus_master(struct ipc_io_bus_master_regs
6544+ *p_reg_data);
6545+int ipc_pmic_register_read_non_blocking(struct ipc_non_blocking_pmic_read
6546+ *p_nb_read);
6547+int ipc_device_fw_upgrade(u8 *cmd_data, u32 ipc_cmd_len);
6548+int lnw_ipc_single_cmd(u8 cmd_id, u8 sub_id, int size, int msi);
6549+
6550+#endif
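Aside (editorial, not part of the patch itself): a short sketch of how kernel code might call the blocking PMIC read API declared above. The function name and the register address 0x3B are made-up illustrations; only the ipc_pmic_reg_data layout and the ipc_pmic_register_read() prototype from this header are assumed.

#include <asm/ipc_defs.h>

static int example_read_one_pmic_reg(u8 *value)
{
	struct ipc_pmic_reg_data req = { 0 };
	int ret;

	req.ioc = 0;		/* no completion interrupt requested */
	req.num_entries = 1;	/* single register in this request */
	req.pmic_reg_data[0].register_address = 0x3B;	/* hypothetical address */

	ret = ipc_pmic_register_read(&req);
	if (ret)
		return ret;	/* SUCCESS (0) or a driver error code */

	*value = req.pmic_reg_data[0].value;	/* filled in from IPC_RBUF */
	return 0;
}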
6551Index: linux-2.6.33/arch/x86/kernel/ipc_mrst.c
6552===================================================================
6553--- /dev/null
6554+++ linux-2.6.33/arch/x86/kernel/ipc_mrst.c
6555@@ -0,0 +1,1612 @@
6556+/*
6557+ * ipc_mrst.c: Driver for Langwell IPC1
6558+ *
6559+ * (C) Copyright 2008 Intel Corporation
6560+ * Author: Sreenidhi Gurudatt (sreenidhi.b.gurudatt@intel.com)
6561+ *
6562+ * This program is free software; you can redistribute it and/or
6563+ * modify it under the terms of the GNU General Public License
6564+ * as published by the Free Software Foundation; version 2
6565+ * of the License.
6566+ *
6567+ * Note:
6568+ * Langwell provides two IPC units to communicate with IA host. IPC1 is
6569+ * dedicated for IA. IPC commands results in LNW SCU interrupt. The
6570+ * initial implementation of this driver is platform specific. It will be
6571+ * converted to a PCI driver once SCU FW is in place.
6572+ * Log: Tested after submitting bugzilla patch - 24th December 08
6573+ * Log: Implemented Error Handling features and resolved IPC driver sighting
6574+ * PMIC Read/Write calls now take 80 to 200usecs - March 09 09.
6575+ * Log: Adding the IO BUS Master programming support - March 09 09.
6576+ */
6577+#include <linux/delay.h>
6578+#include <linux/errno.h>
6579+#include <linux/init.h>
6580+#include <linux/sysdev.h>
6581+#include <linux/pm.h>
6582+#include <linux/pci.h>
6583+#include <asm/ipc_defs.h>
6584+#include <linux/workqueue.h>
6585+#include <linux/sched.h>
6586+
6587+#include "ipc_mrst.h"
6588+
6589+#ifndef CONFIG_PCI
6590+#error "This file is PCI bus glue. CONFIG_PCI must be defined."
6591+#endif
6592+
6593+/* virtual memory address for the IPC base, returned by ioremap() */
6594+void __iomem *p_ipc_base;
6595+void __iomem *p_i2c_ser_bus;
6596+void __iomem *p_dfu_fw_base;
6597+void __iomem *p_dfu_mailbox_base;
6598+static unsigned char fw_ver_data[16];
6599+
6600+static wait_queue_head_t wait;
6601+static struct semaphore sema_ipc;
6602+static int scu_cmd_completed = FALSE;
6603+static bool non_blocking_read_flag = FALSE;
6604+static struct ipc_work_struct ipc_wq;
6605+static struct ipc_non_blocking_pmic_read pmic_read_que[MAX_NB_BUF_SIZE];
6606+static unsigned int cmd_id;
6607+static int (*callback)(struct ipc_pmic_reg_data pmic_read_data, void *context);
6608+static DEFINE_MUTEX(mrst_ipc_mutex);
6609+
6610+#ifdef LNW_IPC_DEBUG
6611+
6612+#define lnw_ipc_dbg(fmt, args...) \
6613+ do { printk(fmt, ## args); } while (0)
6614+#else
6615+#define lnw_ipc_dbg(fmt, args...) do { } while (0)
6616+#endif
6617+static const char ipc_name[] = "ipc_mrst";
6618+
6619+unsigned long lnw_ipc_address;
6620+static void __iomem *lnw_ipc_virt_address;
6621+static unsigned short cmdid_pool = 0xffff;
6622+static inline int lnw_ipc_set_mapping(struct pci_dev *dev)
6623+{
6624+ unsigned long cadr;
6625+ cadr = dev->resource[0].start;
6626+ cadr &= PCI_BASE_ADDRESS_MEM_MASK;
6627+ if (!cadr) {
6628+ printk(KERN_INFO "No PCI resource for IPC\n");
6629+ return -ENODEV;
6630+ }
6631+ lnw_ipc_virt_address = ioremap_nocache(cadr, 0x1000);
6632+ if (lnw_ipc_virt_address != NULL) {
6633+		dev_info(&dev->dev, "lnw ipc base found 0x%lx: 0x%p\n",
6634+ cadr, lnw_ipc_virt_address);
6635+ return 0;
6636+ }
6637+	printk(KERN_INFO "Failed to map LNW IPC1 phys address at 0x%lx\n", cadr);
6638+ return -ENODEV;
6639+}
6640+
6641+static inline void lnw_ipc_clear_mapping(void)
6642+{
6643+ iounmap(lnw_ipc_virt_address);
6644+ lnw_ipc_virt_address = NULL;
6645+}
6646+
6647+unsigned long lnw_ipc_readl(unsigned long a)
6648+{
6649+ return readl(lnw_ipc_virt_address + a);
6650+}
6651+
6652+static inline void lnw_ipc_writel(unsigned long d, unsigned long a)
6653+{
6654+ writel(d, lnw_ipc_virt_address + a);
6655+}
6656+
6657+static unsigned char lnw_ipc_assign_cmdid(void)
6658+{
6659+ unsigned char cmdid = 0;
6660+ unsigned short thebit;
6661+ thebit = cmdid_pool&(~cmdid_pool + 1);
6662+ printk(KERN_INFO "pool=0x%04x thebit=0x%04x\n",
6663+ cmdid_pool, thebit);
6664+ while (thebit >> cmdid)
6665+ cmdid++;
6666+ printk(KERN_INFO "Allocate IPC cmd ID %d\n", cmdid);
6667+ cmdid_pool &= ~thebit;
6668+ return cmdid;
6669+}
6670+
6671+int lnw_ipc_single_cmd(u8 cmd_id, u8 sub_id, int size, int msi)
6672+{
6673+ unsigned long cmdreg, stsreg, retry;
6674+
6675+ if (!lnw_ipc_virt_address) {
6676+ printk(KERN_ERR "No IPC mapping\n");
6677+ goto err_ipccmd;
6678+ }
6679+ if (size >= 16) {
6680+ printk(KERN_ERR "IPC message size too big %d\n", size);
6681+ goto err_ipccmd;
6682+ }
6683+
6684+ WARN_ON((msi != 0) && (msi != 1));
6685+
6686+ cmdreg = cmd_id
6687+ | (sub_id << 12)
6688+ | (size << 16)
6689+ | (msi << 8);
6690+
6691+ lnw_ipc_writel(cmdreg, LNW_IPC_CMD);
6692+
6693+ /* check status make sure the command is received by SCU */
6694+ retry = 1000;
6695+ stsreg = lnw_ipc_readl(LNW_IPC_STS);
6696+ if (stsreg & LNW_IPC_STS_ERR) {
6697+ lnw_ipc_dbg("IPC command ID %d error\n", cmd_id);
6698+ goto err_ipccmd;
6699+ }
6700+ while ((stsreg & LNW_IPC_STS_BUSY) && retry) {
6701+ lnw_ipc_dbg("IPC command ID %d busy\n", cmd_id);
6702+ stsreg = lnw_ipc_readl(LNW_IPC_STS);
6703+ udelay(10);
6704+ retry--;
6705+ }
6706+
6707+ if (!retry)
6708+ printk(KERN_ERR "IPC command ID %d failed/timeout", cmd_id);
6709+ else
6710+ lnw_ipc_dbg("IPC command ID %d completed\n", cmd_id);
6711+
6712+ return 0;
6713+
6714+err_ipccmd:
6715+ return -1;
6716+}
6717+EXPORT_SYMBOL(lnw_ipc_single_cmd);
6718+
6719+int lnw_ipc_send_cmd(unsigned char cmd, int size, int msi)
6720+{
6721+ unsigned long cmdreg, stsreg;
6722+ unsigned char cmdid, retry;
6723+
6724+ if (!lnw_ipc_virt_address) {
6725+ printk(KERN_ERR "No IPC mapping\n");
6726+ goto err_ipccmd;
6727+ }
6728+ if (size >= 16) {
6729+ printk(KERN_ERR "IPC message size too big %d\n", size);
6730+ goto err_ipccmd;
6731+ }
6732+
6733+ cmdid = lnw_ipc_assign_cmdid();
6734+ cmdreg = lnw_ipc_readl(LNW_IPC_CMD);
6735+ cmdreg |= cmdid << 12;
6736+ cmdreg |= size << 16;
6737+ if (msi)
6738+ cmdreg |= 1 << 8;
6739+ lnw_ipc_writel(cmdreg, LNW_IPC_CMD);
6740+ /* check status make sure the command is received by SCU */
6741+ retry = 10;
6742+ stsreg = lnw_ipc_readl(LNW_IPC_STS);
6743+ if (stsreg&LNW_IPC_STS_ERR) {
6744+ lnw_ipc_dbg("IPC command ID %d error\n", cmdid);
6745+ goto err_ipccmd;
6746+ }
6747+	while ((stsreg & LNW_IPC_STS_BUSY) && retry) {
6748+ lnw_ipc_dbg("IPC command ID %d busy\n", cmdid);
6749+ stsreg = lnw_ipc_readl(LNW_IPC_STS);
6750+ udelay(10);
6751+ retry--;
6752+ }
6753+ if (!retry)
6754+ lnw_ipc_dbg("IPC command ID %d failed/timeout\n", cmdid);
6755+ else
6756+ lnw_ipc_dbg("IPC command ID %d completed\n", cmdid);
6757+	return 0;
6758+err_ipccmd:
6759+ return -1;
6760+}
6761+/*
6762+ * For IPC transfer modes except read DMA, there is no need for MSI,
6763+ * so the driver polls status after each IPC command is issued.
6764+ */
6765+static irqreturn_t ipc_irq(int irq, void *dev_id)
6766+{
6767+ union ipc_sts ipc_sts_reg;
6768+
6769+ ipc_sts_reg.ipc_sts_data =
6770+ __raw_readl((p_ipc_base + IPC_STS));
6771+
6772+ if (!ipc_sts_reg.ipc_sts_parts.busy) {
6773+ /*Call on NON Blocking flag being set.*/
6774+ if (non_blocking_read_flag == TRUE) {
6775+ schedule_work(&ipc_wq.ipc_work);
6776+ } else {
6777+ scu_cmd_completed = TRUE;
6778+ wake_up_interruptible(&wait);
6779+ }
6780+ }
6781+ return IRQ_HANDLED;
6782+}
6783+
6784+static const struct ipc_driver ipc_mrst_driver = {
6785+ .name = "MRST IPC Controller",
6786+ /*
6787+ * generic hardware linkage
6788+ */
6789+ .irq = ipc_irq,
6790+ .flags = 0,
6791+};
6792+
6793+static int ipc_mrst_pci_probe(struct pci_dev *dev,
6794+ const struct pci_device_id *id)
6795+{
6796+ int err, retval, i;
6797+ lnw_ipc_dbg("Attempt to enable IPC irq 0x%x, pin %d\n",
6798+ dev->irq, dev->pin);
6799+ err = pci_enable_device(dev);
6800+ if (err) {
6801+		dev_err(&dev->dev, "Failed to enable MRST IPC (%d)\n",
6802+ err);
6803+ goto exit;
6804+ }
6805+ retval = pci_request_regions(dev, "ipc_mrst");
6806+ if (retval)
6807+ dev_err(&dev->dev, "Failed to allocate resource\
6808+ for MRST IPC(%d)\n", retval);
6809+
6810+ init_ipc_driver();
6811+
6812+ /* 0 means cmd ID is in use */
6813+ cmdid_pool = 0xffff;
6814+ /* initialize mapping */
6815+ retval = lnw_ipc_set_mapping(dev);
6816+ if (retval)
6817+ goto exit;
6818+ /* clear buffer */
6819+ for (i = 0; i < LNW_IPC_RWBUF_SIZE; i = i + 4) {
6820+ lnw_ipc_writel(0, LNW_IPC_WBUF + i);
6821+ lnw_ipc_writel(0, LNW_IPC_RBUF + i);
6822+ }
6823+ retval = request_irq(dev->irq, ipc_irq, IRQF_SHARED,
6824+ "ipc_mrst", (void *)&ipc_mrst_driver);
6825+ if (retval) {
6826+ printk(KERN_ERR "ipc: cannot register ISR %p irq %d ret %d\n",
6827+ ipc_irq, dev->irq, retval);
6828+ return -EIO;
6829+ }
6830+exit:
6831+ return 0;
6832+}
6833+
6834+void ipc_mrst_pci_remove(struct pci_dev *pdev)
6835+{
6836+ pci_release_regions(pdev);
6837+}
6838+
6839+/* PCI driver selection metadata; PCI hotplugging uses this */
6840+static const struct pci_device_id pci_ids[] = {
6841+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)}
6842+};
6843+
6844+MODULE_DEVICE_TABLE(pci, pci_ids);
6845+
6846+/* pci driver glue; this is a "new style" PCI driver module */
6847+static struct pci_driver ipc_mrst_pci_driver = {
6848+ .name = (char *)ipc_name,
6849+ .id_table = pci_ids,
6850+ .probe = ipc_mrst_pci_probe,
6851+ .remove = ipc_mrst_pci_remove,
6852+};
6853+
6854+static int __init ipc_mrst_init(void)
6855+{
6856+ int retval = 0;
6857+ lnw_ipc_dbg("%s\n", __func__);
6858+ retval = pci_register_driver(&ipc_mrst_pci_driver);
6859+ if (retval < 0) {
6860+ printk(KERN_CRIT "Failed to register %s\n",
6861+ ipc_mrst_pci_driver.name);
6862+ pci_unregister_driver(&ipc_mrst_pci_driver);
6863+ } else {
6864+ printk(KERN_CRIT "****Loaded %s driver version %s****\n",
6865+ ipc_mrst_pci_driver.name, MRST_IPC_DRIVER_VERSION);
6866+ cache_mrst_firmware_version();
6867+ }
6868+ return retval;
6869+}
6870+
6871+static void __exit ipc_mrst_exit(void)
6872+{
6873+ iounmap(p_ipc_base);
6874+ iounmap(p_i2c_ser_bus);
6875+ pci_unregister_driver(&ipc_mrst_pci_driver);
6876+ de_init_ipc_driver();
6877+}
6878+
6879+/*
6880+ * Steps to read a PMIC register (pseudocode):
6881+ * 1) Construct the SCU FW command structure for a normal read
6882+ * 2) Fill the IPC_WBUF with the p_reg_data
6883+ * 3) Write the command to the (memory-mapped) IPC_CMD register
6884+ * 4) Wait for an interrupt from the SCU firmware, or time out.
6885+*/
6886+int ipc_check_status(void)
6887+{
6888+ if (down_interruptible(&sema_ipc)) {
6889+ printk(KERN_INFO "IPC_Driver module busy\n");
6890+ return -EBUSY;
6891+ }
6892+
6893+ lnw_ipc_dbg(KERN_INFO
6894+ "ipc_driver: in <%s> -><%s> file line = <%d>\n",
6895+ __func__, __FILE__, __LINE__);
6896+ up(&sema_ipc);
6897+
6898+ return SUCCESS;
6899+}
6900+EXPORT_SYMBOL(ipc_check_status);
6901+
6902+int ipc_config_cmd(struct ipc_cmd_type cca_cmd, u32 ipc_cmd_len, void *cmd_data)
6903+{
6904+
6905+ union ipc_fw_cmd ipc_cca_cmd;
6906+ union ipc_sts ipc_sts_reg;
6907+ u32 retry = MAX_RETRY_CNT;
6908+ u32 ipc_wbuf;
6909+ u8 cbuf[MAX_NUM_ENTRIES] = { '\0' };
6910+ u32 rbuf_offset = 2;
6911+ u32 i = 0;
6912+
6913+ if ((&cca_cmd == NULL) || (cmd_data == NULL)) {
6914+		printk(KERN_INFO "Invalid arguments received:\
6915+ <%s> -> <%s> file line = <%d>\n", __func__, __FILE__, __LINE__);
6916+ return -EBUSY;
6917+ }
6918+
6919+ if (ipc_cmd_len < 4) {
6920+ printk(KERN_INFO
6921+		"ipc_send_config: Invalid input param (size) received\n");
6922+ return -EBUSY;
6923+ }
6924+ if (down_interruptible(&sema_ipc)) {
6925+ printk(KERN_INFO "IPC_Driver module busy\n");
6926+ return -EBUSY;
6927+ }
6928+ lnw_ipc_dbg(KERN_INFO
6929+ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
6930+ __func__, __FILE__, __LINE__);
6931+
6932+ switch (cca_cmd.cmd) {
6933+ case IPC_BATT_CCA_READ:
6934+ {
6935+ struct ipc_batt_cca_data *cca_data =
6936+ (struct ipc_batt_cca_data *)cmd_data;
6937+
6938+		lnw_ipc_dbg(KERN_INFO "Received IPC_BATT_CCA_READ\n");
6939+ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
6940+ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
6941+ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
6942+ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_READ;
6943+ ipc_cca_cmd.cmd_parts.size = 0;
6944+ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
6945+
6946+ lnw_ipc_dbg(KERN_INFO "ipc_cca_cmd.cmd_data = 0x%x\n",
6947+ ipc_cca_cmd.cmd_data);
6948+ /* Check for Status bit = 0 before sending an IPC command */
6949+ while (retry--) {
6950+ ipc_sts_reg.ipc_sts_data =
6951+ __raw_readl((p_ipc_base + IPC_STS));
6952+ if (!ipc_sts_reg.ipc_sts_parts.busy)
6953+ break;
6954+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
6955+ }
6956+ if (ipc_sts_reg.ipc_sts_parts.busy) {
6957+ printk(KERN_CRIT "SCU is busy %d\n",
6958+ ipc_sts_reg.ipc_sts_parts.busy);
6959+ up(&sema_ipc);
6960+ return -EBUSY;
6961+ }
6962+ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
6963+
6964+ /* Wait for command completion from SCU firmware */
6965+ scu_cmd_completed = FALSE;
6966+ wait_event_interruptible_timeout(wait,
6967+ scu_cmd_completed, IPC_TIMEOUT);
6968+
6969+ /*Check for error in command processing*/
6970+ ipc_sts_reg.ipc_sts_data =
6971+ __raw_readl((p_ipc_base + IPC_STS));
6972+ if (ipc_sts_reg.ipc_sts_parts.error) {
6973+ printk(KERN_CRIT "IPC Command failed %d\n",
6974+ ipc_sts_reg.ipc_sts_parts.error);
6975+ up(&sema_ipc);
6976+ return E_CMD_FAILED;
6977+ }
6978+
6979+ ipc_wbuf =
6980+ __raw_readl(p_ipc_base + IPC_RBUF);
6981+ cca_data->cca_val = ipc_wbuf;
6982+ lnw_ipc_dbg(KERN_INFO
6983+ "CCA Read at (0x%.8x) = 0x%.8x\n",
6984+ (u32) (p_ipc_base + IPC_RBUF), ipc_wbuf);
6985+ break;
6986+ }
6987+ case IPC_BATT_CCA_WRITE:
6988+
6989+ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
6990+ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
6991+ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
6992+ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_WRITE;
6993+ ipc_cca_cmd.cmd_parts.size = 0;
6994+ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
6995+
6996+ lnw_ipc_dbg(KERN_INFO "ipc_cca_cmd.cmd_data = 0x%x\n",
6997+ ipc_cca_cmd.cmd_data);
6998+
6999+ /* Check for Status bit = 0 before sending an IPC command */
7000+ while (retry--) {
7001+ ipc_sts_reg.ipc_sts_data =
7002+ __raw_readl((p_ipc_base + IPC_STS));
7003+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7004+ break;
7005+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7006+ }
7007+
7008+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7009+ printk(KERN_CRIT "SCU is busy %d\n",
7010+ ipc_sts_reg.ipc_sts_parts.busy);
7011+ up(&sema_ipc);
7012+ return -EBUSY;
7013+ }
7014+ __raw_writel(cca_cmd.data, ((p_ipc_base + IPC_WBUF) + 4));
7015+ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7016+
7017+ /* Wait for command completion from SCU firmware */
7018+ scu_cmd_completed = FALSE;
7019+ wait_event_interruptible_timeout(wait,
7020+ scu_cmd_completed, IPC_TIMEOUT);
7021+
7022+ /*Check for error in command processing*/
7023+ ipc_sts_reg.ipc_sts_data =
7024+ __raw_readl((p_ipc_base + IPC_STS));
7025+ if (ipc_sts_reg.ipc_sts_parts.error) {
7026+ printk(KERN_CRIT "IPC Command failed %d\n",
7027+ ipc_sts_reg.ipc_sts_parts.error);
7028+ up(&sema_ipc);
7029+ return E_CMD_FAILED;
7030+ }
7031+
7032+ break;
7033+ case IPC_BATT_GET_PROP:
7034+ {
7035+ struct ipc_batt_prop_data *prop_data =
7036+ (struct ipc_batt_prop_data *)cmd_data;
7037+
7038+		lnw_ipc_dbg(KERN_CRIT "Received IPC_BATT_GET_PROP\n");
7039+
7040+ /* CCA Read Implementation here.*/
7041+ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
7042+ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
7043+ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
7044+ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_GET_PROP;
7045+ ipc_cca_cmd.cmd_parts.size = 0;
7046+ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
7047+
7048+ lnw_ipc_dbg(KERN_CRIT "ipc_cca_cmd.cmd_data = 0x%x\n",
7049+ ipc_cca_cmd.cmd_data);
7050+ /* Check for Status bit = 0 before sending an IPC command */
7051+ while (retry--) {
7052+ ipc_sts_reg.ipc_sts_data =
7053+ __raw_readl((p_ipc_base + IPC_STS));
7054+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7055+ break;
7056+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7057+ }
7058+
7059+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7060+ printk(KERN_CRIT "SCU is busy %d\n",
7061+ ipc_sts_reg.ipc_sts_parts.busy);
7062+ up(&sema_ipc);
7063+ return -EBUSY;
7064+ }
7065+ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7066+
7067+ scu_cmd_completed = FALSE;
7068+ wait_event_interruptible_timeout(wait,
7069+ scu_cmd_completed, IPC_TIMEOUT);
7070+
7071+ if (ipc_cca_cmd.cmd_parts.ioc == 0) {
7072+ /*Check for error in command processing*/
7073+ ipc_sts_reg.ipc_sts_data =
7074+ __raw_readl((p_ipc_base + IPC_STS));
7075+ if (ipc_sts_reg.ipc_sts_parts.error) {
7076+ printk(KERN_CRIT "IPC Command failed %d\n",
7077+ ipc_sts_reg.ipc_sts_parts.error);
7078+ up(&sema_ipc);
7079+ return E_CMD_FAILED;
7080+ }
7081+ }
7082+
7083+ /* On wake-up fill the user buffer with IPC_RBUF data.*/
7084+ rbuf_offset = 0;
7085+ if ((ipc_cmd_len < 4) || (ipc_cmd_len > 9)) {
7086+ lnw_ipc_dbg(KERN_CRIT
7087+ "ipc_send_config: Invalid input param\
7088+			(size) received\n");
7089+ up(&sema_ipc);
7090+ return -EBUSY;
7091+ }
7092+
7093+ if (ipc_cmd_len >= 4) {
7094+ ipc_wbuf = __raw_readl(p_ipc_base + IPC_RBUF);
7095+ lnw_ipc_dbg(KERN_CRIT
7096+ "Read ipc_wbuf at (0x%.8x) = 0x%.8x\n",
7097+ (u32) (p_ipc_base + IPC_RBUF + rbuf_offset),
7098+ ipc_wbuf);
7099+ rbuf_offset += 4;
7100+ for (i = 0; i < (ipc_cmd_len - 4); i++) {
7101+ cbuf[i] =
7102+ __raw_readb((p_ipc_base + IPC_RBUF +
7103+ rbuf_offset));
7104+ prop_data->batt_value2[i] = cbuf[i];
7105+ lnw_ipc_dbg(KERN_CRIT
7106+ "Read cbuf[%d] at (0x%.8x) = 0x%.8x\n",
7107+ i,
7108+ (u32) (p_ipc_base + IPC_RBUF +
7109+ rbuf_offset), cbuf[i]);
7110+ rbuf_offset++;
7111+ }
7112+
7113+ }
7114+
7115+ break;
7116+ }
7117+ default:
7118+		printk(KERN_CRIT "Received unknown option\n");
7119+ up(&sema_ipc);
7120+ return -ENODEV;
7121+ }
7122+ up(&sema_ipc);
7123+
7124+ return SUCCESS;
7125+}
7126+EXPORT_SYMBOL(ipc_config_cmd);
7127+
7128+int mrst_get_firmware_version(unsigned char *mrst_fw_ver_info)
7129+{
7130+ int i = 0;
7131+ mutex_lock(&mrst_ipc_mutex);
7132+
7133+ if (mrst_fw_ver_info == NULL) {
7134+ WARN_ON(1);
7135+ return -EINVAL;
7136+ }
7137+ for (i = 0; i < 16; i++)
7138+ mrst_fw_ver_info[i] = fw_ver_data[i];
7139+
7140+ mutex_unlock(&mrst_ipc_mutex);
7141+ return 0;
7142+}
7143+EXPORT_SYMBOL(mrst_get_firmware_version);
7144+
7145+int init_ipc_driver(void)
7146+{
7147+ init_waitqueue_head(&wait);
7148+
7149+ sema_init(&sema_ipc, MAX_INSTANCES_ALLOWED);
7150+ if (down_interruptible(&sema_ipc)) {
7151+ printk(KERN_CRIT "IPC_Driver module busy\n");
7152+ up(&sema_ipc);
7153+ return -EBUSY;
7154+ }
7155+
7156+ INIT_WORK(&ipc_wq.ipc_work, mrst_pmic_read_handler);
7157+
7158+ /* Map the memory of ipc1 PMIC reg base */
7159+ p_ipc_base = ioremap_nocache(IPC_BASE_ADDRESS, IPC_MAX_ADDRESS);
7160+ if (p_ipc_base == NULL) {
7161+ printk(KERN_CRIT
7162+		"IPC Driver: unable to map the address of IPC 1\n");
7163+ up(&sema_ipc);
7164+ return E_PMIC_MALLOC;
7165+ }
7166+
7167+ printk(KERN_CRIT "p_ipc_base = <0x%.8X>\
7168+ IPC_BASE_ADDRESS = <0x%.8X>\n", (u32) p_ipc_base, IPC_BASE_ADDRESS);
7169+ p_i2c_ser_bus = ioremap_nocache(I2C_SER_BUS, I2C_MAX_ADDRESS);
7170+ if (p_i2c_ser_bus == NULL) {
7171+ printk(KERN_CRIT
7172+		"IPC Driver: unable to map the address of the I2C serial bus\n");
7173+ up(&sema_ipc);
7174+ return E_PMIC_MALLOC;
7175+ }
7176+
7177+ printk(KERN_CRIT "p_i2c_ser_bus = <0x%.8X>\
7178+ I2C_SER_BUS = <0x%.8X>\n", (u32) p_i2c_ser_bus, I2C_SER_BUS);
7179+ up(&sema_ipc);
7180+
7181+ return SUCCESS;
7182+}
7183+
7184+int de_init_ipc_driver(void)
7185+{
7186+ if (down_interruptible(&sema_ipc)) {
7187+ lnw_ipc_dbg(KERN_CRIT "IPC_Driver module busy\n");
7188+ up(&sema_ipc);
7189+ return -EBUSY;
7190+ }
7191+
7192+ lnw_ipc_dbg(KERN_CRIT
7193+ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
7194+ __func__, __FILE__, __LINE__);
7195+ IOUNMAP(p_ipc_base);
7196+ IOUNMAP(p_i2c_ser_bus);
7197+ up(&sema_ipc);
7198+
7199+ return SUCCESS;
7200+}
7201+
7202+int ipc_pmic_register_read(struct ipc_pmic_reg_data *p_read_reg_data)
7203+{
7204+ union ipc_fw_cmd ipc_cmd;
7205+ union ipc_sts ipc_sts_reg;
7206+ u32 retry = MAX_RETRY_CNT;
7207+ u32 *ipc_wbuf;
7208+ u8 cbuf[IPC_BUF_LEN] = { '\0' };
7209+ u32 cnt = 0;
7210+ u32 i = 0;
7211+ u32 rbuf_offset = 2;
7212+ u8 temp_value = 0;
7213+ u64 time_to_wait = 0;
7214+
7215+ ipc_wbuf = (u32 *)&cbuf;
7216+
7217+ if (p_read_reg_data == NULL) {
7218+		printk(KERN_CRIT "Invalid input param received in pmic read\n");
7219+ return -E_INVALID_PARAM;
7220+ }
7221+ if (p_read_reg_data->num_entries > MAX_NUM_ENTRIES) {
7222+		printk(KERN_CRIT "Invalid input param received in pmic read\n");
7223+ return -E_NUM_ENTRIES_OUT_OF_RANGE;
7224+ }
7225+
7226+ if (down_interruptible(&sema_ipc)) {
7227+ printk(KERN_CRIT "IPC_Driver module busy\n");
7228+ return -EBUSY;
7229+ }
7230+
7231+ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
7232+ ipc_cmd.cmd_parts.ioc = p_read_reg_data->ioc;
7233+ ipc_cmd.cmd_parts.rfu1 = 0x0;
7234+ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ;
7235+ ipc_cmd.cmd_parts.size = 3 * (p_read_reg_data->num_entries);
7236+ ipc_cmd.cmd_parts.rfu2 = 0x0;
7237+
7238+ /* command is set. Fill the IPC_BUF */
7239+ lnw_ipc_dbg(KERN_INFO "p_read_reg_data->num_entries <0x%X>\n",
7240+ p_read_reg_data->num_entries);
7241+
7242+ lnw_ipc_dbg(KERN_INFO "p_read_reg_data->register_address <0x%X>\n",
7243+ p_read_reg_data->pmic_reg_data[0].register_address);
7244+
7245+ for (i = 0; i < p_read_reg_data->num_entries; i++) {
7246+ cbuf[cnt] = p_read_reg_data->pmic_reg_data[i].register_address;
7247+ cbuf[(cnt) + 1] =
7248+ (p_read_reg_data->pmic_reg_data[i].register_address >> 8);
7249+ cbuf[(cnt) + 2] = p_read_reg_data->pmic_reg_data[i].value;
7250+ cnt = cnt + 3;
7251+ }
7252+
7253+ rbuf_offset = 0;
7254+ for (i = 0; i < p_read_reg_data->num_entries; i++) {
7255+ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
7256+ + rbuf_offset));
7257+ rbuf_offset += 4;
7258+ if (i >= 3)
7259+ break;
7260+ }
7261+
7262+ /* Check for Status bit = 0 before sending an IPC command */
7263+ while (retry--) {
7264+ ipc_sts_reg.ipc_sts_data =
7265+ __raw_readl((p_ipc_base + IPC_STS));
7266+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7267+ break;
7268+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7269+ }
7270+
7271+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7272+ printk(KERN_CRIT "SCU is busy %d\n",
7273+ ipc_sts_reg.ipc_sts_parts.busy);
7274+ up(&sema_ipc);
7275+ return -EBUSY;
7276+ }
7277+
7278+ scu_cmd_completed = FALSE;
7279+ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7280+
7281+	/* wait for 10ms; do not tie this to kernel timer ticks */
7282+ time_to_wait = msecs_to_jiffies(IPC_TIMEOUT);
7283+
7284+ /* Wait for command completion from SCU firmware */
7285+ wait_event_interruptible_timeout(wait,
7286+ scu_cmd_completed, time_to_wait);
7287+
7288+ if (ipc_cmd.cmd_parts.ioc == 0) {
7289+ /*Check for error in command processing*/
7290+ ipc_sts_reg.ipc_sts_data =
7291+ __raw_readl((p_ipc_base + IPC_STS));
7292+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7293+			printk(KERN_CRIT "Timeout occurred for ioc=0 and SCU is busy %d\n",
7294+ ipc_sts_reg.ipc_sts_parts.busy);
7295+ up(&sema_ipc);
7296+ return -EBUSY;
7297+ }
7298+ if (ipc_sts_reg.ipc_sts_parts.error) {
7299+ printk(KERN_CRIT "IPC Command failed %d\n",
7300+ ipc_sts_reg.ipc_sts_parts.error);
7301+ up(&sema_ipc);
7302+ return E_CMD_FAILED;
7303+ }
7304+ }
7305+ /* IPC driver expects interrupt when IOC is set to 1.*/
7306+ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
7307+ up(&sema_ipc);
7308+ return E_NO_INTERRUPT_ON_IOC;
7309+ }
7310+ rbuf_offset = 2;
7311+ for (i = 0; i < p_read_reg_data->num_entries; i++) {
7312+ temp_value = readb((p_ipc_base + IPC_RBUF + rbuf_offset));
7313+ p_read_reg_data->pmic_reg_data[i].value = temp_value;
7314+ rbuf_offset += 3;
7315+ }
7316+
7317+ up(&sema_ipc);
7318+
7319+ return SUCCESS;
7320+}
7321+EXPORT_SYMBOL(ipc_pmic_register_read);
7322+
7323+int ipc_pmic_register_write(struct ipc_pmic_reg_data *p_write_reg_data,
7324+ u8 ipc_blocking_flag)
7325+{
7326+ union ipc_fw_cmd ipc_cmd;
7327+ union ipc_sts ipc_sts_reg;
7328+ u32 retry = MAX_RETRY_CNT;
7329+ u32 *ipc_wbuf;
7330+ u8 cbuf[IPC_BUF_LEN] = { '\0' };
7331+ u32 cnt = 0;
7332+ u32 i = 0;
7333+ u32 rbuf_offset = 2;
7334+
7335+ ipc_wbuf = (u32 *)&cbuf;
7336+
7337+ if (p_write_reg_data == NULL) {
7338+		printk(KERN_CRIT "Invalid input param received in pmic write\n");
7339+ return -E_INVALID_PARAM;
7340+ }
7341+ if (p_write_reg_data->num_entries > MAX_NUM_ENTRIES) {
7342+		printk(KERN_CRIT "Invalid input param received in pmic write\n");
7343+ return -E_NUM_ENTRIES_OUT_OF_RANGE;
7344+ }
7345+
7346+ if (down_interruptible(&sema_ipc)) {
7347+ printk(KERN_INFO "IPC_Driver module busy\n");
7348+ return -EBUSY;
7349+ }
7350+
7351+ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
7352+ ipc_cmd.cmd_parts.ioc = p_write_reg_data->ioc;
7353+ ipc_cmd.cmd_parts.rfu1 = 0x0;
7354+ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_WRITE;
7355+ ipc_cmd.cmd_parts.size = 3 * (p_write_reg_data->num_entries);
7356+ ipc_cmd.cmd_parts.rfu2 = 0x0;
7357+
7358+ /* command is set. Fill the IPC_BUF */
7359+ lnw_ipc_dbg(KERN_INFO "p_write_reg_data->num_entries 0x%X>\n",
7360+ p_write_reg_data->num_entries);
7361+
7362+ lnw_ipc_dbg(KERN_INFO "p_write_reg_data->register_address 0x%X>\n",
7363+ p_write_reg_data->pmic_reg_data[0].register_address);
7364+ for (i = 0; i < p_write_reg_data->num_entries; i++) {
7365+ cbuf[cnt] = p_write_reg_data->pmic_reg_data[i].register_address;
7366+ cbuf[(cnt) + 1] =
7367+ (p_write_reg_data->pmic_reg_data[i].register_address >> 8);
7368+ cbuf[(cnt) + 2] = p_write_reg_data->pmic_reg_data[i].value;
7369+ cnt = cnt + 3;
7370+ }
7371+
7372+ rbuf_offset = 0;
7373+ for (i = 0; i < p_write_reg_data->num_entries; i++) {
7374+ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
7375+ + rbuf_offset));
7376+ rbuf_offset += 4;
7377+ if (i >= 3)
7378+ break;
7379+ }
7380+ /* Check for Status bit = 0 before sending an IPC command */
7381+ while (retry--) {
7382+ ipc_sts_reg.ipc_sts_data =
7383+ __raw_readl((p_ipc_base + IPC_STS));
7384+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7385+ break;
7386+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7387+ }
7388+
7389+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7390+ printk(KERN_CRIT "IPC Command failed %d\n",
7391+ ipc_sts_reg.ipc_sts_parts.busy);
7392+ up(&sema_ipc);
7393+ return -EBUSY;
7394+ }
7395+ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7396+
7397+ /* Wait for command completion from SCU firmware */
7398+ scu_cmd_completed = FALSE;
7399+ wait_event_interruptible_timeout(wait,
7400+ scu_cmd_completed, IPC_TIMEOUT);
7401+
7402+ /*Check for error in command processing*/
7403+ ipc_sts_reg.ipc_sts_data =
7404+ __raw_readl((p_ipc_base + IPC_STS));
7405+ if (ipc_sts_reg.ipc_sts_parts.error) {
7406+ printk(KERN_CRIT "IPC Command failed %d\n",
7407+ ipc_sts_reg.ipc_sts_parts.error);
7408+ up(&sema_ipc);
7409+ return E_CMD_FAILED;
7410+ }
7411+ up(&sema_ipc);
7412+
7413+ return SUCCESS;
7414+}
7415+EXPORT_SYMBOL(ipc_pmic_register_write);
7416+
7417+int ipc_pmic_register_read_modify(struct ipc_pmic_mod_reg_data
7418+ *p_read_mod_reg_data)
7419+{
7420+ union ipc_fw_cmd ipc_cmd;
7421+ union ipc_sts ipc_sts_reg;
7422+ u32 retry = MAX_RETRY_CNT;
7423+ u32 *ipc_wbuf;
7424+ u8 cbuf[IPC_BUF_LEN] = { '\0' };
7425+ u32 cnt = 0;
7426+ u32 i = 0;
7427+ u32 rbuf_offset = 2;
7428+ ipc_wbuf = (u32 *)&cbuf;
7429+
7430+ if (down_interruptible(&sema_ipc)) {
7431+ printk(KERN_INFO "IPC_Driver module busy\n");
7432+ return -EBUSY;
7433+ }
7434+
7435+ if (p_read_mod_reg_data == NULL) {
7436+		printk(KERN_CRIT "Invalid input received in pmic read modify\n");
7437+ up(&sema_ipc);
7438+ return -E_INVALID_PARAM;
7439+ }
7440+ if (p_read_mod_reg_data->num_entries > MAX_NUM_ENTRIES) {
7441+		printk(KERN_CRIT "Invalid input received in pmic read modify\n");
7442+ up(&sema_ipc);
7443+ return -E_NUM_ENTRIES_OUT_OF_RANGE;
7444+ }
7445+
7446+ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
7447+ ipc_cmd.cmd_parts.ioc = p_read_mod_reg_data->ioc;
7448+ ipc_cmd.cmd_parts.rfu1 = 0x0;
7449+ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ_MODIFY;
7450+ ipc_cmd.cmd_parts.size = 3 * (p_read_mod_reg_data->num_entries);
7451+ ipc_cmd.cmd_parts.rfu2 = 0x0;
7452+
7453+ /* command is set. Fill the IPC_BUF */
7454+ lnw_ipc_dbg(KERN_INFO "p_read_mod_reg_data->num_entries <0x%X> \n",
7455+ p_read_mod_reg_data->num_entries);
7456+
7457+ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
7458+ cbuf[cnt] =
7459+ p_read_mod_reg_data->pmic_mod_reg_data[i].register_address;
7460+ cbuf[(cnt) + 1] =
7461+ (p_read_mod_reg_data->pmic_mod_reg_data[i].
7462+ register_address >> 8);
7463+ cbuf[(cnt) + 2] =
7464+ p_read_mod_reg_data->pmic_mod_reg_data[i].value;
7465+ cbuf[(cnt) + 3] =
7466+ p_read_mod_reg_data->pmic_mod_reg_data[i].bit_map;
7467+ cnt = cnt + 4;
7468+ }
7469+
7470+ rbuf_offset = 0;
7471+ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
7472+ __raw_writel(ipc_wbuf[i],
7473+ ((p_ipc_base + IPC_WBUF) + rbuf_offset));
7474+ rbuf_offset += 4;
7475+ if (i >= 3)
7476+ break;
7477+ }
7478+
7479+ /* Check for Status bit = 0 before sending an IPC command */
7480+ while (retry--) {
7481+ ipc_sts_reg.ipc_sts_data =
7482+ __raw_readl((p_ipc_base + IPC_STS));
7483+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7484+ break;
7485+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7486+ }
7487+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7488+ printk(KERN_CRIT "SCU is busy %d\n",
7489+ ipc_sts_reg.ipc_sts_parts.busy);
7490+ up(&sema_ipc);
7491+ return -EBUSY;
7492+ }
7493+ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7494+
7495+ /* Wait for command completion from SCU firmware */
7496+ scu_cmd_completed = FALSE;
7497+ wait_event_interruptible_timeout(wait,
7498+ scu_cmd_completed, IPC_TIMEOUT);
7499+
7500+ if (ipc_cmd.cmd_parts.ioc == 0) {
7501+ /*Check for error in command processing*/
7502+ ipc_sts_reg.ipc_sts_data =
7503+ __raw_readl((p_ipc_base + IPC_STS));
7504+ if (ipc_sts_reg.ipc_sts_parts.error) {
7505+ printk(KERN_CRIT "IPC Command failed %d\n",
7506+ ipc_sts_reg.ipc_sts_parts.error);
7507+ up(&sema_ipc);
7508+ return E_CMD_FAILED;
7509+ }
7510+ }
7511+
7512+ /* IPC driver expects interrupt when IOC is set to 1.*/
7513+ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
7514+ up(&sema_ipc);
7515+ return E_NO_INTERRUPT_ON_IOC;
7516+ }
7517+
7518+ /* On wake-up fill the user buffer with IPC_RBUF data.*/
7519+ rbuf_offset = 0;
7520+ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
7521+ ipc_wbuf[i] =
7522+ __raw_readl((p_ipc_base + IPC_RBUF + rbuf_offset));
7523+ rbuf_offset += 4;
7524+ }
7525+
7526+ rbuf_offset = 2;
7527+ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
7528+ p_read_mod_reg_data->pmic_mod_reg_data[i].value =
7529+ __raw_readb((p_ipc_base + IPC_RBUF + rbuf_offset));
7530+ rbuf_offset += 4;
7531+ }
7532+ up(&sema_ipc);
7533+
7534+ return SUCCESS;
7535+}
7536+EXPORT_SYMBOL(ipc_pmic_register_read_modify);
7537+
7538+int ipc_pmic_register_read_non_blocking(
7539+ struct ipc_non_blocking_pmic_read *p_nb_read)
7540+{
7541+ union ipc_fw_cmd ipc_cmd;
7542+ union ipc_sts ipc_sts_reg;
7543+ u32 retry = MAX_RETRY_CNT;
7544+ u32 *ipc_wbuf;
7545+ u8 cbuf[IPC_BUF_LEN] = { '\0' };
7546+ u32 cnt = 0;
7547+ u32 i = 0;
7548+ u32 rbuf_offset = 2;
7549+ ipc_wbuf = (u32 *)&cbuf;
7550+
7551+ if (down_interruptible(&sema_ipc)) {
7552+ printk(KERN_CRIT "IPC_Driver module busy\n");
7553+ return -EBUSY;
7554+ }
7555+ if (p_nb_read == NULL) {
7556+		printk(KERN_CRIT "Invalid Input Param received\
7557+ in non blocking pmic read\n");
7558+ up(&sema_ipc);
7559+ return -E_INVALID_PARAM;
7560+ }
7561+ if (p_nb_read->pmic_nb_read.num_entries > MAX_NUM_ENTRIES) {
7562+ printk(KERN_CRIT "Invalid Number Of Entries\
7563+ - non blocking pmic read\n");
7564+ up(&sema_ipc);
7565+ return -E_NUM_ENTRIES_OUT_OF_RANGE;
7566+ }
7567+
7568+ if (cmd_id >= MAX_NB_BUF_SIZE) {
7569+ printk(KERN_CRIT "Queue is full!! cannot service request!\n");
7570+ up(&sema_ipc);
7571+ return -E_QUEUE_IS_FULL;
7572+ }
7573+
7574+
7575+ non_blocking_read_flag = TRUE;
7576+ /*Copy the contents to this global structure for future use*/
7577+ pmic_read_que[cmd_id] = *(p_nb_read);
7578+ ipc_wq.cmd_id = cmd_id++;
7579+ callback = p_nb_read->callback_host;
7580+ pmic_read_que[cmd_id].callback_host = p_nb_read->callback_host;
7581+
7582+ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
7583+ ipc_cmd.cmd_parts.ioc = 1;
7584+ ipc_cmd.cmd_parts.rfu1 = 0x0;
7585+ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ;
7586+ ipc_cmd.cmd_parts.size = 3 * (p_nb_read->pmic_nb_read.num_entries);
7587+ ipc_cmd.cmd_parts.rfu2 = 0x0;
7588+
7589+ /* command is set. Fill the IPC_BUF */
7590+ lnw_ipc_dbg(KERN_INFO "pmic_nb_read.num_entries <0x%X>\n",
7591+ p_nb_read->pmic_nb_read.num_entries);
7592+
7593+ lnw_ipc_dbg(KERN_INFO "pmic_nb_read.register_address <0x%X>\n",
7594+ p_nb_read->pmic_nb_read.pmic_reg_data[0].register_address);
7595+
7596+ for (i = 0; i < p_nb_read->pmic_nb_read.num_entries; i++) {
7597+ cbuf[cnt] =
7598+ p_nb_read->pmic_nb_read.pmic_reg_data[i].register_address;
7599+ cbuf[(cnt) + 1] = (p_nb_read->pmic_nb_read.pmic_reg_data[i]\
7600+ .register_address >> 8);
7601+ cbuf[(cnt) + 2] =
7602+ p_nb_read->pmic_nb_read.pmic_reg_data[i].value;
7603+ cnt = cnt + 3;
7604+ }
7605+ rbuf_offset = 0;
7606+ for (i = 0; i < p_nb_read->pmic_nb_read.num_entries; i++) {
7607+ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
7608+ + rbuf_offset));
7609+ rbuf_offset += 4;
7610+ if (i >= 3)
7611+ break;
7612+ }
7613+ /* Check for Status bit = 0 before sending an IPC command */
7614+ while (retry--) {
7615+ ipc_sts_reg.ipc_sts_data = __raw_readl((p_ipc_base + IPC_STS));
7616+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7617+ break;
7618+
7619+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7620+ }
7621+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7622+ printk(KERN_CRIT "SCU is busy %d\n",
7623+ ipc_sts_reg.ipc_sts_parts.busy);
7624+ up(&sema_ipc);
7625+ return -EBUSY;
7626+ }
7627+ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7628+	/* Control returns after issuing the command here */
7629+ /*Data is read asynchronously later*/
7630+ up(&sema_ipc);
7631+
7632+ return SUCCESS;
7633+}
7634+EXPORT_SYMBOL(ipc_pmic_register_read_non_blocking);
7635+
7636+int mrst_ipc_read32(struct ipc_reg_data *p_reg_data)
7637+{
7638+ union ipc_fw_cmd ipc_cmd;
7639+ union ipc_sts ipc_sts_reg;
7640+ u32 retry = MAX_RETRY_CNT;
7641+
7642+ if (p_reg_data == NULL) {
7643+		printk(KERN_CRIT "Invalid Input Param received\
7644+ in mrst_ipc_read32\n");
7645+ return -E_INVALID_PARAM;
7646+ }
7647+
7648+ if (down_interruptible(&sema_ipc)) {
7649+ printk(KERN_INFO "IPC_Driver module busy\n");
7650+ return -EBUSY;
7651+ }
7652+
7653+ lnw_ipc_dbg(KERN_INFO
7654+ "ipc_driver: Address = 0x%.8X\t: Data = 0x%.8X\n",
7655+ p_reg_data->address, p_reg_data->data);
7656+
7657+ ipc_cmd.cmd_parts.cmd = INDIRECT_READ;
7658+ ipc_cmd.cmd_parts.ioc = p_reg_data->ioc;
7659+ ipc_cmd.cmd_parts.rfu1 = 0x0;
7660+ ipc_cmd.cmd_parts.cmd_ID = 0x00;
7661+ ipc_cmd.cmd_parts.size = 4;
7662+ ipc_cmd.cmd_parts.rfu2 = 0x0;
7663+
7664+ lnw_ipc_dbg(KERN_INFO
7665+ "ipc_driver: IPC_CMD-> 0x%.8X\n", ipc_cmd.cmd_data);
7666+ /* Check for Status bit = 0 before sending an IPC command */
7667+ while (retry--) {
7668+ ipc_sts_reg.ipc_sts_data =
7669+ __raw_readl((p_ipc_base + IPC_STS));
7670+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7671+ break;
7672+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7673+ }
7674+
7675+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7676+ printk(KERN_CRIT "SCU is busy %d\n",
7677+ ipc_sts_reg.ipc_sts_parts.busy);
7678+ up(&sema_ipc);
7679+ return -EBUSY;
7680+ }
7681+ /*
7682+ * Write the Address to IPC_SPTR
7683+ * Issue the command by writing to IPC_CMD
7684+ * Read the contents of IPC_RBUF to data
7685+ */
7686+
7687+ __raw_writel(p_reg_data->address, (p_ipc_base + IPC_SPTR));
7688+ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7689+
7690+ scu_cmd_completed = FALSE;
7691+ wait_event_interruptible_timeout(wait,
7692+ scu_cmd_completed, IPC_TIMEOUT);
7693+
7694+ if (ipc_cmd.cmd_parts.ioc == 0) {
7695+ /*Check for error in command processing*/
7696+ ipc_sts_reg.ipc_sts_data =
7697+ __raw_readl((p_ipc_base + IPC_STS));
7698+ if (ipc_sts_reg.ipc_sts_parts.error) {
7699+ printk(KERN_CRIT "IPC Command failed %d\n",
7700+ ipc_sts_reg.ipc_sts_parts.error);
7701+ up(&sema_ipc);
7702+ return E_CMD_FAILED;
7703+ }
7704+ }
7705+ /* IPC driver expects interrupt when IOC is set to 1.*/
7706+ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
7707+ up(&sema_ipc);
7708+ return E_NO_INTERRUPT_ON_IOC;
7709+ }
7710+
7711+ /* Command completed successfully Read the data */
7712+ p_reg_data->data =
7713+ __raw_readl(p_ipc_base + IPC_RBUF);
7714+ lnw_ipc_dbg(KERN_INFO
7715+		"ipc_driver: Data received from IPC_RBUF = 0x%.8X\n",
7716+ p_reg_data->data);
7717+
7718+ up(&sema_ipc);
7719+
7720+ return SUCCESS;
7721+}
7722+EXPORT_SYMBOL(mrst_ipc_read32);
7723+
7724+int mrst_ipc_write32(struct ipc_reg_data *p_reg_data)
7725+{
7726+ union ipc_fw_cmd ipc_cmd;
7727+ union ipc_sts ipc_sts_reg;
7728+ u32 retry = MAX_RETRY_CNT;
7729+
7730+ if (p_reg_data == NULL) {
7731+		printk(KERN_CRIT "Invalid Input Param received\
7732+ in mrst_ipc_write32\n");
7733+ return -E_INVALID_PARAM;
7734+ }
7735+
7736+ if (down_interruptible(&sema_ipc)) {
7737+ printk(KERN_INFO "IPC_Driver module busy\n");
7738+ return -EBUSY;
7739+ }
7740+
7741+ lnw_ipc_dbg(KERN_INFO
7742+ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
7743+ __func__, __FILE__, __LINE__);
7744+
7745+ ipc_cmd.cmd_parts.cmd = INDIRECT_WRITE;
7746+ ipc_cmd.cmd_parts.ioc = p_reg_data->ioc;
7747+ ipc_cmd.cmd_parts.rfu1 = 0x0;
7748+ ipc_cmd.cmd_parts.cmd_ID = 0x00;
7749+ ipc_cmd.cmd_parts.size = 4;
7750+ ipc_cmd.cmd_parts.rfu2 = 0x0;
7751+
7752+ /* Check for Status bit = 0 before sending an IPC command */
7753+ while (retry--) {
7754+ ipc_sts_reg.ipc_sts_data =
7755+ __raw_readl((p_ipc_base + IPC_STS));
7756+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7757+ break;
7758+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7759+ }
7760+
7761+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7762+ printk(KERN_CRIT "SCU is busy %d\n",
7763+ ipc_sts_reg.ipc_sts_parts.busy);
7764+ up(&sema_ipc);
7765+ return -EBUSY;
7766+ }
7767+ __raw_writel(p_reg_data->address, (p_ipc_base + IPC_DPTR));
7768+ __raw_writel(p_reg_data->data, (p_ipc_base + IPC_WBUF));
7769+ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7770+
7771+ scu_cmd_completed = FALSE;
7772+ wait_event_interruptible_timeout(wait,
7773+ scu_cmd_completed, IPC_TIMEOUT);
7774+
7775+ /*Check for error in command processing*/
7776+ ipc_sts_reg.ipc_sts_data =
7777+ __raw_readl((p_ipc_base + IPC_STS));
7778+ if (ipc_sts_reg.ipc_sts_parts.error) {
7779+ printk(KERN_CRIT "IPC Command failed %d\n",
7780+ ipc_sts_reg.ipc_sts_parts.error);
7781+ up(&sema_ipc);
7782+ return E_CMD_FAILED;
7783+ }
7784+ up(&sema_ipc);
7785+
7786+ return SUCCESS;
7787+}
7788+EXPORT_SYMBOL(mrst_ipc_write32);
7789+
7790+int ipc_set_watchdog(struct watchdog_reg_data *p_watchdog_reg_data)
7791+{
7792+ union ipc_fw_cmd ipc_cmd;
7793+ u32 *ipc_wbuf;
7794+ u8 cbuf[16] = { '\0' };
7795+ u32 rbuf_offset = 2;
7796+ u32 retry = MAX_RETRY_CNT;
7797+ union ipc_sts ipc_sts_reg;
7798+
7799+ ipc_wbuf = (u32 *)&cbuf;
7800+
7801+ if (p_watchdog_reg_data == NULL) {
7802+		printk(KERN_CRIT "Invalid Input Param received in ipc_set_watchdog\n");
7803+ return -E_INVALID_PARAM;
7804+ }
7805+
7806+ if (down_interruptible(&sema_ipc)) {
7807+ printk(KERN_CRIT "IPC_Driver module busy\n");
7808+ return -EBUSY;
7809+ }
7810+
7811+ ipc_cmd.cmd_parts.cmd = IPC_SET_WATCHDOG_TIMER;
7812+ ipc_cmd.cmd_parts.ioc = p_watchdog_reg_data->ioc;
7813+ ipc_cmd.cmd_parts.rfu1 = 0x0;
7814+ ipc_cmd.cmd_parts.size = 2;
7815+ ipc_cmd.cmd_parts.rfu2 = 0x0;
7816+
7817+ /* Check for Status bit = 0 before sending an IPC command */
7818+ while (retry--) {
7819+ ipc_sts_reg.ipc_sts_data =
7820+ __raw_readl((p_ipc_base + IPC_STS));
7821+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7822+ break;
7823+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7824+ }
7825+
7826+ ipc_wbuf[0] = p_watchdog_reg_data->payload1;
7827+ printk(KERN_INFO "p_watchdog_data->payload1 <0x%X>\n",
7828+ ipc_wbuf[0]);
7829+ __raw_writel(ipc_wbuf[0], ((p_ipc_base + IPC_WBUF) + rbuf_offset));
7830+
7831+ ipc_wbuf[1] = p_watchdog_reg_data->payload2;
7832+ lnw_ipc_dbg(KERN_INFO "p_watchdog_data->payload2 <0x%X>\n",
7833+ ipc_wbuf[1]);
7834+ __raw_writel(ipc_wbuf[1], ((p_ipc_base + IPC_WBUF) + rbuf_offset));
7835+
7836+ lnw_ipc_dbg(KERN_INFO "ipc_cmd.cmd_data is <0x%X>\n",
7837+ ipc_cmd.cmd_data);
7838+ /*execute the command by writing to IPC_CMD registers*/
7839+ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
7840+
7841+ /* Wait for command completion from SCU firmware and return */
7842+ scu_cmd_completed = FALSE;
7843+ wait_event_interruptible_timeout(wait,
7844+ scu_cmd_completed, IPC_TIMEOUT);
7845+
7846+ /* IPC driver expects interrupt when IOC is set to 1.*/
7847+ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
7848+ up(&sema_ipc);
7849+ return E_NO_INTERRUPT_ON_IOC;
7850+ }
7851+
7852+ /*Check for error in command processing*/
7853+ ipc_sts_reg.ipc_sts_data =
7854+ __raw_readl((p_ipc_base + IPC_STS));
7855+ if (ipc_sts_reg.ipc_sts_parts.error) {
7856+ printk(KERN_CRIT "IPC Command failed %d\n",
7857+ ipc_sts_reg.ipc_sts_parts.error);
7858+ up(&sema_ipc);
7859+ return E_CMD_FAILED;
7860+ }
7861+ lnw_ipc_dbg(KERN_CRIT "IPC Command status = 0x%x\n",
7862+ ipc_sts_reg.ipc_sts_data);
7863+ up(&sema_ipc);
7864+
7865+ return SUCCESS;
7866+}
7867+EXPORT_SYMBOL(ipc_set_watchdog);
7868+
7869+int ipc_program_io_bus_master(struct ipc_io_bus_master_regs *p_reg_data)
7870+{
7871+ u32 io_bus_master_cmd = 0;
7872+ if (down_interruptible(&sema_ipc)) {
7873+ printk(KERN_INFO "IPC_Driver module busy\n");
7874+ return -EBUSY;
7875+ }
7876+
7877+ if (p_reg_data == NULL) {
7878+		printk(KERN_CRIT "Invalid Input Param received in\
7879+ <ipc_program_io_bus_master>\n");
7880+ up(&sema_ipc);
7881+ return -E_INVALID_PARAM;
7882+ }
7883+ printk(KERN_CRIT "p_reg_data->ctrl_reg_addr = 0x%x\n",\
7884+ p_reg_data->ctrl_reg_addr);
7885+ printk(KERN_CRIT "p_reg_data->ctrl_reg_data = 0x%x\n",\
7886+ p_reg_data->ctrl_reg_data);
7887+
7888+ /* Read the first byte for command*/
7889+ io_bus_master_cmd = (p_reg_data->ctrl_reg_addr)&(0xFF000000);
7890+ io_bus_master_cmd = (io_bus_master_cmd >> 24);
7891+
7892+ if (io_bus_master_cmd == NOP_CMD) {
7893+ printk(KERN_CRIT "NOP_CMD = 0x%x\n", io_bus_master_cmd);
7894+ } else if (io_bus_master_cmd == READ_CMD) {
7895+ lnw_ipc_dbg(KERN_CRIT "Address %#xp = data = %#x\n",
7896+ (unsigned int)(p_i2c_ser_bus + CTRL_REG_ADDR),
7897+ p_reg_data->ctrl_reg_addr);
7898+ __raw_writel(p_reg_data->ctrl_reg_addr,
7899+ (p_i2c_ser_bus + CTRL_REG_ADDR));
7900+		udelay(1000); /* write does not take effect without a delay */
7901+ p_reg_data->ctrl_reg_data =
7902+ __raw_readl(p_i2c_ser_bus + CTRL_REG_DATA);
7903+ lnw_ipc_dbg(KERN_CRIT "Data = %#x\n",
7904+ p_reg_data->ctrl_reg_data);
7905+ } else if (io_bus_master_cmd == WRITE_CMD) {
7906+ printk(KERN_CRIT"WRITE_CMD = 0x%x\n", io_bus_master_cmd);
7907+
7908+ __raw_writel(p_reg_data->ctrl_reg_data,
7909+ (p_i2c_ser_bus + CTRL_REG_DATA));
7910+ udelay(1000);
7911+ __raw_writel(p_reg_data->ctrl_reg_addr,
7912+ (p_i2c_ser_bus + CTRL_REG_ADDR));
7913+ } else {
7914+ printk(KERN_CRIT "in INVALID_CMD = 0x%x\n", io_bus_master_cmd);
7915+ up(&sema_ipc);
7916+ return -E_INVALID_CMD;
7917+ }
7918+ up(&sema_ipc);
7919+ return SUCCESS;
7920+}
7921+EXPORT_SYMBOL(ipc_program_io_bus_master);
7922+
7923+/* Work queue handler function:
7924+ * invoked from the work queue to complete a non-blocking PMIC read.
7925+ */
7926+static void mrst_pmic_read_handler(struct work_struct *work)
7927+{
7928+ static int i;
7929+ union ipc_sts ipc_sts_reg;
7930+ u32 retry = MAX_RETRY_CNT;
7931+ u32 rbuf_offset = 2;
7932+
7933+ u8 pmic_data = 0;
7934+
7935+ if (down_interruptible(&sema_ipc)) {
7936+ printk(KERN_CRIT "IPC_Driver non-blocking read handler\n");
7937+ } else {
7938+ non_blocking_read_flag = FALSE;
7939+ pmic_data = __raw_readb((p_ipc_base + IPC_RBUF + 2));
7940+
7941+ while (retry--) {
7942+ ipc_sts_reg.ipc_sts_data =
7943+ __raw_readl((p_ipc_base + IPC_STS));
7944+ if (!ipc_sts_reg.ipc_sts_parts.busy)
7945+ break;
7946+ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
7947+ }
7948+ if (ipc_sts_reg.ipc_sts_parts.busy) {
7949+ printk(KERN_CRIT "SCU is busy %d\n",
7950+ ipc_sts_reg.ipc_sts_parts.busy);
7951+ pmic_data = -1 /*Invalid data*/;
7952+ } else {
7953+ rbuf_offset = 2;
7954+ cmd_id--;
7955+ for (i = 0; i < pmic_read_que[cmd_id].
7956+ pmic_nb_read.num_entries; i++) {
7957+ pmic_read_que[cmd_id].pmic_nb_read.
7958+ pmic_reg_data[i].value =
7959+ __raw_readb((p_ipc_base + IPC_RBUF
7960+ + rbuf_offset));
7961+ rbuf_offset += 3;
7962+ }
7963+ }
7964+ }
7965+ up(&sema_ipc);
7966+ /*Call the call-back function.
7967+ *The host driver is responsible for reading valid data.
7968+ */
7969+ pmic_read_que[cmd_id].callback_host(pmic_read_que[cmd_id].pmic_nb_read,
7970+ pmic_read_que[cmd_id].context);
7971+}
7972+
7973+
7974+/**
7975+ * int ipc_device_fw_upgrade() - API to upgrade the Integrated Firmware Image
7976+ * for Intel(R) Moorestown platform.
7977+ * @mrst_fw_buf: pointer to the firmware image data.
7978+ * @mrst_fw_buf_len: length of the firmware image in bytes.
7979+ *
7980+ * This function provides an interface to upload an integrated firmware
7981+ * image to SCU firmware and drives the DFU mailbox handshake until the
7982+ * update completes. It is used on the Intel(R) Moorestown platform.
7983+ */
7984+int ipc_device_fw_upgrade(u8 *mrst_fw_buf, u32 mrst_fw_buf_len)
7985+{
7986+ union ipc_fw_cmd ipc_dfu_cmd;
7987+ void __iomem *p_tmp_fw_base;
7988+ int retry_cnt = 0;
7989+
7990+ MailBox_t *pMailBox = NULL;
7991+
7992+ if (down_interruptible(&sema_ipc)) {
7993+ printk(KERN_ERR "IPC_Driver module busy\n");
7994+ return -EBUSY;
7995+ }
7996+
7997+	/* Map the DFU load area for the 2KB MIP header */
7998+ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, MIP_HEADER_SIZE);
7999+ p_tmp_fw_base = p_dfu_fw_base;
8000+ if (p_dfu_fw_base == NULL) {
8001+ up(&sema_ipc);
8002+ return E_PMIC_MALLOC;
8003+ }
8004+ p_dfu_mailbox_base = ioremap_nocache(DFU_MAILBOX_ADDR,
8005+ sizeof(MailBox_t));
8006+ if (p_dfu_mailbox_base == NULL) {
8007+ up(&sema_ipc);
8008+ return E_PMIC_MALLOC;
8009+ }
8010+
8011+ pMailBox = (MailBox_t*)p_dfu_mailbox_base;
8012+
8013+ ipc_dfu_cmd.cmd_data = FW_UPGRADE_READY_CMD;
8014+ writel(ipc_dfu_cmd.cmd_data, (p_ipc_base + IPC_CMD));
8015+
8016+ /*IA initializes both IAFlag and SCUFlag to zero*/
8017+ pMailBox->SCUFlag = 0;
8018+ pMailBox->IAFlag = 0;
8019+
8020+ /*IA copies the 2KB MIP header to SRAM at 0xFFFC0000*/
8021+ memcpy((u8*)(p_dfu_fw_base), mrst_fw_buf, 0x800);
8022+ iounmap(p_tmp_fw_base);
8023+
8024+ /* IA sends "FW Update" IPC command (CMD_ID 0xFE; MSG_ID 0x02).
8025+ * Upon receiving this command, SCU will write the 2K MIP header
8026+ * from 0xFFFC0000 into NAND.
8027+ * SCU will write a status code into the Mailbox, and then set SCUFlag.
8028+ */
8029+
8030+ ipc_dfu_cmd.cmd_data = FW_UPGRADE_GO_CMD;
8031+ writel(ipc_dfu_cmd.cmd_data, (p_ipc_base + IPC_CMD));
8032+
8033+ /*IA stalls until SCUFlag is set */
8034+ while (pMailBox->SCUFlag != 1)
8035+ udelay(100);
8036+
8037+ /* IA checks Mailbox status.
8038+ * If the status is 'BADN', then abort (bad NAND).
8039+ * If the status is 'TxLO', then continue.
8040+ */
8041+ while (pMailBox->Mailbox != TxLO)
8042+ udelay(10000);
8043+ udelay(10000);
8044+
8045+update_retry:
8046+ if (retry_cnt > 5)
8047+ goto exit_function;
8048+
8049+ if (pMailBox->Mailbox == TxLO) {
8050+		/* Map the DFU load area for the first 128KB chunk */
8051+ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, (128*1024));
8052+ p_tmp_fw_base = p_dfu_fw_base;
8053+ if (p_dfu_fw_base == NULL) {
8054+ up(&sema_ipc);
8055+ iounmap(p_dfu_mailbox_base);
8056+ return E_PMIC_MALLOC;
8057+ }
8058+
8059+ mrst_fw_buf = mrst_fw_buf+0x800;
8060+ memcpy((u8 *)(p_dfu_fw_base), mrst_fw_buf, 0x20000);
8061+ pMailBox->IAFlag = 0x1;
8062+ while (pMailBox->SCUFlag == 1)
8063+ udelay(100);
8064+
8065+ /* check for 'BADN' */
8066+ if (pMailBox->Mailbox == BADN) {
8067+ up(&sema_ipc);
8068+ iounmap(p_tmp_fw_base);
8069+ iounmap(p_dfu_mailbox_base);
8070+ return -1;
8071+ }
8072+
8073+ iounmap(p_tmp_fw_base);
8074+ } else {
8075+ up(&sema_ipc);
8076+ iounmap(p_dfu_mailbox_base);
8077+ return -1;
8078+ }
8079+
8080+ while (pMailBox->Mailbox != TxHI)
8081+ udelay(10000);
8082+ udelay(10000);
8083+
8084+ if (pMailBox->Mailbox == TxHI) {
8085+		/* Map the DFU load area for the next 128KB chunk */
8086+ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, (128*1024));
8087+ p_tmp_fw_base = p_dfu_fw_base;
8088+ if (p_dfu_fw_base == NULL) {
8089+ up(&sema_ipc);
8090+ iounmap(p_dfu_mailbox_base);
8091+ return E_PMIC_MALLOC;
8092+ }
8093+
8094+ mrst_fw_buf = mrst_fw_buf+0x20000;
8095+ memcpy((u8 *)(p_dfu_fw_base), mrst_fw_buf, 0x20000);
8096+ pMailBox->IAFlag = 0;
8097+ while (pMailBox->SCUFlag == 0)
8098+ udelay(100);
8099+
8100+ /* check for 'BADN' */
8101+ if (pMailBox->Mailbox == BADN) {
8102+ up(&sema_ipc);
8103+ iounmap(p_tmp_fw_base);
8104+ iounmap(p_dfu_mailbox_base);
8105+ return -1;
8106+ }
8107+
8108+ iounmap(p_tmp_fw_base);
8109+ } else {
8110+ up(&sema_ipc);
8111+ iounmap(p_dfu_mailbox_base);
8112+ return -1;
8113+ }
8114+
8115+ if (pMailBox->Mailbox == TxLO) {
8116+ ++retry_cnt;
8117+ goto update_retry;
8118+ }
8119+
8120+ if (pMailBox->Mailbox == DONE)
8121+ printk(KERN_INFO "Firmware update completed!\n");
8122+
8123+exit_function:
8124+ iounmap(p_dfu_mailbox_base);
8125+ up(&sema_ipc);
8126+
8127+ return SUCCESS;
8128+}
8129+EXPORT_SYMBOL(ipc_device_fw_upgrade);
8130+
8131+static int cache_mrst_firmware_version(void)
8132+{
8133+ union ipc_sts ipc_sts_reg;
8134+ int i = 0;
8135+
8136+ mutex_lock(&mrst_ipc_mutex);
8137+
8138+ /*execute the command by writing to IPC_CMD registers*/
8139+ writel(IPC_GET_FW_VERSION, (p_ipc_base + IPC_CMD));
8140+ udelay(1000);
8141+
8142+ ipc_sts_reg.ipc_sts_data = readl(p_ipc_base + IPC_STS);
8143+ if (ipc_sts_reg.ipc_sts_parts.error) {
8144+ printk(KERN_ERR "IPC GetSCUFW Version Command failed %d\n",
8145+ ipc_sts_reg.ipc_sts_parts.error);
8146+		mutex_unlock(&mrst_ipc_mutex);
8147+ return -EBUSY;
8148+ }
8149+ if (ipc_sts_reg.ipc_sts_parts.busy) {
8150+ printk(KERN_ERR "SCU is busy %d\n",
8151+ ipc_sts_reg.ipc_sts_parts.busy);
8152+		mutex_unlock(&mrst_ipc_mutex);
8153+ return -EBUSY;
8154+ }
8155+
8156+ for (i = 0; i < 16 ; i++)
8157+ fw_ver_data[i] = readb(p_ipc_base + IPC_RBUF + i);
8158+ mutex_unlock(&mrst_ipc_mutex);
8159+ return 0;
8160+}
8161+
8162+MODULE_AUTHOR("Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>");
8163+MODULE_DESCRIPTION("Intel Moorestown IPC driver");
8164+MODULE_LICENSE("GPL");
8165+
8166+module_init(ipc_mrst_init);
8167+module_exit(ipc_mrst_exit);
8168Index: linux-2.6.33/arch/x86/kernel/ipc_mrst.h
8169===================================================================
8170--- /dev/null
8171+++ linux-2.6.33/arch/x86/kernel/ipc_mrst.h
8172@@ -0,0 +1,241 @@
8173+/*
8174+ * ipc_mrst.h: Driver for Langwell IPC1
8175+ *
8176+ * (C) Copyright 2008 Intel Corporation
8177+ * Author: Sreenidhi Gurudatt (sreenidhi.b.gurudatt@intel.com)
8178+ *
8179+ * This program is free software; you can redistribute it and/or
8180+ * modify it under the terms of the GNU General Public License
8181+ * as published by the Free Software Foundation; version 2
8182+ * of the License.
8183+ *
8184+ * Note:
8185+ * Langwell provides two IPC units to communicate with the IA host. IPC1 is
8186+ * dedicated to the IA; an IPC command raises an LNW SCU interrupt. The
8187+ * initial implementation of this driver is platform specific. It will be
8188+ * converted to a PCI driver once SCU FW is in place.
8189+ */
8190+#ifndef __IPC_MRST_H__
8191+#define __IPC_MRST_H__
8192+
8193+#include <linux/interrupt.h>
8194+#include <linux/workqueue.h>
8195+
8196+#define MRST_IPC_DRIVER_VERSION "0.01.004"
8197+#define IPC_TIMEOUT 10 /*in msecs*/
8198+#define MAX_RETRY_CNT 10
8199+#define MAX_NB_BUF_SIZE 100
8200+#define IPC_BUF_LEN 16
8201+#define MAX_NUM_ENTRIES 5
8202+#define USLEEP_STS_TIMEOUT 10
8203+
8204+#define LNW_IPC1_BASE 0xff11c000
8205+#define LNW_IPC1_MMAP_SIZE 1024
8206+
8207+#define LNW_IPC1
8208+#define LNW_IPC_CMD 0x00
8209+#define LNW_IPC_STS 0x04
8210+#define LNW_IPC_DPTR 0x08
8211+#define LNW_IPC_WBUF 0x80
8212+#define LNW_IPC_RBUF 0x90
8213+#define LNW_IPC_RWBUF_SIZE 16
8214+
8215+/* IPC status register layout */
8216+#define LNW_IPC_STS_BUSY (1<<0)
8217+#define LNW_IPC_STS_ERR (1<<1)
8218+#define LNW_IPC_STS_CMDID (0xF<<4)
8219+#define LNW_IPC_STS_INITID (0xFF<<8)
8220+#define LNW_IPC_STS_ERR_CODE (0xFF<<16)
8221+
8222+/* IPC command register layout */
8223+#define LNW_IPC_CMD_CMD (0xFF<<0)
8224+#define LNW_IPC_CMD_MSI (1<<8)
8225+#define LNW_IPC_CMD_ID (0xF<<12)
8226+#define LNW_IPC_CMD_SIZE (0xFF<<16)
8227+
8228+#define FW_UPGRADE_READY_CMD 0x10FE
8229+#define FW_UPGRADE_GO_CMD 0x20FE
8230+#define DFU_MAILBOX_ADDR 0xFFFFDFF4
8231+#define IPC_CMD_GO_TO_DFU_MODE 0x0001
8232+#define IPC_CMD_UPDATE_FW 0x0002
8233+#define IPC_CMD_FORCE_UPDATE_FW 0x0003
8234+
8235+/*256K storage size for loading the FW image.*/
8236+#define MAX_FW_SIZE 262144
8237+#define MIP_HEADER_SIZE 2048
8238+#define DONE 0x444f4e45
8239+#define BADN 0x4241444E
8240+#define TxHI 0x54784849
8241+#define TxLO 0x54784c4f
8242+
8243+typedef struct {
8244+ volatile unsigned int Mailbox;
8245+ volatile unsigned int SCUFlag;
8246+ volatile unsigned int IAFlag;
8247+} MailBox_t;
8248+
8249+enum IPC_CMD {
8250+ NORMAL_WRITE, /*0x00 Normal Write */
8251+ MSG_WRITE, /*0x01 Message Write */
8252+ INDIRECT_READ, /*0x02 Indirect Read */
8253+ RSVD, /*0x03 Reserved */
8254+ READ_DMA, /*0x04 Read DMA */
8255+ INDIRECT_WRITE, /*0x05 Indirect write */
8256+};
8257+
8258+int lnw_ipc_send_cmd(unsigned char cmd, int size, int msi);
8259+
8260+struct ipc_driver {
8261+ const char *name;
8262+ irqreturn_t(*irq) (int irq, void *ipc);
8263+ int flags;
8264+};
8265+
8266+/*
8267+ * defines specific to ipc_driver and
8268+ * not exposed outside
8269+ */
8270+
8271+/*cmd_ID fields for CCA Read/Writes*/
8272+
8273+#define CCA_REG_WRITE 0x0000
8274+#define CCA_REG_READ 0x0001
8275+#define CCA_REG_GET_PROP 0x0002
8276+
8277+#define IPC_SET_WATCHDOG_TIMER 0xF8
8278+#define IPC_CCA_CMD_READ_WRITE 0xEF
8279+#define IPC_DEVICE_FIRMWARE_UPGRADE 0xFE
8280+#define IPC_PMIC_CMD_READ_WRITE 0xFF
8281+#define IPC_GET_FW_VERSION 0xF4
8282+
8283+/*cmd_ID fields for CCA Read/Writes*/
8284+#define PMIC_REG_WRITE 0x0000
8285+#define PMIC_REG_READ 0x0001
8286+#define PMIC_REG_READ_MODIFY 0x0002
8287+#define LPE_READ 0x0003
8288+#define LPE_WRITE 0x0004
8289+
8290+#define IPC_CMD_GO_TO_DFU_MODE 0x0001
8291+#define IPC_CMD_UPDATE_FW 0x0002
8292+#define IPC_CMD_FORCE_UPDATE_FW 0x0003
8293+
8294+#define NORMAL_WRITE 0x00
8295+#define MESSAGE_WRITE 0x01
8296+#define INDIRECT_READ 0x02
8297+#define INDIRECT_WRITE 0x05
8298+#define READ_DMA 0x04
8299+
8300+
8301+/* Used to override user option */
8302+#define IOC 1
8303+
8304+#define IPC_REG_ISR_FAILED 0xFF
8305+
8306+/*
8307+ * IO remap functions for PMIC Register reads
8308+ * and writes.
8309+ */
8310+
8311+#ifdef UNIT_TEST
8312+#define IOREMAP(x, y) \
8313+ kmalloc((y), GFP_KERNEL);
8314+
8315+#define IOUNMAP(x) \
8316+ kfree((x));
8317+
8318+#define IOREAD32(x) \
8319+ *(u32 *) (x);
8320+
8321+#define IOWRITE32(x, y) \
8322+ *(u32 *) (y) = x;
8323+#else
8324+
8325+#define IOREMAP(x, y) \
8326+ ioremap_nocache((x), (y));
8327+
8328+#define IOUNMAP(x) \
8329+ iounmap((x));
8330+
8331+#define IOREAD32(x) \
8332+ ioread32((x));
8333+
8334+#define IOWRITE32(x, y) \
8335+ iowrite32((x), (y));
8336+
8337+#endif
8338+
8339+/*********************************************
8340+ * Define IPC_Base_Address and offsets
8341+ ********************************************/
8342+#define IPC_BASE_ADDRESS 0xFF11C000
8343+#define I2C_SER_BUS 0xFF12B000
8344+#define DFU_LOAD_ADDR 0xFFFC0000
8345+/*256K storage size for loading the FW image.*/
8346+#define MAX_FW_SIZE 262144
8347+
8348+#define NOP_CMD 0x00
8349+#define WRITE_CMD 0x01
8350+#define READ_CMD 0x02
8351+
8352+/* IPC2 offset addresses */
8353+#define IPC_MAX_ADDRESS 0x100
8354+/* I2C offset addresses - Confirm this */
8355+#define I2C_MAX_ADDRESS 0x10
8356+/* Offsets for CTRL_REG_ADDR and CTRL_REG_DATA */
8357+#define CTRL_REG_ADDR 0x00
8358+#define CTRL_REG_DATA 0x04
8359+#define I2C_MAX_ADDRESS 0x10
8360+
8361+#define IPC_CMD 0x00
8362+#define IPC_STS 0x04
8363+#define IPC_SPTR 0x08
8364+#define IPC_DPTR 0x0C
8365+#define IPC_WBUF 0x80
8366+#define IPC_RBUF 0x90
8367+
8368+#define MAX_INSTANCES_ALLOWED 1
8369+
8370+union ipc_sts {
8371+ struct {
8372+ u32 busy:1;
8373+ u32 error:1;
8374+ u32 rfu1:2;
8375+ u32 cmd_id:4;
8376+ u32 initiator_id:8;
8377+ u32 error_code:8;
8378+ u32 rfu3:8;
8379+ } ipc_sts_parts;
8380+ u32 ipc_sts_data;
8381+};
8382+
8383+union ipc_fw_cmd {
8384+ struct {
8385+ u32 cmd:8;
8386+ u32 ioc:1;
8387+ u32 rfu1:3;
8388+ u32 cmd_ID:4;
8389+ u32 size:8;
8390+ u32 rfu2:8;
8391+ } cmd_parts;
8392+ u32 cmd_data;
8393+};
8394+
8395+struct ipc_intr {
8396+ u8 cmd;
8397+ u32 data;
8398+
8399+};
8400+
8401+struct ipc_work_struct{
8402+ struct work_struct ipc_work;
8403+ unsigned int cmd_id;
8404+};
8405+
8406+int ipc_process_interrupt(struct ipc_intr intr_data);
8407+int init_ipc_driver(void);
8408+int de_init_ipc_driver(void);
8409+static int cache_mrst_firmware_version(void);
8410+static void mrst_pmic_read_handler(struct work_struct *work);
8411+static DECLARE_DELAYED_WORK(mrst_ipc, mrst_pmic_read_handler);
8412+
8413+#endif
8414Index: linux-2.6.33/drivers/input/keyboard/gpio_keys.c
8415===================================================================
8416--- linux-2.6.33.orig/drivers/input/keyboard/gpio_keys.c
8417+++ linux-2.6.33/drivers/input/keyboard/gpio_keys.c
8418@@ -45,6 +45,9 @@ static void gpio_keys_report_event(struc
8419 int state = (gpio_get_value(button->gpio) ? 1 : 0) ^ button->active_low;
8420
8421 input_event(input, type, button->code, !!state);
8422+ /* if button disabled auto repeat */
8423+	/* if this button opted out of auto repeat, report an immediate release */
8424+ input_event(input, type, button->code, 0);
8425 input_sync(input);
8426 }
8427
8428Index: linux-2.6.33/include/linux/gpio_keys.h
8429===================================================================
8430--- linux-2.6.33.orig/include/linux/gpio_keys.h
8431+++ linux-2.6.33/include/linux/gpio_keys.h
8432@@ -10,6 +10,7 @@ struct gpio_keys_button {
8433 int type; /* input event type (EV_KEY, EV_SW) */
8434 int wakeup; /* configure the button as a wake-up source */
8435 int debounce_interval; /* debounce ticks interval in msecs */
8436+	unsigned int norep:1;	/* disable auto repeat for this button */
8437 };
8438
8439 struct gpio_keys_platform_data {
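For reference, a board file could opt an individual button out of auto repeat with the new flag roughly as follows. This is an illustrative sketch only, not part of the patch; the GPIO number, key code, and device names are hypothetical.

#include <linux/kernel.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/platform_device.h>

static struct gpio_keys_button board_buttons[] = {
	{
		.code			= KEY_POWER,
		.gpio			= 65,	/* hypothetical GPIO number */
		.active_low		= 1,
		.desc			= "power",
		.type			= EV_KEY,
		.wakeup			= 1,
		.debounce_interval	= 20,
		.norep			= 1,	/* never auto-repeat this key */
	},
};

static struct gpio_keys_platform_data board_button_data = {
	.buttons	= board_buttons,
	.nbuttons	= ARRAY_SIZE(board_buttons),
	.rep		= 1,	/* EV_REP stays enabled for other buttons */
};

static struct platform_device board_button_device = {
	.name	= "gpio-keys",
	.id	= -1,
	.dev	= { .platform_data = &board_button_data },
};

With .rep set, the input core still auto-repeats every button except those marked norep, which is exactly the case the gpio_keys_report_event() change above handles.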
8440Index: linux-2.6.33/drivers/gpio/Kconfig
8441===================================================================
8442--- linux-2.6.33.orig/drivers/gpio/Kconfig
8443+++ linux-2.6.33/drivers/gpio/Kconfig
8444@@ -224,6 +224,12 @@ config GPIO_TIMBERDALE
8445
8446 comment "SPI GPIO expanders:"
8447
8448+config GPIO_LANGWELL_PMIC
8449+ bool "Intel Moorestown Platform Langwell GPIO support"
8450+ depends on SPI_MASTER
8451+ help
8452+ Say Y here to support Intel Moorestown platform GPIO.
8453+
8454 config GPIO_MAX7301
8455 tristate "Maxim MAX7301 GPIO expander"
8456 depends on SPI_MASTER
8457Index: linux-2.6.33/drivers/gpio/Makefile
8458===================================================================
8459--- linux-2.6.33.orig/drivers/gpio/Makefile
8460+++ linux-2.6.33/drivers/gpio/Makefile
8461@@ -7,6 +7,7 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o
8462 obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
8463 obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
8464 obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
8465+obj-$(CONFIG_GPIO_LANGWELL_PMIC) += langwell_pmic_gpio.o
8466 obj-$(CONFIG_GPIO_MAX7301) += max7301.o
8467 obj-$(CONFIG_GPIO_MAX732X) += max732x.o
8468 obj-$(CONFIG_GPIO_MC33880) += mc33880.o
8469Index: linux-2.6.33/drivers/gpio/langwell_pmic_gpio.c
8470===================================================================
8471--- /dev/null
8472+++ linux-2.6.33/drivers/gpio/langwell_pmic_gpio.c
8473@@ -0,0 +1,331 @@
8474+/* Moorestown PMIC GPIO (access through SPI and IPC) driver
8475+ * Copyright (c) 2008 - 2009, Intel Corporation.
8476+ *
8477+ * This program is free software; you can redistribute it and/or modify
8478+ * it under the terms of the GNU General Public License version 2 as
8479+ * published by the Free Software Foundation.
8480+ *
8481+ * This program is distributed in the hope that it will be useful,
8482+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
8483+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8484+ * GNU General Public License for more details.
8485+ *
8486+ * You should have received a copy of the GNU General Public License
8487+ * along with this program; if not, write to the Free Software
8488+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
8489+ */
8490+
8491+/* Supports:
8492+ * Moorestown platform pmic chip
8493+ */
8494+
8495+#include <linux/module.h>
8496+#include <linux/kernel.h>
8497+#include <linux/interrupt.h>
8498+#include <linux/delay.h>
8499+#include <linux/stddef.h>
8500+#include <linux/ioport.h>
8501+#include <linux/init.h>
8502+#include <linux/io.h>
8503+#include <linux/device.h>
8504+#include <linux/pci.h>
8505+#include <linux/spi/spi.h>
8506+#include <linux/spi/langwell_pmic_gpio.h>
8507+#include <linux/gpio.h>
8508+#include <asm/ipc_defs.h>
8509+
8510+/* register offset that IPC driver should use
8511+ * 8 GPIO + 8 GPOSW + 8GPO
8512+ */
8513+enum pmic_gpio_register {
8514+ GPIO0 = 0xE0,
8515+ GPIO7 = 0xE7,
8516+ GPIOINT = 0xE8,
8517+ GPOSWCTL0 = 0xEC,
8518+ GPOSWCTL5 = 0xF1,
8519+ GPO = 0xF4,
8520+};
8521+
8522+/* bits definitions for GPIO & GPOSW */
8523+#define GPIO_DRV 0x01
8524+#define GPIO_DIR 0x02
8525+#define GPIO_DIN 0x04
8526+#define GPIO_DOU 0x08
8527+#define GPIO_INTCTL 0x30
8528+#define GPIO_DBC 0xc0
8529+
8530+#define GPOSW_DRV 0x01
8531+#define GPOSW_DOU 0x08
8532+#define GPOSW_RDRV 0x30
8533+
8534+/* to schedule ipc read_modify in work queue for irq context */
8535+#define MAX_IPC_QUEUE 16
8536+struct ipc_cmd_queue {
8537+ struct ipc_pmic_mod_reg_data cmd[MAX_IPC_QUEUE];
8538+ struct work_struct work;
8539+};
8540+
8541+struct pmic_gpio {
8542+ struct gpio_chip chip;
8543+ struct ipc_cmd_queue cmd_queue;
8544+ void *gpiointr;
8545+ int irq;
8546+ struct spi_device *spi;
8547+ unsigned irq_base;
8548+};
8549+
8550+static int ipc_read_char(u16 offset)
8551+{
8552+ struct ipc_pmic_reg_data tmp;
8553+ tmp.ioc = 0;
8554+ tmp.pmic_reg_data[0].register_address = offset;
8555+ tmp.num_entries = 1;
8556+ if (ipc_pmic_register_read(&tmp)) {
8557+ printk(KERN_ERR "%s: IPC read error\n", __func__);
8558+ return 0;
8559+ }
8560+ return tmp.pmic_reg_data[0].value;
8561+}
8562+
8563+static int ipc_modify_char(u16 offset, u8 value, u8 mask)
8564+{
8565+ struct ipc_pmic_mod_reg_data tmp;
8566+
8567+ tmp.ioc = 0;
8568+ tmp.pmic_mod_reg_data[0].register_address = offset;
8569+ tmp.pmic_mod_reg_data[0].value = value;
8570+ tmp.pmic_mod_reg_data[0].bit_map = mask;
8571+ tmp.num_entries = 1;
8572+ return ipc_pmic_register_read_modify(&tmp);
8573+}
8574+
8575+static int queue_ipc_modify_char(struct pmic_gpio *pg,
8576+ u16 offset, u8 value, u8 mask)
8577+{
8578+ struct ipc_pmic_mod_reg_data *tmp;
8579+ int i;
8580+
8581+ for (i = 0; i < MAX_IPC_QUEUE; i ++) {
8582+ tmp = &pg->cmd_queue.cmd[i];
8583+ if (tmp->num_entries)
8584+ continue;
8585+ tmp->ioc = 0;
8586+ tmp->pmic_mod_reg_data[0].register_address = offset;
8587+ tmp->pmic_mod_reg_data[0].value = value;
8588+ tmp->pmic_mod_reg_data[0].bit_map = mask;
8589+ tmp->num_entries=1;
8590+ return i;
8591+ }
8592+ return -1;
8593+}
8594+
8595+static void ipc_modify_char_work(struct work_struct *work)
8596+{
8597+ struct pmic_gpio *pg =
8598+ container_of(work, struct pmic_gpio, cmd_queue.work);
8599+ struct ipc_pmic_mod_reg_data *tmp;
8600+ int i;
8601+
8602+ for (i = 0; i < MAX_IPC_QUEUE; i ++) {
8603+ tmp = &pg->cmd_queue.cmd[i];
8604+ if (tmp->num_entries) {
8605+ ipc_pmic_register_read_modify(tmp);
8606+ tmp->num_entries = 0;
8607+ }
8608+ }
8609+}
8610+
8611+static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
8612+{
8613+ if (offset > 8) {
8614+ printk(KERN_ERR
8615+			"%s: only pins 0-7 support input\n", __func__);
8616+		return -1; /* only the first 8 GPIOs can be used as input */
8617+ }
8618+ return ipc_modify_char(GPIO0 + offset, GPIO_DIR, GPIO_DIR);
8619+}
8620+
8621+static int pmic_gpio_direction_output(struct gpio_chip *chip,
8622+ unsigned offset, int value)
8623+{
8624+ int rc = 0;
8625+
8626+ if (offset < 8)/* it is GPIO */
8627+ rc = ipc_modify_char(GPIO0 + offset,
8628+ GPIO_DRV | (value ? GPIO_DOU : 0),
8629+ GPIO_DRV | GPIO_DOU | GPIO_DIR);
8630+ else if (offset < 16)/* it is GPOSW */
8631+ rc = ipc_modify_char(GPOSWCTL0 + offset - 8,
8632+ GPOSW_DRV | (value ? GPOSW_DOU : 0),
8633+ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
8634+ else if (offset < 24)/* it is GPO */
8635+ rc = ipc_modify_char(GPO, value ? 1 << (offset - 16) : 0,
8636+ 1 << (offset - 16));
8637+
8638+ return rc;
8639+}
8640+
8641+static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
8642+{
8643+	/* only the first 8 GPIOs can be used as input */
8644+ if (offset > 8) {
8645+ printk(KERN_ERR
8646+			"%s: only pins 0-7 support input\n", __func__);
8647+ return -1;
8648+ }
8649+ return ipc_read_char(GPIO0 + offset) & GPIO_DIN;
8650+}
8651+
8652+static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
8653+{
8654+ if (offset < 8)/* it is GPIO */
8655+ ipc_modify_char(GPIO0 + offset,
8656+ GPIO_DRV | (value ? GPIO_DOU : 0),
8657+ GPIO_DRV | GPIO_DOU);
8658+ else if (offset < 16)/* it is GPOSW */
8659+ ipc_modify_char(GPOSWCTL0 + offset - 8,
8660+ GPOSW_DRV | (value ? GPOSW_DOU : 0),
8661+ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
8662+ else if (offset < 24)/* it is GPO */
8663+ ipc_modify_char(GPO, value ? 1 << (offset - 16) : 0,
8664+ 1 << (offset - 16));
8665+}
8666+
8667+static int pmic_irq_type(unsigned irq, unsigned type)
8668+{
8669+ struct pmic_gpio *pg = get_irq_chip_data(irq);
8670+ u32 gpio = irq - pg->irq_base;
8671+
8672+ if (gpio < 0 || gpio > pg->chip.ngpio)
8673+ return -EINVAL;
8674+
8675+ if (type & IRQ_TYPE_EDGE_RISING)
8676+ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x20, 0x20);
8677+ else
8678+ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x00, 0x20);
8679+
8680+ if (type & IRQ_TYPE_EDGE_FALLING)
8681+ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x10, 0x10);
8682+ else
8683+ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x00, 0x10);
8684+
8685+ schedule_work(&pg->cmd_queue.work);
8686+ return 0;
8687+};
8688+
8689+static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
8690+{
8691+ struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
8692+
8693+ return pg->irq_base + offset;
8694+}
8695+
8696+/* the gpiointr register is read-clear, so just do nothing. */
8697+static void pmic_irq_unmask(unsigned irq)
8698+{
8699+};
8700+
8701+static void pmic_irq_mask(unsigned irq)
8702+{
8703+};
8704+
8705+static struct irq_chip pmic_irqchip = {
8706+ .name = "PMIC-GPIO",
8707+ .mask = pmic_irq_mask,
8708+ .unmask = pmic_irq_unmask,
8709+ .set_type = pmic_irq_type,
8710+};
8711+
8712+static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
8713+{
8714+ struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
8715+ u8 intsts = *((u8 *)pg->gpiointr + 4);
8716+ int gpio;
8717+
8718+ for (gpio = 0; gpio < 8; gpio++) {
8719+ if (intsts & (1 << gpio)) {
8720+ pr_debug("pmic pin %d triggered\n", gpio);
8721+ generic_handle_irq(pg->irq_base + gpio);
8722+ }
8723+ }
8724+ desc->chip->eoi(irq);
8725+}
8726+
8727+static int __devinit pmic_gpio_probe(struct spi_device *spi)
8728+{
8729+ struct pmic_gpio *pg;
8730+ struct langwell_pmic_gpio_platform_data *pdata;
8731+ int retval;
8732+ int i;
8733+
8734+ printk(KERN_INFO "%s: PMIC GPIO driver loaded.\n", __func__);
8735+
8736+ pdata = spi->dev.platform_data;
8737+ if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
8738+ dev_dbg(&spi->dev, "incorrect or missing platform data\n");
8739+ return -EINVAL;
8740+ }
8741+
8742+ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
8743+ if (!pg)
8744+ return -ENOMEM;
8745+
8746+ dev_set_drvdata(&spi->dev, pg);
8747+
8748+ pg->irq = spi->irq;
8749+ /* setting up SRAM mapping for GPIOINT register */
8750+ pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
8751+ if (!pg->gpiointr) {
8752+ printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__);
8753+ retval = -EINVAL;
8754+ goto err2;
8755+ }
8756+ pg->irq_base = pdata->irq_base;
8757+ pg->chip.label = "langwell_pmic";
8758+ pg->chip.direction_input = pmic_gpio_direction_input;
8759+ pg->chip.direction_output = pmic_gpio_direction_output;
8760+ pg->chip.get = pmic_gpio_get;
8761+ pg->chip.set = pmic_gpio_set;
8762+ pg->chip.to_irq = pmic_gpio_to_irq;
8763+ pg->chip.base = pdata->gpio_base;
8764+ pg->chip.ngpio = 24;
8765+ pg->chip.can_sleep = 1;
8766+ pg->chip.dev = &spi->dev;
8767+ retval = gpiochip_add(&pg->chip);
8768+ if (retval) {
8769+ printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
8770+ goto err;
8771+ }
8772+ set_irq_data(pg->irq, pg);
8773+ set_irq_chained_handler(pg->irq, pmic_irq_handler);
8774+ for (i = 0; i < 8; i++) {
8775+ set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
8776+ handle_simple_irq, "demux");
8777+ set_irq_chip_data(i + pg->irq_base, pg);
8778+ }
8779+ INIT_WORK(&pg->cmd_queue.work, ipc_modify_char_work);
8780+ return 0;
8781+err:
8782+ iounmap(pg->gpiointr);
8783+err2:
8784+ kfree(pg);
8785+ return retval;
8786+}
8787+
8788+static struct spi_driver pmic_gpio_driver = {
8789+ .driver = {
8790+ .name = "pmic_gpio",
8791+ .owner = THIS_MODULE,
8792+ },
8793+ .probe = pmic_gpio_probe,
8794+};
8795+
8796+static int __init pmic_gpio_init(void)
8797+{
8798+ return spi_register_driver(&pmic_gpio_driver);
8799+}
8800+
8801+/* register after spi postcore initcall and before
8802+ * subsys initcalls that may rely on these GPIOs
8803+ */
8804+subsys_initcall(pmic_gpio_init);
8805Index: linux-2.6.33/include/linux/spi/langwell_pmic_gpio.h
8806===================================================================
8807--- /dev/null
8808+++ linux-2.6.33/include/linux/spi/langwell_pmic_gpio.h
8809@@ -0,0 +1,15 @@
8810+#ifndef LINUX_SPI_LANGWELL_PMIC_H
8811+#define LINUX_SPI_LANGWELL_PMIC_H
8812+
8813+struct langwell_pmic_gpio_platform_data {
8814+ /* the first IRQ of the chip */
8815+ unsigned irq_base;
8816+ /* number assigned to the first GPIO */
8817+ unsigned gpio_base;
8818+ /* sram address for gpiointr register, the langwell chip will map
8819+ * the PMIC spi GPIO expander's GPIOINTR register in sram.
8820+ */
8821+ unsigned gpiointr;
8822+};
8823+
8824+#endif
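For reference, platform setup code could hand this data to the driver above through the SPI core roughly as below. This is an illustrative sketch only, not part of the patch; the base numbers, SRAM address, bus/chip-select values, and IRQ are all hypothetical.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/langwell_pmic_gpio.h>

static struct langwell_pmic_gpio_platform_data pmic_gpio_pdata = {
	.gpio_base	= 64,		/* first gpiolib number for the 24 pins */
	.irq_base	= 256,		/* first of the 8 demuxed GPIO irqs */
	.gpiointr	= 0xffd00040,	/* SRAM mirror of GPIOINTR, hypothetical */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "pmic_gpio",	/* matches pmic_gpio_driver above */
		.max_speed_hz	= 3125000,
		.bus_num	= 0,
		.chip_select	= 0,
		.irq		= 24,	/* parent PMIC interrupt, hypothetical */
		.platform_data	= &pmic_gpio_pdata,
	},
};

static int __init board_register_spi(void)
{
	/* registered once from board init code */
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}

pmic_gpio_probe() rejects a missing gpio_base or irq_base, so all three fields need to be filled in by the board code.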
8825Index: linux-2.6.33/drivers/gpio/pca953x.c
8826===================================================================
8827--- linux-2.6.33.orig/drivers/gpio/pca953x.c
8828+++ linux-2.6.33/drivers/gpio/pca953x.c
8829@@ -14,6 +14,7 @@
8830 #include <linux/module.h>
8831 #include <linux/init.h>
8832 #include <linux/gpio.h>
8833+#include <linux/interrupt.h>
8834 #include <linux/i2c.h>
8835 #include <linux/i2c/pca953x.h>
8836 #ifdef CONFIG_OF_GPIO
8837@@ -50,6 +51,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
8838
8839 struct pca953x_chip {
8840 unsigned gpio_start;
8841+ unsigned irq_base;
8842 uint16_t reg_output;
8843 uint16_t reg_direction;
8844
8845@@ -182,6 +184,13 @@ static void pca953x_gpio_set_value(struc
8846 chip->reg_output = reg_val;
8847 }
8848
8849+static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
8850+{
8851+ struct pca953x_chip *chip = container_of(gc, struct pca953x_chip,
8852+ gpio_chip);
8853+ return chip->irq_base + offset;
8854+}
8855+
8856 static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
8857 {
8858 struct gpio_chip *gc;
8859@@ -192,6 +201,7 @@ static void pca953x_setup_gpio(struct pc
8860 gc->direction_output = pca953x_gpio_direction_output;
8861 gc->get = pca953x_gpio_get_value;
8862 gc->set = pca953x_gpio_set_value;
8863+ gc->to_irq = pca953x_gpio_to_irq;
8864 gc->can_sleep = 1;
8865
8866 gc->base = chip->gpio_start;
8867@@ -250,6 +260,39 @@ pca953x_get_alt_pdata(struct i2c_client
8868 }
8869 #endif
8870
8871+static void pca953x_irq_unmask(unsigned irq)
8872+{
8873+}
8874+
8875+static void pca953x_irq_mask(unsigned irq)
8876+{
8877+}
8878+
8879+static struct irq_chip pca953x_irqchip = {
8880+ .name = "pca953x",
8881+ .mask = pca953x_irq_mask,
8882+ .unmask = pca953x_irq_unmask,
8883+};
8884+
8885+static void pca953x_irq_handler(unsigned irq, struct irq_desc *desc)
8886+{
8887+ struct pca953x_chip *chip = (struct pca953x_chip *)get_irq_data(irq);
8888+ int i;
8889+
8890+ if (desc->chip->ack)
8891+ desc->chip->ack(irq);
8892+ /* we must call all sub-irqs, since there is no way to read
8893+ * I2C gpio expander's status in irq context. The driver itself
8894+	 * is responsible for checking whether the irq is for it.
8895+ */
8896+ for (i = 0; i < chip->gpio_chip.ngpio; i++)
8897+ if (chip->reg_direction & (1u << i))
8898+ generic_handle_irq(chip->irq_base + i);
8899+
8900+ if (desc->chip->unmask)
8901+ desc->chip->unmask(irq);
8902+}
8903+
8904 static int __devinit pca953x_probe(struct i2c_client *client,
8905 const struct i2c_device_id *id)
8906 {
8907@@ -283,6 +326,8 @@ static int __devinit pca953x_probe(struc
8908
8909 chip->names = pdata->names;
8910
8911+ chip->irq_base = pdata->irq_base;
8912+
8913 /* initialize cached registers from their original values.
8914 * we can't share this chip with another i2c master.
8915 */
8916@@ -314,6 +359,21 @@ static int __devinit pca953x_probe(struc
8917 }
8918
8919 i2c_set_clientdata(client, chip);
8920+
8921+ if (chip->irq_base != (unsigned)-1) {
8922+ int i;
8923+
8924+ set_irq_type(client->irq,
8925+ IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING);
8926+ set_irq_data(client->irq, chip);
8927+ for (i = 0; i < chip->gpio_chip.ngpio; i++) {
8928+ set_irq_chip(i + chip->irq_base, &pca953x_irqchip);
8929+ __set_irq_handler(i + chip->irq_base,
8930+ handle_simple_irq, 0, "demux");
8931+ set_irq_chip_data(i + chip->irq_base, chip);
8932+ }
8933+ set_irq_chained_handler(client->irq, pca953x_irq_handler);
8934+ }
8935 return 0;
8936
8937 out_failed:
8938Index: linux-2.6.33/include/linux/i2c/pca953x.h
8939===================================================================
8940--- linux-2.6.33.orig/include/linux/i2c/pca953x.h
8941+++ linux-2.6.33/include/linux/i2c/pca953x.h
8942@@ -1,6 +1,8 @@
8943 /* platform data for the PCA9539 16-bit I/O expander driver */
8944
8945 struct pca953x_platform_data {
8946+ /* number of the first IRQ */
8947+ unsigned irq_base;
8948 /* number of the first GPIO */
8949 unsigned gpio_base;
8950
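For reference, board code could wire the new irq_base field up roughly as follows. This is an illustrative sketch only, not part of the patch; the GPIO/IRQ numbers and I2C address are hypothetical.

#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>

static struct pca953x_platform_data board_expander_pdata = {
	.gpio_base	= 96,	/* first gpiolib number for the expander */
	.irq_base	= 320,	/* first of the demuxed per-pin irqs */
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("pca9539", 0x74),
		.irq		= 25,	/* expander INT line, hypothetical */
		.platform_data	= &board_expander_pdata,
	},
};

Per the pca953x_probe() change above, a board that does not want the chained per-pin IRQs should pass irq_base as (unsigned)-1 so the IRQ demux setup is skipped.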
8951Index: linux-2.6.33/drivers/input/keyboard/Kconfig
8952===================================================================
8953--- linux-2.6.33.orig/drivers/input/keyboard/Kconfig
8954+++ linux-2.6.33/drivers/input/keyboard/Kconfig
8955@@ -73,7 +73,7 @@ config KEYBOARD_ATKBD
8956 default y
8957 select SERIO
8958 select SERIO_LIBPS2
8959- select SERIO_I8042 if X86
8960+ select SERIO_I8042 if X86 && !X86_MRST
8961 select SERIO_GSCPS2 if GSC
8962 help
8963 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
8964Index: linux-2.6.33/drivers/input/mouse/Kconfig
8965===================================================================
8966--- linux-2.6.33.orig/drivers/input/mouse/Kconfig
8967+++ linux-2.6.33/drivers/input/mouse/Kconfig
8968@@ -17,7 +17,7 @@ config MOUSE_PS2
8969 default y
8970 select SERIO
8971 select SERIO_LIBPS2
8972- select SERIO_I8042 if X86
8973+ select SERIO_I8042 if X86 && !X86_MRST
8974 select SERIO_GSCPS2 if GSC
8975 help
8976 Say Y here if you have a PS/2 mouse connected to your system. This
8977Index: linux-2.6.33/kernel/time/tick-broadcast.c
8978===================================================================
8979--- linux-2.6.33.orig/kernel/time/tick-broadcast.c
8980+++ linux-2.6.33/kernel/time/tick-broadcast.c
8981@@ -214,10 +214,13 @@ static void tick_do_broadcast_on_off(uns
8982
8983 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
8984
8985+ bc = tick_broadcast_device.evtdev;
8986+ if (!bc)
8987+ goto out;
8988+
8989 cpu = smp_processor_id();
8990 td = &per_cpu(tick_cpu_device, cpu);
8991 dev = td->evtdev;
8992- bc = tick_broadcast_device.evtdev;
8993
8994 /*
8995 * Is the device not affected by the powerstate ?
8996@@ -467,6 +470,9 @@ void tick_broadcast_oneshot_control(unsi
8997 goto out;
8998
8999 bc = tick_broadcast_device.evtdev;
9000+ if (!bc)
9001+ goto out;
9002+
9003 cpu = smp_processor_id();
9004 td = &per_cpu(tick_cpu_device, cpu);
9005 dev = td->evtdev;
9006Index: linux-2.6.33/drivers/usb/core/hcd.h
9007===================================================================
9008--- linux-2.6.33.orig/drivers/usb/core/hcd.h
9009+++ linux-2.6.33/drivers/usb/core/hcd.h
9010@@ -104,6 +104,9 @@ struct usb_hcd {
9011 unsigned wireless:1; /* Wireless USB HCD */
9012 unsigned authorized_default:1;
9013 unsigned has_tt:1; /* Integrated TT in root hub */
9014+ unsigned has_sram:1; /* Local SRAM for caching */
9015+ unsigned sram_no_payload:1; /* sram not for payload */
9016+ unsigned lpm_cap:1; /* LPM capable */
9017
9018 int irq; /* irq allocated */
9019 void __iomem *regs; /* device memory/io */
9020@@ -148,6 +151,13 @@ struct usb_hcd {
9021 * (ohci 32, uhci 1024, ehci 256/512/1024).
9022 */
9023
9024+#ifdef CONFIG_USB_OTG
9025+ /* some otg HCDs need this to get USB_DEVICE_ADD and USB_DEVICE_REMOVE
9026+	 * from the root hub. We do not want to use the USB notification chain,
9027+	 * since it would be overkill to use a high-level notification here.
9028+ */
9029+ void (*otg_notify) (struct usb_device *udev, unsigned action);
9030+#endif
9031 /* The HC driver's private data is stored at the end of
9032 * this structure.
9033 */
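For reference, an OTG-capable host controller driver could install the new hook roughly as follows. This is an illustrative sketch only, not part of the patch; the function names and messages are hypothetical.

#include <linux/usb.h>
#include "../core/hcd.h"	/* struct usb_hcd, as included elsewhere in this tree */

#ifdef CONFIG_USB_OTG
static void my_otg_notify(struct usb_device *udev, unsigned action)
{
	switch (action) {
	case USB_DEVICE_ADD:
		dev_dbg(&udev->dev, "device enumerated below OTG root hub\n");
		break;
	case USB_DEVICE_REMOVE:
		dev_dbg(&udev->dev, "device removed, host may end OTG session\n");
		break;
	}
}
#endif

static void my_hcd_init_otg(struct usb_hcd *hcd)
{
#ifdef CONFIG_USB_OTG
	/* called once while setting up the hcd, e.g. from the PCI probe path */
	hcd->otg_notify = my_otg_notify;
#endif
}

The hub.c change below then calls this hook on USB_DEVICE_ADD and USB_DEVICE_REMOVE without going through the global USB notifier chain.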
9034Index: linux-2.6.33/drivers/usb/core/hub.c
9035===================================================================
9036--- linux-2.6.33.orig/drivers/usb/core/hub.c
9037+++ linux-2.6.33/drivers/usb/core/hub.c
9038@@ -1563,6 +1563,24 @@ static void hub_free_dev(struct usb_devi
9039 hcd->driver->free_dev(hcd, udev);
9040 }
9041
9042+#ifdef CONFIG_USB_OTG
9043+
9044+static void otg_notify(struct usb_device *udev, unsigned action)
9045+{
9046+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
9047+
9048+ if (hcd->otg_notify)
9049+ hcd->otg_notify(udev, action);
9050+}
9051+
9052+#else
9053+
9054+static inline void otg_notify(struct usb_device *udev, unsigned action)
9055+{
9056+}
9057+
9058+#endif
9059+
9060 /**
9061 * usb_disconnect - disconnect a device (usbcore-internal)
9062 * @pdev: pointer to device being disconnected
9063@@ -1620,7 +1638,7 @@ void usb_disconnect(struct usb_device **
9064 * notifier chain (used by usbfs and possibly others).
9065 */
9066 device_del(&udev->dev);
9067-
9068+ otg_notify(udev, USB_DEVICE_REMOVE);
9069 /* Free the device number and delete the parent's children[]
9070 * (or root_hub) pointer.
9071 */
9072@@ -1833,6 +1851,7 @@ int usb_new_device(struct usb_device *ud
9073 * notifier chain (used by usbfs and possibly others).
9074 */
9075 err = device_add(&udev->dev);
9076+ otg_notify(udev, USB_DEVICE_ADD);
9077 if (err) {
9078 dev_err(&udev->dev, "can't device_add, error %d\n", err);
9079 goto fail;
9080Index: linux-2.6.33/drivers/usb/core/usb.h
9081===================================================================
9082--- linux-2.6.33.orig/drivers/usb/core/usb.h
9083+++ linux-2.6.33/drivers/usb/core/usb.h
9084@@ -178,4 +178,3 @@ extern void usb_notify_add_device(struct
9085 extern void usb_notify_remove_device(struct usb_device *udev);
9086 extern void usb_notify_add_bus(struct usb_bus *ubus);
9087 extern void usb_notify_remove_bus(struct usb_bus *ubus);
9088-
9089Index: linux-2.6.33/drivers/usb/host/ehci-hcd.c
9090===================================================================
9091--- linux-2.6.33.orig/drivers/usb/host/ehci-hcd.c
9092+++ linux-2.6.33/drivers/usb/host/ehci-hcd.c
9093@@ -35,6 +35,7 @@
9094 #include <linux/moduleparam.h>
9095 #include <linux/dma-mapping.h>
9096 #include <linux/debugfs.h>
9097+#include <linux/uaccess.h>
9098
9099 #include "../core/hcd.h"
9100
9101@@ -43,6 +44,8 @@
9102 #include <asm/irq.h>
9103 #include <asm/system.h>
9104 #include <asm/unaligned.h>
9105+#include <linux/usb/otg.h>
9106+#include <linux/usb/langwell_otg.h>
9107
9108 /*-------------------------------------------------------------------------*/
9109
9110@@ -101,6 +104,11 @@ static int ignore_oc = 0;
9111 module_param (ignore_oc, bool, S_IRUGO);
9112 MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
9113
9114+/* for link power management(LPM) feature */
9115+static unsigned int hird;
9116+module_param(hird, int, S_IRUGO);
9117+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
9118+
9119 #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
9120
9121 /*-------------------------------------------------------------------------*/
9122@@ -305,6 +313,7 @@ static void end_unlink_async(struct ehci
9123 static void ehci_work(struct ehci_hcd *ehci);
9124
9125 #include "ehci-hub.c"
9126+#include "ehci-lpm.c"
9127 #include "ehci-mem.c"
9128 #include "ehci-q.c"
9129 #include "ehci-sched.c"
9130@@ -501,7 +510,8 @@ static void ehci_stop (struct usb_hcd *h
9131 ehci_work (ehci);
9132 spin_unlock_irq (&ehci->lock);
9133 ehci_mem_cleanup (ehci);
9134-
9135+ if (hcd->has_sram)
9136+ sram_deinit(hcd);
9137 #ifdef EHCI_STATS
9138 ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
9139 ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
9140@@ -577,6 +587,17 @@ static int ehci_init(struct usb_hcd *hcd
9141 if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
9142 log2_irq_thresh = 0;
9143 temp = 1 << (16 + log2_irq_thresh);
9144+ if (HCC_32FRAME_PERIODIC_LIST(hcc_params))
9145+ ehci_dbg(ehci, "32 frame periodic list capable\n");
9146+ if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
9147+ ehci_dbg(ehci, "enable per-port change event %d\n", park);
9148+ temp |= CMD_PPCEE;
9149+ }
9150+ if (HCC_HW_PREFETCH(hcc_params)) {
9151+ ehci_dbg(ehci, "HW prefetch capable %d\n", park);
9152+ temp |= (CMD_ASPE | CMD_PSPE);
9153+ }
9154+
9155 if (HCC_CANPARK(hcc_params)) {
9156 /* HW default park == 3, on hardware that supports it (like
9157 * NVidia and ALI silicon), maximizes throughput on the async
9158@@ -590,7 +611,7 @@ static int ehci_init(struct usb_hcd *hcd
9159 temp |= CMD_PARK;
9160 temp |= park << 8;
9161 }
9162- ehci_dbg(ehci, "park %d\n", park);
9163+ ehci_dbg(ehci, "park %d ", park);
9164 }
9165 if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
9166 /* periodic schedule size can be smaller than default */
9167@@ -603,6 +624,17 @@ static int ehci_init(struct usb_hcd *hcd
9168 default: BUG();
9169 }
9170 }
9171+ if (HCC_LPM(hcc_params)) {
9172+ /* support link power management EHCI 1.1 addendum */
9173+ ehci_dbg(ehci, "lpm\n");
9174+ hcd->lpm_cap = 1;
9175+ if (hird > 0xf) {
9176+ ehci_dbg(ehci, "hird %d invalid, use default 0",
9177+ hird);
9178+ hird = 0;
9179+ }
9180+ temp |= hird << 24;
9181+ }
9182 ehci->command = temp;
9183
9184 /* Accept arbitrarily long scatter-gather lists */
9185@@ -840,6 +872,7 @@ static int ehci_urb_enqueue (
9186 ) {
9187 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
9188 struct list_head qtd_list;
9189+ int status;
9190
9191 INIT_LIST_HEAD (&qtd_list);
9192
9193@@ -855,7 +888,16 @@ static int ehci_urb_enqueue (
9194 default:
9195 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
9196 return -ENOMEM;
9197- return submit_async(ehci, urb, &qtd_list, mem_flags);
9198+ status = submit_async(ehci, urb, &qtd_list, mem_flags);
9199+
9200+ /* check device LPM cap after set address */
9201+ if (usb_pipecontrol(urb->pipe)) {
9202+ if (((struct usb_ctrlrequest *)urb->setup_packet)
9203+ ->bRequest == USB_REQ_SET_ADDRESS &&
9204+ ehci_to_hcd(ehci)->lpm_cap)
9205+ ehci_lpm_check(ehci, urb->dev->portnum);
9206+ }
9207+ return status;
9208
9209 case PIPE_INTERRUPT:
9210 if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
9211@@ -1101,6 +1143,10 @@ MODULE_LICENSE ("GPL");
9212 #ifdef CONFIG_PCI
9213 #include "ehci-pci.c"
9214 #define PCI_DRIVER ehci_pci_driver
9215+#ifdef CONFIG_USB_LANGWELL_OTG
9216+#include "ehci-langwell-pci.c"
9217+#define LNW_OTG_HOST_DRIVER ehci_otg_driver
9218+#endif
9219 #endif
9220
9221 #ifdef CONFIG_USB_EHCI_FSL
9222@@ -1213,8 +1259,19 @@ static int __init ehci_hcd_init(void)
9223 if (retval < 0)
9224 goto clean3;
9225 #endif
9226+
9227+#ifdef LNW_OTG_HOST_DRIVER
9228+ retval = langwell_register_host(&LNW_OTG_HOST_DRIVER);
9229+ if (retval < 0)
9230+ goto clean4;
9231+#endif
9232 return retval;
9233
9234+#ifdef LNW_OTG_HOST_DRIVER
9235+clean4:
9236+ langwell_unregister_host(&LNW_OTG_HOST_DRIVER);
9237+#endif
9238+
9239 #ifdef OF_PLATFORM_DRIVER
9240 /* of_unregister_platform_driver(&OF_PLATFORM_DRIVER); */
9241 clean3:
9242@@ -1255,6 +1312,9 @@ static void __exit ehci_hcd_cleanup(void
9243 #ifdef PS3_SYSTEM_BUS_DRIVER
9244 ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
9245 #endif
9246+#ifdef LNW_OTG_HOST_DRIVER
9247+ langwell_unregister_host(&LNW_OTG_HOST_DRIVER);
9248+#endif
9249 #ifdef DEBUG
9250 debugfs_remove(ehci_debug_root);
9251 #endif
9252Index: linux-2.6.33/drivers/usb/host/ehci-hub.c
9253===================================================================
9254--- linux-2.6.33.orig/drivers/usb/host/ehci-hub.c
9255+++ linux-2.6.33/drivers/usb/host/ehci-hub.c
9256@@ -112,6 +112,7 @@ static int ehci_bus_suspend (struct usb_
9257 int port;
9258 int mask;
9259 u32 __iomem *hostpc_reg = NULL;
9260+ int rc = 0;
9261
9262 ehci_dbg(ehci, "suspend root hub\n");
9263
9264@@ -228,13 +229,18 @@ static int ehci_bus_suspend (struct usb_
9265 ehci_readl(ehci, &ehci->regs->intr_enable);
9266
9267 ehci->next_statechange = jiffies + msecs_to_jiffies(10);
9268+
9269+#ifdef CONFIG_USB_OTG
9270+ if (ehci->has_otg && ehci->otg_suspend)
9271+ rc = ehci->otg_suspend(hcd);
9272+#endif
9273 spin_unlock_irq (&ehci->lock);
9274
9275 /* ehci_work() may have re-enabled the watchdog timer, which we do not
9276 * want, and so we must delete any pending watchdog timer events.
9277 */
9278 del_timer_sync(&ehci->watchdog);
9279- return 0;
9280+ return rc;
9281 }
9282
9283
9284@@ -246,6 +252,7 @@ static int ehci_bus_resume (struct usb_h
9285 u32 power_okay;
9286 int i;
9287 u8 resume_needed = 0;
9288+ int rc = 0;
9289
9290 if (time_before (jiffies, ehci->next_statechange))
9291 msleep(5);
9292@@ -295,7 +302,11 @@ static int ehci_bus_resume (struct usb_h
9293 i = HCS_N_PORTS (ehci->hcs_params);
9294 while (i--) {
9295 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
9296- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
9297+ temp &= ~(PORT_RWC_BITS | PORT_WKDISC_E | PORT_WKOC_E);
9298+ if (temp & PORT_CONNECT)
9299+ temp |= PORT_WKOC_E | PORT_WKDISC_E;
9300+ else
9301+ temp |= PORT_WKOC_E | PORT_WKCONN_E;
9302 if (test_bit(i, &ehci->bus_suspended) &&
9303 (temp & PORT_SUSPEND)) {
9304 temp |= PORT_RESUME;
9305@@ -340,9 +351,13 @@ static int ehci_bus_resume (struct usb_h
9306 /* Now we can safely re-enable irqs */
9307 ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
9308
9309+#ifdef CONFIG_USB_OTG
9310+ if (ehci->has_otg && ehci->otg_resume)
9311+ rc = ehci->otg_resume(hcd);
9312+#endif
9313 spin_unlock_irq (&ehci->lock);
9314 ehci_handover_companion_ports(ehci);
9315- return 0;
9316+ return rc;
9317 }
9318
9319 #else
9320@@ -678,10 +693,20 @@ static int ehci_hub_control (
9321 if (temp & PORT_SUSPEND) {
9322 if ((temp & PORT_PE) == 0)
9323 goto error;
9324- /* resume signaling for 20 msec */
9325- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
9326+ /* clear phy low power mode before resume */
9327+ if (hostpc_reg) {
9328+ temp1 = ehci_readl(ehci, hostpc_reg);
9329+ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
9330+ hostpc_reg);
9331+ mdelay(5);
9332+ }
9333+ /* after the PORT_PE check the port must be
9334+ connected, so set the correct wakeup bits */
9335+ temp &= ~PORT_WKCONN_E;
9336+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
9337 ehci_writel(ehci, temp | PORT_RESUME,
9338 status_reg);
9339+ /* resume signaling for 20 msec */
9340 ehci->reset_done [wIndex] = jiffies
9341 + msecs_to_jiffies (20);
9342 }
9343@@ -696,6 +721,23 @@ static int ehci_hub_control (
9344 status_reg);
9345 break;
9346 case USB_PORT_FEAT_C_CONNECTION:
9347+ /*
9348+ * for connection change, we need to enable
9349+ * appropriate wake bits.
9350+ */
9351+ temp |= PORT_WKOC_E;
9352+ if (temp & PORT_CONNECT) {
9353+ temp |= PORT_WKDISC_E;
9354+ temp &= ~PORT_WKCONN_E;
9355+ } else {
9356+ temp &= ~PORT_WKDISC_E;
9357+ temp |= PORT_WKCONN_E;
9358+ }
9359+ if (ehci_to_hcd(ehci)->lpm_cap) {
9360+ /* clear PORTSC bits on disconnect */
9361+ temp &= ~PORT_LPM;
9362+ temp &= ~PORT_DEV_ADDR;
9363+ }
9364 ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
9365 status_reg);
9366 break;
9367Index: linux-2.6.33/drivers/usb/host/ehci-langwell-pci.c
9368===================================================================
9369--- /dev/null
9370+++ linux-2.6.33/drivers/usb/host/ehci-langwell-pci.c
9371@@ -0,0 +1,195 @@
9372+/*
9373+ * Intel Moorestown Platform Langwell OTG EHCI Controller PCI Bus Glue.
9374+ *
9375+ * Copyright (c) 2008 - 2009, Intel Corporation.
9376+ *
9377+ * This program is free software; you can redistribute it and/or modify it
9378+ * under the terms of the GNU General Public License 2 as published by the
9379+ * Free Software Foundation.
9380+ *
9381+ * This program is distributed in the hope that it will be useful, but
9382+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
9383+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
9384+ * for more details.
9385+ *
9386+ * You should have received a copy of the GNU General Public License
9387+ * along with this program; if not, write to the Free Software Foundation,
9388+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9389+ */
9390+
9391+static int usb_otg_suspend(struct usb_hcd *hcd)
9392+{
9393+ struct otg_transceiver *otg;
9394+ struct langwell_otg *iotg;
9395+
9396+ otg = otg_get_transceiver();
9397+ if (otg == NULL) {
9398+ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
9399+ return -EINVAL;
9400+ }
9401+ iotg = container_of(otg, struct langwell_otg, otg);
9402+ printk(KERN_INFO "%s OTG HNP update suspend\n", __func__);
9403+ if (iotg->otg.default_a)
9404+ iotg->hsm.a_suspend_req = 1;
9405+ else
9406+ iotg->hsm.b_bus_req = 0;
9407+ langwell_update_transceiver();
9408+ otg_put_transceiver(otg);
9409+ return 0;
9410+}
9411+
9412+static int usb_otg_resume(struct usb_hcd *hcd)
9413+{
9414+ struct otg_transceiver *otg;
9415+ struct langwell_otg *iotg;
9416+
9417+ otg = otg_get_transceiver();
9418+ if (otg == NULL) {
9419+ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
9420+ return -EINVAL;
9421+ }
9422+ iotg = container_of(otg, struct langwell_otg, otg);
9423+ printk(KERN_INFO "%s OTG HNP update resume\n", __func__);
9424+ if (iotg->otg.default_a) {
9425+ iotg->hsm.b_bus_resume = 1;
9426+ langwell_update_transceiver();
9427+ }
9428+ otg_put_transceiver(otg);
9429+ return 0;
9430+}
9431+
9432+/* the root hub will call this callback when a device is added/removed */
9433+static void otg_notify(struct usb_device *udev, unsigned action)
9434+{
9435+ struct otg_transceiver *otg;
9436+ struct langwell_otg *iotg;
9437+
9438+ otg = otg_get_transceiver();
9439+ if (otg == NULL) {
9440+ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
9441+ return;
9442+ }
9443+ iotg = container_of(otg, struct langwell_otg, otg);
9444+
9445+ switch (action) {
9446+ case USB_DEVICE_ADD:
9447+ pr_debug("Notify OTG HNP add device\n");
9448+ if (iotg->otg.default_a == 1)
9449+ iotg->hsm.b_conn = 1;
9450+ else
9451+ iotg->hsm.a_conn = 1;
9452+ break;
9453+ case USB_DEVICE_REMOVE:
9454+ pr_debug("Notify OTG HNP delete device\n");
9455+ if (iotg->otg.default_a == 1)
9456+ iotg->hsm.b_conn = 0;
9457+ else
9458+ iotg->hsm.a_conn = 0;
9459+ break;
9460+ default:
9461+ otg_put_transceiver(otg);
9462+ return ;
9463+ }
9464+ if (spin_trylock(&iotg->wq_lock)) {
9465+ langwell_update_transceiver();
9466+ spin_unlock(&iotg->wq_lock);
9467+ }
9468+ otg_put_transceiver(otg);
9469+ return;
9470+}
9471+
9472+static int ehci_langwell_probe(struct pci_dev *pdev,
9473+ const struct pci_device_id *id)
9474+{
9475+ struct hc_driver *driver;
9476+ struct langwell_otg *iotg;
9477+ struct otg_transceiver *otg;
9478+ struct usb_hcd *hcd;
9479+ struct ehci_hcd *ehci;
9480+ int irq;
9481+ int retval;
9482+
9483+ pr_debug("initializing Langwell USB OTG Host Controller\n");
9484+
9485+ /* we need not call pci_enable_device since the otg transceiver already
9486+ * takes control of this device and this probe actually gets called by
9487+ * the otg transceiver driver with HNP protocol.
9488+ */
9489+ irq = pdev->irq;
9490+
9491+ if (!id)
9492+ return -EINVAL;
9493+ driver = (struct hc_driver *)id->driver_data;
9494+ if (!driver)
9495+ return -EINVAL;
9496+
9497+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
9498+ if (!hcd) {
9499+ retval = -ENOMEM;
9500+ goto err1;
9501+ }
9502+
9503+ hcd->self.otg_port = 1;
9504+ ehci = hcd_to_ehci(hcd);
9505+ /* this will be called in ehci_bus_suspend and ehci_bus_resume */
9506+ ehci->otg_suspend = usb_otg_suspend;
9507+ ehci->otg_resume = usb_otg_resume;
9508+ /* this will be called by root hub code */
9509+ hcd->otg_notify = otg_notify;
9510+ otg = otg_get_transceiver();
9511+ if (otg == NULL) {
9512+ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
9513+ retval = -EINVAL;
9514+ goto err1;
9515+ }
9516+ iotg = container_of(otg, struct langwell_otg, otg);
9517+ hcd->regs = iotg->regs;
9518+ hcd->rsrc_start = pci_resource_start(pdev, 0);
9519+ hcd->rsrc_len = pci_resource_len(pdev, 0);
9520+
9521+ if (hcd->regs == NULL) {
9522+ dev_dbg(&pdev->dev, "error mapping memory\n");
9523+ retval = -EFAULT;
9524+ goto err2;
9525+ }
9526+ retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
9527+ if (retval != 0)
9528+ goto err2;
9529+ retval = otg_set_host(otg, &hcd->self);
9530+ if (!otg->default_a)
9531+ hcd->self.is_b_host = 1;
9532+ otg_put_transceiver(otg);
9533+ return retval;
9534+
9535+err2:
9536+ usb_put_hcd(hcd);
9537+err1:
9538+ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
9539+ return retval;
9540+}
9541+
9542+void ehci_langwell_remove(struct pci_dev *dev)
9543+{
9544+ struct usb_hcd *hcd = pci_get_drvdata(dev);
9545+
9546+ if (!hcd)
9547+ return;
9548+ usb_remove_hcd(hcd);
9549+ usb_put_hcd(hcd);
9550+}
9551+
9552+/* Langwell OTG EHCI driver */
9553+static struct pci_driver ehci_otg_driver = {
9554+ .name = "ehci-langwell",
9555+ .id_table = pci_ids,
9556+
9557+ .probe = ehci_langwell_probe,
9558+ .remove = ehci_langwell_remove,
9559+
9560+#ifdef CONFIG_PM_SLEEP
9561+ .driver = {
9562+ .pm = &usb_hcd_pci_pm_ops
9563+ },
9564+#endif
9565+ .shutdown = usb_hcd_pci_shutdown,
9566+};
9567Index: linux-2.6.33/drivers/usb/host/ehci-pci.c
9568===================================================================
9569--- linux-2.6.33.orig/drivers/usb/host/ehci-pci.c
9570+++ linux-2.6.33/drivers/usb/host/ehci-pci.c
9571@@ -41,6 +41,39 @@ static int ehci_pci_reinit(struct ehci_h
9572 return 0;
9573 }
9574
9575+/* enable SRAM if sram detected */
9576+static void sram_init(struct usb_hcd *hcd)
9577+{
9578+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
9579+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
9580+
9581+ if (!hcd->has_sram)
9582+ return;
9583+ ehci->sram_addr = pci_resource_start(pdev, 1);
9584+ ehci->sram_size = pci_resource_len(pdev, 1);
9585+ ehci_info(ehci, "Found HCD SRAM at %x size:%x\n",
9586+ ehci->sram_addr, ehci->sram_size);
9587+ if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
9588+ ehci_warn(ehci, "SRAM request failed\n");
9589+ hcd->has_sram = 0;
9590+ } else if (!dma_declare_coherent_memory(&pdev->dev, ehci->sram_addr,
9591+ ehci->sram_addr, ehci->sram_size, DMA_MEMORY_MAP)) {
9592+ ehci_warn(ehci, "SRAM DMA declare failed\n");
9593+ pci_release_region(pdev, 1);
9594+ hcd->has_sram = 0;
9595+ }
9596+}
9597+
9598+static void sram_deinit(struct usb_hcd *hcd)
9599+{
9600+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
9601+
9602+ if (!hcd->has_sram)
9603+ return;
9604+ dma_release_declared_memory(&pdev->dev);
9605+ pci_release_region(pdev, 1);
9606+}
9607+
9608 /* called during probe() after chip reset completes */
9609 static int ehci_pci_setup(struct usb_hcd *hcd)
9610 {
9611@@ -50,6 +83,7 @@ static int ehci_pci_setup(struct usb_hcd
9612 u8 rev;
9613 u32 temp;
9614 int retval;
9615+ int force_otg_hc_mode = 0;
9616
9617 switch (pdev->vendor) {
9618 case PCI_VENDOR_ID_TOSHIBA_2:
9619@@ -63,6 +97,26 @@ static int ehci_pci_setup(struct usb_hcd
9620 #endif
9621 }
9622 break;
9623+ case PCI_VENDOR_ID_INTEL:
9624+ if (pdev->device == 0x0811) {
9625+ ehci_info(ehci, "Detected Langwell OTG HC\n");
9626+ hcd->has_tt = 1;
9627+ ehci->has_hostpc = 1;
9628+#ifdef CONFIG_USB_OTG
9629+ ehci->has_otg = 1;
9630+#endif
9631+ force_otg_hc_mode = 1;
9632+ hcd->has_sram = 1;
9633+ hcd->sram_no_payload = 1;
9634+ sram_init(hcd);
9635+ } else if (pdev->device == 0x0806) {
9636+ ehci_info(ehci, "Detected Langwell MPH\n");
9637+ hcd->has_tt = 1;
9638+ ehci->has_hostpc = 1;
9639+ hcd->has_sram = 1;
9640+ hcd->sram_no_payload = 1;
9641+ sram_init(hcd);
9642+ }
9643 }
9644
9645 ehci->caps = hcd->regs;
9646@@ -98,6 +152,8 @@ static int ehci_pci_setup(struct usb_hcd
9647
9648 /* cache this readonly data; minimize chip reads */
9649 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
9650+ if (force_otg_hc_mode)
9651+ ehci_reset(ehci);
9652
9653 retval = ehci_halt(ehci);
9654 if (retval)
9655Index: linux-2.6.33/drivers/usb/host/ehci.h
9656===================================================================
9657--- linux-2.6.33.orig/drivers/usb/host/ehci.h
9658+++ linux-2.6.33/drivers/usb/host/ehci.h
9659@@ -139,8 +139,15 @@ struct ehci_hcd { /* one per controlle
9660 #define OHCI_HCCTRL_LEN 0x4
9661 __hc32 *ohci_hcctrl_reg;
9662 unsigned has_hostpc:1;
9663-
9664+#ifdef CONFIG_USB_OTG
9665+ unsigned has_otg:1; /* if it is an otg host */
9666+ /* otg host has additional bus_suspend and bus_resume */
9667+ int (*otg_suspend)(struct usb_hcd *hcd);
9668+ int (*otg_resume)(struct usb_hcd *hcd);
9669+#endif
9670 u8 sbrn; /* packed release number */
9671+ unsigned int sram_addr;
9672+ unsigned int sram_size;
9673
9674 /* irq statistics */
9675 #ifdef EHCI_STATS
9676@@ -156,6 +163,7 @@ struct ehci_hcd { /* one per controlle
9677 struct dentry *debug_async;
9678 struct dentry *debug_periodic;
9679 struct dentry *debug_registers;
9680+ struct dentry *debug_lpm;
9681 #endif
9682 };
9683
9684@@ -719,5 +727,10 @@ static inline u32 hc32_to_cpup (const st
9685 #endif /* DEBUG */
9686
9687 /*-------------------------------------------------------------------------*/
9688-
9689+#ifdef CONFIG_PCI
9690+static void sram_deinit(struct usb_hcd *hcd);
9691+#else
9692+static void sram_deinit(struct usb_hcd *hcd) { return; };
9693+#endif
9694+static unsigned ehci_lpm_check(struct ehci_hcd *ehci, int port);
9695 #endif /* __LINUX_EHCI_HCD_H */
9696Index: linux-2.6.33/include/linux/usb.h
9697===================================================================
9698--- linux-2.6.33.orig/include/linux/usb.h
9699+++ linux-2.6.33/include/linux/usb.h
9700@@ -1582,6 +1582,7 @@ usb_maxpacket(struct usb_device *udev, i
9701 #define USB_DEVICE_REMOVE 0x0002
9702 #define USB_BUS_ADD 0x0003
9703 #define USB_BUS_REMOVE 0x0004
9704+
9705 extern void usb_register_notify(struct notifier_block *nb);
9706 extern void usb_unregister_notify(struct notifier_block *nb);
9707
9708Index: linux-2.6.33/drivers/usb/core/buffer.c
9709===================================================================
9710--- linux-2.6.33.orig/drivers/usb/core/buffer.c
9711+++ linux-2.6.33/drivers/usb/core/buffer.c
9712@@ -115,6 +115,11 @@ void *hcd_buffer_alloc(
9713 return kmalloc(size, mem_flags);
9714 }
9715
9716+ /* we won't use internal SRAM for data payload, since we can't get
9717+ any benefit from it */
9718+ if (hcd->has_sram && hcd->sram_no_payload)
9719+ return dma_alloc_coherent(NULL, size, dma, mem_flags);
9720+
9721 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
9722 if (size <= pool_max [i])
9723 return dma_pool_alloc(hcd->pool [i], mem_flags, dma);
9724@@ -141,6 +146,11 @@ void hcd_buffer_free(
9725 return;
9726 }
9727
9728+ if (hcd->has_sram && hcd->sram_no_payload) {
9729+ dma_free_coherent(NULL, size, addr, dma);
9730+ return;
9731+ }
9732+
9733 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
9734 if (size <= pool_max [i]) {
9735 dma_pool_free(hcd->pool [i], addr, dma);
9736Index: linux-2.6.33/drivers/usb/host/ehci-dbg.c
9737===================================================================
9738--- linux-2.6.33.orig/drivers/usb/host/ehci-dbg.c
9739+++ linux-2.6.33/drivers/usb/host/ehci-dbg.c
9740@@ -98,13 +98,18 @@ static void dbg_hcc_params (struct ehci_
9741 HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
9742 } else {
9743 ehci_dbg (ehci,
9744- "%s hcc_params %04x thresh %d uframes %s%s%s\n",
9745+ "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
9746 label,
9747 params,
9748 HCC_ISOC_THRES(params),
9749 HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
9750 HCC_CANPARK(params) ? " park" : "",
9751- HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
9752+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
9753+ HCC_LPM(params) ? " LPM" : "",
9754+ HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
9755+ HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
9756+ HCC_32FRAME_PERIODIC_LIST(params) ?
9757+ " 32 periodic list" : "");
9758 }
9759 }
9760 #else
9761@@ -191,8 +196,9 @@ static int __maybe_unused
9762 dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
9763 {
9764 return scnprintf (buf, len,
9765- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
9766+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
9767 label, label [0] ? " " : "", status,
9768+ (status & STS_PPCE_MASK) ? " PPCE" : "",
9769 (status & STS_ASS) ? " Async" : "",
9770 (status & STS_PSS) ? " Periodic" : "",
9771 (status & STS_RECL) ? " Recl" : "",
9772@@ -210,8 +216,9 @@ static int __maybe_unused
9773 dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
9774 {
9775 return scnprintf (buf, len,
9776- "%s%sintrenable %02x%s%s%s%s%s%s",
9777+ "%s%sintrenable %02x%s%s%s%s%s%s%s",
9778 label, label [0] ? " " : "", enable,
9779+ (enable & STS_PPCE_MASK) ? " PPCE" : "",
9780 (enable & STS_IAA) ? " IAA" : "",
9781 (enable & STS_FATAL) ? " FATAL" : "",
9782 (enable & STS_FLR) ? " FLR" : "",
9783@@ -228,9 +235,14 @@ static int
9784 dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
9785 {
9786 return scnprintf (buf, len,
9787- "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
9788+ "%s%scmd %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s prd=%s%s %s",
9789 label, label [0] ? " " : "", command,
9790- (command & CMD_PARK) ? "park" : "(park)",
9791+ (command & CMD_HIRD) ? " HIRD" : "",
9792+ (command & CMD_PPCEE) ? " PPCEE" : "",
9793+ (command & CMD_FSP) ? " FSP" : "",
9794+ (command & CMD_ASPE) ? " ASPE" : "",
9795+ (command & CMD_PSPE) ? " PSPE" : "",
9796+ (command & CMD_PARK) ? " park" : "(park)",
9797 CMD_PARK_CNT (command),
9798 (command >> 16) & 0x3f,
9799 (command & CMD_LRESET) ? " LReset" : "",
9800@@ -257,11 +269,21 @@ dbg_port_buf (char *buf, unsigned len, c
9801 }
9802
9803 return scnprintf (buf, len,
9804- "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
9805+ "%s%sp:%d sts %06x %d %s%s%s%s%s%s sig=%s%s%s%s%s%s%s%s%s%s%s",
9806 label, label [0] ? " " : "", port, status,
9807+ status>>25,/*device address */
9808+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
9809+ " ACK" : "",
9810+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
9811+ " NYET" : "",
9812+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
9813+ " STALL" : "",
9814+ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
9815+ " ERR" : "",
9816 (status & PORT_POWER) ? " POWER" : "",
9817 (status & PORT_OWNER) ? " OWNER" : "",
9818 sig,
9819+ (status & PORT_LPM) ? " LPM" : "",
9820 (status & PORT_RESET) ? " RESET" : "",
9821 (status & PORT_SUSPEND) ? " SUSPEND" : "",
9822 (status & PORT_RESUME) ? " RESUME" : "",
9823@@ -330,6 +352,13 @@ static int debug_async_open(struct inode
9824 static int debug_periodic_open(struct inode *, struct file *);
9825 static int debug_registers_open(struct inode *, struct file *);
9826 static int debug_async_open(struct inode *, struct file *);
9827+static int debug_lpm_open(struct inode *, struct file *);
9828+static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
9829+ size_t count, loff_t *ppos);
9830+static ssize_t debug_lpm_write(struct file *file, const char __user *buffer,
9831+ size_t count, loff_t *ppos);
9832+static int debug_lpm_close(struct inode *inode, struct file *file);
9833+
9834 static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
9835 static int debug_close(struct inode *, struct file *);
9836
9837@@ -351,6 +380,13 @@ static const struct file_operations debu
9838 .read = debug_output,
9839 .release = debug_close,
9840 };
9841+static const struct file_operations debug_lpm_fops = {
9842+ .owner = THIS_MODULE,
9843+ .open = debug_lpm_open,
9844+ .read = debug_lpm_read,
9845+ .write = debug_lpm_write,
9846+ .release = debug_lpm_close,
9847+};
9848
9849 static struct dentry *ehci_debug_root;
9850
9851@@ -917,6 +953,94 @@ static int debug_registers_open(struct i
9852 return file->private_data ? 0 : -ENOMEM;
9853 }
9854
9855+static int debug_lpm_open(struct inode *inode, struct file *file)
9856+{
9857+ file->private_data = inode->i_private;
9858+ return 0;
9859+}
9860+static int debug_lpm_close(struct inode *inode, struct file *file)
9861+{
9862+ return 0;
9863+}
9864+static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
9865+ size_t count, loff_t *ppos)
9866+{
9867+ /* TODO: show lpm stats */
9868+ return 0;
9869+}
9870+
9871+
9872+static
9873+ssize_t debug_lpm_write(struct file *file, const char __user *user_buf,
9874+ size_t count, loff_t *ppos)
9875+{
9876+ struct usb_hcd *hcd;
9877+ struct ehci_hcd *ehci;
9878+ char buf[50];
9879+ size_t len;
9880+ u32 temp;
9881+ unsigned long port;
9882+ u32 __iomem *portsc ;
9883+ u32 params;
9884+
9885+ hcd = bus_to_hcd(file->private_data);
9886+ ehci = hcd_to_ehci(hcd);
9887+
9888+ len = min(count, sizeof(buf) - 1);
9889+ if (copy_from_user(buf, user_buf, len))
9890+ return -EFAULT;
9891+ buf[len] = '\0';
9892+ if (len > 0 && buf[len - 1] == '\n')
9893+ buf[len - 1] = '\0';
9894+
9895+ if (strncmp(buf, "enable", 5) == 0) {
9896+ if (strict_strtoul(buf + 7, 10, &port))
9897+ return -EINVAL;
9898+ params = ehci_readl(ehci, &ehci->caps->hcs_params);
9899+ if (port > HCS_N_PORTS(params)) {
9900+ ehci_dbg(ehci, "ERR: LPM on bad port %lu\n", port);
9901+ return -ENODEV;
9902+ }
9903+ portsc = &ehci->regs->port_status[port-1];
9904+ temp = ehci_readl(ehci, portsc);
9905+ if (!(temp & PORT_DEV_ADDR)) {
9906+ ehci_dbg(ehci, "LPM: no device attached\n");
9907+ return -ENODEV;
9908+ }
9909+ temp |= PORT_LPM;
9910+ ehci_writel(ehci, temp, portsc);
9911+ printk(KERN_INFO "force enable LPM for port %lu\n", port);
9912+ } else if (strncmp(buf, "hird=", 5) == 0) {
9913+ unsigned long hird;
9914+ if (strict_strtoul(buf + 5, 16, &hird))
9915+ return -EINVAL;
9916+ printk(KERN_INFO " setting hird %s %lu \n", buf + 6, hird);
9917+ temp = ehci_readl(ehci, &ehci->regs->command);
9918+ temp &= ~CMD_HIRD;
9919+ temp |= hird << 24;
9920+ ehci_writel(ehci, temp, &ehci->regs->command);
9921+ } else if (strncmp(buf, "disable", 7) == 0) {
9922+ if (strict_strtoul(buf + 8, 10, &port))
9923+ return -EINVAL;
9924+ params = ehci_readl(ehci, &ehci->caps->hcs_params);
9925+ if (port > HCS_N_PORTS(params)) {
9926+ ehci_dbg(ehci, "ERR: LPM off bad port %lu\n", port);
9927+ return -ENODEV;
9928+ }
9929+ portsc = &ehci->regs->port_status[port-1];
9930+ temp = ehci_readl(ehci, portsc);
9931+ if (!(temp & PORT_DEV_ADDR)) {
9932+ ehci_dbg(ehci, "ERR: no device attached\n");
9933+ return -ENODEV;
9934+ }
9935+ temp &= ~PORT_LPM;
9936+ ehci_writel(ehci, temp, portsc);
9937+ printk(KERN_INFO "disabled LPM for port %lu\n", port);
9938+ } else
9939+ return -EOPNOTSUPP;
9940+ return count;
9941+}
9942+
9943 static inline void create_debug_files (struct ehci_hcd *ehci)
9944 {
9945 struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
9946@@ -940,6 +1064,10 @@ static inline void create_debug_files (s
9947 ehci->debug_registers = debugfs_create_file("registers", S_IRUGO,
9948 ehci->debug_dir, bus,
9949 &debug_registers_fops);
9950+
9951+ ehci->debug_registers = debugfs_create_file("lpm", S_IRUGO|S_IWUGO,
9952+ ehci->debug_dir, bus,
9953+ &debug_lpm_fops);
9954 if (!ehci->debug_registers)
9955 goto registers_error;
9956 return;
9957Index: linux-2.6.33/drivers/usb/host/ehci-lpm.c
9958===================================================================
9959--- /dev/null
9960+++ linux-2.6.33/drivers/usb/host/ehci-lpm.c
9961@@ -0,0 +1,90 @@
9962+/*
9963+ *
9964+ * Author: Jacob Pan <jacob.jun.pan@intel.com>
9965+ *
9966+ * Copyright 2009- Intel Corp.
9967+ *
9968+ * This program is free software; you can redistribute it and/or modify it
9969+ * under the terms of the GNU General Public License as published by the
9970+ * Free Software Foundation; either version 2 of the License, or (at your
9971+ * option) any later version.
9972+ *
9973+ * This program is distributed in the hope that it will be useful, but
9974+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
9975+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
9976+ * for more details.
9977+ *
9978+ * You should have received a copy of the GNU General Public License
9979+ * along with this program; if not, write to the Free Software Foundation,
9980+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
9981+ */
9982+
9983+/* this file is part of ehci-hcd.c */
9984+static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
9985+{
9986+ u32 portsc;
9987+
9988+ ehci_dbg(ehci, "set dev address %d for port %d \n", dev_addr, port_num);
9989+ if (port_num > HCS_N_PORTS(ehci->hcs_params)) {
9990+ ehci_dbg(ehci, "invalid port number %d \n", port_num);
9991+ return -ENODEV;
9992+ }
9993+ portsc = ehci_readl(ehci, &ehci->regs->port_status[port_num-1]);
9994+ portsc &= ~PORT_DEV_ADDR;
9995+ portsc |= dev_addr<<25;
9996+ ehci_writel(ehci, portsc, &ehci->regs->port_status[port_num-1]);
9997+ return 0;
9998+}
9999+
10000+/*
10001+ * This function is called to put a link into the L1 state. The steps are:
10002+ * - verify the HC supports LPM
10003+ * - make sure all pipes on the link are idle
10004+ * - shut down all QHs on the pipe
10005+ * - send the LPM packet
10006+ * - confirm the device ACK
10007+ */
10008+static unsigned ehci_lpm_check(struct ehci_hcd *ehci, int port)
10009+{
10010+ u32 __iomem *portsc ;
10011+ u32 val32;
10012+ int retval;
10013+
10014+ portsc = &ehci->regs->port_status[port-1];
10015+ val32 = ehci_readl(ehci, portsc);
10016+ if (!(val32 & PORT_DEV_ADDR)) {
10017+ ehci_dbg(ehci, "LPM: no device attached\n");
10018+ return -ENODEV;
10019+ }
10020+ val32 |= PORT_LPM;
10021+ ehci_writel(ehci, val32, portsc);
10022+ mdelay(5);
10023+ val32 |= PORT_SUSPEND;
10024+ ehci_dbg(ehci, "Sending LPM 0x%08x to port %d\n", val32, port);
10025+ ehci_writel(ehci, val32, portsc);
10026+ /* wait for ACK */
10027+ mdelay(10);
10028+ retval = handshake(ehci, &ehci->regs->port_status[port-1], PORT_SSTS,
10029+ PORTSC_SUSPEND_STS_ACK, 125);
10030+ dbg_port(ehci, "LPM", port, val32);
10031+ if (retval != -ETIMEDOUT) {
10032+ ehci_dbg(ehci, "LPM: device ACK for LPM\n");
10033+ val32 |= PORT_LPM;
10034+ /*
10035+ * now device should be in L1 sleep, let's wake up the device
10036+ * so that we can complete enumeration.
10037+ */
10038+ ehci_writel(ehci, val32, portsc);
10039+ mdelay(10);
10040+ val32 |= PORT_RESUME;
10041+ ehci_writel(ehci, val32, portsc);
10042+ } else {
10043+ ehci_dbg(ehci, "LPM: device does not ACK, disable LPM %d\n",
10044+ retval);
10045+ val32 &= ~PORT_LPM;
10046+ retval = -ETIMEDOUT;
10047+ ehci_writel(ehci, val32, portsc);
10048+ }
10049+
10050+ return retval;
10051+}
10052Index: linux-2.6.33/drivers/usb/host/ehci-q.c
10053===================================================================
10054--- linux-2.6.33.orig/drivers/usb/host/ehci-q.c
10055+++ linux-2.6.33/drivers/usb/host/ehci-q.c
10056@@ -643,6 +643,16 @@ qh_urb_transaction (
10057 sizeof (struct usb_ctrlrequest),
10058 token | (2 /* "setup" */ << 8), 8);
10059
10060+ if (((struct usb_ctrlrequest *)urb->setup_packet)->bRequest
10061+ == USB_REQ_SET_ADDRESS) {
10062+ /* for LPM capable HC, set up device address*/
10063+ int dev_address = ((struct usb_ctrlrequest *)
10064+ (urb->setup_packet))->wValue;
10065+ if (ehci_to_hcd(ehci)->lpm_cap)
10066+ ehci_lpm_set_da(ehci, dev_address,
10067+ urb->dev->portnum);
10068+ }
10069+
10070 /* ... and always at least one more pid */
10071 token ^= QTD_TOGGLE;
10072 qtd_prev = qtd;
10073Index: linux-2.6.33/include/linux/usb/ehci_def.h
10074===================================================================
10075--- linux-2.6.33.orig/include/linux/usb/ehci_def.h
10076+++ linux-2.6.33/include/linux/usb/ehci_def.h
10077@@ -39,6 +39,12 @@ struct ehci_caps {
10078 #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
10079
10080 u32 hcc_params; /* HCCPARAMS - offset 0x8 */
10081+/* for 1.1 addendum */
10082+#define HCC_32FRAME_PERIODIC_LIST(p) ((p)&(1 << 19))
10083+#define HCC_PER_PORT_CHANGE_EVENT(p) ((p)&(1 << 18))
10084+#define HCC_LPM(p) ((p)&(1 << 17))
10085+#define HCC_HW_PREFETCH(p) ((p)&(1 << 16))
10086+
10087 #define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
10088 #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
10089 #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
10090@@ -54,6 +60,13 @@ struct ehci_regs {
10091
10092 /* USBCMD: offset 0x00 */
10093 u32 command;
10094+
10095+/* EHCI 1.1 addendum */
10096+#define CMD_HIRD (0xf<<24) /* host initiated resume duration */
10097+#define CMD_PPCEE (1<<15) /* per port change event enable */
10098+#define CMD_FSP (1<<14) /* fully synchronized prefetch */
10099+#define CMD_ASPE (1<<13) /* async schedule prefetch enable */
10100+#define CMD_PSPE (1<<12) /* periodic schedule prefetch enable */
10101 /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
10102 #define CMD_PARK (1<<11) /* enable "park" on async qh */
10103 #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
10104@@ -67,6 +80,7 @@ struct ehci_regs {
10105
10106 /* USBSTS: offset 0x04 */
10107 u32 status;
10108+#define STS_PPCE_MASK (0xff<<16) /* Per-Port change event 1-16 */
10109 #define STS_ASS (1<<15) /* Async Schedule Status */
10110 #define STS_PSS (1<<14) /* Periodic Schedule Status */
10111 #define STS_RECL (1<<13) /* Reclamation */
10112@@ -100,6 +114,14 @@ struct ehci_regs {
10113
10114 /* PORTSC: offset 0x44 */
10115 u32 port_status [0]; /* up to N_PORTS */
10116+/* EHCI 1.1 addendum */
10117+#define PORTSC_SUSPEND_STS_ACK 0
10118+#define PORTSC_SUSPEND_STS_NYET 1
10119+#define PORTSC_SUSPEND_STS_STALL 2
10120+#define PORTSC_SUSPEND_STS_ERR 3
10121+
10122+#define PORT_DEV_ADDR (0x7f<<25) /* device address */
10123+#define PORT_SSTS (0x3<<23) /* suspend status */
10124 /* 31:23 reserved */
10125 #define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
10126 #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
10127@@ -115,6 +137,7 @@ struct ehci_regs {
10128 #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
10129 /* 11:10 for detecting lowspeed devices (reset vs release ownership) */
10130 /* 9 reserved */
10131+#define PORT_LPM (1<<9) /* LPM transaction */
10132 #define PORT_RESET (1<<8) /* reset port */
10133 #define PORT_SUSPEND (1<<7) /* suspend port */
10134 #define PORT_RESUME (1<<6) /* resume it */
10135Index: linux-2.6.33/arch/x86/include/asm/i8259.h
10136===================================================================
10137--- linux-2.6.33.orig/arch/x86/include/asm/i8259.h
10138+++ linux-2.6.33/arch/x86/include/asm/i8259.h
10139@@ -26,11 +26,6 @@ extern unsigned int cached_irq_mask;
10140
10141 extern spinlock_t i8259A_lock;
10142
10143-extern void init_8259A(int auto_eoi);
10144-extern void enable_8259A_irq(unsigned int irq);
10145-extern void disable_8259A_irq(unsigned int irq);
10146-extern unsigned int startup_8259A_irq(unsigned int irq);
10147-
10148 /* the PIC may need a careful delay on some platforms, hence specific calls */
10149 static inline unsigned char inb_pic(unsigned int port)
10150 {
10151@@ -57,7 +52,17 @@ static inline void outb_pic(unsigned cha
10152
10153 extern struct irq_chip i8259A_chip;
10154
10155-extern void mask_8259A(void);
10156-extern void unmask_8259A(void);
10157+struct legacy_pic {
10158+ int nr_legacy_irqs;
10159+ struct irq_chip *chip;
10160+ void (*mask_all)(void);
10161+ void (*restore_mask)(void);
10162+ void (*init)(int auto_eoi);
10163+ int (*irq_pending)(unsigned int irq);
10164+ void (*make_irq)(unsigned int irq);
10165+};
10166+
10167+extern struct legacy_pic *legacy_pic;
10168+extern struct legacy_pic null_legacy_pic;
10169
10170 #endif /* _ASM_X86_I8259_H */
10171Index: linux-2.6.33/arch/x86/kernel/i8259.c
10172===================================================================
10173--- linux-2.6.33.orig/arch/x86/kernel/i8259.c
10174+++ linux-2.6.33/arch/x86/kernel/i8259.c
10175@@ -34,6 +34,12 @@
10176 static int i8259A_auto_eoi;
10177 DEFINE_SPINLOCK(i8259A_lock);
10178 static void mask_and_ack_8259A(unsigned int);
10179+static void mask_8259A(void);
10180+static void unmask_8259A(void);
10181+static void disable_8259A_irq(unsigned int irq);
10182+static void enable_8259A_irq(unsigned int irq);
10183+static void init_8259A(int auto_eoi);
10184+static int i8259A_irq_pending(unsigned int irq);
10185
10186 struct irq_chip i8259A_chip = {
10187 .name = "XT-PIC",
10188@@ -63,7 +69,7 @@ unsigned int cached_irq_mask = 0xffff;
10189 */
10190 unsigned long io_apic_irqs;
10191
10192-void disable_8259A_irq(unsigned int irq)
10193+static void disable_8259A_irq(unsigned int irq)
10194 {
10195 unsigned int mask = 1 << irq;
10196 unsigned long flags;
10197@@ -77,7 +83,7 @@ void disable_8259A_irq(unsigned int irq)
10198 spin_unlock_irqrestore(&i8259A_lock, flags);
10199 }
10200
10201-void enable_8259A_irq(unsigned int irq)
10202+static void enable_8259A_irq(unsigned int irq)
10203 {
10204 unsigned int mask = ~(1 << irq);
10205 unsigned long flags;
10206@@ -91,7 +97,7 @@ void enable_8259A_irq(unsigned int irq)
10207 spin_unlock_irqrestore(&i8259A_lock, flags);
10208 }
10209
10210-int i8259A_irq_pending(unsigned int irq)
10211+static int i8259A_irq_pending(unsigned int irq)
10212 {
10213 unsigned int mask = 1<<irq;
10214 unsigned long flags;
10215@@ -107,7 +113,7 @@ int i8259A_irq_pending(unsigned int irq)
10216 return ret;
10217 }
10218
10219-void make_8259A_irq(unsigned int irq)
10220+static void make_8259A_irq(unsigned int irq)
10221 {
10222 disable_irq_nosync(irq);
10223 io_apic_irqs &= ~(1<<irq);
10224@@ -281,7 +287,7 @@ static int __init i8259A_init_sysfs(void
10225
10226 device_initcall(i8259A_init_sysfs);
10227
10228-void mask_8259A(void)
10229+static void mask_8259A(void)
10230 {
10231 unsigned long flags;
10232
10233@@ -293,7 +299,7 @@ void mask_8259A(void)
10234 spin_unlock_irqrestore(&i8259A_lock, flags);
10235 }
10236
10237-void unmask_8259A(void)
10238+static void unmask_8259A(void)
10239 {
10240 unsigned long flags;
10241
10242@@ -305,7 +311,7 @@ void unmask_8259A(void)
10243 spin_unlock_irqrestore(&i8259A_lock, flags);
10244 }
10245
10246-void init_8259A(int auto_eoi)
10247+static void init_8259A(int auto_eoi)
10248 {
10249 unsigned long flags;
10250
10251@@ -358,3 +364,47 @@ void init_8259A(int auto_eoi)
10252
10253 spin_unlock_irqrestore(&i8259A_lock, flags);
10254 }
10255+
10256+/*
10257+ * make the i8259 a driver so that we can select PIC functions at run time. The
10258+ * goal is to keep one x86 binary compatible across PC-compatible and
10259+ * non-PC-compatible platforms, such as x86 MID.
10260+ */
10261+
10262+static void __init legacy_pic_noop(void) { };
10263+static void __init legacy_pic_uint_noop(unsigned int unused) { };
10264+static void __init legacy_pic_int_noop(int unused) { };
10265+
10266+static struct irq_chip dummy_pic_chip = {
10267+ .name = "dummy pic",
10268+ .mask = legacy_pic_uint_noop,
10269+ .unmask = legacy_pic_uint_noop,
10270+ .disable = legacy_pic_uint_noop,
10271+ .mask_ack = legacy_pic_uint_noop,
10272+};
10273+static int legacy_pic_irq_pending_noop(unsigned int irq)
10274+{
10275+ return 0;
10276+}
10277+
10278+struct legacy_pic null_legacy_pic = {
10279+ .nr_legacy_irqs = 0,
10280+ .chip = &dummy_pic_chip,
10281+ .mask_all = legacy_pic_noop,
10282+ .restore_mask = legacy_pic_noop,
10283+ .init = legacy_pic_int_noop,
10284+ .irq_pending = legacy_pic_irq_pending_noop,
10285+ .make_irq = legacy_pic_uint_noop,
10286+};
10287+
10288+struct legacy_pic default_legacy_pic = {
10289+ .nr_legacy_irqs = NR_IRQS_LEGACY,
10290+ .chip = &i8259A_chip,
10291+ .mask_all = mask_8259A,
10292+ .restore_mask = unmask_8259A,
10293+ .init = init_8259A,
10294+ .irq_pending = i8259A_irq_pending,
10295+ .make_irq = make_8259A_irq,
10296+};
10297+
10298+struct legacy_pic *legacy_pic = &default_legacy_pic;
10299Index: linux-2.6.33/arch/x86/include/asm/hw_irq.h
10300===================================================================
10301--- linux-2.6.33.orig/arch/x86/include/asm/hw_irq.h
10302+++ linux-2.6.33/arch/x86/include/asm/hw_irq.h
10303@@ -53,13 +53,6 @@ extern void threshold_interrupt(void);
10304 extern void call_function_interrupt(void);
10305 extern void call_function_single_interrupt(void);
10306
10307-/* PIC specific functions */
10308-extern void disable_8259A_irq(unsigned int irq);
10309-extern void enable_8259A_irq(unsigned int irq);
10310-extern int i8259A_irq_pending(unsigned int irq);
10311-extern void make_8259A_irq(unsigned int irq);
10312-extern void init_8259A(int aeoi);
10313-
10314 /* IOAPIC */
10315 #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
10316 extern unsigned long io_apic_irqs;
10317Index: linux-2.6.33/arch/x86/kernel/apic/nmi.c
10318===================================================================
10319--- linux-2.6.33.orig/arch/x86/kernel/apic/nmi.c
10320+++ linux-2.6.33/arch/x86/kernel/apic/nmi.c
10321@@ -177,7 +177,7 @@ int __init check_nmi_watchdog(void)
10322 error:
10323 if (nmi_watchdog == NMI_IO_APIC) {
10324 if (!timer_through_8259)
10325- disable_8259A_irq(0);
10326+ legacy_pic->chip->mask(0);
10327 on_each_cpu(__acpi_nmi_disable, NULL, 1);
10328 }
10329
10330Index: linux-2.6.33/arch/x86/kernel/irqinit.c
10331===================================================================
10332--- linux-2.6.33.orig/arch/x86/kernel/irqinit.c
10333+++ linux-2.6.33/arch/x86/kernel/irqinit.c
10334@@ -123,7 +123,7 @@ void __init init_ISA_irqs(void)
10335 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
10336 init_bsp_APIC();
10337 #endif
10338- init_8259A(0);
10339+ legacy_pic->init(0);
10340
10341 /*
10342 * 16 old-style INTA-cycle interrupts:
10343Index: linux-2.6.33/drivers/misc/Makefile
10344===================================================================
10345--- linux-2.6.33.orig/drivers/misc/Makefile
10346+++ linux-2.6.33/drivers/misc/Makefile
10347@@ -20,6 +20,7 @@ obj-$(CONFIG_SGI_XP) += sgi-xp/
10348 obj-$(CONFIG_SGI_GRU) += sgi-gru/
10349 obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
10350 obj-$(CONFIG_HP_ILO) += hpilo.o
10351+obj-$(CONFIG_MRST) += intel_mrst.o
10352 obj-$(CONFIG_ISL29003) += isl29003.o
10353 obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
10354 obj-$(CONFIG_DS1682) += ds1682.o
10355Index: linux-2.6.33/drivers/misc/intel_mrst.c
10356===================================================================
10357--- /dev/null
10358+++ linux-2.6.33/drivers/misc/intel_mrst.c
10359@@ -0,0 +1,216 @@
10360+/*
10361+ * intel_mrst.c - Intel Moorestown Driver for misc functionality
10362+ *
10363+ * Copyright (C) 2009 Intel Corp
10364+ * Author: James Ausmus <james.ausmus@intel.com>
10365+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10366+ *
10367+ * This program is free software; you can redistribute it and/or modify
10368+ * it under the terms of the GNU General Public License as published by
10369+ * the Free Software Foundation; version 2 of the License.
10370+ *
10371+ * This program is distributed in the hope that it will be useful, but
10372+ * WITHOUT ANY WARRANTY; without even the implied warranty of
10373+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10374+ * General Public License for more details.
10375+ *
10376+ * You should have received a copy of the GNU General Public License along
10377+ * with this program; if not, write to the Free Software Foundation, Inc.,
10378+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
10379+ *
10380+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10381+ *
10382+ * This driver sets up initial PMIC register values for various voltage rails
10383+ * and GPIOs
10384+ */
10385+
10386+#include <linux/kernel.h>
10387+#include <linux/module.h>
10388+#include <linux/init.h>
10389+
10390+#include <linux/delay.h>
10391+#include <asm/ipc_defs.h>
10392+
10393+
10394+MODULE_AUTHOR("James Ausmus");
10395+MODULE_AUTHOR("German Monroy");
10396+MODULE_DESCRIPTION("Intel MRST platform specific driver");
10397+MODULE_LICENSE("GPL");
10398+
10399+static int intel_mrst_pmic_read(unsigned int reg, unsigned int *value)
10400+{
10401+ struct ipc_pmic_reg_data pmic_data = { 0 };
10402+ int ret = 0;
10403+
10404+ pmic_data.pmic_reg_data[0].register_address = reg;
10405+ pmic_data.num_entries = 1;
10406+ ret = ipc_pmic_register_read(&pmic_data);
10407+ if (ret)
10408+ printk(KERN_ERR "intel_mrst_pmic_read: unable to read "
10409+ "PMIC register 0x%03x\n", reg);
10410+ else
10411+ *value = pmic_data.pmic_reg_data[0].value;
10412+
10413+ return ret;
10414+}
10415+
10416+static int intel_mrst_pmic_write(unsigned int reg, unsigned int value)
10417+{
10418+ struct ipc_pmic_reg_data pmic_data = { 0 };
10419+ int ret = 0;
10420+
10421+ pmic_data.pmic_reg_data[0].register_address = reg;
10422+ pmic_data.pmic_reg_data[0].value = value;
10423+ pmic_data.num_entries = 1;
10424+ ret = ipc_pmic_register_write(&pmic_data, 0);
10425+ if (ret) {
10426+ printk(KERN_ERR "intel_mrst_pmic_write: register 0x%03x "
10427+ "failed ipc_pmic_register_write of value %02x, "
10428+ "retval %d\n", reg, value, ret);
10429+ } else {
10430+ printk(KERN_INFO "intel_mrst_pmic_write: register "
10431+ "0x%03x, now=0x%02x\n",
10432+ reg, value);
10433+ }
10434+
10435+ return ret;
10436+}
10437+
10438+static int intel_mrst_sdio_EVP_power_up(void)
10439+{
10440+ intel_mrst_pmic_write(0xF4, 0x25);
10441+ intel_mrst_pmic_write(0x21, 0x00);
10442+ intel_mrst_pmic_write(0x4a, 0x7f);
10443+ intel_mrst_pmic_write(0x4b, 0x7f);
10444+ intel_mrst_pmic_write(0x4c, 0x3f);
10445+
10446+ intel_mrst_pmic_write(0x3b, 0x3f);
10447+ intel_mrst_pmic_write(0x3c, 0x3f);
10448+ mdelay(1);
10449+ intel_mrst_pmic_write(0xF4, 0x05);
10450+ mdelay(12);
10451+ intel_mrst_pmic_write(0xF4, 0x21);
10452+
10453+ return 0;
10454+
10455+}
10456+
10457+static int intel_mrst_sdio_EVP_power_down(void)
10458+{
10459+ intel_mrst_pmic_write(0xF4, 0x25);
10460+ intel_mrst_pmic_write(0x21, 0x00);
10461+
10462+ intel_mrst_pmic_write(0x4b, 0x00);
10463+ intel_mrst_pmic_write(0x4c, 0x00);
10464+
10465+ intel_mrst_pmic_write(0x3b, 0x00);
10466+ intel_mrst_pmic_write(0x3c, 0x00);
10467+ intel_mrst_pmic_write(0x4a, 0x00);
10468+
10469+ return 0;
10470+}
10471+
10472+static int intel_mrst_sdio_8688_power_up(void)
10473+{
10474+ intel_mrst_pmic_write(0x37, 0x3f); /* Set VDDQ for Marvell 8688 */
10475+ intel_mrst_pmic_write(0x4a, 0x3f); /* Set GYMXIOCNT for Marvell 8688 */
10476+ intel_mrst_pmic_write(0x4e, 0x3f); /* Set GYMX33CNT for Marvell 8688 */
10477+
10478+ intel_mrst_pmic_write(0x3a, 0x27); /* Enables the V3p3_FLASH line,
10479+ which routes to VIO_X1 and VIO_X2
10480+ on the MRVL8688 */
10481+
10482+ intel_mrst_pmic_write(0x4b, 0x27); /* Enable V1p2_VWYMXA for MRVL8688 */
10483+ intel_mrst_pmic_write(0x4c, 0x27); /* Enable V1p8_VWYMXARF for
10484+ MRVL8688 */
10485+
10486+ return 0;
10487+}
10488+
10489+static int intel_mrst_bringup_8688_sdio2(void)
10490+{
10491+ unsigned int temp = 0;
10492+
10493+ /* Register 0xf4 has 2 GPIO lines connected to the MRVL 8688:
10494+ * bit 4: PDn
10495+ * bit 3: WiFi RESETn */
10496+
10497+ intel_mrst_pmic_read(0xf4, &temp);
10498+ temp = temp|0x8;
10499+ intel_mrst_pmic_write(0xf4, temp);
10500+
10501+ temp = temp|0x10;
10502+ intel_mrst_pmic_write(0xf4, temp);
10503+
10504+ return 0;
10505+}
10506+
10507+static int intel_mrst_bringup_EVP_sdio2_Option_spi(void)
10508+{
10509+ unsigned int temp = 0;
10510+
10511+ /* Register 0xf4 has 3 GPIO lines connected to the EVP:
10512+ * bit 0: RF_KILL_N
10513+ * bit 2: H2D_INT
10514+ * bit 5: SYS_RST_N
10515+ */
10516+
10517+ /* Register 0xf4 has 2 GPIO lines connected to the Option:
10518+ * bit 0: GPO_WWAN_DISABLE
10519+ * bit 5: GPO_WWAN_RESET
10520+ */
10521+
10522+ intel_mrst_pmic_read(0xf4, &temp);
10523+ temp = temp|0x21;
10524+ temp = temp & 0xFB;
10525+ intel_mrst_pmic_write(0xf4, temp); /* Set RF_KILL_N & SYS_RST_N to
10526+ High. H2D_INT to LOW */
10527+
10528+ intel_mrst_pmic_read(0xf4, &temp); /* Set SYS_RST_N to Low */
10529+ temp = temp & 0xDF;
10530+ mdelay(1);
10531+ intel_mrst_pmic_write(0xf4, temp);
10532+
10533+ mdelay(12); /* Try to generate a 12mS delay here if possible */
10534+ intel_mrst_pmic_read(0xf4, &temp); /* Set SYS_RST_N to High */
10535+ temp = temp | 0x20;
10536+ intel_mrst_pmic_write(0xf4, temp);
10537+
10538+ return 0;
10539+}
10540+
10541+
10542+static int __init intel_mrst_module_init(void)
10543+{
10544+ int ret = 0;
10545+
10546+/* We only need the following PMIC register initializations if
10547+ * we are using the Marvell 8688 WLAN card on the SDIO2 port */
10548+
10549+#ifdef CONFIG_8688_RC
10550+
10551+ printk(KERN_INFO "intel_mrst_module_init: bringing up power for "
10552+ "8688 WLAN on SDIO2...\n");
10553+ ret = intel_mrst_bringup_8688_sdio2();
10554+
10555+#endif /* CONFIG_8688_RC */
10556+
10557+/* We only need the following PMIC register initializations if
10558+ * we are using the EVP on SDIO2 port or Option on SPI port */
10559+
10560+#if defined(CONFIG_EVP_SDIO2) || defined(CONFIG_SPI_MRST_GTM501)
10561+
10562+ printk(KERN_INFO "intel_mrst_module_init: bringing up power for "
10563+ "EvP on SDIO2 and Option on SPI...\n");
10564+ ret = intel_mrst_bringup_EVP_sdio2_Option_spi();
10565+
10566+#endif /* CONFIG_EVP_SDIO2 || CONFIG_SPI_MRST_GTM501 */
10567+ return ret;
10568+}
10569+
10570+static void __exit intel_mrst_module_exit(void)
10571+{
10572+}
10573+
10574+module_init(intel_mrst_module_init);
10575+module_exit(intel_mrst_module_exit);
10576Index: linux-2.6.33/drivers/i2c/busses/Kconfig
10577===================================================================
10578--- linux-2.6.33.orig/drivers/i2c/busses/Kconfig
10579+++ linux-2.6.33/drivers/i2c/busses/Kconfig
10580@@ -772,4 +772,14 @@ config SCx200_ACB
10581 This support is also available as a module. If so, the module
10582 will be called scx200_acb.
10583
10584+config I2C_MRST
10585+ tristate "Intel Moorestown I2C Controller"
10586+ depends on PCI && GPIOLIB && GPIO_LANGWELL
10587+ default y
10588+ help
10589+ If you say yes to this option, support will be included for the Intel
10590+ Moorestown chipset I2C controller.
10591+ This driver can also be built as a module. If so, the module
10592+ will be called i2c-mrst.
10593+
10594 endmenu
10595Index: linux-2.6.33/drivers/i2c/busses/Makefile
10596===================================================================
10597--- linux-2.6.33.orig/drivers/i2c/busses/Makefile
10598+++ linux-2.6.33/drivers/i2c/busses/Makefile
10599@@ -72,6 +72,7 @@ obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
10600 obj-$(CONFIG_I2C_STUB) += i2c-stub.o
10601 obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
10602 obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
10603+obj-$(CONFIG_I2C_MRST) += i2c-mrst.o
10604
10605 ifeq ($(CONFIG_I2C_DEBUG_BUS),y)
10606 EXTRA_CFLAGS += -DDEBUG
10607Index: linux-2.6.33/drivers/i2c/busses/i2c-mrst.c
10608===================================================================
10609--- /dev/null
10610+++ linux-2.6.33/drivers/i2c/busses/i2c-mrst.c
10611@@ -0,0 +1,953 @@
10612+/*
10613+ * Support for Moorestown Langwell I2C chip
10614+ *
10615+ * Copyright (c) 2009 Intel Corporation.
10616+ * Copyright (c) 2009 Synopsys. Inc.
10617+ *
10618+ * This program is free software; you can redistribute it and/or modify it
10619+ * under the terms and conditions of the GNU General Public License, version
10620+ * 2, as published by the Free Software Foundation.
10621+ *
10622+ * This program is distributed in the hope it will be useful, but WITHOUT ANY
10623+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
10624+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
10625+ * details.
10626+ *
10627+ * You should have received a copy of the GNU General Public License along
10628+ * with this program; if not, write to the Free Software Foundation, Inc., 51
10629+ * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
10630+ *
10631+ */
10632+
10633+#include <linux/module.h>
10634+#include <linux/moduleparam.h>
10635+#include <linux/version.h>
10636+#include <linux/kernel.h>
10637+#include <linux/err.h>
10638+#include <linux/slab.h>
10639+#include <linux/stat.h>
10640+#include <linux/types.h>
10641+#include <linux/delay.h>
10642+#include <linux/i2c.h>
10643+#include <linux/init.h>
10644+#include <linux/pci.h>
10645+#include <linux/gpio.h>
10646+
10647+#include <linux/io.h>
10648+
10649+#include "i2c-mrst.h"
10650+
10651+#define MAX_T_POLL_COUNT 4000 /* FIXME */
10652+#define DEF_BAR 0
10653+#define VERSION "Version 0.5"
10654+
10655+#define mrst_i2c_read(reg) __raw_readl(reg)
10656+#define mrst_i2c_write(reg, val) __raw_writel((val), (reg))
10657+
10658+static int speed_mode = STANDARD;
10659+module_param(speed_mode, int, S_IRUGO);
10660+
10661+static int mrst_i2c_register_board_info(struct pci_dev *dev, int busnum)
10662+{
10663+ struct mrst_i2c_private *mrst = (struct mrst_i2c_private *)
10664+ pci_get_drvdata(dev);
10665+ int err;
10666+ unsigned short addr, irq, host;
10667+ char *name = NULL;
10668+ struct i2c_board_info *info = NULL;
10669+ unsigned int addr_off, irq_off, name_off, data_off, host_off;
10670+ unsigned int table_len, block_num, block = 0;
10671+ int i, j;
10672+ unsigned int start, len;
10673+ void __iomem *base = NULL, *ptr = NULL;
10674+
10675+ /* Determine the address of the I2C device info table area */
10676+ start = pci_resource_start(dev, 1);
10677+ len = pci_resource_len(dev, 1);
10678+ if (!start || len <= 0) {
10679+ err = -ENODEV;
10680+ return err;
10681+ }
10682+
10683+ err = pci_request_region(dev, 1, "mrst_i2c");
10684+ if (err) {
10685+ dev_err(&dev->dev, "Failed to request I2C region "
10686+ "0x%1x-0x%Lx\n", start,
10687+ (unsigned long long)pci_resource_end(dev, 1));
10688+ return err;
10689+ }
10690+
10691+ ptr = ioremap(start, len);
10692+ if (!ptr) {
10693+ dev_err(&dev->dev, "I/O memory remapping failed\n");
10694+ err = -ENOMEM;
10695+ goto err0;
10696+ }
10697+
10698+ if (len == 8) {
10699+ start = ioread32(ptr);
10700+ len = ioread32(ptr + 4);
10701+ iounmap(ptr);
10702+ dev_dbg(&dev->dev, "New FW: start 0x%x 0x%x\n", start, len);
10703+ base = ioremap(start, len);
10704+ } else {
10705+ dev_dbg(&dev->dev, "this is an old FW\n");
10706+ base = ptr;
10707+ }
10708+
10709+ /* Initialization */
10710+ name = kzalloc(sizeof(char) * NAME_LENGTH, GFP_KERNEL);
10711+ if (name == NULL) {
10712+ err = -ENOMEM;
10713+ goto err1;
10714+ }
10715+
10716+ info = kzalloc(sizeof(struct i2c_board_info), GFP_KERNEL);
10717+ if (info == NULL) {
10718+ dev_err(&dev->dev,
10719+ "Can't allocate interface for i2c_board_info\n");
10720+ err = -ENOMEM;
10721+ goto err2;
10722+ }
10723+
10724+ /* Get I2C info table length */
10725+ table_len = ioread32(base + I2C_INFO_TABLE_LENGTH);
10726+
10727+ /* Calculate the number of I2C devices */
10728+ block_num = (table_len - HEAD_LENGTH)/BLOCK_LENGTH;
10729+ dev_dbg(&dev->dev, "the number of I2C device blocks is %d\n", block_num);
10730+ if (!block_num)
10731+ /* No I2C device info */
10732+ goto err3;
10733+
10734+ /* Initialize mrst_i2c_info array */
10735+ mrst->mrst_i2c_info = kzalloc(sizeof(struct i2c_board_info) *
10736+ block_num, GFP_KERNEL);
10737+ if (mrst->mrst_i2c_info == NULL) {
10738+ dev_err(&dev->dev,
10739+ "Can't allocate interface for i2c_board_info\n");
10740+ err = -ENOMEM;
10741+ goto err3;
10742+ }
10743+
10744+ mrst->data = kzalloc(sizeof(*mrst->data) * block_num, GFP_KERNEL);
10745+ if (mrst->data == NULL) {
10746+ dev_err(&dev->dev,
10747+ "Can't allocate interface for per device data\n");
10748+ err = -ENOMEM;
10749+ goto err4;
10750+ }
10751+
10752+ for (i = 0; i < block_num; i++) {
10753+ /* I2C device info block offsets */
10754+ host_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i;
10755+ addr_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_ADDR;
10756+ irq_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_IRQ;
10757+ name_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_NAME;
10758+ data_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_INFO;
10759+
10760+ /* Read PCI config table */
10761+ host = ioread16(base + host_off);
10762+ if (host != busnum)
10763+ continue;
10764+ addr = ioread16(base + addr_off);
10765+ irq = ioread16(base + irq_off);
10766+ for (j = 0; j < NAME_LENGTH; j++)
10767+ name[j] = ioread8(base + name_off + j);
10768+
10769+ for (j = 0; j < INFO_LENGTH; j++)
10770+ mrst->data[i][j] = ioread8(base + data_off + j);
10771+ dev_dbg(&dev->dev, "after reading PCI config table: name = %s,"
10772+ " address = %x\n", name, addr);
10773+
10774+ /* Fill in i2c_board_info struct */
10775+ memcpy(info->type, name, NAME_LENGTH);
10776+ info->platform_data = mrst->data[i];
10777+ info->addr = addr;
10778+ info->irq = irq;
10779+
10780+ /* Add to mrst_i2c_info array */
10781+ memcpy(mrst->mrst_i2c_info + block, info,
10782+ sizeof(struct i2c_board_info));
10783+ block++;
10784+ }
10785+
10786+ /* Register i2c board info */
10787+ err = i2c_register_board_info(busnum, mrst->mrst_i2c_info, block);
10788+ goto err3;
10789+
10790+/* Clean up */
10791+err4:
10792+ kfree(mrst->mrst_i2c_info);
10793+err3:
10794+ kfree(info);
10795+err2:
10796+ kfree(name);
10797+err1:
10798+ iounmap(base);
10799+err0:
10800+ pci_release_region(dev, 1);
10801+ return err;
10802+}
10803+/* End update */
10804+
10805+/**
10806+ * mrst_i2c_disable - Disable I2C controller
10807+ * @adap: struct pointer to i2c_adapter
10808+ *
10809+ * Return Value:
10810+ * 0 success
10811+ * -EBUSY if device is busy
10812+ * -ETIMEDOUT if i2c cannot be disabled within the given time
10813+ *
10814+ * The I2C bus state should be checked prior to disabling the hardware. If the
10815+ * bus is not idle, an errno is returned. Write "0" to IC_ENABLE to disable the
10816+ * I2C controller.
10817+ */
10818+static int mrst_i2c_disable(struct i2c_adapter *adap)
10819+{
10820+ struct mrst_i2c_private *i2c =
10821+ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
10822+
10823+ int count = 0;
10824+
10825+ /* Check if device is busy */
10826+ dev_dbg(&adap->dev, "mrst i2c disable\n");
10827+ while (mrst_i2c_read(i2c->base + IC_STATUS) & 0x1) {
10828+ dev_dbg(&adap->dev, "i2c is busy, count is %d\n", count);
10829+ if (count++ > 10000)
10830+ return -EBUSY;
10831+ }
10832+
10833+ /* Set IC_ENABLE to 0 */
10834+ mrst_i2c_write(i2c->base + IC_ENABLE, 0);
10835+
10836+ /* Disable all interrupts */
10837+ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
10838+
10839+ /* Clear all interrupts */
10840+ mrst_i2c_read(i2c->base + IC_CLR_INTR);
10841+
10842+ return 0;
10843+}
10844+
10845+/**
10846+ * mrst_i2c_hwinit - Initialize the I2C hardware registers. This function will
10847+ * be called in mrst_i2c_probe() before device registration.
10848+ * @dev: pci device struct pointer
10849+ *
10850+ * Return Values:
10851+ * 0 success
10852+ * -EBUSY i2c cannot be disabled
10853+ * -ETIMEDOUT i2c cannot be disabled
10854+ * -EFAULT If APB data width is not 32-bit wide
10855+ *
10856+ * I2C should be disabled prior to other register operations. If that fails, an
10857+ * errno is returned. Mask and clear all interrupts first. Then set the common
10858+ * registers which will not be modified during normal transfers, including the
10859+ * control register, FIFO thresholds and clock frequency. Finally, check the
10860+ * APB data width.
10861+ */
10862+static int __devinit mrst_i2c_hwinit(struct pci_dev *dev)
10863+{
10864+ struct mrst_i2c_private *i2c =
10865+ (struct mrst_i2c_private *)pci_get_drvdata(dev);
10866+ int err = 0;
10867+
10868+ /* Disable i2c first */
10869+ err = mrst_i2c_disable(i2c->adap);
10870+ if (err)
10871+ return err;
10872+
10873+ /* Disable all interrupts */
10874+ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
10875+
10876+ /* Clear all interrupts */
10877+ mrst_i2c_read(i2c->base + IC_CLR_INTR);
10878+
10879+ /*
10880+ * Setup clock frequency and speed mode
10881+ * Enable restart condition,
10882+ * enable master FSM, disable slave FSM,
10883+ * use target address when initiating transfer
10884+ */
10885+ switch (speed_mode) {
10886+ case STANDARD:
10887+ mrst_i2c_write(i2c->base + IC_CON,
10888+ SLV_DIS | RESTART | STANDARD_MODE | MASTER_EN);
10889+ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x75);
10890+ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0x7c);
10891+ break;
10892+ case FAST:
10893+ mrst_i2c_write(i2c->base + IC_CON,
10894+ SLV_DIS | RESTART | FAST_MODE | MASTER_EN);
10895+ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x15);
10896+ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0x21);
10897+ break;
10898+ case HIGH:
10899+ mrst_i2c_write(i2c->base + IC_CON,
10900+ SLV_DIS | RESTART | HIGH_MODE | MASTER_EN);
10901+ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x7);
10902+ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0xE);
10903+ break;
10904+ default:
10905+ ;
10906+ }
10907+
10908+	/* Set transmit & receive FIFO thresholds */
10909+ mrst_i2c_write(i2c->base + IC_RX_TL, 0x3);
10910+ mrst_i2c_write(i2c->base + IC_TX_TL, 0x3);
10911+
10912+ mrst_i2c_write(i2c->base + IC_ENABLE, 1);
10913+
10914+ return err;
10915+}
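
The SCL high/low counts programmed above are expressed in ticks of the controller clock. A rough illustration of how such counts could be derived from the minimum SCL phase times defined in i2c-mrst.h, assuming a purely hypothetical 25 MHz controller clock (the values the driver writes are board-tuned and will differ):

#include <stdio.h>

/* Minimum SCL phase times in ns (standard mode), as in i2c-mrst.h */
#define SS_MIN_SCL_HIGH 4000
#define SS_MIN_SCL_LOW  4700

int main(void)
{
    /* Hypothetical controller clock; the real Moorestown value may differ. */
    unsigned int ic_clk_khz = 25000;            /* 25 MHz */

    /* counts = phase time / clock period = t_ns * f_kHz / 1e6, rounded up */
    unsigned int hcnt = (SS_MIN_SCL_HIGH * ic_clk_khz + 999999) / 1000000;
    unsigned int lcnt = (SS_MIN_SCL_LOW  * ic_clk_khz + 999999) / 1000000;

    printf("IC_SS_SCL_HCNT ~ 0x%x, IC_SS_SCL_LCNT ~ 0x%x\n", hcnt, lcnt);
    return 0;
}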
10916+
10917+/**
10918+ * mrst_i2c_func - Return the supported I2C operations.
10919+ * @adapter: i2c_adapter struct pointer
10920+ */
10921+static u32 mrst_i2c_func(struct i2c_adapter *adapter)
10922+{
10923+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL;
10924+}
10925+
10926+/**
10927+ * mrst_i2c_invalid_address - Check whether the address in an i2c message is
10928+ * valid.
10929+ * @p: i2c_msg struct pointer
10930+ *
10931+ * Return Values:
10932+ * 0 if the address is valid
10933+ * 1 if the address is invalid
10934+ */
10935+static inline int mrst_i2c_invalid_address(const struct i2c_msg *p)
10936+{
10937+ int ret = ((p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN)
10938+ && (p->addr > 0x7f)));
10939+ return ret;
10940+}
10941+
10942+/**
10943+ * mrst_i2c_address_neq - Check whether the addresses of two i2c messages
10944+ * differ.
10945+ * @p1: first i2c_msg
10946+ * @p2: second i2c_msg
10947+ *
10948+ * Return Values:
10949+ * 0 if the addresses are equal
10950+ * 1 if not equal
10951+ *
10952+ * Within a single transfer, an I2C client may need to send its address more
10953+ * than once, so the addresses of all messages must be checked for equality.
10954+ */
10955+static inline int mrst_i2c_address_neq(const struct i2c_msg *p1,
10956+ const struct i2c_msg *p2)
10957+{
10958+ int ret = ((p1->addr != p2->addr) || ((p1->flags & (I2C_M_TEN))
10959+ != ((p2->flags) & (I2C_M_TEN))));
10960+ return ret;
10961+}
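
The two helpers above encode the 7-bit/10-bit address validity rule and the requirement that every message in a transfer target the same slave. A stand-alone sketch of the same checks with a minimal message structure (the field names and the flag value mirror the kernel's i2c_msg and I2C_M_TEN, but this is illustrative code, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define MY_I2C_M_TEN 0x0010   /* mirrors the kernel's I2C_M_TEN flag value */

struct my_msg {
    uint16_t addr;
    uint16_t flags;
};

/* 10-bit addresses must fit in 10 bits, 7-bit addresses in 7 bits. */
static int invalid_address(const struct my_msg *p)
{
    return (p->addr > 0x3ff) ||
           (!(p->flags & MY_I2C_M_TEN) && p->addr > 0x7f);
}

/* All messages in one transfer must target the same slave, same width. */
static int address_neq(const struct my_msg *a, const struct my_msg *b)
{
    return (a->addr != b->addr) ||
           ((a->flags & MY_I2C_M_TEN) != (b->flags & MY_I2C_M_TEN));
}

int main(void)
{
    struct my_msg m7  = { 0x50, 0 };              /* typical 7-bit address */
    struct my_msg m10 = { 0x150, MY_I2C_M_TEN };  /* 10-bit address */

    printf("0x50 valid: %d\n", !invalid_address(&m7));
    printf("0x150/10-bit valid: %d\n", !invalid_address(&m10));
    printf("same target: %d\n", !address_neq(&m7, &m10));
    return 0;
}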
10962+
10963+/**
10964+ * mrst_i2c_abort - Handle transfer aborts and print error messages.
10965+ * @adap: i2c_adapter struct pointer
10966+ *
10967+ * By reading the IC_TX_ABRT_SOURCE register, the various transfer errors can
10968+ * be distinguished. No circumstances have been found so far in which multiple
10969+ * errors occur simultaneously, so the register value is simply compared
10970+ * directly.
10971+ *
10972+ * Finally the error bits are cleared. (Note that clearing the ABRT_SBYTE_NORSTRT
10973+ * bit needs a few extra steps.)
10974+ */
10975+static void mrst_i2c_abort(struct i2c_adapter *adap)
10976+{
10977+ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
10978+ i2c_get_adapdata(adap);
10979+
10980+	/* Read abort source register */
10981+ int abort = mrst_i2c_read(i2c->base + IC_TX_ABRT_SOURCE);
10982+
10983+ dev_dbg(&adap->dev, "Abort: ");
10984+
10985+ /* Single transfer error check:
10986+	 * According to the databook, the TX/RX FIFOs are flushed when
10987+	 * the abort interrupt occurs.
10988+ */
10989+ switch (abort) {
10990+ case (ABRT_MASTER_DIS):
10991+ dev_err(&adap->dev,
10992+			"initiate Master operation with Master mode "
10993+ "disabled.\n");
10994+
10995+ break;
10996+ case (ABRT_10B_RD_NORSTRT):
10997+ dev_err(&adap->dev,
10998+			"RESTART disabled and master sends READ cmd in 10-BIT "
10999+ "addressing.\n");
11000+ break;
11001+ case (ABRT_SBYTE_NORSTRT):
11002+ dev_err(&adap->dev,
11003+			"RESTART disabled and user is trying to send START "
11004+ "byte.\n");
11005+ /* Page 141 data book */
11006+ mrst_i2c_write(i2c->base + IC_TX_ABRT_SOURCE,
11007+ !(ABRT_SBYTE_NORSTRT));
11008+ mrst_i2c_write(i2c->base + IC_CON, RESTART);
11009+ mrst_i2c_write(i2c->base + IC_TAR, !(IC_TAR_SPECIAL));
11010+ break;
11011+ case (ABRT_SBYTE_ACKDET):
11012+ dev_err(&adap->dev,
11013+ "START byte was acknowledged.\n");
11014+ break;
11015+ case (ABRT_TXDATA_NOACK):
11016+ dev_err(&adap->dev,
11017+ "No acknowledge received from slave.\n");
11018+ break;
11019+ case (ABRT_10ADDR2_NOACK):
11020+ dev_err(&adap->dev,
11021+			"The 2nd address byte of 10-bit address not "
11022+ "acknowledged.\n");
11023+ break;
11024+ case (ABRT_10ADDR1_NOACK):
11025+ dev_dbg(&adap->dev,
11026+			"The 1st address byte of 10-bit address not "
11027+ "acknowledged.\n");
11028+ break;
11029+ case (ABRT_7B_ADDR_NOACK):
11030+ dev_err(&adap->dev,
11031+ "7-bit address not acknowledged.\n");
11032+ break;
11033+ default:
11034+		break;
11035+ }
11036+
11037+ /* Clear TX_ABRT bit */
11038+ mrst_i2c_read(i2c->base + IC_CLR_TX_ABRT);
11039+}
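
The switch above matches the whole IC_TX_ABRT_SOURCE value against single-bit constants, which works as long as only one abort bit is set at a time. A table-driven sketch (illustrative only, using a subset of the bit definitions from i2c-mrst.h) that would also report several set bits at once:

#include <stdio.h>
#include <stdint.h>

/* Bit positions as defined in i2c-mrst.h */
#define ABRT_MASTER_DIS     (1 << 11)
#define ABRT_TXDATA_NOACK   (1 << 3)
#define ABRT_7B_ADDR_NOACK  (1 << 0)

static const struct { uint32_t bit; const char *msg; } abort_msgs[] = {
    { ABRT_MASTER_DIS,    "master operation with master mode disabled" },
    { ABRT_TXDATA_NOACK,  "no acknowledge received from slave" },
    { ABRT_7B_ADDR_NOACK, "7-bit address not acknowledged" },
};

static void report_abort(uint32_t source)
{
    size_t i;

    for (i = 0; i < sizeof(abort_msgs) / sizeof(abort_msgs[0]); i++)
        if (source & abort_msgs[i].bit)
            printf("abort: %s\n", abort_msgs[i].msg);
}

int main(void)
{
    report_abort(ABRT_TXDATA_NOACK | ABRT_7B_ADDR_NOACK);
    return 0;
}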
11040+
11041+/**
11042+ * xfer_read - Internal function to implement master read transfer.
11043+ * @adap: i2c_adapter struct pointer
11044+ * @buf: buffer in i2c_msg
11045+ * @length: number of bytes to be read
11046+ *
11047+ * Return Values:
11048+ * 0 if the read transfer succeeds
11049+ * -ETIMEDOUT if the controller status cannot be read in time
11050+ * -EINVAL if a transfer abort occurred
11051+ *
11052+ * For every byte, a "READ" command will be loaded into IC_DATA_CMD prior to
11053+ * data transfer. The actual "read" is performed once the receive FIFO
11054+ * signals that data is available.
11055+ *
11056+ * Note that more than one interrupt condition may be raised, so
11057+ * IC_RAW_INTR_STAT is read to separate errors from actual data.
11058+ */
11059+static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
11060+{
11061+ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
11062+ i2c_get_adapdata(adap);
11063+ uint32_t reg_val;
11064+ int i = length;
11065+ unsigned count = 0;
11066+ uint32_t bit_get = 1 << 3; /* receive fifo not empty */
11067+
11068+ while (i--)
11069+ mrst_i2c_write(i2c->base + IC_DATA_CMD, (uint16_t)0x100);
11070+
11071+ i = length;
11072+ while (i--) {
11073+ count = 0;
11074+ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
11075+ while ((reg_val & bit_get) == 0) {
11076+ reg_val = mrst_i2c_read(i2c->base + IC_RAW_INTR_STAT);
11077+ if ((reg_val & 0x40) == 0x40)
11078+ goto read_abrt;
11079+ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
11080+ if (count++ > MAX_T_POLL_COUNT)
11081+ goto read_loop;
11082+ }
11083+
11084+ reg_val = mrst_i2c_read(i2c->base + IC_DATA_CMD);
11085+ *buf++ = reg_val;
11086+ }
11087+
11088+ return 0;
11089+
11090+read_loop:
11091+ dev_err(&adap->dev, "Time out in read\n");
11092+ return -ETIMEDOUT;
11093+read_abrt:
11094+ dev_err(&adap->dev, "Abort from read\n");
11095+ mrst_i2c_abort(adap);
11096+ return -EINVAL;
11097+}
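
xfer_read() is a two-phase operation: it first queues one READ command per requested byte into IC_DATA_CMD, then drains the receive FIFO as data arrives. A compressed stand-alone model of that flow, with an in-memory FIFO standing in for the hardware (purely illustrative):

#include <stdio.h>
#include <stdint.h>

#define IC_RD     (1 << 8)       /* command bit: read */
#define FIFO_SIZE 16

static uint8_t fifo[FIFO_SIZE];
static int fifo_head, fifo_tail;

/* Pretend the slave answers every READ command with an incrementing byte. */
static void issue_cmd(uint16_t cmd)
{
    static uint8_t next = 0xa0;

    if (cmd & IC_RD)
        fifo[fifo_tail++ % FIFO_SIZE] = next++;
}

static int rx_not_empty(void) { return fifo_head != fifo_tail; }
static uint8_t pop_rx(void)   { return fifo[fifo_head++ % FIFO_SIZE]; }

int main(void)
{
    uint8_t buf[4];
    int i, len = 4;

    for (i = 0; i < len; i++)        /* phase 1: queue READ commands */
        issue_cmd(IC_RD);

    for (i = 0; i < len; i++) {      /* phase 2: drain the RX FIFO */
        while (!rx_not_empty())
            ;                        /* the driver bounds this poll */
        buf[i] = pop_rx();
    }

    for (i = 0; i < len; i++)
        printf("%02x ", buf[i]);
    printf("\n");
    return 0;
}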
11098+
11099+/**
11100+ * xfer_write - Internal function to implement master write transfer.
11101+ * @adap: i2c_adapter struct pointer
11102+ * @buf: buffer in i2c_msg
11103+ * @length: number of bytes to be written
11104+ *
11105+ * Return Values:
11106+ * 0 if the write transfer succeeds
11107+ * -ETIMEDOUT if the controller status cannot be read in time
11108+ * -EINVAL if a transfer abort occurred
11109+ *
11110+ * For every byte, a "WRITE" command will be loaded into IC_DATA_CMD prior to
11111+ * data transfer. The actual "write" completes once the transmit FIFO
11112+ * has fully drained.
11113+ *
11114+ * Note that more than one interrupt condition may be raised, so
11115+ * IC_RAW_INTR_STAT is read to separate errors from a completed transfer.
11116+ */
11117+static int xfer_write(struct i2c_adapter *adap,
11118+ unsigned char *buf, int length)
11119+{
11120+ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
11121+ i2c_get_adapdata(adap);
11122+
11123+ int i;
11124+ uint32_t reg_val;
11125+ unsigned count = 0;
11126+ uint32_t bit_get = 1 << 2; /* transmit fifo completely empty */
11127+
11128+ for (i = 0; i < length; i++)
11129+ mrst_i2c_write(i2c->base + IC_DATA_CMD,
11130+ (uint16_t)(*(buf + i)));
11131+
11132+ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
11133+ while ((reg_val & bit_get) == 0) {
11134+ if (count++ > MAX_T_POLL_COUNT)
11135+ goto write_loop;
11136+ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
11137+ }
11138+
11139+ udelay(100);
11140+ reg_val = mrst_i2c_read(i2c->base + IC_RAW_INTR_STAT);
11141+ if ((reg_val & 0x40) == 0x40)
11142+ goto write_abrt;
11143+
11144+ return 0;
11145+
11146+write_loop:
11147+ dev_err(&adap->dev, "Time out in write\n");
11148+ return -ETIMEDOUT;
11149+write_abrt:
11150+ dev_err(&adap->dev, "Abort from write\n");
11151+ mrst_i2c_abort(adap);
11152+ return -EINVAL;
11153+}
11154+
11155+static int mrst_i2c_setup(struct i2c_adapter *adap, struct i2c_msg *pmsg)
11156+{
11157+ struct mrst_i2c_private *i2c =
11158+ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
11159+ int err;
11160+ uint32_t reg_val;
11161+ uint32_t bit_mask;
11162+
11163+ /* Disable device first */
11164+ err = mrst_i2c_disable(adap);
11165+ if (err) {
11166+ dev_err(&adap->dev,
11167+ "Cannot disable i2c controller, timeout!\n");
11168+ return -ETIMEDOUT;
11169+ }
11170+
11171+
11172+ reg_val = mrst_i2c_read(i2c->base + IC_ENABLE);
11173+ if (reg_val & 0x1) {
11174+ dev_dbg(&adap->dev, "i2c busy, can't setup\n");
11175+ return -EINVAL;
11176+ }
11177+
11178+ /* set the speed mode to standard */
11179+ reg_val = mrst_i2c_read(i2c->base + IC_CON);
11180+ if ((reg_val & (1<<1 | 1<<2)) != 1<<1) {
11181+ dev_dbg(&adap->dev, "set standard mode\n");
11182+ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~0x6)) | 1<<1);
11183+ }
11184+
11185+ reg_val = mrst_i2c_read(i2c->base + IC_CON);
11186+ /* use 7-bit addressing */
11187+ if ((reg_val & (1<<4)) != 0x0) {
11188+ dev_dbg(&adap->dev, "set i2c 7 bit address mode\n");
11189+ mrst_i2c_write(i2c->base + IC_CON, reg_val & (~(1<<4)));
11190+ }
11191+
11192+	/* enable restart conditions */
11193+ reg_val = mrst_i2c_read(i2c->base + IC_CON);
11194+ if ((reg_val & (1<<5)) != 1<<5) {
11195+ dev_dbg(&adap->dev, "enable restart conditions\n");
11196+ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~(1 << 5)))
11197+ | 1 << 5);
11198+ }
11199+
11200+ /* enable master FSM */
11201+ reg_val = mrst_i2c_read(i2c->base + IC_CON);
11202+ dev_dbg(&adap->dev, "ic_con reg_val is 0x%x\n", reg_val);
11203+ if ((reg_val & (1<<6)) != 1<<6) {
11204+ dev_dbg(&adap->dev, "enable master FSM\n");
11205+ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~(1 << 6)))
11206+ | 1<<6);
11207+ dev_dbg(&adap->dev, "ic_con reg_val is 0x%x\n", reg_val);
11208+ }
11209+
11210+ /* use target address when initiating transfer */
11211+ reg_val = mrst_i2c_read(i2c->base + IC_TAR);
11212+ bit_mask = 1 << 11 | 1 << 10;
11213+
11214+ if ((reg_val & bit_mask) != 0x0) {
11215+		dev_dbg(&adap->dev, "WR: use target address when initiating "
11216+ "transfer, i2c_tx_target\n");
11217+ mrst_i2c_write(i2c->base + IC_TAR, reg_val & ~bit_mask);
11218+ }
11219+
11220+ /* set target address to the I2C slave address */
11221+	dev_dbg(&adap->dev, "set target address to the I2C slave address, "
11222+ "addr is %x\n", pmsg->addr);
11223+ mrst_i2c_write(i2c->base + IC_TAR, pmsg->addr
11224+ | (pmsg->flags & I2C_M_TEN ? IC_TAR_10BIT_ADDR : 0));
11225+
11226+ /* Enable I2C controller */
11227+ mrst_i2c_write(i2c->base + IC_ENABLE, ENABLE);
11228+
11229+ reg_val = mrst_i2c_read(i2c->base + IC_CON);
11230+
11231+ return 0;
11232+}
11233+
11234+/**
11235+ * mrst_i2c_xfer - Main master transfer routine.
11236+ * @adap: i2c_adapter struct pointer
11237+ * @pmsg: i2c_msg struct pointer
11238+ * @num: number of i2c_msg
11239+ *
11240+ * Return Values:
11241+ * + number of messages transferred
11242+ * -ETIMEDOUT If cannot disable I2C controller or read IC_STATUS
11243+ * -EINVAL If the address in i2c_msg is invalid
11244+ *
11245+ * This function will be registered in i2c-core and exposed to external
11246+ * I2C clients.
11247+ * 1. Disable I2C controller
11248+ * 2. Unmask three interrupts: RX_FULL, TX_EMPTY, TX_ABRT
11249+ * 3. Check if address in i2c_msg is valid
11250+ * 4. Enable I2C controller
11251+ * 5. Perform real transfer (call xfer_read or xfer_write)
11252+ * 6. Wait until the current transfer is finished (check bus state)
11253+ * 7. Mask and clear all interrupts
11254+ */
11255+static int mrst_i2c_xfer(struct i2c_adapter *adap,
11256+ struct i2c_msg *pmsg,
11257+ int num)
11258+{
11259+ struct mrst_i2c_private *i2c =
11260+ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
11261+ int i, err;
11262+
11263+ dev_dbg(&adap->dev, "mrst_i2c_xfer, process %d msg(s)\n", num);
11264+	dev_dbg(&adap->dev, "slave address is %x\n", pmsg->addr);
11265+
11266+	/* if the number of messages equals 0 */
11267+ if (num == 0)
11268+ return 0;
11269+
11270+	/* Check the sanity of the passed messages. */
11271+ if (unlikely(mrst_i2c_invalid_address(&pmsg[0]))) {
11272+ dev_err(&adap->dev, "Invalid address 0x%03x (%d-bit)\n",
11273+ pmsg[0].addr, pmsg[0].flags & I2C_M_TEN ? 10 : 7);
11274+ return -EINVAL;
11275+ }
11276+ for (i = 0; i < num; i++) {
11277+ /* Message address equal? */
11278+ if (unlikely(mrst_i2c_address_neq(&pmsg[0], &pmsg[i]))) {
11279+ dev_err(&adap->dev, "Invalid address in msg[%d]\n", i);
11280+ return -EINVAL;
11281+ }
11282+ }
11283+
11284+ if (mrst_i2c_setup(adap, pmsg))
11285+ return -EINVAL;
11286+
11287+ for (i = 0; i < num; i++) {
11288+ dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
11289+ pmsg->flags & I2C_M_RD ? "read" : "writ",
11290+ pmsg->len, pmsg->len > 1 ? "s" : "",
11291+ pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
11292+
11293+
11294+ /* Read or Write */
11295+ if (pmsg->len && pmsg->buf) {
11296+ if (pmsg->flags & I2C_M_RD) {
11297+ dev_dbg(&adap->dev, "I2C_M_RD\n");
11298+ err = xfer_read(adap, pmsg->buf, pmsg->len);
11299+ } else {
11300+ dev_dbg(&adap->dev, "I2C_M_WR\n");
11301+ err = xfer_write(adap, pmsg->buf, pmsg->len);
11302+ }
11303+ if (err < 0)
11304+ goto err_1;
11305+ }
11306+ dev_dbg(&adap->dev, "msg[%d] transfer complete\n", i);
11307+ pmsg++; /* next message */
11308+ }
11309+ goto exit;
11310+
11311+err_1:
11312+ i = err;
11313+exit:
11314+ /* Mask interrupts */
11315+ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
11316+ /* Clear all interrupts */
11317+ mrst_i2c_read(i2c->base + IC_CLR_INTR);
11318+
11319+ return i;
11320+}
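
Once the adapter is registered, clients reach mrst_i2c_xfer() through the standard i2c core. A hedged sketch of a typical client-side register read, passing the write/read pair in a single i2c_transfer() call so that both messages target the same slave address as this driver requires (example_read_reg and its register layout are invented for illustration; needs <linux/i2c.h>):

/* Sketch only: "client" is a struct i2c_client the caller already owns. */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};
	int ret = i2c_transfer(client->adapter, msgs, 2);

	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}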
11321+
11322+static int mrst_gpio_init(int sda, int scl)
11323+{
11324+ if (gpio_request(sda, "I2C_SDA"))
11325+ goto err_sda;
11326+
11327+ if (gpio_request(scl, "I2C_SCL"))
11328+ goto err_scl;
11329+
11330+ return 0;
11331+err_scl:
11332+ gpio_free(sda);
11333+err_sda:
11334+ return -1;
11335+}
11336+
11337+static struct pci_device_id mrst_i2c_ids[] = {
11338+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0802)},
11339+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0803)},
11340+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0804)},
11341+ {0,}
11342+};
11343+MODULE_DEVICE_TABLE(pci, mrst_i2c_ids);
11344+
11345+static struct i2c_algorithm mrst_i2c_algorithm = {
11346+ .master_xfer = mrst_i2c_xfer,
11347+ .functionality = mrst_i2c_func,
11348+};
11349+
11350+static struct pci_driver mrst_i2c_driver = {
11351+ .name = "mrst_i2c",
11352+ .id_table = mrst_i2c_ids,
11353+ .probe = mrst_i2c_probe,
11354+ .remove = __devexit_p(mrst_i2c_remove),
11355+ .suspend = NULL,
11356+ .resume = NULL,
11357+};
11358+
11359+/**
11360+ * mrst_i2c_probe - I2C controller initialization routine
11361+ * @dev: pci device
11362+ * @id: device id
11363+ *
11364+ * Return Values:
11365+ * 0 success
11366+ * -ENODEV If cannot allocate pci resource
11367+ * -ENOMEM If the register base remapping failed, or
11368+ * if kzalloc failed
11369+ *
11370+ * Initialization steps:
11371+ * 1. Request for PCI resource
11372+ * 2. Remap the start address of PCI resource to register base
11373+ * 3. Request for device memory region
11374+ * 4. Fill in the struct members of mrst_i2c_private
11375+ * 5. Call mrst_i2c_hwinit() for hardware initialization
11376+ * 6. Register I2C adapter in i2c-core
11377+ */
11378+static int __devinit mrst_i2c_probe(struct pci_dev *dev,
11379+ const struct pci_device_id *id)
11380+{
11381+ struct mrst_i2c_private *mrst;
11382+ struct i2c_adapter *adap;
11383+ unsigned int start, len;
11384+ int err, busnum = 0;
11385+ void __iomem *base = NULL;
11386+ int gpio_sda = 0, gpio_scl = 0;
11387+
11388+ err = pci_enable_device(dev);
11389+ if (err) {
11390+ dev_err(&dev->dev, "Failed to enable I2C PCI device (%d)\n",
11391+ err);
11392+ goto exit;
11393+ }
11394+
11395+ /* Determine the address of the I2C area */
11396+ start = pci_resource_start(dev, DEF_BAR);
11397+ len = pci_resource_len(dev, DEF_BAR);
11398+ if (!start || len <= 0) {
11399+ dev_err(&dev->dev, "Base address initialization failed\n");
11400+ err = -ENODEV;
11401+ goto exit;
11402+ }
11403+ dev_dbg(&dev->dev, "mrst i2c resource start %x, len=%d\n",
11404+ start, len);
11405+ err = pci_request_region(dev, DEF_BAR, mrst_i2c_driver.name);
11406+ if (err) {
11407+ dev_err(&dev->dev, "Failed to request I2C region "
11408+ "0x%1x-0x%Lx\n", start,
11409+ (unsigned long long)pci_resource_end(dev, DEF_BAR));
11410+ goto exit;
11411+ }
11412+
11413+ base = ioremap_nocache(start, len);
11414+ if (!base) {
11415+ dev_err(&dev->dev, "I/O memory remapping failed\n");
11416+ err = -ENOMEM;
11417+ goto fail0;
11418+ }
11419+
11420+ /* Allocate the per-device data structure, mrst_i2c_private */
11421+ mrst = kzalloc(sizeof(struct mrst_i2c_private), GFP_KERNEL);
11422+ if (mrst == NULL) {
11423+ dev_err(&dev->dev, "Can't allocate interface!\n");
11424+ err = -ENOMEM;
11425+ goto fail1;
11426+ }
11427+
11428+ adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
11429+ if (adap == NULL) {
11430+ dev_err(&dev->dev, "Can't allocate interface!\n");
11431+ err = -ENOMEM;
11432+ goto fail2;
11433+ }
11434+
11435+ /* Initialize struct members */
11436+ snprintf(adap->name, sizeof(adap->name), "mrst_i2c");
11437+ adap->owner = THIS_MODULE;
11438+ adap->algo = &mrst_i2c_algorithm;
11439+ adap->class = I2C_CLASS_HWMON;
11440+ adap->dev.parent = &dev->dev;
11441+ mrst->adap = adap;
11442+ mrst->base = base;
11443+ mrst->speed = speed_mode;
11444+
11445+ pci_set_drvdata(dev, mrst);
11446+ i2c_set_adapdata(adap, mrst);
11447+
11448+ /* Initialize i2c controller */
11449+ err = mrst_i2c_hwinit(dev);
11450+ if (err < 0) {
11451+ dev_err(&dev->dev, "I2C interface initialization failed\n");
11452+ goto fail3;
11453+ }
11454+
11455+ switch (id->device) {
11456+ case 0x0802:
11457+		dev_dbg(&adap->dev, "I2C0\n");
11458+ gpio_sda = GPIO_I2C_0_SDA;
11459+ gpio_scl = GPIO_I2C_0_SCL;
11460+ adap->nr = busnum = 0;
11461+ break;
11462+ case 0x0803:
11463+		dev_dbg(&adap->dev, "I2C1\n");
11464+ gpio_sda = GPIO_I2C_1_SDA;
11465+ gpio_scl = GPIO_I2C_1_SCL;
11466+ adap->nr = busnum = 1;
11467+ break;
11468+ case 0x0804:
11469+		dev_dbg(&adap->dev, "I2C2\n");
11470+ gpio_sda = GPIO_I2C_2_SDA;
11471+ gpio_scl = GPIO_I2C_2_SCL;
11472+ adap->nr = busnum = 2;
11473+ break;
11474+ default:
11475+ ;
11476+ }
11477+
11478+ /* Config GPIO pin for I2C */
11479+ err = mrst_gpio_init(gpio_sda, gpio_scl);
11480+ if (err) {
11481+ dev_err(&dev->dev, "GPIO %s registration failed\n",
11482+ adap->name);
11483+ goto fail3;
11484+ }
11485+
11486+ /* Register i2c board info */
11487+ /*mrst_i2c_register_board_info(dev, busnum);*/
11488+
11489+ /* Adapter registration */
11490+ err = i2c_add_numbered_adapter(adap);
11491+ if (err) {
11492+ dev_err(&dev->dev, "Adapter %s registration failed\n",
11493+ adap->name);
11494+ goto fail3;
11495+ }
11496+
11497+ dev_dbg(&dev->dev, "MRST I2C bus %d driver bind success.\n", busnum);
11498+ return 0;
11499+
11500+fail3:
11501+ i2c_set_adapdata(adap, NULL);
11502+ pci_set_drvdata(dev, NULL);
11503+ kfree(adap);
11504+fail2:
11505+ kfree(mrst);
11506+fail1:
11507+ iounmap(base);
11508+fail0:
11509+ pci_release_region(dev, DEF_BAR);
11510+exit:
11511+ return err;
11512+}
11513+
11514+static void __devexit mrst_i2c_remove(struct pci_dev *dev)
11515+{
11516+ struct mrst_i2c_private *mrst = (struct mrst_i2c_private *)
11517+ pci_get_drvdata(dev);
11518+ if (i2c_del_adapter(mrst->adap))
11519+		dev_err(&dev->dev, "Failed to delete i2c adapter\n");
11520+
11521+ kfree(mrst->mrst_i2c_info);
11522+ kfree(mrst->data);
11523+
11524+ switch (dev->device) {
11525+ case 0x0802:
11526+ gpio_free(GPIO_I2C_0_SDA);
11527+ gpio_free(GPIO_I2C_0_SCL);
11528+ break;
11529+ case 0x0803:
11530+ gpio_free(GPIO_I2C_1_SDA);
11531+ gpio_free(GPIO_I2C_1_SCL);
11532+ break;
11533+ case 0x0804:
11534+ gpio_free(GPIO_I2C_2_SDA);
11535+ gpio_free(GPIO_I2C_2_SCL);
11536+ break;
11537+ default:
11538+ break;
11539+ }
11540+
11541+ pci_set_drvdata(dev, NULL);
11542+ iounmap(mrst->base);
11543+ kfree(mrst);
11544+ pci_release_region(dev, DEF_BAR);
11545+}
11546+
11547+static int __init mrst_i2c_init(void)
11548+{
11549+ printk(KERN_NOTICE "Moorestown I2C driver %s\n", VERSION);
11550+ return pci_register_driver(&mrst_i2c_driver);
11551+}
11552+
11553+static void __exit mrst_i2c_exit(void)
11554+{
11555+ pci_unregister_driver(&mrst_i2c_driver);
11556+}
11557+
11558+module_init(mrst_i2c_init);
11559+module_exit(mrst_i2c_exit);
11560+
11561+MODULE_AUTHOR("Ba Zheng <zheng.ba@intel.com>");
11562+MODULE_DESCRIPTION("I2C driver for Moorestown Platform");
11563+MODULE_LICENSE("GPL");
11564+MODULE_VERSION(VERSION);
11565Index: linux-2.6.33/drivers/i2c/busses/i2c-mrst.h
11566===================================================================
11567--- /dev/null
11568+++ linux-2.6.33/drivers/i2c/busses/i2c-mrst.h
11569@@ -0,0 +1,282 @@
11570+#ifndef __I2C_MRST_H
11571+#define __I2C_MRST_H
11572+
11573+#include <linux/i2c.h>
11574+
11575+/* Update for 2.6.27 kernel by Wen */
11576+
11577+/* PCI config table macros */
11578+/* Offsets */
11579+#define I2C_INFO_TABLE_LENGTH 4
11580+#define I2C_INFO_DEV_BLOCK 10
11581+#define I2C_DEV_ADDR 2
11582+#define I2C_DEV_IRQ 4
11583+#define I2C_DEV_NAME 6
11584+#define I2C_DEV_INFO 22
11585+/* Length */
11586+#define HEAD_LENGTH 10
11587+#define BLOCK_LENGTH 32
11588+#define ADDR_LENGTH 2
11589+#define IRQ_LENGTH 2
11590+#define NAME_LENGTH 16
11591+#define INFO_LENGTH 10
11592+
11593+struct mrst_i2c_private {
11594+ struct i2c_adapter *adap;
11595+ /* Register base address */
11596+ void __iomem *base;
11597+ /* Speed mode */
11598+ int speed;
11599+ struct i2c_board_info *mrst_i2c_info;
11600+ char (*data)[INFO_LENGTH];
11601+};
11602+
11603+/* Speed mode macros */
11604+#define STANDARD 100
11605+#define FAST 25
11606+#define HIGH 3
11607+
11608+/* Control register */
11609+#define IC_CON 0x00
11610+#define SLV_DIS (1 << 6) /* Disable slave mode */
11611+#define RESTART (1 << 5) /* Send a Restart condition */
11612+#define ADDR_10BIT (1 << 4) /* 10-bit addressing */
11613+#define STANDARD_MODE (1 << 1) /* standard mode */
11614+#define FAST_MODE (2 << 1) /* fast mode */
11615+#define HIGH_MODE (3 << 1) /* high speed mode */
11616+#define MASTER_EN (1 << 0) /* Master mode */
11617+
11618+/* Target address register */
11619+#define IC_TAR 0x04
11620+#define IC_TAR_10BIT_ADDR (1 << 12) /* 10-bit addressing */
11621+#define IC_TAR_SPECIAL (1 << 11) /* Perform special I2C cmd */
11622+#define IC_TAR_GC_OR_START	(1 << 10)	/* 0: General Call Address */
11623+ /* 1: START BYTE */
11624+
11625+/* Slave Address Register */
11626+#define IC_SAR 0x08 /* Not used in Master mode */
11627+
11628+/* High Speed Master Mode Code Address Register */
11629+#define IC_HS_MADDR 0x0c
11630+
11631+/* Rx/Tx Data Buffer and Command Register */
11632+#define IC_DATA_CMD 0x10
11633+#define IC_RD (1 << 8) /* 1: Read 0: Write */
11634+
11635+/* Standard Speed Clock SCL High Count Register */
11636+#define IC_SS_SCL_HCNT 0x14
11637+
11638+/* Standard Speed Clock SCL Low Count Register */
11639+#define IC_SS_SCL_LCNT 0x18
11640+
11641+/* Fast Speed Clock SCL High Count Register */
11642+#define IC_FS_SCL_HCNT 0x1c
11643+
11644+/* Fast Speed Clock SCL Low Count Register */
11645+#define IC_FS_SCL_LCNT 0x20
11646+
11647+/* High Speed Clock SCL High Count Register */
11648+#define IC_HS_SCL_HCNT 0x24
11649+
11650+/* High Speed Clock SCL Low Count Register */
11651+#define IC_HS_SCL_LCNT 0x28
11652+
11653+/* Interrupt Status Register */
11654+#define IC_INTR_STAT 0x2c /* Read only */
11655+#define R_GEN_CALL (1 << 11)
11656+#define R_START_DET (1 << 10)
11657+#define R_STOP_DET (1 << 9)
11658+#define R_ACTIVITY (1 << 8)
11659+#define R_RX_DONE (1 << 7)
11660+#define R_TX_ABRT (1 << 6)
11661+#define R_RD_REQ (1 << 5)
11662+#define R_TX_EMPTY (1 << 4)
11663+#define R_TX_OVER (1 << 3)
11664+#define R_RX_FULL (1 << 2)
11665+#define R_RX_OVER (1 << 1)
11666+#define R_RX_UNDER (1 << 0)
11667+
11668+/* Interrupt Mask Register */
11669+#define IC_INTR_MASK 0x30 /* Read and Write */
11670+#define M_GEN_CALL (1 << 11)
11671+#define M_START_DET (1 << 10)
11672+#define M_STOP_DET (1 << 9)
11673+#define M_ACTIVITY (1 << 8)
11674+#define M_RX_DONE (1 << 7)
11675+#define M_TX_ABRT (1 << 6)
11676+#define M_RD_REQ (1 << 5)
11677+#define M_TX_EMPTY (1 << 4)
11678+#define M_TX_OVER (1 << 3)
11679+#define M_RX_FULL (1 << 2)
11680+#define M_RX_OVER (1 << 1)
11681+#define M_RX_UNDER (1 << 0)
11682+
11683+/* Raw Interrupt Status Register */
11684+#define IC_RAW_INTR_STAT 0x34 /* Read Only */
11685+#define GEN_CALL (1 << 11) /* General call */
11686+#define START_DET	(1 << 10)	/* (RE)START occurred */
11687+#define STOP_DET	(1 << 9)	/* STOP occurred */
11688+#define ACTIVITY (1 << 8) /* Bus busy */
11689+#define RX_DONE (1 << 7) /* Not used in Master mode */
11690+#define TX_ABRT (1 << 6) /* Transmit Abort */
11691+#define RD_REQ (1 << 5) /* Not used in Master mode */
11692+#define TX_EMPTY (1 << 4) /* TX FIFO <= threshold */
11693+#define TX_OVER (1 << 3) /* TX FIFO overflow */
11694+#define RX_FULL (1 << 2) /* RX FIFO >= threshold */
11695+#define RX_OVER (1 << 1) /* RX FIFO overflow */
11696+#define RX_UNDER (1 << 0) /* RX FIFO empty */
11697+
11698+/* Receive FIFO Threshold Register */
11699+#define IC_RX_TL 0x38
11700+
11701+/* Transmit FIFO Threshold Register */
11702+#define IC_TX_TL 0x3c
11703+
11704+/* Clear Combined and Individual Interrupt Register */
11705+#define IC_CLR_INTR 0x40
11706+#define CLR_INTR (1 << 0)
11707+
11708+/* Clear RX_UNDER Interrupt Register */
11709+#define IC_CLR_RX_UNDER 0x44
11710+#define CLR_RX_UNDER (1 << 0)
11711+
11712+/* Clear RX_OVER Interrupt Register */
11713+#define IC_CLR_RX_OVER 0x48
11714+#define CLR_RX_OVER (1 << 0)
11715+
11716+/* Clear TX_OVER Interrupt Register */
11717+#define IC_CLR_TX_OVER 0x4c
11718+#define CLR_TX_OVER (1 << 0)
11719+
11720+#define IC_CLR_RD_REQ 0x50
11721+
11722+/* Clear TX_ABRT Interrupt Register */
11723+#define IC_CLR_TX_ABRT 0x54
11724+#define CLR_TX_ABRT (1 << 0)
11725+
11726+#define IC_CLR_RX_DONE 0x58
11727+
11728+
11729+/* Clear ACTIVITY Interrupt Register */
11730+#define IC_CLR_ACTIVITY 0x5c
11731+#define CLR_ACTIVITY (1 << 0)
11732+
11733+/* Clear STOP_DET Interrupt Register */
11734+#define IC_CLR_STOP_DET 0x60
11735+#define CLR_STOP_DET (1 << 0)
11736+
11737+/* Clear START_DET Interrupt Register */
11738+#define IC_CLR_START_DET 0x64
11739+#define CLR_START_DET (1 << 0)
11740+
11741+/* Clear GEN_CALL Interrupt Register */
11742+#define IC_CLR_GEN_CALL 0x68
11743+#define CLR_GEN_CALL (1 << 0)
11744+
11745+/* Enable Register */
11746+#define IC_ENABLE 0x6c
11747+#define ENABLE (1 << 0)
11748+
11749+/* Status Register */
11750+#define IC_STATUS 0x70 /* Read Only */
11751+#define STAT_SLV_ACTIVITY (1 << 6) /* Slave not in idle */
11752+#define STAT_MST_ACTIVITY (1 << 5) /* Master not in idle */
11753+#define STAT_RFF (1 << 4) /* RX FIFO Full */
11754+#define STAT_RFNE (1 << 3) /* RX FIFO Not Empty */
11755+#define STAT_TFE (1 << 2) /* TX FIFO Empty */
11756+#define STAT_TFNF (1 << 1) /* TX FIFO Not Full */
11757+#define STAT_ACTIVITY (1 << 0) /* Activity Status */
11758+
11759+/* Transmit FIFO Level Register */
11760+#define IC_TXFLR 0x74 /* Read Only */
11761+#define TXFLR (1 << 0) /* TX FIFO level */
11762+
11763+/* Receive FIFO Level Register */
11764+#define IC_RXFLR 0x78 /* Read Only */
11765+#define RXFLR (1 << 0) /* RX FIFO level */
11766+
11767+/* Transmit Abort Source Register */
11768+#define IC_TX_ABRT_SOURCE 0x80
11769+#define ABRT_SLVRD_INTX (1 << 15)
11770+#define ABRT_SLV_ARBLOST (1 << 14)
11771+#define ABRT_SLVFLUSH_TXFIFO (1 << 13)
11772+#define ARB_LOST (1 << 12)
11773+#define ABRT_MASTER_DIS (1 << 11)
11774+#define ABRT_10B_RD_NORSTRT (1 << 10)
11775+#define ABRT_SBYTE_NORSTRT (1 << 9)
11776+#define ABRT_HS_NORSTRT (1 << 8)
11777+#define ABRT_SBYTE_ACKDET (1 << 7)
11778+#define ABRT_HS_ACKDET (1 << 6)
11779+#define ABRT_GCALL_READ (1 << 5)
11780+#define ABRT_GCALL_NOACK (1 << 4)
11781+#define ABRT_TXDATA_NOACK (1 << 3)
11782+#define ABRT_10ADDR2_NOACK (1 << 2)
11783+#define ABRT_10ADDR1_NOACK (1 << 1)
11784+#define ABRT_7B_ADDR_NOACK (1 << 0)
11785+
11786+/* Enable Status Register */
11787+#define IC_ENABLE_STATUS 0x9c
11788+#define IC_EN (1 << 0) /* I2C in an enabled state */
11789+
11790+/* Component Parameter Register 1*/
11791+#define IC_COMP_PARAM_1 0xf4
11792+#define APB_DATA_WIDTH (0x3 << 0)
11793+
11794+/* GPIO_PINS */
11795+#define GPIO_I2C_0_SDA 56
11796+#define GPIO_I2C_0_SCL 57
11797+
11798+#define GPIO_I2C_1_SDA 54
11799+#define GPIO_I2C_1_SCL 55
11800+
11801+#define GPIO_I2C_2_SDA 52
11802+#define GPIO_I2C_2_SCL 53
11803+
11804+/* added by xiaolin --begin */
11805+#define SS_MIN_SCL_HIGH 4000
11806+#define SS_MIN_SCL_LOW 4700
11807+#define FS_MIN_SCL_HIGH 600
11808+#define FS_MIN_SCL_LOW 1300
11809+#define HS_MIN_SCL_HIGH_100PF 60
11810+#define HS_MIN_SCL_LOW_100PF 120
11811+
11812+enum mrst_i2c_irq {
11813+ i2c_irq_none = 0x000,
11814+ i2c_irq_rx_under = 0x001,
11815+ i2c_irq_rx_over = 0x002,
11816+ i2c_irq_rx_full = 0x004,
11817+ i2c_irq_tx_over = 0x008,
11818+ i2c_irq_tx_empty = 0x010,
11819+ i2c_irq_rd_req = 0x020,
11820+ i2c_irq_tx_abrt = 0x040,
11821+ i2c_irq_rx_done = 0x080,
11822+ i2c_irq_activity = 0x100,
11823+ i2c_irq_stop_det = 0x200,
11824+ i2c_irq_start_det = 0x400,
11825+ i2c_irq_gen_call = 0x800,
11826+ i2c_irq_all = 0xfff
11827+};
11828+
11829+/* added by xiaolin --end */
11830+
11831+/* Function declarations */
11832+
11833+static int mrst_i2c_disable(struct i2c_adapter *);
11834+static int __devinit mrst_i2c_hwinit(struct pci_dev *);
11835+static u32 mrst_i2c_func(struct i2c_adapter *);
11836+static inline int mrst_i2c_invalid_address(const struct i2c_msg *);
11837+static inline int mrst_i2c_address_neq(const struct i2c_msg *,
11838+ const struct i2c_msg *);
11839+static int mrst_i2c_xfer(struct i2c_adapter *,
11840+ struct i2c_msg *,
11841+ int);
11842+static int __devinit mrst_i2c_probe(struct pci_dev *,
11843+ const struct pci_device_id *);
11844+static void __devexit mrst_i2c_remove(struct pci_dev *);
11845+static int __init mrst_i2c_init(void);
11846+static void __exit mrst_i2c_exit(void);
11847+static int xfer_read(struct i2c_adapter *,
11848+ unsigned char *, int);
11849+static int xfer_write(struct i2c_adapter *,
11850+ unsigned char *, int);
11851+#endif /* __I2C_MRST_H */
11852Index: linux-2.6.33/drivers/i2c/i2c-boardinfo.c
11853===================================================================
11854--- linux-2.6.33.orig/drivers/i2c/i2c-boardinfo.c
11855+++ linux-2.6.33/drivers/i2c/i2c-boardinfo.c
11856@@ -58,11 +58,13 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bu
11857 * The board info passed can safely be __initdata, but be careful of embedded
11858 * pointers (for platform_data, functions, etc) since that won't be copied.
11859 */
11860-int __init
11861+int
11862 i2c_register_board_info(int busnum,
11863 struct i2c_board_info const *info, unsigned len)
11864 {
11865 int status;
11866+ int flag = 0;
11867+ struct i2c_devinfo *devinfo;
11868
11869 down_write(&__i2c_board_lock);
11870
11871@@ -71,21 +73,32 @@ i2c_register_board_info(int busnum,
11872 __i2c_first_dynamic_bus_num = busnum + 1;
11873
11874 for (status = 0; len; len--, info++) {
11875- struct i2c_devinfo *devinfo;
11876-
11877- devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
11878- if (!devinfo) {
11879- pr_debug("i2c-core: can't register boardinfo!\n");
11880- status = -ENOMEM;
11881- break;
11882+ list_for_each_entry(devinfo, &__i2c_board_list, list) {
11883+ if (devinfo->busnum == busnum
11884+ && devinfo->board_info.addr == info->addr) {
11885+ flag = 1;
11886+ break;
11887+ }
11888 }
11889-
11890- devinfo->busnum = busnum;
11891- devinfo->board_info = *info;
11892- list_add_tail(&devinfo->list, &__i2c_board_list);
11893+ if (flag != 1) {
11894+ struct i2c_devinfo *dev;
11895+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
11896+ if (!dev) {
11897+				pr_debug("i2c-core: can't register "
11898+ "boardinfo!\n");
11899+ status = -ENOMEM;
11900+ break;
11901+ }
11902+
11903+ dev->busnum = busnum;
11904+ dev->board_info = *info;
11905+ list_add_tail(&dev->list, &__i2c_board_list);
11906+ }
11907+ flag = 0;
11908 }
11909
11910 up_write(&__i2c_board_lock);
11911
11912 return status;
11913 }
11914+EXPORT_SYMBOL_GPL(i2c_register_board_info);
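
The hunk above drops the __init restriction and skips duplicate (bus, address) entries, so board info can be registered again at driver load time without creating duplicates. Typical caller-side usage looks roughly like this (a sketch; the device names, addresses and IRQ are invented, and <linux/i2c.h> is assumed):

static struct i2c_board_info example_bus0_devices[] = {
	{ I2C_BOARD_INFO("example-sensor", 0x29), .irq = 17 },
	{ I2C_BOARD_INFO("example-eeprom", 0x50) },
};

/* With the change above, registering the same table twice adds each
 * device only once. */
i2c_register_board_info(0, example_bus0_devices,
			ARRAY_SIZE(example_bus0_devices));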
11915Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Kconfig
11916===================================================================
11917--- linux-2.6.33.orig/arch/x86/kernel/cpu/cpufreq/Kconfig
11918+++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Kconfig
11919@@ -10,6 +10,22 @@ if CPU_FREQ
11920
11921 comment "CPUFreq processor drivers"
11922
11923+config X86_SFI_CPUFREQ
11924+ tristate "SFI Processor P-States driver"
11925+ depends on SFI_PROCESSOR_PM
11926+ select CPU_FREQ_TABLE
11927+ help
11928+ This driver adds a CPUFreq driver which utilizes the SFI
11929+ Processor Performance States.
11930+ This driver also supports Intel Enhanced Speedstep.
11931+
11932+ To compile this driver as a module, choose M here: the
11933+ module will be called sfi-cpufreq.
11934+
11935+ For details, take a look at <file:Documentation/cpu-freq/>.
11936+
11937+ If in doubt, say N.
11938+
11939 config X86_ACPI_CPUFREQ
11940 tristate "ACPI Processor P-States driver"
11941 select CPU_FREQ_TABLE
11942Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Makefile
11943===================================================================
11944--- linux-2.6.33.orig/arch/x86/kernel/cpu/cpufreq/Makefile
11945+++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Makefile
11946@@ -15,6 +15,7 @@ obj-$(CONFIG_X86_GX_SUSPMOD) += gx-susp
11947 obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
11948 obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
11949 obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
11950+obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
11951 obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
11952 obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
11953 obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
11954Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c
11955===================================================================
11956--- /dev/null
11957+++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c
11958@@ -0,0 +1,655 @@
11959+/*
11960+ * sfi-cpufreq.c - SFI Processor P-States Driver
11961+ *
11962+ *
11963+ *
11964+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11965+ *
11966+ * This program is free software; you can redistribute it and/or modify
11967+ * it under the terms of the GNU General Public License as published by
11968+ * the Free Software Foundation; either version 2 of the License, or (at
11969+ * your option) any later version.
11970+ *
11971+ * This program is distributed in the hope that it will be useful, but
11972+ * WITHOUT ANY WARRANTY; without even the implied warranty of
11973+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11974+ * General Public License for more details.
11975+ * You should have received a copy of the GNU General Public License along
11976+ * with this program; if not, write to the Free Software Foundation, Inc.,
11977+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
11978+ *
11979+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11980+ * Author: Vishwesh M Rudramuni
11981+ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
11982+ */
11983+
11984+/*
11985+ * This SFI Processor P-States driver reuses most of the code available
11986+ * in the ACPI cpufreq driver.
11987+ */
11988+
11989+#include <linux/kernel.h>
11990+#include <linux/module.h>
11991+#include <linux/init.h>
11992+#include <linux/smp.h>
11993+#include <linux/sched.h>
11994+#include <linux/cpufreq.h>
11995+#include <linux/compiler.h>
11996+#include <linux/dmi.h>
11997+
11998+#include <linux/sfi.h>
11999+#include <linux/sfi_processor.h>
12000+
12001+#include <linux/io.h>
12002+#include <asm/msr.h>
12003+#include <asm/processor.h>
12004+#include <asm/cpufeature.h>
12005+#include <linux/delay.h>
12006+#include <linux/uaccess.h>
12007+
12008+#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
12009+ "sfi-cpufreq", msg)
12010+
12011+MODULE_AUTHOR("Vishwesh Rudramuni");
12012+MODULE_DESCRIPTION("SFI Processor P-States Driver");
12013+MODULE_LICENSE("GPL");
12014+#define SYSTEM_INTEL_MSR_CAPABLE 0x1
12015+#define INTEL_MSR_RANGE (0xffff)
12016+#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
12017+
12018+struct sfi_cpufreq_data {
12019+ struct sfi_processor_performance *sfi_data;
12020+ struct cpufreq_frequency_table *freq_table;
12021+ unsigned int max_freq;
12022+ unsigned int resume;
12023+ unsigned int cpu_feature;
12024+};
12025+
12026+static DEFINE_PER_CPU(struct sfi_cpufreq_data *, drv_data);
12027+
12028+/* sfi_perf_data is a pointer to percpu data. */
12029+static struct sfi_processor_performance *sfi_perf_data;
12030+
12031+static struct cpufreq_driver sfi_cpufreq_driver;
12032+
12033+static unsigned int sfi_pstate_strict;
12034+
12035+static int check_est_cpu(unsigned int cpuid)
12036+{
12037+ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
12038+
12039+ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
12040+ !cpu_has(cpu, X86_FEATURE_EST))
12041+ return 0;
12042+
12043+ return 1;
12044+}
12045+
12046+static unsigned extract_freq(u32 msr, struct sfi_cpufreq_data *data)
12047+{
12048+ int i;
12049+ struct sfi_processor_performance *perf;
12050+
12051+ msr &= INTEL_MSR_RANGE;
12052+ perf = data->sfi_data;
12053+
12054+ for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
12055+ if (msr == perf->states[data->freq_table[i].index].status)
12056+ return data->freq_table[i].frequency;
12057+ }
12058+ return data->freq_table[0].frequency;
12059+}
12060+
12061+
12062+struct msr_addr {
12063+ u32 reg;
12064+};
12065+
12066+
12067+struct drv_cmd {
12068+ unsigned int type;
12069+ cpumask_t mask;
12070+ u32 msr_reg;
12071+ u32 val;
12072+};
12073+
12074+static void do_drv_read(struct drv_cmd *cmd)
12075+{
12076+ u32 h;
12077+ rdmsr(cmd->msr_reg, cmd->val, h);
12078+}
12079+
12080+static void do_drv_write(struct drv_cmd *cmd)
12081+{
12082+ u32 lo, hi;
12083+
12084+ rdmsr(cmd->msr_reg, lo, hi);
12085+ lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
12086+ wrmsr(cmd->msr_reg, lo, hi);
12087+}
12088+
12089+static void drv_read(struct drv_cmd *cmd)
12090+{
12091+ cpumask_t saved_mask = current->cpus_allowed;
12092+ cmd->val = 0;
12093+
12094+ set_cpus_allowed(current, cmd->mask);
12095+ do_drv_read(cmd);
12096+ set_cpus_allowed(current, saved_mask);
12097+}
12098+
12099+static void drv_write(struct drv_cmd *cmd)
12100+{
12101+ cpumask_t saved_mask = current->cpus_allowed;
12102+ unsigned int i;
12103+
12104+ for_each_cpu_mask(i, cmd->mask) {
12105+ set_cpus_allowed(current, cpumask_of_cpu(i));
12106+ do_drv_write(cmd);
12107+ }
12108+
12109+ set_cpus_allowed(current, saved_mask);
12110+ return;
12111+}
12112+
12113+static u32 get_cur_val(cpumask_t mask)
12114+{
12115+ struct drv_cmd cmd;
12116+
12117+ if (unlikely(cpus_empty(mask)))
12118+ return 0;
12119+
12120+ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
12121+ cmd.msr_reg = MSR_IA32_PERF_STATUS;
12122+ cmd.mask = mask;
12123+
12124+ drv_read(&cmd);
12125+
12126+ dprintk("get_cur_val = %u\n", cmd.val);
12127+
12128+ return cmd.val;
12129+}
12130+
12131+/*
12132+ * Return the measured active (C0) frequency on this CPU since the last call
12133+ * to this function.
12134+ * Input: cpu number
12135+ * Return: Average CPU frequency in terms of max frequency (zero on error)
12136+ *
12137+ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
12138+ * over a period of time, while the CPU is in the C0 state.
12139+ * IA32_MPERF counts at the rate of max advertised frequency
12140+ * IA32_APERF counts at the rate of actual CPU frequency
12141+ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
12142+ * no meaning should be associated with absolute values of these MSRs.
12143+ */
12144+static unsigned int get_measured_perf(struct cpufreq_policy *policy,
12145+ unsigned int cpu)
12146+{
12147+ union {
12148+ struct {
12149+ u32 lo;
12150+ u32 hi;
12151+ } split;
12152+ u64 whole;
12153+ } aperf_cur, mperf_cur;
12154+
12155+ cpumask_t saved_mask;
12156+ unsigned int perf_percent;
12157+ unsigned int retval;
12158+
12159+ saved_mask = current->cpus_allowed;
12160+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
12161+ if (get_cpu() != cpu) {
12162+ /* We were not able to run on requested processor */
12163+ put_cpu();
12164+ return 0;
12165+ }
12166+
12167+ rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
12168+ rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
12169+
12170+ wrmsr(MSR_IA32_APERF, 0, 0);
12171+ wrmsr(MSR_IA32_MPERF, 0, 0);
12172+
12173+#ifdef __i386__
12174+ /*
12175+	 * We don't want to do a 64-bit divide on a 32-bit kernel.
12176+ * Get an approximate value. Return failure in case we cannot get
12177+ * an approximate value.
12178+ */
12179+ if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
12180+ int shift_count;
12181+ u32 h;
12182+
12183+ h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
12184+ shift_count = fls(h);
12185+
12186+ aperf_cur.whole >>= shift_count;
12187+ mperf_cur.whole >>= shift_count;
12188+ }
12189+
12190+ if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
12191+ int shift_count = 7;
12192+ aperf_cur.split.lo >>= shift_count;
12193+ mperf_cur.split.lo >>= shift_count;
12194+ }
12195+
12196+ if (aperf_cur.split.lo && mperf_cur.split.lo)
12197+ perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
12198+ else
12199+ perf_percent = 0;
12200+
12201+#else
12202+ if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
12203+ int shift_count = 7;
12204+ aperf_cur.whole >>= shift_count;
12205+ mperf_cur.whole >>= shift_count;
12206+ }
12207+
12208+ if (aperf_cur.whole && mperf_cur.whole)
12209+ perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
12210+ else
12211+ perf_percent = 0;
12212+
12213+#endif
12214+
12215+ retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
12216+
12217+ put_cpu();
12218+ set_cpus_allowed(current, saved_mask);
12219+
12220+ dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
12221+ return retval;
12222+}
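
The measured frequency is the advertised maximum scaled by the ratio of the APERF and MPERF deltas accumulated while in C0. A small stand-alone illustration of that arithmetic with made-up counter values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t aperf_delta = 750000000ULL;   /* counts at actual frequency */
    uint64_t mperf_delta = 1000000000ULL;  /* counts at max frequency    */
    unsigned int max_freq_khz = 1600000;   /* e.g. a 1.6 GHz part        */

    unsigned int perf_percent =
        (unsigned int)(aperf_delta * 100 / mperf_delta);
    unsigned int measured_khz = max_freq_khz * perf_percent / 100;

    /* 75% of 1.6 GHz -> ~1.2 GHz average while in C0 */
    printf("perf_percent=%u%%, measured=%u kHz\n",
           perf_percent, measured_khz);
    return 0;
}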
12223+
12224+
12225+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
12226+{
12227+ struct sfi_cpufreq_data *data = per_cpu(drv_data, cpu);
12228+ unsigned int freq;
12229+
12230+ unsigned int cached_freq;
12231+
12232+ dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
12233+
12234+ if (unlikely(data == NULL ||
12235+ data->sfi_data == NULL || data->freq_table == NULL)) {
12236+ return 0;
12237+ }
12238+ cached_freq = data->freq_table[data->sfi_data->state].frequency;
12239+ freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
12240+
12241+ if (freq != cached_freq) {
12242+ data->resume = 1;
12243+ return cached_freq;
12244+ }
12245+
12246+ dprintk("cur freq = %u\n", freq);
12247+
12248+ return freq;
12249+}
12250+
12251+static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
12252+ struct sfi_cpufreq_data *data)
12253+{
12254+ unsigned int cur_freq;
12255+ unsigned int i;
12256+
12257+ for (i = 0; i < 100; i++) {
12258+ cur_freq = extract_freq(get_cur_val(mask), data);
12259+ if (cur_freq == freq)
12260+ return 1;
12261+ udelay(10);
12262+ }
12263+ return 0;
12264+}
12265+
12266+static int sfi_cpufreq_target(struct cpufreq_policy *policy,
12267+ unsigned int target_freq, unsigned int relation)
12268+{
12269+ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
12270+ struct sfi_processor_performance *perf;
12271+ struct cpufreq_freqs freqs;
12272+ cpumask_t online_policy_cpus;
12273+ struct drv_cmd cmd;
12274+ unsigned int next_state = 0; /* Index into freq_table */
12275+ unsigned int next_perf_state = 0; /* Index into perf table */
12276+ unsigned int i;
12277+ int result = 0;
12278+
12279+ dprintk("sfi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
12280+
12281+ if (unlikely(data == NULL ||
12282+ data->sfi_data == NULL || data->freq_table == NULL)) {
12283+ return -ENODEV;
12284+ }
12285+
12286+ perf = data->sfi_data;
12287+ result = cpufreq_frequency_table_target(policy,
12288+ data->freq_table,
12289+ target_freq,
12290+ relation, &next_state);
12291+ if (unlikely(result))
12292+ return -ENODEV;
12293+
12294+#ifdef CONFIG_HOTPLUG_CPU
12295+ /* cpufreq holds the hotplug lock, so we are safe from here on */
12296+ cpus_and(online_policy_cpus, cpu_online_map, *policy->cpus);
12297+#else
12298+ online_policy_cpus = policy->cpus;
12299+#endif
12300+
12301+ next_perf_state = data->freq_table[next_state].index;
12302+ if (perf->state == next_perf_state) {
12303+ if (unlikely(data->resume)) {
12304+ dprintk("Called after resume, resetting to P%d\n",
12305+ next_perf_state);
12306+ data->resume = 0;
12307+ } else {
12308+ dprintk("Already at target state (P%d)\n",
12309+ next_perf_state);
12310+ return 0;
12311+ }
12312+ }
12313+
12314+ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
12315+ cmd.msr_reg = MSR_IA32_PERF_CTL;
12316+ cmd.val = (u32) perf->states[next_perf_state].control;
12317+
12318+ cpus_clear(cmd.mask);
12319+
12320+ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
12321+ cmd.mask = online_policy_cpus;
12322+ else
12323+ cpu_set(policy->cpu, cmd.mask);
12324+
12325+ freqs.old = perf->states[perf->state].core_frequency * 1000;
12326+ freqs.new = data->freq_table[next_state].frequency;
12327+ for_each_cpu_mask(i, cmd.mask) {
12328+ freqs.cpu = i;
12329+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
12330+ }
12331+
12332+ drv_write(&cmd);
12333+
12334+ if (sfi_pstate_strict) {
12335+ if (!check_freqs(cmd.mask, freqs.new, data)) {
12336+ dprintk("sfi_cpufreq_target failed (%d)\n",
12337+ policy->cpu);
12338+ return -EAGAIN;
12339+ }
12340+ }
12341+
12342+ for_each_cpu_mask(i, cmd.mask) {
12343+ freqs.cpu = i;
12344+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
12345+ }
12346+ perf->state = next_perf_state;
12347+
12348+ return result;
12349+}
12350+
12351+static int sfi_cpufreq_verify(struct cpufreq_policy *policy)
12352+{
12353+ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
12354+
12355+ dprintk("sfi_cpufreq_verify\n");
12356+
12357+ return cpufreq_frequency_table_verify(policy, data->freq_table);
12358+}
12359+
12360+/*
12361+ * sfi_cpufreq_early_init - initialize SFI P-States library
12362+ *
12363+ * Initialize the SFI P-States library (drivers/acpi/processor_perflib.c)
12364+ * in order to determine correct frequency and voltage pairings. We can
12365+ * do _PDC and _PSD and find out the processor dependency for the
12366+ * actual init that will happen later...
12367+ */
12368+static int __init sfi_cpufreq_early_init(void)
12369+{
12370+ int i;
12371+ struct sfi_processor *pr;
12372+
12373+ dprintk("sfi_cpufreq_early_init\n");
12374+
12375+ sfi_perf_data = alloc_percpu(struct sfi_processor_performance);
12376+ if (!sfi_perf_data) {
12377+ dprintk("Memory allocation error for sfi_perf_data.\n");
12378+ return -ENOMEM;
12379+ }
12380+
12381+ for_each_possible_cpu(i) {
12382+ pr = per_cpu(sfi_processors, i);
12383+ if (!pr || !pr->performance)
12384+ continue;
12385+
12386+ /* Assume no coordination on any error parsing domain info */
12387+ cpus_clear(*pr->performance->shared_cpu_map);
12388+ cpu_set(i, *pr->performance->shared_cpu_map);
12389+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
12390+ pr->performance = NULL; /* Will be set for real in register */
12391+ }
12392+
12393+	/* _PSD & _PDC are not supported in SFI. This is just a placeholder.
12394+ * sfi_processor_preregister_performance(sfi_perf_data);
12395+ * TBD: We need to study what we need to do here
12396+ */
12397+ return 0;
12398+}
12399+
12400+
12401+static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
12402+{
12403+ unsigned int i;
12404+ unsigned int valid_states = 0;
12405+ unsigned int cpu = policy->cpu;
12406+ struct sfi_cpufreq_data *data;
12407+ unsigned int result = 0;
12408+ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
12409+ struct sfi_processor_performance *perf;
12410+
12411+ dprintk("sfi_cpufreq_cpu_init\n");
12412+
12413+ data = kzalloc(sizeof(struct sfi_cpufreq_data), GFP_KERNEL);
12414+ if (!data)
12415+ return -ENOMEM;
12416+
12417+ data->sfi_data = per_cpu_ptr(sfi_perf_data, cpu);
12418+ per_cpu(drv_data, cpu) = data;
12419+
12420+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
12421+ sfi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
12422+
12423+
12424+ result = sfi_processor_register_performance(data->sfi_data, cpu);
12425+ if (result)
12426+ goto err_free;
12427+
12428+ perf = data->sfi_data;
12429+ policy->shared_type = perf->shared_type;
12430+
12431+ /*
12432+ * Will let policy->cpus know about dependency only when software
12433+ * coordination is required.
12434+ */
12435+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
12436+ policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
12437+ memcpy(policy->cpus, perf->shared_cpu_map
12438+ , sizeof(cpumask_var_t));
12439+ }
12440+
12441+ /* capability check */
12442+ if (perf->state_count <= 1) {
12443+ dprintk("No P-States\n");
12444+ result = -ENODEV;
12445+ goto err_unreg;
12446+ }
12447+
12448+ dprintk("HARDWARE addr space\n");
12449+ if (!check_est_cpu(cpu)) {
12450+ result = -ENODEV;
12451+ goto err_unreg;
12452+ }
12453+
12454+ data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
12455+ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
12456+ (perf->state_count+1), GFP_KERNEL);
12457+ if (!data->freq_table) {
12458+ result = -ENOMEM;
12459+ goto err_unreg;
12460+ }
12461+
12462+ /* detect transition latency */
12463+ policy->cpuinfo.transition_latency = 0;
12464+ for (i = 0; i < perf->state_count; i++) {
12465+ if ((perf->states[i].transition_latency * 1000) >
12466+ policy->cpuinfo.transition_latency)
12467+ policy->cpuinfo.transition_latency =
12468+ perf->states[i].transition_latency * 1000;
12469+ }
12470+
12471+ data->max_freq = perf->states[0].core_frequency * 1000;
12472+ /* table init */
12473+ for (i = 0; i < perf->state_count; i++) {
12474+ if (i > 0 && perf->states[i].core_frequency >=
12475+ data->freq_table[valid_states-1].frequency / 1000)
12476+ continue;
12477+
12478+ data->freq_table[valid_states].index = i;
12479+ data->freq_table[valid_states].frequency =
12480+ perf->states[i].core_frequency * 1000;
12481+ valid_states++;
12482+ }
12483+ data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
12484+ perf->state = 0;
12485+
12486+ result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
12487+ if (result)
12488+ goto err_freqfree;
12489+
12490+ sfi_cpufreq_driver.get = get_cur_freq_on_cpu;
12491+ policy->cur = get_cur_freq_on_cpu(cpu);
12492+
12493+ /* notify BIOS that we exist
12494+ * currently not being done.
12495+ */
12496+
12497+ /* Check for APERF/MPERF support in hardware */
12498+ if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
12499+ unsigned int ecx;
12500+ ecx = cpuid_ecx(6);
12501+ if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
12502+ sfi_cpufreq_driver.getavg = get_measured_perf;
12503+ }
12504+
12505+ dprintk("CPU%u - SFI performance management activated.\n", cpu);
12506+ for (i = 0; i < perf->state_count; i++)
12507+ dprintk(" %cP%d: %d MHz, %d uS\n",
12508+ (i == perf->state ? '*' : ' '), i,
12509+ (u32) perf->states[i].core_frequency,
12510+ (u32) perf->states[i].transition_latency);
12511+
12512+ cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
12513+
12514+ /*
12515+ * the first call to ->target() should result in us actually
12516+ * writing something to the appropriate registers.
12517+ */
12518+ data->resume = 1;
12519+
12520+ return result;
12521+
12522+err_freqfree:
12523+ kfree(data->freq_table);
12524+err_unreg:
12525+ sfi_processor_unregister_performance(perf, cpu);
12526+err_free:
12527+ kfree(data);
12528+ per_cpu(drv_data, cpu) = NULL;
12529+
12530+ return result;
12531+}
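
sfi_cpufreq_cpu_init() copies the P-state list into a cpufreq frequency table, skipping entries that do not strictly lower the frequency, and terminates the table with CPUFREQ_TABLE_END. A stand-alone sketch of that filtering step with invented state frequencies:

#include <stdio.h>

#define TABLE_END ~0u   /* stands in for CPUFREQ_TABLE_END */

int main(void)
{
    /* Hypothetical P-state core frequencies in MHz, highest first. */
    unsigned int states_mhz[] = { 1600, 1600, 1333, 1066, 800 };
    unsigned int table_khz[6];
    unsigned int i, valid = 0;
    unsigned int nstates = sizeof(states_mhz) / sizeof(states_mhz[0]);

    for (i = 0; i < nstates; i++) {
        /* skip states that do not lower the frequency (duplicates) */
        if (i > 0 && states_mhz[i] >= table_khz[valid - 1] / 1000)
            continue;
        table_khz[valid++] = states_mhz[i] * 1000;
    }
    table_khz[valid] = TABLE_END;

    for (i = 0; table_khz[i] != TABLE_END; i++)
        printf("entry %u: %u kHz\n", i, table_khz[i]);
    return 0;
}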
12532+
12533+static int sfi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
12534+{
12535+ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
12536+
12537+ dprintk("sfi_cpufreq_cpu_exit\n");
12538+
12539+ if (data) {
12540+ cpufreq_frequency_table_put_attr(policy->cpu);
12541+ per_cpu(drv_data, policy->cpu) = NULL;
12542+ /* acpi_processor_unregister_performance(data->acpi_data,
12543+ * policy->cpu);
12544+ * TBD: Need to study how do we do this
12545+ */
12546+ sfi_processor_unregister_performance(data->sfi_data,
12547+ policy->cpu);
12548+ kfree(data);
12549+ }
12550+
12551+ return 0;
12552+}
12553+
12554+static int sfi_cpufreq_resume(struct cpufreq_policy *policy)
12555+{
12556+ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
12557+
12558+ dprintk("sfi_cpufreq_resume\n");
12559+
12560+ data->resume = 1;
12561+
12562+ return 0;
12563+}
12564+
12565+static struct freq_attr *sfi_cpufreq_attr[] = {
12566+ &cpufreq_freq_attr_scaling_available_freqs,
12567+ NULL,
12568+};
12569+
12570+static struct cpufreq_driver sfi_cpufreq_driver = {
12571+ .verify = sfi_cpufreq_verify,
12572+ .target = sfi_cpufreq_target,
12573+ .init = sfi_cpufreq_cpu_init,
12574+ .exit = sfi_cpufreq_cpu_exit,
12575+ .resume = sfi_cpufreq_resume,
12576+ .name = "sfi-cpufreq",
12577+ .owner = THIS_MODULE,
12578+ .attr = sfi_cpufreq_attr,
12579+};
12580+
12581+static int __init sfi_cpufreq_init(void)
12582+{
12583+ int ret;
12584+
12585+ dprintk("sfi_cpufreq_init\n");
12586+
12587+ ret = sfi_cpufreq_early_init();
12588+ if (ret)
12589+ return ret;
12590+
12591+ return cpufreq_register_driver(&sfi_cpufreq_driver);
12592+}
12593+
12594+static void __exit sfi_cpufreq_exit(void)
12595+{
12596+ dprintk("sfi_cpufreq_exit\n");
12597+
12598+ cpufreq_unregister_driver(&sfi_cpufreq_driver);
12599+
12600+ free_percpu(sfi_perf_data);
12601+
12602+ return;
12603+}
12604+
12605+module_param(sfi_pstate_strict, uint, 0644);
12606+MODULE_PARM_DESC(sfi_pstate_strict,
12607+ "value 0 or non-zero. non-zero -> strict sfi checks are "
12608+ "performed during frequency changes.");
12609+
12610+late_initcall(sfi_cpufreq_init);
12611+module_exit(sfi_cpufreq_exit);
12612+
12613+MODULE_ALIAS("sfi");
12614Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_core.c
12615===================================================================
12616--- /dev/null
12617+++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_core.c
12618@@ -0,0 +1,134 @@
12619+/*
12620+ * sfi_processor_core.c
12621+ *
12622+ * Copyright (C) 2008 Intel Corp
12623+ *
12624+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12625+ * This program is free software; you can redistribute it and/or modify
12626+ * it under the terms of the GNU General Public License as published by
12627+ * the Free Software Foundation; version 2 of the License.
12628+ *
12629+ * This program is distributed in the hope that it will be useful, but
12630+ * WITHOUT ANY WARRANTY; without even the implied warranty of
12631+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12632+ * General Public License for more details.
12633+ *
12634+ * You should have received a copy of the GNU General Public License along
12635+ * with this program; if not, write to the Free Software Foundation, Inc.,
12636+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
12637+ *
12638+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12639+ * Author: Sujith Thomas
12640+ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
12641+ */
12642+
12643+#include <linux/module.h>
12644+#include <linux/init.h>
12645+#include <linux/types.h>
12646+#include <linux/sfi.h>
12647+#include <linux/cpu.h>
12648+#include <linux/sfi_processor.h>
12649+
12650+MODULE_LICENSE("GPL");
12651+MODULE_AUTHOR("Sujith Thomas");
12652+MODULE_DESCRIPTION("Processor enumeration based on SFI table.");
12653+
12654+DEFINE_PER_CPU(struct sfi_processor *, sfi_processors);
12655+
12656+int sfi_cstate_num;
12657+struct sfi_cstate_table_entry sfi_cstate_array[SFI_C_STATES_MAX];
12658+
12659+static int __init sfi_parse_idle(struct sfi_table_header *table)
12660+{
12661+ struct sfi_table_simple *sb;
12662+ struct sfi_cstate_table_entry *pentry;
12663+ int totallen;
12664+
12665+ sb = (struct sfi_table_simple *)table;
12666+ if (!sb) {
12667+ printk(KERN_WARNING "SFI: Unable to map IDLE\n");
12668+ return -ENODEV;
12669+ }
12670+
12671+ if (!sfi_cstate_num) {
12672+ sfi_cstate_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cstate_table_entry);
12673+ pentry = (struct sfi_cstate_table_entry *)sb->pentry;
12674+ totallen = sfi_cstate_num * sizeof(*pentry);
12675+ memcpy(sfi_cstate_array, pentry, totallen);
12676+ }
12677+
12678+ printk(KERN_INFO "SFI: IDLE C-state info (num = %d):\n",
12679+ sfi_cstate_num);
12680+ pentry = sfi_cstate_array;
12681+ for (totallen = 0; totallen < sfi_cstate_num; totallen++, pentry++) {
12682+ printk(KERN_INFO "Cstate[%d]: hint = 0x%08x, latency = %dms\n",
12683+ totallen, pentry->hint, pentry->latency);
12684+ }
12685+
12686+ return 0;
12687+}
12688+
12689+static int __init sfi_init_cpus(void)
12690+{
12691+ struct sfi_processor *pr;
12692+ int i;
12693+ int result = 0;
12694+
12695+
12696+ for (i = 0; i < num_processors; i++) {
12697+ pr = kzalloc(sizeof(struct sfi_processor), GFP_KERNEL);
12698+		pr->id = early_per_cpu(x86_cpu_to_apicid, i);
12699+		/* sfi_cpu_array[i].apicid */
12700+ per_cpu(sfi_processors, pr->id) = pr;
12701+
12702+#ifdef CONFIG_SFI_CPUIDLE
12703+ result = sfi_processor_power_init(pr);
12704+#endif
12705+ }
12706+ return result;
12707+}
12708+
12709+static int __init sfi_processor_init(void)
12710+{
12711+ int result = 0;
12712+
12713+ sfi_table_parse(SFI_SIG_IDLE, NULL, NULL, sfi_parse_idle);
12714+
12715+#ifdef CONFIG_SFI_CPUIDLE
12716+ if (sfi_cstate_num > 0)
12717+ result = cpuidle_register_driver(&sfi_idle_driver);
12718+ if (result)
12719+ return result;
12720+#endif
12721+ result = sfi_init_cpus();
12722+#ifdef CONFIG_SFI_CPUIDLE
12723+ if (result)
12724+ cpuidle_unregister_driver(&sfi_idle_driver);
12725+
12726+#endif
12727+ return result;
12728+}
12729+
12730+static void __exit sfi_processor_exit(void)
12731+{
12732+ struct sfi_processor *pr;
12733+ int i;
12734+ for (i = 0; i < num_processors; i++) {
12735+ pr = per_cpu(sfi_processors, i);
12736+ if (pr) {
12737+#ifdef CONFIG_SFI_CPUIDLE
12738+ sfi_processor_power_exit(pr);
12739+#endif
12740+ kfree(pr);
12741+ per_cpu(sfi_processors, i) = NULL;
12742+ }
12743+ }
12744+
12745+#ifdef CONFIG_SFI_CPUIDLE
12746+ cpuidle_unregister_driver(&sfi_idle_driver);
12747+#endif
12748+
12749+}
12750+
12751+module_init(sfi_processor_init);
12752+module_exit(sfi_processor_exit);
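
sfi_parse_idle() above sizes the IDLE table with SFI_GET_NUM_ENTRIES(), i.e. the table payload (total length minus the fixed SFI header) divided by the entry size, before copying the entries into sfi_cstate_array. A stand-alone sketch of that arithmetic, assuming a 24-byte table header and the two-u32 entry layout of struct sfi_cstate_table_entry; the table length used below is invented:

#include <stdio.h>
#include <stdint.h>

#define SFI_TBL_HEADER_LEN	24	/* assumed sizeof(struct sfi_table_header) */

/* Local stand-in for struct sfi_cstate_table_entry. */
struct cstate_entry {
	uint32_t hint;		/* MWAIT hint */
	uint32_t latency;	/* latency in ms */
} __attribute__((packed));

int main(void)
{
	/* Hypothetical IDLE table: 24-byte header plus 4 entries. */
	unsigned int table_len = SFI_TBL_HEADER_LEN +
				 4 * sizeof(struct cstate_entry);
	unsigned int num = (table_len - SFI_TBL_HEADER_LEN) /
			   sizeof(struct cstate_entry);

	printf("IDLE table of %u bytes -> %u C-state entries\n",
	       table_len, num);
	return 0;
}
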
12753Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_idle.c
12754===================================================================
12755--- /dev/null
12756+++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_idle.c
12757@@ -0,0 +1,490 @@
12758+/*
12759+ * sfi_processor_idle.c
12760+ *
12761+ * Copyright (C) 2009 Intel Corp
12762+ *
12763+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12764+ * This program is free software; you can redistribute it and/or modify
12765+ * it under the terms of the GNU General Public License as published by
12766+ * the Free Software Foundation; version 2 of the License.
12767+ *
12768+ * This program is distributed in the hope that it will be useful, but
12769+ * WITHOUT ANY WARRANTY; without even the implied warranty of
12770+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12771+ * General Public License for more details.
12772+ *
12773+ * You should have received a copy of the GNU General Public License along
12774+ * with this program; if not, write to the Free Software Foundation, Inc.,
12775+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
12776+ *
12777+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12778+ * Author: Sujith Thomas
12779+ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
12780+ * Author: Vishwesh Rudramuni
12781+ * Contact information: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
12782+ */
12783+
12784+#include <asm/processor.h>
12785+#include <linux/sfi_processor.h>
12786+#include <linux/sched.h>
12787+#include <linux/clockchips.h>
12788+#include <linux/sfi.h>
12789+
12790+#ifdef CONFIG_MSTWN_POWER_MGMT
12791+#include <linux/intel_mid.h>
12792+#endif
12793+
12794+static short mwait_supported[SFI_PROCESSOR_MAX_POWER];
12795+
12796+#define MWAIT_SUBSTATE_MASK (0xf)
12797+#define MWAIT_SUBSTATE_SIZE (4)
12798+
12799+#ifdef CONFIG_MSTWN_POWER_MGMT
12800+#define MID_S0I1_STATE 1
12801+#define MID_S0I3_STATE 3
12802+static int p1_c6;
12803+static int __init s0ix_latency_setup(char *str);
12804+static u32 s0ix_latency = 20000;
12805+__setup("s0ix_latency=", s0ix_latency_setup);
12806+#endif
12807+
12808+#define CPUID_MWAIT_LEAF (5)
12809+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
12810+#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
12811+
12812+#define MWAIT_ECX_INTERRUPT_BREAK (0x1)
12813+
12814+static unsigned int latency_factor __read_mostly = 4;
12815+module_param(latency_factor, uint, 0644);
12816+
12817+static int sfi_idle_enter_bm(struct cpuidle_device *dev,
12818+ struct cpuidle_state *state);
12819+
12820+struct cpuidle_driver sfi_idle_driver = {
12821+ .name = "sfi_idle",
12822+ .owner = THIS_MODULE,
12823+};
12824+
12825+/*
12826+ * Callers should disable interrupts before the call and enable
12827+ * interrupts after return.
12828+ */
12829+static void sfi_safe_halt(void)
12830+{
12831+ current_thread_info()->status &= ~TS_POLLING;
12832+ /*
12833+ * TS_POLLING-cleared state must be visible before we
12834+ * test NEED_RESCHED:
12835+ */
12836+ smp_mb();
12837+ if (!need_resched()) {
12838+ safe_halt();
12839+ local_irq_disable();
12840+ }
12841+ current_thread_info()->status |= TS_POLLING;
12842+}
12843+
12844+static int sfi_idle_enter_c1(struct cpuidle_device *dev,
12845+ struct cpuidle_state *state)
12846+{
12847+ ktime_t t1, t2;
12848+ s64 diff = 0;
12849+
12850+ local_irq_disable();
12851+
12852+ t1 = ktime_get();
12853+ sfi_safe_halt();
12854+ t2 = ktime_get();
12855+
12856+ local_irq_enable();
12857+
12858+ diff = ktime_to_us(ktime_sub(t2, t1));
12859+
12860+ if (diff > INT_MAX)
12861+ diff = INT_MAX;
12862+
12863+ return (int)diff;
12864+}
12865+
12866+static int sfi_idle_enter_simple(struct cpuidle_device *dev,
12867+ struct cpuidle_state *state)
12868+{
12869+ ktime_t t1, t2;
12870+ s64 diff = 0;
12871+ struct sfi_cstate_table_entry *data;
12872+
12873+ data = (struct sfi_cstate_table_entry *)cpuidle_get_statedata(state);
12874+ if (unlikely(!data))
12875+ return 0;
12876+
12877+
12878+ local_irq_disable();
12879+ current_thread_info()->status &= ~TS_POLLING;
12880+ /*
12881+ * TS_POLLING-cleared state must be visible before we test
12882+ * NEED_RESCHED:
12883+ */
12884+ smp_mb();
12885+
12886+ if (unlikely(need_resched())) {
12887+ current_thread_info()->status |= TS_POLLING;
12888+ local_irq_enable();
12889+ return 0;
12890+ }
12891+
12892+ t1 = ktime_get();
12893+ mwait_idle_with_hints(data->hint, MWAIT_ECX_INTERRUPT_BREAK);
12894+ t2 = ktime_get();
12895+
12896+ local_irq_enable();
12897+ current_thread_info()->status |= TS_POLLING;
12898+
12899+ diff = ktime_to_us(ktime_sub(t2, t1));
12900+ if (diff > INT_MAX)
12901+ diff = INT_MAX;
12902+
12903+ return (int)diff;
12904+}
12905+
12906+#ifdef CONFIG_MSTWN_POWER_MGMT
12907+static int __init s0ix_latency_setup(char *str)
12908+{
12909+ u32 latency;
12910+
12911+ latency = memparse(str, &str);
12912+ if (latency > 150)
12913+ s0ix_latency = latency;
12914+
12915+	printk(KERN_INFO "s0ix (c7) latency is %u\n", latency);
12916+ return 1;
12917+}
12918+
12919+static int s0i3_enter_bm(struct cpuidle_device *dev,
12920+ struct cpuidle_state *state)
12921+{
12922+ ktime_t t1, t2;
12923+ s64 diff_us = 0;
12924+ s64 diff_ns = 0;
12925+ struct sfi_processor *pr;
12926+ struct cpuidle_state *next_state;
12927+ int pr_id;
12928+ int ret;
12929+
12930+ pr_id = smp_processor_id();
12931+
12932+ pr = __get_cpu_var(sfi_processors);
12933+ if (unlikely(!pr))
12934+ return 0;
12935+
12936+ switch (g_ospm_base->platform_sx_state) {
12937+ case MID_S0I3_STATE:
12938+ if (pr_id == 0) {
12939+ t1 = ktime_get();
12940+
12941+ /* Tell the scheduler that we
12942+ * are going deep-idle:
12943+ */
12944+ sched_clock_idle_sleep_event();
12945+
12946+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
12947+ &pr->id);
12948+
12949+ mid_suspend_enter(MID_S0I3_STATE);
12950+
12951+ t2 = ktime_get();
12952+
12953+ diff_us = ktime_to_us(ktime_sub(t2, t1));
12954+ diff_ns = ktime_to_ns(ktime_sub(t2, t1));
12955+
12956+ /* Tell the scheduler how much
12957+ * we idled:
12958+ */
12959+ sched_clock_idle_wakeup_event(diff_ns);
12960+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
12961+ &pr->id);
12962+
12963+ if (diff_us > INT_MAX)
12964+ diff_us = INT_MAX;
12965+
12966+ return (int)diff_us;
12967+
12968+ } else {
12969+ ret = sfi_idle_enter_c1(dev, state);
12970+ return ret;
12971+ }
12972+ break;
12973+ case MID_S0I1_STATE:
12974+ if ((pr_id == 0) && (p1_c6 == 1)) {
12975+			/* pmu_issue_command(s0i1) only for thread 0;
12976+			 * the rest fall through
12977+			 */
12978+ mid_suspend_enter(MID_S0I1_STATE);
12979+ }
12980+ next_state = &dev->states[4];
12981+ ret = sfi_idle_enter_bm(dev, next_state);
12982+ return ret;
12983+ break;
12984+ default:
12985+ next_state = &dev->states[4];
12986+ ret = sfi_idle_enter_bm(dev, next_state);
12987+ dev->last_state = &dev->states[4];
12988+ return ret;
12989+ break;
12990+
12991+ }
12992+
12993+ return 0;
12994+
12995+}
12996+#endif
12997+
12998+static int sfi_idle_enter_bm(struct cpuidle_device *dev,
12999+ struct cpuidle_state *state)
13000+{
13001+
13002+ ktime_t t1, t2;
13003+ s64 diff_us = 0;
13004+ s64 diff_ns = 0;
13005+ struct sfi_cstate_table_entry *data;
13006+ struct sfi_processor *pr;
13007+
13008+ pr = __get_cpu_var(sfi_processors);
13009+ if (unlikely(!pr))
13010+ return 0;
13011+
13012+ data = (struct sfi_cstate_table_entry *)cpuidle_get_statedata(state);
13013+ if (unlikely(!data))
13014+ return 0;
13015+
13016+ local_irq_disable();
13017+ current_thread_info()->status &= ~TS_POLLING;
13018+ /*
13019+ * TS_POLLING-cleared state must be visible before we test
13020+ * NEED_RESCHED:
13021+ */
13022+ smp_mb();
13023+
13024+ if (unlikely(need_resched())) {
13025+ current_thread_info()->status |= TS_POLLING;
13026+ local_irq_enable();
13027+ return 0;
13028+ }
13029+
13030+ t1 = ktime_get();
13031+
13032+ /* Tell the scheduler that we are going deep-idle: */
13033+ sched_clock_idle_sleep_event();
13034+
13035+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &pr->id);
13036+
13037+
13038+#ifdef CONFIG_MSTWN_POWER_MGMT
13039+ if ((smp_processor_id() == 1) && (data->hint == 0x52))
13040+ p1_c6 = 1;
13041+#endif
13042+
13043+ mwait_idle_with_hints(data->hint, MWAIT_ECX_INTERRUPT_BREAK);
13044+
13045+#ifdef CONFIG_MSTWN_POWER_MGMT
13046+ if ((smp_processor_id() == 1) && (data->hint == 0x52))
13047+ p1_c6 = 0;
13048+#endif
13049+
13050+ t2 = ktime_get();
13051+
13052+ diff_us = ktime_to_us(ktime_sub(t2, t1));
13053+ diff_ns = ktime_to_ns(ktime_sub(t2, t1));
13054+
13055+ /* Tell the scheduler how much we idled: */
13056+ sched_clock_idle_wakeup_event(diff_ns);
13057+
13058+ local_irq_enable();
13059+ current_thread_info()->status |= TS_POLLING;
13060+
13061+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &pr->id);
13062+
13063+ if (diff_us > INT_MAX)
13064+ diff_us = INT_MAX;
13065+
13066+ return (int)diff_us;
13067+
13068+}
13069+
13070+/**
13071+ * sfi_processor_setup_cpuidle - prepares and configures CPUIDLE
13072+ * @pr: the SFI processor
13073+ */
13074+static int sfi_processor_setup_cpuidle(struct sfi_processor *pr)
13075+{
13076+ int i;
13077+ int count = CPUIDLE_DRIVER_STATE_START;
13078+ struct cpuidle_state *state;
13079+ struct cpuidle_device *dev = &pr->power.dev;
13080+
13081+ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
13082+ dev->states[i].name[0] = '\0';
13083+ dev->states[i].desc[0] = '\0';
13084+ }
13085+
13086+ for (i = 1; i < SFI_PROCESSOR_MAX_POWER; i++) {
13087+
13088+ /*Mwait not supported by processor */
13089+		/* skip C-states the processor cannot enter via MWAIT */
13090+ continue;
13091+
13092+ state = &dev->states[count];
13093+
13094+ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
13095+ snprintf(state->desc, CPUIDLE_DESC_LEN, "C%d", i);
13096+
13097+ state->exit_latency = pr->power.states[count].exit_latency;
13098+ state->target_residency = state->exit_latency * latency_factor;
13099+ state->power_usage = pr->power.states[count].power_usage;
13100+ state->flags = 0;
13101+ cpuidle_set_statedata(state, &pr->power.sfi_cstates[count]);
13102+
13103+		printk(KERN_INFO
13104+		       "State details: name %s, desc %s, exit_latency %d, "
13105+		       "target_residency %d, power_usage %d, hint 0x%x\n",
13106+		       state->name, state->desc, state->exit_latency,
13107+		       state->target_residency, state->power_usage,
13108+		       pr->power.sfi_cstates[count].hint);
13109+
13110+ switch (i) {
13111+ case SFI_STATE_C1:
13112+ state->flags |= CPUIDLE_FLAG_SHALLOW;
13113+ state->enter = sfi_idle_enter_c1;
13114+ break;
13115+
13116+ case SFI_STATE_C2:
13117+ state->flags |= CPUIDLE_FLAG_BALANCED;
13118+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
13119+ state->enter = sfi_idle_enter_simple;
13120+ break;
13121+
13122+ case SFI_STATE_C3:
13123+ case SFI_STATE_C4:
13124+ case SFI_STATE_C5:
13125+ case SFI_STATE_C6:
13126+ state->flags |= CPUIDLE_FLAG_DEEP;
13127+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
13128+ state->flags |= CPUIDLE_FLAG_CHECK_BM;
13129+ state->enter = sfi_idle_enter_bm;
13130+ break;
13131+#ifdef CONFIG_MSTWN_POWER_MGMT
13132+ case STATE_S0IX:
13133+ state->flags |= CPUIDLE_FLAG_DEEP;
13134+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
13135+ state->flags |= CPUIDLE_FLAG_CHECK_BM;
13136+ state->enter = s0i3_enter_bm;
13137+ break;
13138+#endif
13139+ }
13140+
13141+ count++;
13142+ if (count == CPUIDLE_STATE_MAX)
13143+ break;
13144+ }
13145+
13146+ dev->state_count = count;
13147+ if (!count)
13148+ return -EINVAL;
13149+
13150+ return 0;
13151+}
13152+
13153+int sfi_cstate_probe(unsigned int hint)
13154+{
13155+ int retval;
13156+ unsigned int eax, ebx, ecx, edx;
13157+ unsigned int edx_part;
13158+ unsigned int cstate_type;
13159+ unsigned int num_cstate_subtype;
13160+
13161+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
13162+
13163+ /* Check whether this particular CState is supported or not */
13164+ cstate_type = (hint >> MWAIT_SUBSTATE_SIZE) + 1;
13165+ edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
13166+ num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
13167+
13168+ retval = 0;
13169+ if (num_cstate_subtype < (hint & MWAIT_SUBSTATE_MASK)) {
13170+ retval = -1;
13171+ goto out;
13172+ }
13173+
13174+ /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
13175+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
13176+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
13177+ retval = -1;
13178+ goto out;
13179+ }
13180+
13181+ if (!mwait_supported[cstate_type]) {
13182+ mwait_supported[cstate_type] = 1;
13183+ printk(KERN_DEBUG
13184+ "Monitor-Mwait will be used to enter C-%d state\n",
13185+ cstate_type);
13186+ }
13187+
13188+out:
13189+ return retval;
13190+}
13191+
13192+int sfi_processor_power_init(struct sfi_processor *pr)
13193+{
13194+
13195+ int totallen;
13196+ struct sfi_cstate_table_entry *pentry;
13197+ u32 sfi_max_states;
13198+
13199+ pentry = sfi_cstate_array;
13200+
13201+#ifdef CONFIG_MSTWN_POWER_MGMT
13202+ sfi_max_states = SFI_PROCESSOR_MAX_POWER - 1;
13203+#else
13204+ sfi_max_states = SFI_PROCESSOR_MAX_POWER;
13205+#endif
13206+
13207+ for (totallen = 1; totallen <= sfi_cstate_num &&
13208+ totallen < sfi_max_states; totallen++, pentry++) {
13209+ pr->power.states[totallen].power_usage = 0;
13210+ pr->power.states[totallen].exit_latency = pentry->latency;
13211+
13212+ pr->power.sfi_cstates[totallen].hint = pentry->hint;
13213+ pr->power.sfi_cstates[totallen].latency = pentry->latency;
13214+
13215+ sfi_cstate_probe(pentry->hint);
13216+
13217+ printk(KERN_INFO "Cstate[%d]: hint = 0x%08x, latency = %dms\n",
13218+ totallen, pentry->hint, pentry->latency);
13219+ }
13220+
13221+#ifdef CONFIG_MSTWN_POWER_MGMT
13222+
13223+ p1_c6 = 0;
13224+
13225+ /* this initialization is for the S0i3 state */
13226+ pr->power.states[totallen].power_usage = 0;
13227+ pr->power.states[totallen].exit_latency = s0ix_latency;
13228+
13229+ pr->power.sfi_cstates[totallen].hint = 0;
13230+ pr->power.sfi_cstates[totallen].latency = s0ix_latency;
13231+
13232+ mwait_supported[STATE_S0IX] = 1;
13233+#endif
13234+
13235+ sfi_processor_setup_cpuidle(pr);
13236+ pr->power.dev.cpu = pr->id;
13237+ if (cpuidle_register_device(&pr->power.dev))
13238+ return -EIO;
13239+
13240+ return 0;
13241+}
13242+
13243+int sfi_processor_power_exit(struct sfi_processor *pr)
13244+{
13245+ cpuidle_unregister_device(&pr->power.dev);
13246+ return 0;
13247+}
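
sfi_cstate_probe() above validates each table hint against CPUID leaf 5: bits 7:4 of the hint select the C-state (offset by one), bits 3:0 the sub-state, and each 4-bit field of EDX reports how many sub-states the processor implements for that C-state. A user-space sketch of the same decoding, reusing the 0x52 hint that the idle path watches for C6 on CPU 1; the EDX value is invented for illustration:

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK	(0xf)
#define MWAIT_SUBSTATE_SIZE	(4)

/* Same shift/mask arithmetic as sfi_cstate_probe(). */
static void decode_hint(unsigned int hint, unsigned int edx)
{
	unsigned int cstate_type = (hint >> MWAIT_SUBSTATE_SIZE) + 1;
	unsigned int substate = hint & MWAIT_SUBSTATE_MASK;
	unsigned int reported = (edx >> (cstate_type * MWAIT_SUBSTATE_SIZE))
				& MWAIT_SUBSTATE_MASK;

	printf("hint 0x%02x -> C%u sub-state %u (%u sub-states reported)\n",
	       hint, cstate_type, substate, reported);
}

int main(void)
{
	/* 0x52 is the hint the patch tracks as C6 on CPU 1 (p1_c6);
	 * 0x02020220 is an invented CPUID.5 EDX value, not real silicon data. */
	decode_hint(0x52, 0x02020220);
	return 0;
}

A hint whose sub-state number exceeds the count reported by EDX is the first case sfi_cstate_probe() rejects.
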
13248Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_perflib.c
13249===================================================================
13250--- /dev/null
13251+++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_perflib.c
13252@@ -0,0 +1,185 @@
13253+/*
13254+ * sfi_processor_perflib.c - SFI Processor P-States Library
13255+ *
13256+ *
13257+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13258+ *
13259+ * This program is free software; you can redistribute it and/or modify
13260+ * it under the terms of the GNU General Public License as published by
13261+ * the Free Software Foundation; either version 2 of the License, or (at
13262+ * your option) any later version.
13263+ *
13264+ * This program is distributed in the hope that it will be useful, but
13265+ * WITHOUT ANY WARRANTY; without even the implied warranty of
13266+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13267+ * General Public License for more details.
13268+ *
13269+ * You should have received a copy of the GNU General Public License along
13270+ * with this program; if not, write to the Free Software Foundation, Inc.,
13271+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
13272+ *
13273+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13274+ * Author: Vishwesh M Rudramuni
13275+ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
13276+ */
13277+
13278+#include <linux/kernel.h>
13279+#include <linux/module.h>
13280+#include <linux/init.h>
13281+#include <linux/cpufreq.h>
13282+#include <linux/sfi.h>
13283+#include <linux/sfi_processor.h>
13284+
13285+#define SFI_PROCESSOR_COMPONENT 0x01000000
13286+#define SFI_PROCESSOR_CLASS "processor"
13287+#define SFI_PROCESSOR_FILE_PERFORMANCE "performance"
13288+#define _COMPONENT SFI_PROCESSOR_COMPONENT
13289+
13290+static DEFINE_MUTEX(performance_mutex);
13291+
13292+/* Use cpufreq debug layer for _PPC changes. */
13293+#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
13294+ "cpufreq-core", msg)
13295+
13296+static void sfi_cpufreq_add_file(struct sfi_processor *pr)
13297+{
13298+ return;
13299+}
13300+static void sfi_cpufreq_remove_file(struct sfi_processor *pr)
13301+{
13302+ return;
13303+}
13304+
13305+struct sfi_cpufreq_table_entry sfi_cpufreq_array[SFI_PROCESSOR_MAX_POWER];
13306+EXPORT_SYMBOL_GPL(sfi_cpufreq_array);
13307+
13308+int sfi_cpufreq_num;
13309+EXPORT_SYMBOL_GPL(sfi_cpufreq_num);
13310+
13311+static int __init sfi_parse_freq(struct sfi_table_header *table)
13312+{
13313+ struct sfi_table_simple *sb;
13314+ struct sfi_cpufreq_table_entry *pentry;
13315+ int totallen;
13316+
13317+ sb = (struct sfi_table_simple *)table;
13318+ if (!sb) {
13319+ printk(KERN_WARNING "SFI: Unable to map FREQ\n");
13320+ return -ENODEV;
13321+ }
13322+
13323+ if (!sfi_cpufreq_num) {
13324+ sfi_cpufreq_num = SFI_GET_NUM_ENTRIES(sb,
13325+ struct sfi_cpufreq_table_entry);
13326+ pentry = (struct sfi_cpufreq_table_entry *)sb->pentry;
13327+ totallen = sfi_cpufreq_num * sizeof(*pentry);
13328+ memcpy(sfi_cpufreq_array, pentry, totallen);
13329+ }
13330+
13331+ printk(KERN_INFO "SFI: P state info (num = %d):\n", sfi_cpufreq_num);
13332+ pentry = sfi_cpufreq_array;
13333+ for (totallen = 0; totallen < sfi_cpufreq_num; totallen++, pentry++) {
13334+ printk(KERN_INFO "Pstate[%d]: freq = %dMHz latency = %dms"
13335+ " ctrl = 0x%08x\n", totallen, pentry->freq,
13336+ pentry->latency, pentry->ctrl_val);
13337+ }
13338+
13339+ return 0;
13340+}
13341+
13342+
13343+static int sfi_processor_get_performance_states(struct sfi_processor *pr)
13344+{
13345+ int result = 0;
13346+ int i;
13347+
13348+ sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
13349+
13350+
13351+ pr->performance->state_count = sfi_cpufreq_num;
13352+ pr->performance->states =
13353+ kmalloc(sizeof(struct sfi_processor_px) * sfi_cpufreq_num,
13354+ GFP_KERNEL);
13355+ if (!pr->performance->states)
13356+		return -ENOMEM;
13357+
13358+ printk(KERN_INFO "Num p-states %d\n", sfi_cpufreq_num);
13359+
13360+ /* Populate the P-states info from the SFI table here */
13361+ for (i = 0; i < sfi_cpufreq_num; i++) {
13362+		pr->performance->states[i].core_frequency =
13363+			sfi_cpufreq_array[i].freq;
13364+		pr->performance->states[i].transition_latency =
13365+			sfi_cpufreq_array[i].latency;
13366+		pr->performance->states[i].control =
13367+			sfi_cpufreq_array[i].ctrl_val;
13368+		printk(KERN_INFO
13369+		       "State [%d]: core_frequency[%d] transition_latency[%d] "
13370+		       "control[0x%x] status[0x%x]\n", i,
13371+		       (u32) pr->performance->states[i].core_frequency,
13372+		       (u32) pr->performance->states[i].transition_latency,
13373+		       (u32) pr->performance->states[i].control,
13374+		       (u32) pr->performance->states[i].status);
13375+ }
13376+
13377+ return result;
13378+}
13379+
13380+int
13381+sfi_processor_register_performance(struct sfi_processor_performance
13382+ *performance, unsigned int cpu)
13383+{
13384+ struct sfi_processor *pr;
13385+
13386+ mutex_lock(&performance_mutex);
13387+
13388+ pr = per_cpu(sfi_processors, cpu);
13389+ if (!pr) {
13390+ mutex_unlock(&performance_mutex);
13391+ return -ENODEV;
13392+ }
13393+
13394+ if (pr->performance) {
13395+ mutex_unlock(&performance_mutex);
13396+ return -EBUSY;
13397+ }
13398+
13399+ WARN_ON(!performance);
13400+
13401+ pr->performance = performance;
13402+
13403+ sfi_processor_get_performance_states(pr);
13404+
13405+ sfi_cpufreq_add_file(pr);
13406+
13407+ mutex_unlock(&performance_mutex);
13408+ return 0;
13409+}
13410+EXPORT_SYMBOL(sfi_processor_register_performance);
13411+
13412+void
13413+sfi_processor_unregister_performance(struct sfi_processor_performance
13414+ *performance, unsigned int cpu)
13415+{
13416+ struct sfi_processor *pr;
13417+
13418+
13419+ mutex_lock(&performance_mutex);
13420+
13421+ pr = per_cpu(sfi_processors, cpu);
13422+ if (!pr) {
13423+ mutex_unlock(&performance_mutex);
13424+ return;
13425+ }
13426+
13427+ if (pr->performance)
13428+ kfree(pr->performance->states);
13429+ pr->performance = NULL;
13430+
13431+ sfi_cpufreq_remove_file(pr);
13432+
13433+ mutex_unlock(&performance_mutex);
13434+
13435+ return;
13436+}
13437+EXPORT_SYMBOL(sfi_processor_unregister_performance);
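
sfi_processor_get_performance_states() above copies each FREQ table entry (freq, latency, ctrl_val) into the corresponding struct sfi_processor_px fields. A minimal user-space sketch of that mapping, with locally defined stand-ins for both structures and invented P-state values:

#include <stdio.h>
#include <stdlib.h>

/* Local stand-ins for sfi_cpufreq_table_entry and sfi_processor_px. */
struct freq_entry { unsigned int freq, latency, ctrl_val; };
struct px { unsigned int core_frequency, transition_latency, control; };

int main(void)
{
	/* Invented FREQ table contents, for illustration only. */
	const struct freq_entry table[] = {
		{ 1600, 10, 0x0c29 },
		{  800, 10, 0x0615 },
	};
	size_t n = sizeof(table) / sizeof(table[0]);
	struct px *states = calloc(n, sizeof(*states));
	size_t i;

	if (!states)
		return 1;
	for (i = 0; i < n; i++) {
		states[i].core_frequency = table[i].freq;
		states[i].transition_latency = table[i].latency;
		states[i].control = table[i].ctrl_val;
		printf("P%zu: %u MHz, latency %u, ctrl 0x%x\n", i,
		       states[i].core_frequency,
		       states[i].transition_latency,
		       states[i].control);
	}
	free(states);
	return 0;
}
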
13438Index: linux-2.6.33/drivers/sfi/Kconfig
13439===================================================================
13440--- linux-2.6.33.orig/drivers/sfi/Kconfig
13441+++ linux-2.6.33/drivers/sfi/Kconfig
13442@@ -15,3 +15,13 @@ menuconfig SFI
13443 For more information, see http://simplefirmware.org
13444
13445 Say 'Y' here to enable the kernel to boot on SFI-only platforms.
13446+config SFI_PROCESSOR_PM
13447+ bool "SFI Processor Power Management"
13448+ depends on SFI && X86_LOCAL_APIC
13449+ default y
13450+
13451+config SFI_CPUIDLE
13452+ bool "SFI Processor C-State driver"
13453+ depends on SFI_PROCESSOR_PM && CPU_IDLE
13454+ default y
13455+
13456Index: linux-2.6.33/include/linux/sfi_processor.h
13457===================================================================
13458--- /dev/null
13459+++ linux-2.6.33/include/linux/sfi_processor.h
13460@@ -0,0 +1,102 @@
13461+/*
13462+ * sfi_processor.h
13463+ *
13464+ * Copyright (C) 2008 Intel Corp
13465+ *
13466+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13467+ * This program is free software; you can redistribute it and/or modify
13468+ * it under the terms of the GNU General Public License as published by
13469+ * the Free Software Foundation; version 2 of the License.
13470+ *
13471+ * This program is distributed in the hope that it will be useful, but
13472+ * WITHOUT ANY WARRANTY; without even the implied warranty of
13473+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13474+ * General Public License for more details.
13475+ *
13476+ * You should have received a copy of the GNU General Public License along
13477+ * with this program; if not, write to the Free Software Foundation, Inc.,
13478+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
13479+ *
13480+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13481+ * Author: Sujith Thomas
13482+ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
13483+ */
13484+
13485+#ifndef __SFI_PROCESSOR_H__
13486+#define __SFI_PROCESSOR_H__
13487+#include <linux/sfi.h>
13488+#include <linux/cpuidle.h>
13489+
13490+#define SFI_PROCESSOR_MAX_POWER 7
13491+
13492+#define CPU_SFI_GET_NUM(ptable, entry) \
13493+ ((ptable->header.length - SFI_TBL_HEADER_LEN) / \
13494+ (sizeof(struct entry)))
13495+
13496+struct sfi_processor_power {
13497+ struct cpuidle_device dev;
13498+ u32 default_state;
13499+ int count;
13500+ struct cpuidle_state states[SFI_PROCESSOR_MAX_POWER];
13501+ struct sfi_cstate_table_entry sfi_cstates[SFI_PROCESSOR_MAX_POWER];
13502+};
13503+
13504+struct sfi_processor_flags {
13505+ u8 valid;
13506+ u8 power;
13507+};
13508+
13509+struct sfi_processor {
13510+ u32 id;
13511+ struct sfi_processor_flags flags;
13512+ struct sfi_processor_power power;
13513+ struct sfi_processor_performance *performance;
13514+};
13515+
13516+/* Performance management */
13517+struct sfi_processor_px {
13518+ u32 core_frequency; /* megahertz */
13519+ u32 transition_latency; /* microseconds */
13520+ u32 control; /* control value */
13521+ u32 status; /* success indicator */
13522+};
13523+
13524+struct sfi_processor_performance {
13525+ unsigned int state;
13526+ unsigned int state_count;
13527+ struct sfi_processor_px *states;
13528+ cpumask_var_t shared_cpu_map;
13529+ unsigned int shared_type;
13530+};
13531+
13532+#define SFI_STATE_C0 (u8) 0
13533+#define SFI_STATE_C1 (u8) 1
13534+#define SFI_STATE_C2 (u8) 2
13535+#define SFI_STATE_C3 (u8) 3
13536+#define SFI_STATE_C4 (u8) 4
13537+#define SFI_STATE_C5 (u8) 5
13538+#define SFI_STATE_C6 (u8) 6
13539+
13540+#define SFI_C_STATES_MAX SFI_STATE_C6
13541+#define SFI_C_STATE_COUNT 6
13542+
13543+extern struct cpuidle_driver sfi_idle_driver;
13544+
13545+/* for communication between multiple parts of the processor kernel module */
13546+DECLARE_PER_CPU(struct sfi_processor *, sfi_processors);
13547+
13548+int sfi_processor_power_init(struct sfi_processor *pr);
13549+int sfi_processor_power_exit(struct sfi_processor *pr);
13550+extern int sfi_processor_register_performance(struct sfi_processor_performance
13551+ *performance, unsigned int cpu);
13552+extern void sfi_processor_unregister_performance(struct
13553+ sfi_processor_performance
13554+ *performance,
13555+ unsigned int cpu);
13556+extern struct sfi_cstate_table_entry sfi_cstate_array[SFI_C_STATES_MAX];
13557+extern int sfi_cstate_num;
13561+
13562+#endif /*__SFI_PROCESSOR_H__*/
13563Index: linux-2.6.33/include/linux/sfi.h
13564===================================================================
13565--- linux-2.6.33.orig/include/linux/sfi.h
13566+++ linux-2.6.33/include/linux/sfi.h
13567@@ -120,6 +120,13 @@ struct sfi_cstate_table_entry {
13568 u32 latency; /* latency in ms */
13569 } __packed;
13570
13571+
13572+struct sfi_cpufreq_table_entry {
13573+ u32 freq;
13574+ u32 latency; /* transition latency in ms for this pstate */
13575+	u32 ctrl_val;	/* value to write to PERF_CTL to enter this state */
13576+} __packed;
13577+
13578 struct sfi_apic_table_entry {
13579 u64 phys_addr; /* phy base addr for APIC reg */
13580 } __packed;
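
The sfi_cpufreq_table_entry introduced above packs three 32-bit fields, so each P-state should occupy 12 bytes in the FREQ table. A quick stand-alone check of that layout (the sample values are invented):

#include <stdio.h>
#include <stdint.h>

/* Same shape as the sfi_cpufreq_table_entry added above. */
struct cpufreq_entry {
	uint32_t freq;		/* MHz */
	uint32_t latency;	/* transition latency */
	uint32_t ctrl_val;	/* value written to PERF_CTL */
} __attribute__((packed));

int main(void)
{
	struct cpufreq_entry e = { 1600, 10, 0x0c29 };	/* invented P-state */

	printf("entry size = %zu bytes\n", sizeof(e));
	printf("P-state: %u MHz, latency %u, PERF_CTL value 0x%x\n",
	       (unsigned int)e.freq, (unsigned int)e.latency,
	       (unsigned int)e.ctrl_val);
	return 0;
}
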