summaryrefslogtreecommitdiff
path: root/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch')
-rw-r--r--  meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch  13580
1 file changed, 13580 insertions, 0 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch
new file mode 100644
index 000000000..7f81eb82f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch
@@ -0,0 +1,13580 @@
+Index: linux-2.6.33/drivers/pci/pci.c
+===================================================================
+--- linux-2.6.33.orig/drivers/pci/pci.c
++++ linux-2.6.33/drivers/pci/pci.c
+@@ -297,6 +297,49 @@ int pci_find_ext_capability(struct pci_d
+ }
+ EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+
++/**
++ * pci_bus_find_ext_capability - find an extended capability
++ * @bus: the PCI bus to query
++ * @devfn: PCI device to query
++ * @cap: capability code
++ *
++ * Like pci_find_ext_capability() but works for pci devices that do not have a
++ * pci_dev structure set up yet.
++ *
++ * Returns the address of the requested capability structure within the
++ * device's PCI configuration space or 0 in case the device does not
++ * support it.
++ */
++int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
++ int cap)
++{
++ u32 header;
++ int ttl;
++ int pos = PCI_CFG_SPACE_SIZE;
++
++ /* minimum 8 bytes per capability */
++ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
++
++ if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
++ return 0;
++ if (header == 0xffffffff || header == 0)
++ return 0;
++
++ while (ttl-- > 0) {
++ if (PCI_EXT_CAP_ID(header) == cap)
++ return pos;
++
++ pos = PCI_EXT_CAP_NEXT(header);
++ if (pos < PCI_CFG_SPACE_SIZE)
++ break;
++
++ if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
++ break;
++ }
++
++ return 0;
++}
++
+ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
+ {
+ int rc, ttl = PCI_FIND_CAP_TTL;
+Index: linux-2.6.33/include/linux/pci.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/pci.h
++++ linux-2.6.33/include/linux/pci.h
+@@ -631,6 +631,8 @@ enum pci_lost_interrupt_reason pci_lost_
+ int pci_find_capability(struct pci_dev *dev, int cap);
+ int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
+ int pci_find_ext_capability(struct pci_dev *dev, int cap);
++int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
++ int cap);
+ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
+ int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
+ struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+Index: linux-2.6.33/arch/x86/include/asm/numaq.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/numaq.h
++++ linux-2.6.33/arch/x86/include/asm/numaq.h
+@@ -30,6 +30,7 @@
+
+ extern int found_numaq;
+ extern int get_memcfg_numaq(void);
++extern int pci_numaq_init(void);
+
+ extern void *xquad_portio;
+
+Index: linux-2.6.33/arch/x86/include/asm/pci.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/pci.h
++++ linux-2.6.33/arch/x86/include/asm/pci.h
+@@ -45,8 +45,15 @@ static inline int pci_proc_domain(struct
+
+ #ifdef CONFIG_PCI
+ extern unsigned int pcibios_assign_all_busses(void);
++extern int pci_legacy_init(void);
++# ifdef CONFIG_ACPI
++# define x86_default_pci_init pci_acpi_init
++# else
++# define x86_default_pci_init pci_legacy_init
++# endif
+ #else
+-#define pcibios_assign_all_busses() 0
++# define pcibios_assign_all_busses() 0
++# define x86_default_pci_init NULL
+ #endif
+
+ extern unsigned long pci_mem_start;
+Index: linux-2.6.33/arch/x86/include/asm/pci_x86.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/pci_x86.h
++++ linux-2.6.33/arch/x86/include/asm/pci_x86.h
+@@ -82,7 +82,6 @@ struct irq_routing_table {
+
+ extern unsigned int pcibios_irq_mask;
+
+-extern int pcibios_scanned;
+ extern spinlock_t pci_config_lock;
+
+ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
+@@ -111,10 +110,10 @@ extern void __init dmi_check_skip_isa_al
+
+ /* some common used subsys_initcalls */
+ extern int __init pci_acpi_init(void);
+-extern int __init pcibios_irq_init(void);
+-extern int __init pci_visws_init(void);
+-extern int __init pci_numaq_init(void);
++extern void __init pcibios_irq_init(void);
+ extern int __init pcibios_init(void);
++extern int pci_legacy_init(void);
++extern void pcibios_fixup_irqs(void);
+
+ /* pci-mmconfig.c */
+
+@@ -182,3 +181,17 @@ static inline void mmio_config_writel(vo
+ {
+ asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory");
+ }
++
++#ifdef CONFIG_PCI
++# ifdef CONFIG_ACPI
++# define x86_default_pci_init pci_acpi_init
++# else
++# define x86_default_pci_init pci_legacy_init
++# endif
++# define x86_default_pci_init_irq pcibios_irq_init
++# define x86_default_pci_fixup_irqs pcibios_fixup_irqs
++#else
++# define x86_default_pci_init NULL
++# define x86_default_pci_init_irq NULL
++# define x86_default_pci_fixup_irqs NULL
++#endif
+Index: linux-2.6.33/arch/x86/include/asm/setup.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/setup.h
++++ linux-2.6.33/arch/x86/include/asm/setup.h
+@@ -37,10 +37,8 @@ void setup_bios_corruption_check(void);
+
+ #ifdef CONFIG_X86_VISWS
+ extern void visws_early_detect(void);
+-extern int is_visws_box(void);
+ #else
+ static inline void visws_early_detect(void) { }
+-static inline int is_visws_box(void) { return 0; }
+ #endif
+
+ extern unsigned long saved_video_mode;
+Index: linux-2.6.33/arch/x86/include/asm/visws/cobalt.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/visws/cobalt.h
++++ linux-2.6.33/arch/x86/include/asm/visws/cobalt.h
+@@ -122,4 +122,6 @@ extern char visws_board_type;
+
+ extern char visws_board_rev;
+
++extern int pci_visws_init(void);
++
+ #endif /* _ASM_X86_VISWS_COBALT_H */
+Index: linux-2.6.33/arch/x86/include/asm/x86_init.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/x86_init.h
++++ linux-2.6.33/arch/x86/include/asm/x86_init.h
+@@ -99,6 +99,18 @@ struct x86_init_iommu {
+ };
+
+ /**
++ * struct x86_init_pci - platform specific pci init functions
++ * @init: platform specific pci init
++ * @init_irq: platform specific pci irq init
++ * @fixup_irqs: platform specific pci irq fixup
++ */
++struct x86_init_pci {
++ int (*init)(void);
++ void (*init_irq)(void);
++ void (*fixup_irqs)(void);
++};
++
++/**
+ * struct x86_init_ops - functions for platform specific setup
+ *
+ */
+@@ -110,6 +122,7 @@ struct x86_init_ops {
+ struct x86_init_paging paging;
+ struct x86_init_timers timers;
+ struct x86_init_iommu iommu;
++ struct x86_init_pci pci;
+ };
+
+ /**
+Index: linux-2.6.33/arch/x86/kernel/acpi/boot.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/acpi/boot.c
++++ linux-2.6.33/arch/x86/kernel/acpi/boot.c
+@@ -35,6 +35,7 @@
+ #include <linux/ioport.h>
+ #include <linux/pci.h>
+
++#include <asm/pci_x86.h>
+ #include <asm/pgtable.h>
+ #include <asm/io_apic.h>
+ #include <asm/apic.h>
+@@ -1603,6 +1604,9 @@ int __init acpi_boot_init(void)
+
+ acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
+
++ if (!acpi_noirq)
++ x86_init.pci.init = pci_acpi_init;
++
+ return 0;
+ }
+
+Index: linux-2.6.33/arch/x86/kernel/apic/numaq_32.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/apic/numaq_32.c
++++ linux-2.6.33/arch/x86/kernel/apic/numaq_32.c
+@@ -277,6 +277,7 @@ static __init void early_check_numaq(voi
+ x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
+ x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
+ x86_init.timers.tsc_pre_init = numaq_tsc_init;
++ x86_init.pci.init = pci_numaq_init;
+ }
+ }
+
+Index: linux-2.6.33/arch/x86/kernel/visws_quirks.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/visws_quirks.c
++++ linux-2.6.33/arch/x86/kernel/visws_quirks.c
+@@ -49,11 +49,6 @@ extern int no_broadcast;
+ char visws_board_type = -1;
+ char visws_board_rev = -1;
+
+-int is_visws_box(void)
+-{
+- return visws_board_type >= 0;
+-}
+-
+ static void __init visws_time_init(void)
+ {
+ printk(KERN_INFO "Starting Cobalt Timer system clock\n");
+@@ -242,6 +237,8 @@ void __init visws_early_detect(void)
+ x86_init.irqs.pre_vector_init = visws_pre_intr_init;
+ x86_init.irqs.trap_init = visws_trap_init;
+ x86_init.timers.timer_init = visws_time_init;
++ x86_init.pci.init = pci_visws_init;
++ x86_init.pci.init_irq = x86_init_noop;
+
+ /*
+ * Install reboot quirks:
+@@ -508,7 +505,7 @@ static struct irq_chip cobalt_irq_type =
+ */
+ static unsigned int startup_piix4_master_irq(unsigned int irq)
+ {
+- init_8259A(0);
++ legacy_pic->init(0);
+
+ return startup_cobalt_irq(irq);
+ }
+@@ -531,10 +528,7 @@ static struct irq_chip piix4_master_irq_
+
+
+ static struct irq_chip piix4_virtual_irq_type = {
+- .name = "PIIX4-virtual",
+- .shutdown = disable_8259A_irq,
+- .enable = enable_8259A_irq,
+- .disable = disable_8259A_irq,
++ .typename = "PIIX4-virtual",
+ };
+
+
+@@ -609,7 +603,7 @@ static irqreturn_t piix4_master_intr(int
+ handle_IRQ_event(realirq, desc->action);
+
+ if (!(desc->status & IRQ_DISABLED))
+- enable_8259A_irq(realirq);
++ legacy_pic->chip->unmask(realirq);
+
+ return IRQ_HANDLED;
+
+@@ -628,6 +622,12 @@ static struct irqaction cascade_action =
+ .name = "cascade",
+ };
+
++static inline void set_piix4_virtual_irq_type(void)
++{
++ piix4_virtual_irq_type.shutdown = i8259A_chip.mask;
++ piix4_virtual_irq_type.enable = i8259A_chip.unmask;
++ piix4_virtual_irq_type.disable = i8259A_chip.mask;
++}
+
+ void init_VISWS_APIC_irqs(void)
+ {
+@@ -653,6 +653,7 @@ void init_VISWS_APIC_irqs(void)
+ desc->chip = &piix4_master_irq_type;
+ }
+ else if (i < CO_IRQ_APIC0) {
++ set_piix4_virtual_irq_type();
+ desc->chip = &piix4_virtual_irq_type;
+ }
+ else if (IS_CO_APIC(i)) {
+Index: linux-2.6.33/arch/x86/kernel/x86_init.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/x86_init.c
++++ linux-2.6.33/arch/x86/kernel/x86_init.c
+@@ -4,9 +4,11 @@
+ * For licencing details see kernel-base/COPYING
+ */
+ #include <linux/init.h>
++#include <linux/ioport.h>
+
+ #include <asm/bios_ebda.h>
+ #include <asm/paravirt.h>
++#include <asm/pci_x86.h>
+ #include <asm/mpspec.h>
+ #include <asm/setup.h>
+ #include <asm/apic.h>
+@@ -70,6 +72,12 @@ struct x86_init_ops x86_init __initdata
+ .iommu = {
+ .iommu_init = iommu_init_noop,
+ },
++
++ .pci = {
++ .init = x86_default_pci_init,
++ .init_irq = x86_default_pci_init_irq,
++ .fixup_irqs = x86_default_pci_fixup_irqs,
++ },
+ };
+
+ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
+Index: linux-2.6.33/arch/x86/pci/acpi.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/acpi.c
++++ linux-2.6.33/arch/x86/pci/acpi.c
+@@ -282,17 +282,14 @@ int __init pci_acpi_init(void)
+ {
+ struct pci_dev *dev = NULL;
+
+- if (pcibios_scanned)
+- return 0;
+-
+ if (acpi_noirq)
+- return 0;
++ return -ENODEV;
+
+ printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+ acpi_irq_penalty_init();
+- pcibios_scanned++;
+ pcibios_enable_irq = acpi_pci_irq_enable;
+ pcibios_disable_irq = acpi_pci_irq_disable;
++ x86_init.pci.init_irq = x86_init_noop;
+
+ if (pci_routeirq) {
+ /*
+Index: linux-2.6.33/arch/x86/pci/common.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/common.c
++++ linux-2.6.33/arch/x86/pci/common.c
+@@ -72,12 +72,6 @@ struct pci_ops pci_root_ops = {
+ };
+
+ /*
+- * legacy, numa, and acpi all want to call pcibios_scan_root
+- * from their initcalls. This flag prevents that.
+- */
+-int pcibios_scanned;
+-
+-/*
+ * This interrupt-safe spinlock protects all accesses to PCI
+ * configuration space.
+ */
+Index: linux-2.6.33/arch/x86/pci/legacy.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/legacy.c
++++ linux-2.6.33/arch/x86/pci/legacy.c
+@@ -35,16 +35,13 @@ static void __devinit pcibios_fixup_peer
+ }
+ }
+
+-static int __init pci_legacy_init(void)
++int __init pci_legacy_init(void)
+ {
+ if (!raw_pci_ops) {
+ printk("PCI: System does not support PCI\n");
+ return 0;
+ }
+
+- if (pcibios_scanned++)
+- return 0;
+-
+ printk("PCI: Probing PCI hardware\n");
+ pci_root_bus = pcibios_scan_root(0);
+ if (pci_root_bus)
+@@ -55,18 +52,15 @@ static int __init pci_legacy_init(void)
+
+ int __init pci_subsys_init(void)
+ {
+-#ifdef CONFIG_X86_NUMAQ
+- pci_numaq_init();
+-#endif
+-#ifdef CONFIG_ACPI
+- pci_acpi_init();
+-#endif
+-#ifdef CONFIG_X86_VISWS
+- pci_visws_init();
+-#endif
+- pci_legacy_init();
++ /*
++ * The init function returns an non zero value when
++ * pci_legacy_init should be invoked.
++ */
++ if (x86_init.pci.init())
++ pci_legacy_init();
++
+ pcibios_fixup_peer_bridges();
+- pcibios_irq_init();
++ x86_init.pci.init_irq();
+ pcibios_init();
+
+ return 0;
+Index: linux-2.6.33/arch/x86/pci/numaq_32.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/numaq_32.c
++++ linux-2.6.33/arch/x86/pci/numaq_32.c
+@@ -152,14 +152,8 @@ int __init pci_numaq_init(void)
+ {
+ int quad;
+
+- if (!found_numaq)
+- return 0;
+-
+ raw_pci_ops = &pci_direct_conf1_mq;
+
+- if (pcibios_scanned++)
+- return 0;
+-
+ pci_root_bus = pcibios_scan_root(0);
+ if (pci_root_bus)
+ pci_bus_add_devices(pci_root_bus);
+Index: linux-2.6.33/arch/x86/pci/visws.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/visws.c
++++ linux-2.6.33/arch/x86/pci/visws.c
+@@ -69,9 +69,6 @@ void __init pcibios_update_irq(struct pc
+
+ int __init pci_visws_init(void)
+ {
+- if (!is_visws_box())
+- return -1;
+-
+ pcibios_enable_irq = &pci_visws_enable_irq;
+ pcibios_disable_irq = &pci_visws_disable_irq;
+
+@@ -90,5 +87,6 @@ int __init pci_visws_init(void)
+ pci_scan_bus_with_sysdata(pci_bus1);
+ pci_fixup_irqs(pci_common_swizzle, visws_map_irq);
+ pcibios_resource_survey();
+- return 0;
++ /* Request bus scan */
++ return 1;
+ }
+Index: linux-2.6.33/arch/x86/pci/irq.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/irq.c
++++ linux-2.6.33/arch/x86/pci/irq.c
+@@ -53,7 +53,7 @@ struct irq_router_handler {
+ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
+ };
+
+-int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
++int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
+ void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
+
+ /*
+@@ -1016,7 +1016,7 @@ static int pcibios_lookup_irq(struct pci
+ return 1;
+ }
+
+-static void __init pcibios_fixup_irqs(void)
++void __init pcibios_fixup_irqs(void)
+ {
+ struct pci_dev *dev = NULL;
+ u8 pin;
+@@ -1110,12 +1110,12 @@ static struct dmi_system_id __initdata p
+ { }
+ };
+
+-int __init pcibios_irq_init(void)
++void __init pcibios_irq_init(void)
+ {
+ DBG(KERN_DEBUG "PCI: IRQ init\n");
+
+- if (pcibios_enable_irq || raw_pci_ops == NULL)
+- return 0;
++ if (raw_pci_ops == NULL)
++ return;
+
+ dmi_check_system(pciirq_dmi_table);
+
+@@ -1142,9 +1142,7 @@ int __init pcibios_irq_init(void)
+ pirq_table = NULL;
+ }
+
+- pcibios_enable_irq = pirq_enable_irq;
+-
+- pcibios_fixup_irqs();
++ x86_init.pci.fixup_irqs();
+
+ if (io_apic_assign_pci_irqs && pci_routeirq) {
+ struct pci_dev *dev = NULL;
+@@ -1157,8 +1155,6 @@ int __init pcibios_irq_init(void)
+ for_each_pci_dev(dev)
+ pirq_enable_irq(dev);
+ }
+-
+- return 0;
+ }
+
+ static void pirq_penalize_isa_irq(int irq, int active)
+Index: linux-2.6.33/arch/x86/kernel/apic/apic.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/apic/apic.c
++++ linux-2.6.33/arch/x86/kernel/apic/apic.c
+@@ -718,6 +718,9 @@ static int __init calibrate_APIC_clock(v
+ */
+ void __init setup_boot_APIC_clock(void)
+ {
++ /* we rely on global clockevent for calibration */
++ if (global_clock_event == NULL)
++ return;
+ /*
+ * The local apic timer can be disabled via the kernel
+ * commandline or from the CPU detection code. Register the lapic
+@@ -1390,7 +1393,7 @@ void __init enable_IR_x2apic(void)
+ }
+
+ local_irq_save(flags);
+- mask_8259A();
++ legacy_pic->mask_all();
+ mask_IO_APIC_setup(ioapic_entries);
+
+ if (dmar_table_init_ret)
+@@ -1422,7 +1425,7 @@ void __init enable_IR_x2apic(void)
+ nox2apic:
+ if (!ret) /* IR enabling failed */
+ restore_IO_APIC_setup(ioapic_entries);
+- unmask_8259A();
++ legacy_pic->restore_mask();
+ local_irq_restore(flags);
+
+ out:
+@@ -2018,7 +2021,7 @@ static int lapic_resume(struct sys_devic
+ }
+
+ mask_IO_APIC_setup(ioapic_entries);
+- mask_8259A();
++ legacy_pic->mask_all();
+ }
+
+ if (x2apic_mode)
+@@ -2062,7 +2065,7 @@ static int lapic_resume(struct sys_devic
+
+ if (intr_remapping_enabled) {
+ reenable_intr_remapping(x2apic_mode);
+- unmask_8259A();
++ legacy_pic->restore_mask();
+ restore_IO_APIC_setup(ioapic_entries);
+ free_ioapic_entries(ioapic_entries);
+ }
+Index: linux-2.6.33/arch/x86/kernel/apic/io_apic.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/apic/io_apic.c
++++ linux-2.6.33/arch/x86/kernel/apic/io_apic.c
+@@ -94,10 +94,8 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCE
+ /* # of MP IRQ source entries */
+ int mp_irq_entries;
+
+-/* Number of legacy interrupts */
+-static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+ /* GSI interrupts */
+-static int nr_irqs_gsi = NR_IRQS_LEGACY;
++int nr_irqs_gsi = NR_IRQS_LEGACY;
+
+ #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
+ int mp_bus_id_to_type[MAX_MP_BUSSES];
+@@ -140,33 +138,10 @@ static struct irq_pin_list *get_one_free
+
+ /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+ #ifdef CONFIG_SPARSE_IRQ
+-static struct irq_cfg irq_cfgx[] = {
++static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
+ #else
+-static struct irq_cfg irq_cfgx[NR_IRQS] = {
++static struct irq_cfg irq_cfgx[NR_IRQS];
+ #endif
+- [0] = { .vector = IRQ0_VECTOR, },
+- [1] = { .vector = IRQ1_VECTOR, },
+- [2] = { .vector = IRQ2_VECTOR, },
+- [3] = { .vector = IRQ3_VECTOR, },
+- [4] = { .vector = IRQ4_VECTOR, },
+- [5] = { .vector = IRQ5_VECTOR, },
+- [6] = { .vector = IRQ6_VECTOR, },
+- [7] = { .vector = IRQ7_VECTOR, },
+- [8] = { .vector = IRQ8_VECTOR, },
+- [9] = { .vector = IRQ9_VECTOR, },
+- [10] = { .vector = IRQ10_VECTOR, },
+- [11] = { .vector = IRQ11_VECTOR, },
+- [12] = { .vector = IRQ12_VECTOR, },
+- [13] = { .vector = IRQ13_VECTOR, },
+- [14] = { .vector = IRQ14_VECTOR, },
+- [15] = { .vector = IRQ15_VECTOR, },
+-};
+-
+-void __init io_apic_disable_legacy(void)
+-{
+- nr_legacy_irqs = 0;
+- nr_irqs_gsi = 0;
+-}
+
+ int __init arch_early_irq_init(void)
+ {
+@@ -176,16 +151,23 @@ int __init arch_early_irq_init(void)
+ int node;
+ int i;
+
++ if (!legacy_pic->nr_legacy_irqs) {
++ nr_irqs_gsi = 0;
++ io_apic_irqs = ~0UL;
++ }
++
+ cfg = irq_cfgx;
+ count = ARRAY_SIZE(irq_cfgx);
+ node= cpu_to_node(boot_cpu_id);
+
+ for (i = 0; i < count; i++) {
++ if (i < legacy_pic->nr_legacy_irqs)
++ cfg[i].vector = IRQ0_VECTOR + i;
+ desc = irq_to_desc(i);
+ desc->chip_data = &cfg[i];
+ zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
+ zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
+- if (i < nr_legacy_irqs)
++ if (i < legacy_pic->nr_legacy_irqs)
+ cpumask_setall(cfg[i].domain);
+ }
+
+@@ -865,7 +847,7 @@ static int __init find_isa_irq_apic(int
+ */
+ static int EISA_ELCR(unsigned int irq)
+ {
+- if (irq < nr_legacy_irqs) {
++ if (irq < legacy_pic->nr_legacy_irqs) {
+ unsigned int port = 0x4d0 + (irq >> 3);
+ return (inb(port) >> (irq & 7)) & 1;
+ }
+@@ -1461,8 +1443,8 @@ static void setup_IO_APIC_irq(int apic_i
+ }
+
+ ioapic_register_intr(irq, desc, trigger);
+- if (irq < nr_legacy_irqs)
+- disable_8259A_irq(irq);
++ if (irq < legacy_pic->nr_legacy_irqs)
++ legacy_pic->chip->mask(irq);
+
+ ioapic_write_entry(apic_id, pin, entry);
+ }
+@@ -1875,7 +1857,7 @@ __apicdebuginit(void) print_PIC(void)
+ unsigned int v;
+ unsigned long flags;
+
+- if (!nr_legacy_irqs)
++ if (!legacy_pic->nr_legacy_irqs)
+ return;
+
+ printk(KERN_DEBUG "\nprinting PIC contents\n");
+@@ -1959,7 +1941,7 @@ void __init enable_IO_APIC(void)
+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
+ }
+
+- if (!nr_legacy_irqs)
++ if (!legacy_pic->nr_legacy_irqs)
+ return;
+
+ for(apic = 0; apic < nr_ioapics; apic++) {
+@@ -2016,7 +1998,7 @@ void disable_IO_APIC(void)
+ */
+ clear_IO_APIC();
+
+- if (!nr_legacy_irqs)
++ if (!legacy_pic->nr_legacy_irqs)
+ return;
+
+ /*
+@@ -2249,9 +2231,9 @@ static unsigned int startup_ioapic_irq(u
+ struct irq_cfg *cfg;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+- if (irq < nr_legacy_irqs) {
+- disable_8259A_irq(irq);
+- if (i8259A_irq_pending(irq))
++ if (irq < legacy_pic->nr_legacy_irqs) {
++ legacy_pic->chip->mask(irq);
++ if (legacy_pic->irq_pending(irq))
+ was_pending = 1;
+ }
+ cfg = irq_cfg(irq);
+@@ -2784,8 +2766,8 @@ static inline void init_IO_APIC_traps(vo
+ * so default to an old-fashioned 8259
+ * interrupt if we can..
+ */
+- if (irq < nr_legacy_irqs)
+- make_8259A_irq(irq);
++ if (irq < legacy_pic->nr_legacy_irqs)
++ legacy_pic->make_irq(irq);
+ else
+ /* Strange. Oh, well.. */
+ desc->chip = &no_irq_chip;
+@@ -2942,7 +2924,7 @@ static inline void __init check_timer(vo
+ /*
+ * get/set the timer IRQ vector:
+ */
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ assign_irq_vector(0, cfg, apic->target_cpus());
+
+ /*
+@@ -2955,7 +2937,7 @@ static inline void __init check_timer(vo
+ * automatically.
+ */
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+- init_8259A(1);
++ legacy_pic->init(1);
+ #ifdef CONFIG_X86_32
+ {
+ unsigned int ver;
+@@ -3014,7 +2996,7 @@ static inline void __init check_timer(vo
+ if (timer_irq_works()) {
+ if (nmi_watchdog == NMI_IO_APIC) {
+ setup_nmi();
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+ }
+ if (disable_timer_pin_1 > 0)
+ clear_IO_APIC_pin(0, pin1);
+@@ -3037,14 +3019,14 @@ static inline void __init check_timer(vo
+ */
+ replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
+ setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+ if (timer_irq_works()) {
+ apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
+ timer_through_8259 = 1;
+ if (nmi_watchdog == NMI_IO_APIC) {
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ setup_nmi();
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+ }
+ goto out;
+ }
+@@ -3052,7 +3034,7 @@ static inline void __init check_timer(vo
+ * Cleanup, just in case ...
+ */
+ local_irq_disable();
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ clear_IO_APIC_pin(apic2, pin2);
+ apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
+ }
+@@ -3071,22 +3053,22 @@ static inline void __init check_timer(vo
+
+ lapic_register_intr(0, desc);
+ apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+
+ if (timer_irq_works()) {
+ apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
+ goto out;
+ }
+ local_irq_disable();
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
+ apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
+
+ apic_printk(APIC_QUIET, KERN_INFO
+ "...trying to set up timer as ExtINT IRQ...\n");
+
+- init_8259A(0);
+- make_8259A_irq(0);
++ legacy_pic->init(0);
++ legacy_pic->make_irq(0);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
+
+ unlock_ExtINT_logic();
+@@ -3128,7 +3110,7 @@ void __init setup_IO_APIC(void)
+ /*
+ * calling enable_IO_APIC() is moved to setup_local_APIC for BP
+ */
+- io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
++ io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
+
+ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
+ /*
+@@ -3139,7 +3121,7 @@ void __init setup_IO_APIC(void)
+ sync_Arb_IDs();
+ setup_IO_APIC_irqs();
+ init_IO_APIC_traps();
+- if (nr_legacy_irqs)
++ if (legacy_pic->nr_legacy_irqs)
+ check_timer();
+ }
+
+@@ -3932,7 +3914,7 @@ static int __io_apic_set_pci_routing(str
+ /*
+ * IRQs < 16 are already in the irq_2_pin[] map
+ */
+- if (irq >= nr_legacy_irqs) {
++ if (irq >= legacy_pic->nr_legacy_irqs) {
+ cfg = desc->chip_data;
+ if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
+ printk(KERN_INFO "can not add pin %d for irq %d\n",
+@@ -4310,3 +4292,25 @@ void __init mp_register_ioapic(int id, u
+
+ nr_ioapics++;
+ }
++
++/* Enable IOAPIC early just for system timer */
++void __init pre_init_apic_IRQ0(void)
++{
++ struct irq_cfg *cfg;
++ struct irq_desc *desc;
++
++ printk(KERN_INFO "Early APIC setup for system timer0\n");
++#ifndef CONFIG_SMP
++ phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
++#endif
++ desc = irq_to_desc_alloc_node(0, 0);
++
++ setup_local_APIC();
++
++ cfg = irq_cfg(0);
++ add_pin_to_irq_node(cfg, 0, 0, 0);
++ set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
++
++ /* FIXME: get trigger and polarity from mp_irqs[] */
++ setup_IO_APIC_irq(0, 0, 0, desc, 0, 0);
++}
+Index: linux-2.6.33/arch/x86/kernel/smpboot.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/smpboot.c
++++ linux-2.6.33/arch/x86/kernel/smpboot.c
+@@ -48,6 +48,7 @@
+ #include <linux/err.h>
+ #include <linux/nmi.h>
+ #include <linux/tboot.h>
++#include <linux/stackprotector.h>
+
+ #include <asm/acpi.h>
+ #include <asm/desc.h>
+@@ -67,6 +68,7 @@
+ #include <linux/mc146818rtc.h>
+
+ #include <asm/smpboot_hooks.h>
++#include <asm/i8259.h>
+
+ #ifdef CONFIG_X86_32
+ u8 apicid_2_node[MAX_APICID];
+@@ -286,9 +288,9 @@ notrace static void __cpuinit start_seco
+ check_tsc_sync_target();
+
+ if (nmi_watchdog == NMI_IO_APIC) {
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ enable_NMI_through_LVT0();
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+ }
+
+ #ifdef CONFIG_X86_32
+@@ -324,6 +326,9 @@ notrace static void __cpuinit start_seco
+ /* enable local interrupts */
+ local_irq_enable();
+
++ /* to prevent fake stack check failure in clock setup */
++ boot_init_stack_canary();
++
+ x86_cpuinit.setup_percpu_clockev();
+
+ wmb();
+Index: linux-2.6.33/Documentation/kernel-parameters.txt
+===================================================================
+--- linux-2.6.33.orig/Documentation/kernel-parameters.txt
++++ linux-2.6.33/Documentation/kernel-parameters.txt
+@@ -1738,6 +1738,12 @@ and is between 256 and 4096 characters.
+ nomfgpt [X86-32] Disable Multi-Function General Purpose
+ Timer usage (for AMD Geode machines).
+
++ x86_mrst_timer [X86-32,APBT]
++ choose timer option for x86 moorestown mid platform.
++ two valid options are apbt timer only and lapic timer
++ plus one apbt timer for broadcast timer.
++ x86_mrst_timer=apbt_only | lapic_and_apbt
++
+ norandmaps Don't use address space randomization. Equivalent to
+ echo 0 > /proc/sys/kernel/randomize_va_space
+
+Index: linux-2.6.33/arch/x86/Kconfig
+===================================================================
+--- linux-2.6.33.orig/arch/x86/Kconfig
++++ linux-2.6.33/arch/x86/Kconfig
+@@ -390,6 +390,7 @@ config X86_MRST
+ bool "Moorestown MID platform"
+ depends on X86_32
+ depends on X86_EXTENDED_PLATFORM
++ select APB_TIMER
+ ---help---
+ Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
+ Internet Device(MID) platform. Moorestown consists of two chips:
+@@ -398,6 +399,14 @@ config X86_MRST
+ nor standard legacy replacement devices/features. e.g. Moorestown does
+ not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+
++config MRST_SPI_UART_BOOT_MSG
++ def_bool y
++ prompt "Moorestown SPI UART boot message"
++ depends on (X86_MRST && X86_32)
++ help
++ Enable this to see boot message during protected mode boot phase, such as
++ kernel decompression, BAUD rate is set at 115200 8n1
++
+ config X86_RDC321X
+ bool "RDC R-321x SoC"
+ depends on X86_32
+@@ -612,6 +621,24 @@ config HPET_EMULATE_RTC
+ def_bool y
+ depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
+
++config APB_TIMER
++ def_bool y if X86_MRST
++ prompt "Langwell APB Timer Support" if X86_MRST
++ help
++ APB timer is the replacement for 8254, HPET on X86 MID platforms.
++ The APBT provides a stable time base on SMP
++ systems, unlike the TSC, but it is more expensive to access,
++ as it is off-chip. APB timers are always running regardless of CPU
++ C states, they are used as per CPU clockevent device when possible.
++
++config LNW_IPC
++ def_bool n
++ prompt "Langwell IPC Support" if (X86_32 || X86_MRST)
++ depends on X86_MRST
++ help
++ IPC unit is used on Moorestown to bridge the communications
++ between IA and SCU.
++
+ # Mark as embedded because too many people got it wrong.
+ # The code disables itself when not needed.
+ config DMI
+Index: linux-2.6.33/arch/x86/include/asm/apb_timer.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/include/asm/apb_timer.h
+@@ -0,0 +1,72 @@
++/*
++ * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare
++ *
++ * (C) Copyright 2009 Intel Corporation
++ * Author: Jacob Pan (jacob.jun.pan@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ */
++
++#ifndef ASM_X86_APBT_H
++#define ASM_X86_APBT_H
++#include <linux/sfi.h>
++
++#ifdef CONFIG_APB_TIMER
++
++/* Langwell DW APB timer registers */
++#define APBTMR_N_LOAD_COUNT 0x00
++#define APBTMR_N_CURRENT_VALUE 0x04
++#define APBTMR_N_CONTROL 0x08
++#define APBTMR_N_EOI 0x0c
++#define APBTMR_N_INT_STATUS 0x10
++
++#define APBTMRS_INT_STATUS 0xa0
++#define APBTMRS_EOI 0xa4
++#define APBTMRS_RAW_INT_STATUS 0xa8
++#define APBTMRS_COMP_VERSION 0xac
++#define APBTMRS_REG_SIZE 0x14
++
++/* register bits */
++#define APBTMR_CONTROL_ENABLE (1<<0)
++#define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */
++#define APBTMR_CONTROL_INT (1<<2)
++
++/* default memory mapped register base */
++#define LNW_SCU_ADDR 0xFF100000
++#define LNW_EXT_TIMER_OFFSET 0x1B800
++#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
++#define LNW_EXT_TIMER_PGOFFSET 0x800
++
++/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
++#define APBT_MAX_FREQ 50
++#define APBT_MIN_FREQ 1
++#define APBT_MMAP_SIZE 1024
++
++#define APBT_DEV_USED 1
++
++#define SFI_MTMR_MAX_NUM 8
++
++extern void apbt_time_init(void);
++extern struct clock_event_device *global_clock_event;
++extern unsigned long apbt_quick_calibrate(void);
++extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
++extern void apbt_setup_secondary_clock(void);
++extern unsigned int boot_cpu_id;
++extern int disable_apbt_percpu;
++
++extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
++extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
++extern int sfi_mtimer_num;
++
++#else /* CONFIG_APB_TIMER */
++
++static inline unsigned long apbt_quick_calibrate(void) {return 0; }
++static inline void apbt_time_init(void) { } /* no-op when CONFIG_APB_TIMER is disabled */
++
++#endif
++#endif /* ASM_X86_APBT_H */
+Index: linux-2.6.33/arch/x86/kernel/Makefile
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/Makefile
++++ linux-2.6.33/arch/x86/kernel/Makefile
+@@ -57,6 +57,12 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+ obj-y += cpu/
+ obj-y += acpi/
+ obj-$(CONFIG_SFI) += sfi.o
++sfi-processor-objs += sfi/sfi_processor_core.o
++sfi-processor-objs += sfi/sfi_processor_idle.o
++sfi-processor-objs += sfi/sfi_processor_perflib.o
++
++obj-$(CONFIG_SFI_PROCESSOR_PM) += sfi-processor.o
++
+ obj-y += reboot.o
+ obj-$(CONFIG_MCA) += mca_32.o
+ obj-$(CONFIG_X86_MSR) += msr.o
+@@ -85,8 +91,11 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefaul
+ obj-$(CONFIG_KGDB) += kgdb.o
+ obj-$(CONFIG_VM86) += vm86_32.o
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
++obj-$(CONFIG_X86_MRST_EARLY_PRINTK) += mrst_earlyprintk.o
+
+ obj-$(CONFIG_HPET_TIMER) += hpet.o
++obj-$(CONFIG_APB_TIMER) += apb_timer.o
++obj-$(CONFIG_LNW_IPC) += ipc_mrst.o
+
+ obj-$(CONFIG_K8_NB) += k8.o
+ obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
+@@ -105,7 +114,7 @@ obj-$(CONFIG_SCx200) += scx200.o
+ scx200-y += scx200_32.o
+
+ obj-$(CONFIG_OLPC) += olpc.o
+-obj-$(CONFIG_X86_MRST) += mrst.o
++obj-$(CONFIG_X86_MRST) += mrst.o vrtc.o
+
+ microcode-y := microcode_core.o
+ microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
+Index: linux-2.6.33/arch/x86/kernel/apb_timer.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/apb_timer.c
+@@ -0,0 +1,765 @@
++/*
++ * apb_timer.c: Driver for Langwell APB timers
++ *
++ * (C) Copyright 2009 Intel Corporation
++ * Author: Jacob Pan (jacob.jun.pan@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * Langwell is the south complex of Intel Moorestown MID platform. There are
++ * eight external timers in total that can be used by the operating system.
++ * The timer information, such as frequency and addresses, is provided to the
++ * OS via SFI tables.
++ * Timer interrupts are routed via FW/HW emulated IOAPIC independently via
++ * individual redirection table entries (RTE).
++ * Unlike HPET, there is no master counter, therefore one of the timers is
++ * used as clocksource. The overall allocation looks like:
++ * - timer 0 - NR_CPUs for per cpu timer
++ * - one timer for clocksource
++ * - one timer for watchdog driver.
++ * It is also worth noting that the APB timer does not support true one-shot mode,
++ * free-running mode will be used here to emulate one-shot mode.
++ * APB timer can also be used as broadcast timer along with per cpu local APIC
++ * timer, but by default APB timer has higher rating than local APIC timers.
++ */
++
++#include <linux/clocksource.h>
++#include <linux/clockchips.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/sysdev.h>
++#include <linux/pm.h>
++#include <linux/pci.h>
++#include <linux/sfi.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/irq.h>
++
++#include <asm/fixmap.h>
++#include <asm/apb_timer.h>
++
++#define APBT_MASK CLOCKSOURCE_MASK(32)
++#define APBT_SHIFT 22
++#define APBT_CLOCKEVENT_RATING 150
++#define APBT_CLOCKSOURCE_RATING 250
++#define APBT_MIN_DELTA_USEC 200
++
++#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
++#define APBT_CLOCKEVENT0_NUM (0)
++#define APBT_CLOCKEVENT1_NUM (1)
++#define APBT_CLOCKSOURCE_NUM (2)
++
++static unsigned long apbt_address;
++static int apb_timer_block_enabled;
++static void __iomem *apbt_virt_address;
++static int phy_cs_timer_id;
++
++/*
++ * Common DW APB timer info
++ */
++static uint64_t apbt_freq;
++
++static void apbt_set_mode(enum clock_event_mode mode,
++ struct clock_event_device *evt);
++static int apbt_next_event(unsigned long delta,
++ struct clock_event_device *evt);
++static cycle_t apbt_read_clocksource(struct clocksource *cs);
++static void apbt_restart_clocksource(void);
++
++struct apbt_dev {
++ struct clock_event_device evt;
++ unsigned int num;
++ int cpu;
++ unsigned int irq;
++ unsigned int tick;
++ unsigned int count;
++ unsigned int flags;
++ char name[10];
++};
++
++int disable_apbt_percpu __cpuinitdata;
++
++#ifdef CONFIG_SMP
++static unsigned int apbt_num_timers_used;
++static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
++static struct apbt_dev *apbt_devs;
++#endif
++
++static inline unsigned long apbt_readl_reg(unsigned long a)
++{
++ return readl(apbt_virt_address + a);
++}
++
++static inline void apbt_writel_reg(unsigned long d, unsigned long a)
++{
++ writel(d, apbt_virt_address + a);
++}
++
++static inline unsigned long apbt_readl(int n, unsigned long a)
++{
++ return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
++}
++
++static inline void apbt_writel(int n, unsigned long d, unsigned long a)
++{
++ writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE);
++}
++
++static inline void apbt_set_mapping(void)
++{
++ struct sfi_timer_table_entry *mtmr;
++
++ if (apbt_virt_address) {
++ pr_debug("APBT base already mapped\n");
++ return;
++ }
++ mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
++ if (mtmr == NULL) {
++ printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
++ APBT_CLOCKEVENT0_NUM);
++ return;
++ }
++ apbt_address = (unsigned long)mtmr->phys_addr;
++ if (!apbt_address) {
++ printk(KERN_WARNING "No timer base from SFI, use default\n");
++ apbt_address = APBT_DEFAULT_BASE;
++ }
++ apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
++ if (apbt_virt_address) {
++ pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\
++ (void *)apbt_address, (void *)apbt_virt_address);
++ } else {
++ pr_debug("Failed mapping APBT phy address at %p\n",\
++ (void *)apbt_address);
++ goto panic_noapbt;
++ }
++ apbt_freq = mtmr->freq_hz / USEC_PER_SEC;
++ sfi_free_mtmr(mtmr);
++
++ /* Now figure out the physical timer id for clocksource device */
++ mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
++ if (mtmr == NULL)
++ goto panic_noapbt;
++
++ /* Now figure out the physical timer id */
++ phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff)
++ / APBTMRS_REG_SIZE;
++ pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id);
++ return;
++
++panic_noapbt:
++ panic("Failed to setup APB system timer\n");
++
++}
++
++static inline void apbt_clear_mapping(void)
++{
++ iounmap(apbt_virt_address);
++ apbt_virt_address = NULL;
++}
++
++/*
++ * APBT timer interrupt enable / disable
++ */
++static inline int is_apbt_capable(void)
++{
++ return apbt_virt_address ? 1 : 0;
++}
++
++static struct clocksource clocksource_apbt = {
++ .name = "apbt",
++ .rating = APBT_CLOCKSOURCE_RATING,
++ .read = apbt_read_clocksource,
++ .mask = APBT_MASK,
++ .shift = APBT_SHIFT,
++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
++ .resume = apbt_restart_clocksource,
++};
++
++/* boot APB clock event device */
++static struct clock_event_device apbt_clockevent = {
++ .name = "apbt0",
++ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
++ .set_mode = apbt_set_mode,
++ .set_next_event = apbt_next_event,
++ .shift = APBT_SHIFT,
++ .irq = 0,
++ .rating = APBT_CLOCKEVENT_RATING,
++};
++
++/*
++ * if user does not want to use per CPU apb timer, just give it a lower rating
++ * than local apic timer and skip the late per cpu timer init.
++ */
++static inline int __init setup_x86_mrst_timer(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ if (strcmp("apbt_only", arg) == 0)
++ disable_apbt_percpu = 0;
++ else if (strcmp("lapic_and_apbt", arg) == 0)
++ disable_apbt_percpu = 1;
++ else {
++ pr_warning("X86 MRST timer option %s not recognised"
++ " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
++ arg);
++ return -EINVAL;
++ }
++ return 0;
++}
++__setup("x86_mrst_timer=", setup_x86_mrst_timer);
++
++/*
++ * start count down from 0xffff_ffff. this is done by toggling the enable bit
++ * then load initial load count to ~0.
++ */
++static void apbt_start_counter(int n)
++{
++ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
++
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++ apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT);
++ /* enable, mask interrupt */
++ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
++ ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++ /* read it once to get cached counter value initialized */
++ apbt_read_clocksource(&clocksource_apbt);
++}
++
++static irqreturn_t apbt_interrupt_handler(int irq, void *data)
++{
++ struct apbt_dev *dev = (struct apbt_dev *)data;
++ struct clock_event_device *aevt = &dev->evt;
++
++ if (!aevt->event_handler) {
++ printk(KERN_INFO "Spurious APBT timer interrupt on %d\n",
++ dev->num);
++ return IRQ_NONE;
++ }
++ aevt->event_handler(aevt);
++ return IRQ_HANDLED;
++}
++
++static void apbt_restart_clocksource(void)
++{
++ apbt_start_counter(phy_cs_timer_id);
++}
++
++/* Setup IRQ routing via IOAPIC */
++#ifdef CONFIG_SMP
++static void apbt_setup_irq(struct apbt_dev *adev)
++{
++ struct irq_chip *chip;
++ struct irq_desc *desc;
++
++ /* timer0 irq has been setup early */
++ if (adev->irq == 0)
++ return;
++ desc = irq_to_desc(adev->irq);
++ chip = get_irq_chip(adev->irq);
++ disable_irq(adev->irq);
++ desc->status |= IRQ_MOVE_PCNTXT;
++ irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
++ /* APB timer irqs are set up as mp_irqs, timer is edge triggerred */
++ set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
++ enable_irq(adev->irq);
++ if (system_state == SYSTEM_BOOTING)
++ if (request_irq(adev->irq, apbt_interrupt_handler,
++ IRQF_TIMER | IRQF_DISABLED|IRQF_NOBALANCING, adev->name, adev)) {
++ printk(KERN_ERR "Failed request IRQ for APBT%d\n", adev->num);
++ }
++}
++#endif
++
++static void apbt_enable_int(int n)
++{
++ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
++ /* clear pending intr */
++ apbt_readl(n, APBTMR_N_EOI);
++ ctrl &= ~APBTMR_CONTROL_INT;
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++}
++
++static void apbt_disable_int(int n)
++{
++ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
++
++ ctrl |= APBTMR_CONTROL_INT;
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++}
++
++
++static int apbt_clockevent_register(void)
++{
++ struct sfi_timer_table_entry *mtmr;
++
++ mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
++ if (mtmr == NULL) {
++ printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
++ APBT_CLOCKEVENT0_NUM);
++ return -ENODEV;
++ }
++
++ /*
++ * We need to calculate the scaled math multiplication factor for
++ * nanosecond to apbt tick conversion.
++ * mult = (nsec/cycle)*2^APBT_SHIFT
++ */
++ apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz
++ , NSEC_PER_SEC, APBT_SHIFT);
++
++ /* Calculate the min / max delta */
++ apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
++ &apbt_clockevent);
++ apbt_clockevent.min_delta_ns = clockevent_delta2ns(
++ APBT_MIN_DELTA_USEC*apbt_freq,
++ &apbt_clockevent);
++ /*
++ * Start apbt with the boot cpu mask and make it
++ * global if not used for per cpu timer.
++ */
++ apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
++
++ if (disable_apbt_percpu) {
++ apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
++ global_clock_event = &apbt_clockevent;
++ printk(KERN_DEBUG "%s clockevent registered as global\n",
++ global_clock_event->name);
++ }
++ if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
++ IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
++ apbt_clockevent.name, &apbt_clockevent)) {
++ printk(KERN_ERR "Failed request IRQ for APBT%d\n",
++ apbt_clockevent.irq);
++ }
++
++ clockevents_register_device(&apbt_clockevent);
++ /* Start APBT 0 interrupts */
++ apbt_enable_int(APBT_CLOCKEVENT0_NUM);
++
++ sfi_free_mtmr(mtmr);
++ return 0;
++}
++
++#ifdef CONFIG_SMP
++/* Should be called with per cpu */
++void apbt_setup_secondary_clock(void)
++{
++ struct apbt_dev *adev;
++ struct clock_event_device *aevt;
++ int cpu;
++
++ /* Don't register boot CPU clockevent */
++ cpu = smp_processor_id();
++ if (cpu == boot_cpu_id)
++ return;
++ /*
++ * We need to calculate the scaled math multiplication factor for
++ * nanosecond to apbt tick conversion.
++ * mult = (nsec/cycle)*2^APBT_SHIFT
++ */
++ printk(KERN_INFO "Init per CPU clockevent %d\n", cpu);
++ adev = &per_cpu(cpu_apbt_dev, cpu);
++ aevt = &adev->evt;
++
++ memcpy(aevt, &apbt_clockevent, sizeof(*aevt));
++ aevt->cpumask = cpumask_of(cpu);
++ aevt->name = adev->name;
++ aevt->mode = CLOCK_EVT_MODE_UNUSED;
++
++ printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n",
++ cpu, aevt->name, *(u32 *)aevt->cpumask);
++
++ apbt_setup_irq(adev);
++
++ clockevents_register_device(aevt);
++
++ apbt_enable_int(cpu);
++
++ return;
++}
++
++static int apbt_cpuhp_notify(struct notifier_block *n,
++ unsigned long action, void *hcpu)
++{
++ unsigned long cpu = (unsigned long)hcpu;
++ struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
++
++ switch (action & 0xf) {
++ case CPU_DEAD:
++ apbt_disable_int(cpu);
++ if (system_state == SYSTEM_RUNNING)
++ pr_debug("skipping APBT CPU %lu offline\n", cpu);
++ else if (adev) {
++ pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
++ free_irq(adev->irq, adev);
++ }
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static __init int apbt_late_init(void)
++{
++ if (disable_apbt_percpu)
++ return 0;
++ /* This notifier should be called after workqueue is ready */
++ hotcpu_notifier(apbt_cpuhp_notify, -20);
++ return 0;
++}
++fs_initcall(apbt_late_init);
++#else
++
++void apbt_setup_secondary_clock(void) {}
++
++#endif /* CONFIG_SMP */
++
++static void apbt_set_mode(enum clock_event_mode mode,
++ struct clock_event_device *evt)
++{
++ unsigned long ctrl;
++ uint64_t delta;
++ int timer_num;
++ struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
++
++ timer_num = adev->num;
++ pr_debug("%s CPU %d timer %d mode=%d\n",
++ __func__, first_cpu(*evt->cpumask), timer_num, mode);
++
++ switch (mode) {
++ case CLOCK_EVT_MODE_PERIODIC:
++ delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
++ delta >>= apbt_clockevent.shift;
++ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
++ ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ /*
++ * DW APB p. 46, have to disable timer before load counter,
++ * may cause sync problem.
++ */
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ udelay(1);
++ pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
++ apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
++ ctrl |= APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ break;
++ /* APB timer does not have one-shot mode, use free running mode */
++ case CLOCK_EVT_MODE_ONESHOT:
++ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
++ /*
++ * set free running mode, this mode will let timer reload max
++ * timeout which will give time (3min on 25MHz clock) to rearm
++ * the next event, therefore emulate the one-shot mode.
++ */
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
++
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ /* write again to set free running mode */
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++
++ /*
++ * DW APB p. 46, load counter with all 1s before starting free
++ * running mode.
++ */
++ apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
++ ctrl &= ~APBTMR_CONTROL_INT;
++ ctrl |= APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ break;
++
++ case CLOCK_EVT_MODE_UNUSED:
++ case CLOCK_EVT_MODE_SHUTDOWN:
++ apbt_disable_int(timer_num);
++ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ break;
++
++ case CLOCK_EVT_MODE_RESUME:
++ apbt_enable_int(timer_num);
++ break;
++ }
++}
++
++static int apbt_next_event(unsigned long delta,
++ struct clock_event_device *evt)
++{
++ unsigned long ctrl;
++ int timer_num;
++
++ struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
++
++ timer_num = adev->num;
++ /* Disable timer */
++ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ /* write new count */
++ apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
++ ctrl |= APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ return 0;
++}
++
++/*
++ * The APB timer clock is not in sync with pclk on Langwell, which translates
++ * to unreliable read values caused by sampling error. The error does not add
++ * up over time and only happens when sampling a 0 as a 1 by mistake, so time
++ * would appear to go backwards. The following code tries to prevent time from
++ * traveling backwards. Slightly paranoid.
++ */
++static cycle_t apbt_read_clocksource(struct clocksource *cs)
++{
++ unsigned long t0, t1, t2;
++ static unsigned long last_read;
++
++bad_count:
++ t1 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ t2 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ if (unlikely(t1 < t2)) {
++ pr_debug("APBT: read current count error %lx:%lx:%lx\n",
++ t1, t2, t2 - t1);
++ goto bad_count;
++ }
++ /*
++ * check against cached last read, makes sure time does not go back.
++ * it could be a normal rollover but we will do tripple check anyway
++ */
++ if (unlikely(t2 > last_read)) {
++ /* check if we have a normal rollover */
++ unsigned long raw_intr_status =
++ apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
++ /*
++ * cs timer interrupt is masked but raw intr bit is set if
++ * rollover occurs. then we read EOI reg to clear it.
++ */
++ if (raw_intr_status & (1 << phy_cs_timer_id)) {
++ apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
++ goto out;
++ }
++ pr_debug("APB CS going back %lx:%lx:%lx ",
++ t2, last_read, t2 - last_read);
++bad_count_x3:
++ pr_debug(KERN_INFO "tripple check enforced\n");
++ t0 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ udelay(1);
++ t1 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ udelay(1);
++ t2 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ if ((t2 > t1) || (t1 > t0)) {
++ printk(KERN_ERR "Error: APB CS tripple check failed\n");
++ goto bad_count_x3;
++ }
++ }
++out:
++ last_read = t2;
++ return (cycle_t)~t2;
++}
++
++static int apbt_clocksource_register(void)
++{
++ u64 start, now;
++ cycle_t t1;
++
++ /* Start the counter, use timer 2 as source, timer 0/1 for event */
++ apbt_start_counter(phy_cs_timer_id);
++
++ /* Verify whether apbt counter works */
++ t1 = apbt_read_clocksource(&clocksource_apbt);
++ rdtscll(start);
++
++ /*
++ * We don't know the TSC frequency yet, but waiting for
++ * 200000 TSC cycles is safe:
++ * 4 GHz == 50us
++ * 1 GHz == 200us
++ */
++ do {
++ rep_nop();
++ rdtscll(now);
++ } while ((now - start) < 200000UL);
++
++ /* APBT is the only always on clocksource, it has to work! */
++ if (t1 == apbt_read_clocksource(&clocksource_apbt))
++ panic("APBT counter not counting. APBT disabled\n");
++
++ /*
++ * initialize and register APBT clocksource
++ * convert that to ns/clock cycle
++ * mult = (ns/c) * 2^APBT_SHIFT
++ */
++ clocksource_apbt.mult = div_sc(MSEC_PER_SEC,
++ (unsigned long) apbt_freq, APBT_SHIFT);
++ clocksource_register(&clocksource_apbt);
++
++ return 0;
++}
++
++/*
++ * Early setup the APBT timer, only use timer 0 for booting then switch to
++ * per CPU timer if possible.
++ * returns 1 if per cpu apbt is setup
++ * returns 0 if no per cpu apbt is chosen
++ * panic if set up failed, this is the only platform timer on Moorestown.
++ */
++void __init apbt_time_init(void)
++{
++#ifdef CONFIG_SMP
++ int i;
++ struct sfi_timer_table_entry *p_mtmr;
++ unsigned int percpu_timer;
++ struct apbt_dev *adev;
++#endif
++
++ if (apb_timer_block_enabled)
++ return;
++ apbt_set_mapping();
++ if (apbt_virt_address) {
++ pr_debug("Found APBT version 0x%lx\n",\
++ apbt_readl_reg(APBTMRS_COMP_VERSION));
++ } else
++ goto out_noapbt;
++ /*
++ * Read the frequency and check for a sane value, for ESL model
++ * we extend the possible clock range to allow time scaling.
++ */
++
++ if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
++ pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq);
++ goto out_noapbt;
++ }
++ if (apbt_clocksource_register()) {
++ pr_debug("APBT has failed to register clocksource\n");
++ goto out_noapbt;
++ }
++ if (!apbt_clockevent_register())
++ apb_timer_block_enabled = 1;
++ else {
++ pr_debug("APBT has failed to register clockevent\n");
++ goto out_noapbt;
++ }
++#ifdef CONFIG_SMP
++ /* kernel cmdline disable apb timer, so we will use lapic timers */
++ if (disable_apbt_percpu) {
++ printk(KERN_INFO "apbt: disabled per cpu timer\n");
++ return;
++ }
++ pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
++ if (num_possible_cpus() <= sfi_mtimer_num) {
++ percpu_timer = 1;
++ apbt_num_timers_used = num_possible_cpus();
++ } else {
++ percpu_timer = 0;
++ apbt_num_timers_used = 1;
++ adev = &per_cpu(cpu_apbt_dev, 0);
++ adev->flags &= ~APBT_DEV_USED;
++ }
++ pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
++
++ /* here we set up per CPU timer data structure */
++ apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
++ GFP_KERNEL);
++ if (!apbt_devs) {
++ printk(KERN_ERR "Failed to allocate APB timer devices\n");
++ return;
++ }
++ for (i = 0; i < apbt_num_timers_used; i++) {
++ adev = &per_cpu(cpu_apbt_dev, i);
++ adev->num = i;
++ adev->cpu = i;
++ p_mtmr = sfi_get_mtmr(i);
++ if (p_mtmr) {
++ adev->tick = p_mtmr->freq_hz;
++ adev->irq = p_mtmr->irq;
++ } else
++ printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
++ adev->count = 0;
++ sprintf(adev->name, "apbt%d", i);
++ }
++#endif
++
++ return;
++
++out_noapbt:
++ apbt_clear_mapping();
++ apb_timer_block_enabled = 0;
++ panic("failed to enable APB timer\n");
++}
++
++static inline void apbt_disable(int n)
++{
++ if (is_apbt_capable()) {
++ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++ }
++}
++
++/* called before apb_timer_enable, use early map */
++unsigned long apbt_quick_calibrate()
++{
++ int i, scale;
++ u64 old, new;
++ cycle_t t1, t2;
++ unsigned long khz = 0;
++ u32 loop, shift;
++
++ apbt_set_mapping();
++ apbt_start_counter(phy_cs_timer_id);
++
++ /* check if the timer can count down, otherwise return */
++ old = apbt_read_clocksource(&clocksource_apbt);
++ i = 10000;
++ while (--i) {
++ if (old != apbt_read_clocksource(&clocksource_apbt))
++ break;
++ }
++ if (!i)
++ goto failed;
++
++ /* count 16 ms */
++ loop = (apbt_freq * 1000) << 4;
++
++ /* restart the timer to ensure it won't get to 0 in the calibration */
++ apbt_start_counter(phy_cs_timer_id);
++
++ old = apbt_read_clocksource(&clocksource_apbt);
++ old += loop;
++
++ t1 = __native_read_tsc();
++
++ do {
++ new = apbt_read_clocksource(&clocksource_apbt);
++ } while (new < old);
++
++ t2 = __native_read_tsc();
++
++ shift = 5;
++ if (unlikely(loop >> shift == 0)) {
++ printk(KERN_INFO
++ "APBT TSC calibration failed, not enough resolution\n");
++ return 0;
++ }
++ scale = (int)div_u64((t2 - t1), loop >> shift);
++ khz = (scale * apbt_freq * 1000) >> shift;
++ printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
++ return khz;
++failed:
++ return 0;
++}
+Index: linux-2.6.33/arch/x86/include/asm/mrst.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/include/asm/mrst.h
+@@ -0,0 +1,16 @@
++/*
++ * mrst.h: Intel Moorestown platform specific setup code
++ *
++ * (C) Copyright 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++#ifndef _ASM_X86_MRST_H
++#define _ASM_X86_MRST_H
++extern int pci_mrst_init(void);
++int __init sfi_parse_mrtc(struct sfi_table_header *table);
++
++#endif /* _ASM_X86_MRST_H */
+Index: linux-2.6.33/arch/x86/kernel/mrst.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/mrst.c
++++ linux-2.6.33/arch/x86/kernel/mrst.c
+@@ -2,16 +2,234 @@
+ * mrst.c: Intel Moorestown platform specific setup code
+ *
+ * (C) Copyright 2008 Intel Corporation
+- * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
++
+ #include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sfi.h>
++#include <linux/bitmap.h>
++#include <linux/threads.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/langwell_pmic_gpio.h>
++#include <linux/i2c.h>
++#include <linux/sfi.h>
++#include <linux/i2c/pca953x.h>
++#include <linux/gpio_keys.h>
++#include <linux/input.h>
++#include <linux/platform_device.h>
++#include <linux/irq.h>
+
++#include <asm/string.h>
+ #include <asm/setup.h>
++#include <asm/mpspec_def.h>
++#include <asm/hw_irq.h>
++#include <asm/apic.h>
++#include <asm/io_apic.h>
++#include <asm/apb_timer.h>
++#include <asm/io.h>
++#include <asm/mrst.h>
++#include <asm/vrtc.h>
++#include <asm/ipc_defs.h>
++#include <asm/reboot.h>
++#include <asm/i8259.h>
++
++#define LANGWELL_GPIO_ALT_ADDR 0xff12c038
++#define MRST_I2C_BUSNUM 3
++#define SFI_MRTC_MAX 8
++
++static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
++static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
++int sfi_mtimer_num;
++
++struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
++EXPORT_SYMBOL_GPL(sfi_mrtc_array);
++int sfi_mrtc_num;
++
++static inline void assign_to_mp_irq(struct mpc_intsrc *m,
++ struct mpc_intsrc *mp_irq)
++{
++ memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
++}
++
++static inline int mp_irq_cmp(struct mpc_intsrc *mp_irq,
++ struct mpc_intsrc *m)
++{
++ return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
++}
++
++static void save_mp_irq(struct mpc_intsrc *m)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ if (!mp_irq_cmp(&mp_irqs[i], m))
++ return;
++ }
++
++ assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
++
++/* parse all the mtimer info to a global mtimer array */
++static int __init sfi_parse_mtmr(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_timer_table_entry *pentry;
++ struct mpc_intsrc mp_irq;
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sfi_mtimer_num) {
++ sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
++ struct sfi_timer_table_entry);
++ pentry = (struct sfi_timer_table_entry *) sb->pentry;
++ totallen = sfi_mtimer_num * sizeof(*pentry);
++ memcpy(sfi_mtimer_array, pentry, totallen);
++ }
++
++ printk(KERN_INFO "SFI: MTIMER info (num = %d):\n", sfi_mtimer_num);
++ pentry = sfi_mtimer_array;
++ for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
++ printk(KERN_INFO "timer[%d]: paddr = 0x%08x, freq = %dHz,"
++ " irq = %d\n", totallen, (u32)pentry->phys_addr,
++ pentry->freq_hz, pentry->irq);
++ if (!pentry->irq)
++ continue;
++ mp_irq.type = MP_IOAPIC;
++ mp_irq.irqtype = mp_INT;
++ mp_irq.irqflag = 0;
++ mp_irq.srcbus = 0;
++ mp_irq.srcbusirq = pentry->irq; /* IRQ */
++ mp_irq.dstapic = MP_APIC_ALL;
++ mp_irq.dstirq = pentry->irq;
++ save_mp_irq(&mp_irq);
++ }
++
++ return 0;
++}
++
++struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
++{
++ int i;
++ if (hint < sfi_mtimer_num) {
++ if (!sfi_mtimer_usage[hint]) {
++ printk(KERN_DEBUG "hint taken for timer %d irq %d\n",\
++ hint, sfi_mtimer_array[hint].irq);
++ sfi_mtimer_usage[hint] = 1;
++ return &sfi_mtimer_array[hint];
++ }
++ }
++ /* take the first timer available */
++ for (i = 0; i < sfi_mtimer_num;) {
++ if (!sfi_mtimer_usage[i]) {
++ sfi_mtimer_usage[i] = 1;
++ return &sfi_mtimer_array[i];
++ }
++ i++;
++ }
++ return NULL;
++}
++
++void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
++{
++ int i;
++ for (i = 0; i < sfi_mtimer_num;) {
++ if (mtmr->irq == sfi_mtimer_array[i].irq) {
++ sfi_mtimer_usage[i] = 0;
++ return;
++ }
++ i++;
++ }
++}
++
++/* parse all the mrtc info to a global mrtc array */
++int __init sfi_parse_mrtc(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_rtc_table_entry *pentry;
++ struct mpc_intsrc mp_irq;
++
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sfi_mrtc_num) {
++ sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
++ struct sfi_rtc_table_entry);
++ pentry = (struct sfi_rtc_table_entry *)sb->pentry;
++ totallen = sfi_mrtc_num * sizeof(*pentry);
++ memcpy(sfi_mrtc_array, pentry, totallen);
++ }
++
++ printk(KERN_INFO "SFI: RTC info (num = %d):\n", sfi_mrtc_num);
++ pentry = sfi_mrtc_array;
++ for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
++ printk(KERN_INFO "RTC[%d]: paddr = 0x%08x, irq = %d\n",
++ totallen, (u32)pentry->phys_addr, pentry->irq);
++ mp_irq.type = MP_IOAPIC;
++ mp_irq.irqtype = mp_INT;
++ mp_irq.irqflag = 0;
++ mp_irq.srcbus = 0;
++ mp_irq.srcbusirq = pentry->irq; /* IRQ */
++ mp_irq.dstapic = MP_APIC_ALL;
++ mp_irq.dstirq = pentry->irq;
++ save_mp_irq(&mp_irq);
++ }
++ return 0;
++}
++
++/*
++ * the secondary clock in Moorestown can be APBT or LAPIC clock, default to
++ * APBT but cmdline option can also override it.
++ */
++static void __cpuinit mrst_setup_secondary_clock(void)
++{
++ /* restore default lapic clock if disabled by cmdline */
++ if (disable_apbt_percpu)
++ return setup_secondary_APIC_clock();
++ apbt_setup_secondary_clock();
++}
++
++static unsigned long __init mrst_calibrate_tsc(void)
++{
++ unsigned long flags, fast_calibrate;
++
++ local_irq_save(flags);
++ fast_calibrate = apbt_quick_calibrate();
++ local_irq_restore(flags);
++
++ if (fast_calibrate)
++ return fast_calibrate;
++
++ return 0;
++}
++
++void __init mrst_time_init(void)
++{
++ sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
++ pre_init_apic_IRQ0();
++ apbt_time_init();
++}
++
++void __init mrst_rtc_init(void)
++{
++ sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
++}
++
++static void mrst_power_off(void)
++{
++ lnw_ipc_single_cmd(0xf1, 1, 0, 0);
++}
++
++static void mrst_reboot(void)
++{
++ lnw_ipc_single_cmd(0xf1, 0, 0, 0);
++}
+
+ /*
+ * Moorestown specific x86_init function overrides and early setup
+@@ -21,4 +239,241 @@ void __init x86_mrst_early_setup(void)
+ {
+ x86_init.resources.probe_roms = x86_init_noop;
+ x86_init.resources.reserve_resources = x86_init_noop;
++ x86_init.timers.timer_init = mrst_time_init;
++ x86_init.irqs.pre_vector_init = x86_init_noop;
++
++ x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock;
++
++ x86_platform.calibrate_tsc = mrst_calibrate_tsc;
++ x86_platform.get_wallclock = vrtc_get_time;
++ x86_platform.set_wallclock = vrtc_set_mmss;
++
++ x86_init.pci.init = pci_mrst_init;
++ x86_init.pci.fixup_irqs = x86_init_noop;
++
++ x86_init.oem.banner = mrst_rtc_init;
++ legacy_pic = &null_legacy_pic;
++
++ /* Moorestown specific power_off/restart method */
++ pm_power_off = mrst_power_off;
++ machine_ops.emergency_restart = mrst_reboot;
+ }
++
++/*
++ * the dummy SPI2 salves are in SPIB table with host_num = 0, but their
++ * chip_selects begin with MRST_SPI2_CS_START, this will save a dummy ugly
++ * SPI2 controller driver
++ */
++#define MRST_SPI2_CS_START 4
++static struct langwell_pmic_gpio_platform_data pmic_gpio_pdata;
++
++static int __init sfi_parse_spib(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_spi_table_entry *pentry;
++ struct spi_board_info *info;
++ int num, i, j;
++ int ioapic;
++ struct io_apic_irq_attr irq_attr;
++
++ sb = (struct sfi_table_simple *)table;
++ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_spi_table_entry);
++ pentry = (struct sfi_spi_table_entry *) sb->pentry;
++
++ info = kzalloc(num * sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ pr_info("%s(): Error in kzalloc\n", __func__);
++ return -ENOMEM;
++ }
++
++ if (num)
++ pr_info("Moorestown SPI devices info:\n");
++
++ for (i = 0, j = 0; i < num; i++, pentry++) {
++ strncpy(info[j].modalias, pentry->name, 16);
++ info[j].irq = pentry->irq_info;
++ info[j].bus_num = pentry->host_num;
++ info[j].chip_select = pentry->cs;
++ info[j].max_speed_hz = 3125000; /* hard coded */
++ if (info[j].chip_select >= MRST_SPI2_CS_START) {
++ /* these SPI2 devices are not exposed to system as PCI
++ * devices, but they have separate RTE entry in IOAPIC
++ * so we have to enable them one by one here
++ */
++ ioapic = mp_find_ioapic(info[j].irq);
++ irq_attr.ioapic = ioapic;
++ irq_attr.ioapic_pin = info[j].irq;
++ irq_attr.trigger = 1;
++ irq_attr.polarity = 1;
++ io_apic_set_pci_routing(NULL, info[j].irq,
++ &irq_attr);
++ }
++ info[j].platform_data = pentry->dev_info;
++
++ if (!strcmp(pentry->name, "pmic_gpio")) {
++ memcpy(&pmic_gpio_pdata, pentry->dev_info, 8);
++ pmic_gpio_pdata.gpiointr = 0xffffeff8;
++ info[j].platform_data = &pmic_gpio_pdata;
++ }
++ pr_info("info[%d]: name = %16s, irq = 0x%04x, bus = %d, "
++ "cs = %d\n", j, info[j].modalias, info[j].irq,
++ info[j].bus_num, info[j].chip_select);
++ j++;
++ }
++ spi_register_board_info(info, j);
++ kfree(info);
++ return 0;
++}
++
++static struct pca953x_platform_data max7315_pdata;
++static struct pca953x_platform_data max7315_pdata_2;
++
++static int __init sfi_parse_i2cb(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_i2c_table_entry *pentry;
++ struct i2c_board_info *info[MRST_I2C_BUSNUM];
++ int table_length[MRST_I2C_BUSNUM] = {0};
++ int num, i, j, busnum;
++
++ sb = (struct sfi_table_simple *)table;
++ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_i2c_table_entry);
++ pentry = (struct sfi_i2c_table_entry *) sb->pentry;
++
++ if (num <= 0)
++ return -ENODEV;
++
++ for (busnum = 0; busnum < MRST_I2C_BUSNUM; busnum++) {
++ info[busnum] = kzalloc(num * sizeof(**info), GFP_KERNEL);
++ if (!info[busnum]) {
++ pr_info("%s(): Error in kzalloc\n", __func__);
++ while (busnum--)
++ kfree(info[busnum]);
++ return -ENOMEM;
++ }
++ }
++
++ if (num)
++ pr_info("Moorestown I2C devices info:\n");
++
++ for (busnum = 0, j = 0; j < num; j++, pentry++) {
++ busnum = pentry->host_num;
++ if (busnum >= MRST_I2C_BUSNUM || busnum < 0)
++ continue;
++
++ i = table_length[busnum];
++ strncpy(info[busnum][i].type, pentry->name, 16);
++ info[busnum][i].irq = pentry->irq_info;
++ info[busnum][i].addr = pentry->addr;
++ info[busnum][i].platform_data = pentry->dev_info;
++ table_length[busnum]++;
++
++ if (!strcmp(pentry->name, "i2c_max7315")) {
++ strcpy(info[busnum][i].type, "max7315");
++ memcpy(&max7315_pdata, pentry->dev_info, 10);
++ info[busnum][i].platform_data = &max7315_pdata;
++ }
++ else if (!strcmp(pentry->name, "i2c_max7315_2")) {
++ strcpy(info[busnum][i].type, "max7315");
++ memcpy(&max7315_pdata_2, pentry->dev_info, 10);
++ info[busnum][i].platform_data = &max7315_pdata_2;
++ }
++
++ pr_info("info[%d]: bus = %d, name = %16s, irq = 0x%04x, addr = "
++ "0x%x\n", i, busnum, info[busnum][i].type,
++ info[busnum][i].irq, info[busnum][i].addr);
++ }
++
++ for (busnum = 0; busnum < MRST_I2C_BUSNUM; busnum++) {
++ i2c_register_board_info(busnum, info[busnum],
++ table_length[busnum]);
++ }
++
++ return 0;
++}
++
++/* setting multi-function-pin */
++static void set_alt_func(void)
++{
++ u32 __iomem *mem = ioremap_nocache(LANGWELL_GPIO_ALT_ADDR, 16);
++ u32 value;
++
++ if (!mem) {
++ pr_err("can not map GPIO controller address.\n");
++ return;
++ }
++ value = (readl(mem + 1) & 0x0000ffff) | 0x55550000;
++ writel(value, mem + 1);
++ value = (readl(mem + 2) & 0xf0000000) | 0x05555555;
++ writel(value, mem + 2);
++ value = (readl(mem + 3) & 0xfff000ff) | 0x00055500;
++ writel(value, mem + 3);
++
++ iounmap(mem);
++}
++
++static int __init mrst_platform_init(void)
++{
++ sfi_table_parse(SFI_SIG_SPIB, NULL, NULL, sfi_parse_spib);
++ sfi_table_parse(SFI_SIG_I2CB, NULL, NULL, sfi_parse_i2cb);
++ set_alt_func();
++ return 0;
++}
++
++arch_initcall(mrst_platform_init);
++
++static struct gpio_keys_button gpio_button[] = {
++ [0] = {
++ .desc = "power button1",
++ .code = KEY_POWER,
++ .type = EV_KEY,
++ .active_low = 1,
++ .debounce_interval = 3000, /*soft debounce*/
++ .gpio = 65,
++ },
++ [1] = {
++ .desc = "programmable button1",
++ .code = KEY_PROG1,
++ .type = EV_KEY,
++ .active_low = 1,
++ .debounce_interval = 20,
++ .gpio = 66,
++ },
++ [2] = {
++ .desc = "programmable button2",
++ .code = KEY_PROG2,
++ .type = EV_KEY,
++ .active_low = 1,
++ .debounce_interval = 20,
++ .gpio = 69
++ },
++ [3] = {
++ .desc = "lid switch",
++ .code = SW_LID,
++ .type = EV_SW,
++ .active_low = 1,
++ .debounce_interval = 20,
++ .gpio = 101
++ },
++};
++
++static struct gpio_keys_platform_data mrst_gpio_keys = {
++ .buttons = gpio_button,
++ .rep = 1,
++ .nbuttons = sizeof(gpio_button) / sizeof(struct gpio_keys_button),
++};
++
++static struct platform_device pb_device = {
++ .name = "gpio-keys",
++ .id = -1,
++ .dev = {
++ .platform_data = &mrst_gpio_keys,
++ },
++};
++
++static int __init pb_keys_init(void)
++{
++ return platform_device_register(&pb_device);
++}
++
++late_initcall(pb_keys_init);
+Index: linux-2.6.33/arch/x86/include/asm/io_apic.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/io_apic.h
++++ linux-2.6.33/arch/x86/include/asm/io_apic.h
+@@ -143,8 +143,6 @@ extern int noioapicreroute;
+ /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
+ extern int timer_through_8259;
+
+-extern void io_apic_disable_legacy(void);
+-
+ /*
+ * If we use the IO-APIC for IRQ routing, disable automatic
+ * assignment of PCI IRQ's.
+@@ -189,6 +187,7 @@ extern struct mp_ioapic_gsi mp_gsi_rout
+ int mp_find_ioapic(int gsi);
+ int mp_find_ioapic_pin(int ioapic, int gsi);
+ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
++extern void __init pre_init_apic_IRQ0(void);
+
+ #else /* !CONFIG_X86_IO_APIC */
+
+Index: linux-2.6.33/arch/x86/pci/mmconfig-shared.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/mmconfig-shared.c
++++ linux-2.6.33/arch/x86/pci/mmconfig-shared.c
+@@ -601,7 +601,8 @@ static void __init __pci_mmcfg_init(int
+ if (!known_bridge)
+ acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+
+- pci_mmcfg_reject_broken(early);
++ if (!acpi_disabled)
++ pci_mmcfg_reject_broken(early);
+
+ if (list_empty(&pci_mmcfg_list))
+ return;
+Index: linux-2.6.33/arch/x86/pci/Makefile
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/Makefile
++++ linux-2.6.33/arch/x86/pci/Makefile
+@@ -13,7 +13,7 @@ obj-$(CONFIG_X86_VISWS) += visws.o
+
+ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
+
+-obj-y += common.o early.o
++obj-y += common.o early.o mrst.o
+ obj-y += amd_bus.o
+ obj-$(CONFIG_X86_64) += bus_numa.o
+
+Index: linux-2.6.33/arch/x86/pci/mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/pci/mrst.c
+@@ -0,0 +1,262 @@
++/*
++ * Moorestown PCI support
++ * Copyright (c) 2008 Intel Corporation
++ * Jesse Barnes <jesse.barnes@intel.com>
++ *
++ * Moorestown has an interesting PCI implementation:
++ * - configuration space is memory mapped (as defined by MCFG)
++ * - Lincroft devices also have a real, type 1 configuration space
++ * - Early Lincroft silicon has a type 1 access bug that will cause
++ * a hang if non-existent devices are accessed
++ * - some devices have the "fixed BAR" capability, which means
++ * they can't be relocated or modified; check for that during
++ * BAR sizing
++ *
++ * So, we use the MCFG space for all reads and writes, but also send
++ * Lincroft writes to type 1 space. But only read/write if the device
++ * actually exists, otherwise return all 1s for reads and bit bucket
++ * the writes.
++ */
++
++#include <linux/sched.h>
++#include <linux/pci.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/dmi.h>
++
++#include <asm/acpi.h>
++#include <asm/segment.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/pci_x86.h>
++#include <asm/hw_irq.h>
++
++#define PCIE_CAP_OFFSET 0x100
++
++/* Fixed BAR fields */
++#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
++#define PCI_FIXED_BAR_0_SIZE 0x04
++#define PCI_FIXED_BAR_1_SIZE 0x08
++#define PCI_FIXED_BAR_2_SIZE 0x0c
++#define PCI_FIXED_BAR_3_SIZE 0x10
++#define PCI_FIXED_BAR_4_SIZE 0x14
++#define PCI_FIXED_BAR_5_SIZE 0x1c
++
++/**
++ * fixed_bar_cap - return the offset of the fixed BAR cap if found
++ * @bus: PCI bus
++ * @devfn: device in question
++ *
++ * Look for the fixed BAR cap on @bus and @devfn, returning its offset
++ * if found or 0 otherwise.
++ */
++static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
++{
++ int pos;
++ u32 pcie_cap = 0, cap_data;
++ if (!raw_pci_ext_ops) return 0;
++
++ pos = PCIE_CAP_OFFSET;
++ while (pos) {
++ if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
++ devfn, pos, 4, &pcie_cap))
++ return 0;
++
++ if (pcie_cap == 0xffffffff)
++ return 0;
++
++ if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
++ raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
++ devfn, pos + 4, 4, &cap_data);
++ if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
++ return pos;
++ }
++
++ pos = pcie_cap >> 20;
++ }
++
++ return 0;
++}
++
++static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
++ int reg, int len, u32 val, int offset)
++{
++ u32 size;
++ unsigned int domain, busnum;
++ int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;
++
++ domain = pci_domain_nr(bus);
++ busnum = bus->number;
++
++ if (val == ~0 && len == 4) {
++ unsigned long decode;
++
++ raw_pci_ext_ops->read(domain, busnum, devfn,
++ offset + 8 + (bar * 4), 4, &size);
++
++ /* Turn the size into a decode pattern for the sizing code */
++ if (size) {
++ decode = size - 1;
++ decode |= decode >> 1;
++ decode |= decode >> 2;
++ decode |= decode >> 4;
++ decode |= decode >> 8;
++ decode |= decode >> 16;
++ decode++;
++ decode = ~(decode - 1);
++ } else {
++ decode = ~0;
++ }
++
++ /*
++ * If val is all ones, the core code is trying to size the reg,
++ * so update the mmconfig space with the real size.
++ *
++ * Note: this assumes the fixed size we got is a power of two.
++ */
++ return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
++ decode);
++ }
++
++ /* This is some other kind of BAR write, so just do it. */
++ return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
++}
++
++/**
++ * type1_access_ok - check whether to use type 1
++ * @bus: bus number
++ * @devfn: device & function in question
++ *
++ * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
++ * all, then we can go ahead with any reads & writes. If it's on a Lincroft,
++ * but doesn't exist, avoid the access altogether to keep the chip from
++ * hanging.
++ */
++static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
++{
++ /* This is a workaround for A0 LNC bug where PCI status register does
++ * not have new CAP bit set. can not be written by SW either.
++ *
++ * PCI header type in real LNC indicates a single function device, this
++ * will prevent probing other devices under the same function in PCI
++ * shim. Therefore, use the header type in shim instead.
++ */
++ if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
++ return 0;
++ if (bus == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(0, 0)))
++ return 1;
++ return 0; /* langwell on others */
++}
++
++static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
++ int size, u32 *value)
++{
++ if (type1_access_ok(bus->number, devfn, where))
++ return pci_direct_conf1.read(pci_domain_nr(bus), bus->number,
++ devfn, where, size, value);
++ return raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
++ devfn, where, size, value);
++}
++
++static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
++ int size, u32 value)
++{
++ int offset;
++
++ /* On MRST, there is no PCI ROM BAR, this will cause a subsequent read
++ * to ROM BAR return 0 then being ignored.
++ */
++ if (where == PCI_ROM_ADDRESS)
++ return 0;
++
++ /*
++ * Devices with fixed BARs need special handling:
++ * - BAR sizing code will save, write ~0, read size, restore
++ * - so writes to fixed BARs need special handling
++ * - other writes to fixed BAR devices should go through mmconfig
++ */
++ offset = fixed_bar_cap(bus, devfn);
++ if (offset &&
++ (where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)) {
++ return pci_device_update_fixed(bus, devfn, where, size, value,
++ offset);
++ }
++
++ /*
++ * On Moorestown update both real & mmconfig space
++ * Note: early Lincroft silicon can't handle type 1 accesses to
++ * non-existent devices, so just eat the write in that case.
++ */
++ if (type1_access_ok(bus->number, devfn, where))
++ return pci_direct_conf1.write(pci_domain_nr(bus), bus->number,
++ devfn, where, size, value);
++ return raw_pci_ext_ops->write(pci_domain_nr(bus), bus->number, devfn,
++ where, size, value);
++}
++
++static int mrst_pci_irq_enable(struct pci_dev *dev)
++{
++ u8 pin;
++ struct io_apic_irq_attr irq_attr;
++
++ if (!dev->irq)
++ return 0;
++
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++
++ /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
++ * IOAPIC RTE entries, so we just enable RTE for the device.
++ */
++ irq_attr.ioapic = mp_find_ioapic(dev->irq);
++ irq_attr.ioapic_pin = dev->irq;
++ irq_attr.trigger = 1; /* level */
++ irq_attr.polarity = 1; /* active low */
++ io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
++
++ return 0;
++}
++
++struct pci_ops pci_mrst_ops = {
++ .read = pci_read,
++ .write = pci_write,
++};
++
++/**
++ * pci_mrst_init - installs pci_mrst_ops
++ *
++ * Moorestown has an interesting PCI implementation (see above).
++ * Called when the early platform detection installs it.
++ */
++int __init pci_mrst_init(void)
++{
++ printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
++ pci_mmcfg_late_init();
++ pcibios_enable_irq = mrst_pci_irq_enable;
++ pci_root_ops = pci_mrst_ops;
++ /* Continue with standard init */
++ return 1;
++}
++
++/*
++ * Langwell devices reside at fixed offsets, don't try to move them.
++ */
++static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev)
++{
++ unsigned long offset;
++ u32 size;
++ int i;
++
++ /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
++ offset = fixed_bar_cap(dev->bus, dev->devfn);
++ if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
++ PCI_DEVFN(2, 2) == dev->devfn)
++ return;
++
++ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
++ pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
++ dev->resource[i].end = dev->resource[i].start + size - 1;
++ dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);
++
+Index: linux-2.6.33/include/linux/pci_regs.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/pci_regs.h
++++ linux-2.6.33/include/linux/pci_regs.h
+@@ -507,6 +507,7 @@
+ #define PCI_EXT_CAP_ID_VC 2
+ #define PCI_EXT_CAP_ID_DSN 3
+ #define PCI_EXT_CAP_ID_PWR 4
++#define PCI_EXT_CAP_ID_VNDR 11
+ #define PCI_EXT_CAP_ID_ACS 13
+ #define PCI_EXT_CAP_ID_ARI 14
+ #define PCI_EXT_CAP_ID_ATS 15
+Index: linux-2.6.33/arch/x86/include/asm/fixmap.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/fixmap.h
++++ linux-2.6.33/arch/x86/include/asm/fixmap.h
+@@ -114,6 +114,10 @@ enum fixed_addresses {
+ FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
+ FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
+ __end_of_permanent_fixed_addresses,
++
++#ifdef CONFIG_X86_MRST
++ FIX_LNW_VRTC,
++#endif
+ /*
+ * 256 temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+Index: linux-2.6.33/arch/x86/include/asm/vrtc.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/include/asm/vrtc.h
+@@ -0,0 +1,30 @@
++#ifndef _MRST_VRTC_H
++#define _MRST_VRTC_H
++
++#ifdef CONFIG_X86_MRST
++extern unsigned char vrtc_cmos_read(unsigned char reg);
++extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
++
++extern struct sfi_rtc_table_entry sfi_mrtc_array[];
++extern int sfi_mrtc_num;
++
++extern unsigned long vrtc_get_time(void);
++extern int vrtc_set_mmss(unsigned long nowtime);
++
++#define MRST_VRTC_PGOFFSET (0xc00)
++
++#else
++static inline unsigned char vrtc_cmos_read(unsigned char reg)
++{
++ return 0xff;
++}
++
++static inline void vrtc_cmos_write(unsigned char val, unsigned char reg)
++{
++ return;
++}
++#endif
++
++#define MRST_VRTC_MAP_SZ (1024)
++
++#endif
+Index: linux-2.6.33/arch/x86/kernel/vrtc.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/vrtc.c
+@@ -0,0 +1,116 @@
++/*
++ * vrtc.c: Driver for virtual RTC device on Intel MID platform
++ *
++ * (C) Copyright 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * VRTC is emulated by system controller firmware, the real HW
++ * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
++ * in a memory mapped IO space that is visible to the host IA
++ * processor. However, any updates to VRTC requires an IPI call
++ * to the SCU FW.
++ *
++ * This driver is based on RTC CMOS driver.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sfi.h>
++
++#include <asm/vrtc.h>
++#include <asm/time.h>
++#include <asm/fixmap.h>
++
++static unsigned char *vrtc_va __read_mostly;
++
++static void vrtc_init_mmap(void)
++{
++ unsigned long rtc_paddr = sfi_mrtc_array[0].phys_addr;
++
++ BUG_ON(!rtc_paddr);
++
++ /* vRTC's register address may not be page aligned */
++ set_fixmap_nocache(FIX_LNW_VRTC, rtc_paddr);
++ vrtc_va = (unsigned char __iomem *)__fix_to_virt(FIX_LNW_VRTC);
++ vrtc_va += rtc_paddr & ~PAGE_MASK;
++}
++
++unsigned char vrtc_cmos_read(unsigned char reg)
++{
++ unsigned char retval;
++
++ /* vRTC's registers range from 0x0 to 0xD */
++ if (reg > 0xd)
++ return 0xff;
++
++ if (unlikely(!vrtc_va))
++ vrtc_init_mmap();
++
++ lock_cmos_prefix(reg);
++ retval = *(vrtc_va + (reg << 2));
++ lock_cmos_suffix(reg);
++ return retval;
++}
++EXPORT_SYMBOL(vrtc_cmos_read);
++
++void vrtc_cmos_write(unsigned char val, unsigned char reg)
++{
++ if (reg > 0xd)
++ return;
++
++ if (unlikely(!vrtc_va))
++ vrtc_init_mmap();
++
++ lock_cmos_prefix(reg);
++ *(vrtc_va + (reg << 2)) = val;
++ lock_cmos_suffix(reg);
++}
++EXPORT_SYMBOL(vrtc_cmos_write);
++
++unsigned long vrtc_get_time(void)
++{
++ u8 sec, min, hour, mday, mon;
++ u32 year;
++
++ while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
++ cpu_relax();
++
++ sec = vrtc_cmos_read(RTC_SECONDS);
++ min = vrtc_cmos_read(RTC_MINUTES);
++ hour = vrtc_cmos_read(RTC_HOURS);
++ mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
++ mon = vrtc_cmos_read(RTC_MONTH);
++ year = vrtc_cmos_read(RTC_YEAR);
++
++ /* vRTC YEAR reg contains the offset to 1970 */
++ year += 1970;
++
++ printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
++ "mon: %d year: %d\n", sec, min, hour, mday, mon, year);
++
++ return mktime(year, mon, mday, hour, min, sec);
++}
++
++/* Only care about the minutes and seconds */
++int vrtc_set_mmss(unsigned long nowtime)
++{
++ int real_sec, real_min;
++ int vrtc_min;
++
++ vrtc_min = vrtc_cmos_read(RTC_MINUTES);
++
++ real_sec = nowtime % 60;
++ real_min = nowtime / 60;
++ if (((abs(real_min - vrtc_min) + 15)/30) & 1)
++ real_min += 30;
++ real_min %= 60;
++
++ vrtc_cmos_write(real_sec, RTC_SECONDS);
++ vrtc_cmos_write(real_min, RTC_MINUTES);
++ return 0;
++}
+Index: linux-2.6.33/drivers/rtc/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/rtc/Kconfig
++++ linux-2.6.33/drivers/rtc/Kconfig
+@@ -423,6 +423,19 @@ config RTC_DRV_CMOS
+ This driver can also be built as a module. If so, the module
+ will be called rtc-cmos.
+
++config RTC_DRV_VRTC
++ tristate "Virtual RTC for MRST"
++ depends on X86_MRST
++ default y if X86_MRST
++
++ help
++ Say "yes" here to get direct support for the real time clock
++ found in the Moorestown platform. The vRTC is an emulated RTC
++ that derives its clock source from a real RTC in the PMIC. The
++ MC146818-style programming interface is mostly preserved, except
++ that updates are done via IPC calls to the system controller FW.
++
++
+ config RTC_DRV_DS1216
+ tristate "Dallas DS1216"
+ depends on SNI_RM
+Index: linux-2.6.33/drivers/rtc/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/rtc/Makefile
++++ linux-2.6.33/drivers/rtc/Makefile
+@@ -28,6 +28,7 @@ obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq48
+ obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
+ obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
+ obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
++obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
+ obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
+ obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
+ obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
+Index: linux-2.6.33/drivers/rtc/rtc-mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/rtc/rtc-mrst.c
+@@ -0,0 +1,660 @@
++/*
++ * rtc-mrst.c: Driver for Moorestown virtual RTC
++ *
++ * (C) Copyright 2009 Intel Corporation
++ * Author: Jacob Pan (jacob.jun.pan@intel.com)
++ * Feng Tang (feng.tang@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * VRTC is emulated by system controller firmware, the real HW
++ * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
++ * in a memory mapped IO space that is visible to the host IA
++ * processor. However, any updates to VRTC requires an IPI call
++ * to the SCU FW.
++ *
++ * This driver is based on RTC CMOS driver.
++ */
++
++/*
++ * Note:
++ * * MRST vRTC only support binary mode and 24H mode
++ * * MRST vRTC only support PIE and AIE, no UIE
++ * * its alarm function is also limited to hr/min/sec.
++ * * so far it doesn't support wake event func
++ */
++
++#include <linux/mod_devicetable.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/sfi.h>
++
++#include <asm-generic/rtc.h>
++
++#include <asm/ipc_defs.h>
++#include <asm/vrtc.h>
++
++struct mrst_rtc {
++ struct rtc_device *rtc;
++ struct device *dev;
++ int irq;
++ struct resource *iomem;
++
++ void (*wake_on)(struct device *);
++ void (*wake_off)(struct device *);
++
++ u8 enabled_wake;
++ u8 suspend_ctrl;
++
++ /* Newer hardware extends the original register set */
++ u8 day_alrm;
++ u8 mon_alrm;
++ u8 century;
++};
++
++/* both platform and pnp busses use negative numbers for invalid irqs */
++#define is_valid_irq(n) ((n) >= 0)
++
++static const char driver_name[] = "rtc_mrst";
++
++#define RTC_IRQMASK (RTC_PF | RTC_AF)
++
++static inline int is_intr(u8 rtc_intr)
++{
++ if (!(rtc_intr & RTC_IRQF))
++ return 0;
++ return rtc_intr & RTC_IRQMASK;
++}
++
++/*
++ * rtc_time's year contains the increment over 1900, but vRTC's YEAR
++ * register can't be programmed to value larger than 0x64, so vRTC
++ * driver chose to use 1970 (UNIX time start point) as the base, and
++ * do the translation in read/write time
++ */
++static int mrst_read_time(struct device *dev, struct rtc_time *time)
++{
++ unsigned long flags;
++
++ if (rtc_is_updating())
++ mdelay(20);
++
++ spin_lock_irqsave(&rtc_lock, flags);
++ time->tm_sec = vrtc_cmos_read(RTC_SECONDS);
++ time->tm_min = vrtc_cmos_read(RTC_MINUTES);
++ time->tm_hour = vrtc_cmos_read(RTC_HOURS);
++ time->tm_mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
++ time->tm_mon = vrtc_cmos_read(RTC_MONTH);
++ time->tm_year = vrtc_cmos_read(RTC_YEAR);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ /* Adjust for the 1970/1900 */
++ time->tm_year += 70;
++ time->tm_mon--;
++ return RTC_24H;
++}
++
++static int mrst_set_time(struct device *dev, struct rtc_time *time)
++{
++ int ret;
++ unsigned long flags;
++ unsigned char mon, day, hrs, min, sec;
++ unsigned int yrs;
++
++ yrs = time->tm_year;
++ mon = time->tm_mon + 1; /* tm_mon starts at zero */
++ day = time->tm_mday;
++ hrs = time->tm_hour;
++ min = time->tm_min;
++ sec = time->tm_sec;
++
++ if (yrs < 70 || yrs > 138)
++ return -EINVAL;
++ yrs -= 70;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ /* Need think about leap year */
++ vrtc_cmos_write(yrs, RTC_YEAR);
++ vrtc_cmos_write(mon, RTC_MONTH);
++ vrtc_cmos_write(day, RTC_DAY_OF_MONTH);
++ vrtc_cmos_write(hrs, RTC_HOURS);
++ vrtc_cmos_write(min, RTC_MINUTES);
++ vrtc_cmos_write(sec, RTC_SECONDS);
++
++ ret = lnw_ipc_single_cmd(IPC_VRTC_CMD, IPC_VRTC_SET_TIME, 0, 0);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ return ret;
++}
++
++static int mrst_read_alarm(struct device *dev, struct rtc_wkalrm *t)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char rtc_control;
++
++ if (!is_valid_irq(mrst->irq))
++ return -EIO;
++
++ /* Basic alarms only support hour, minute, and seconds fields.
++ * Some also support day and month, for alarms up to a year in
++ * the future.
++ */
++ t->time.tm_mday = -1;
++ t->time.tm_mon = -1;
++ t->time.tm_year = -1;
++
++ /* vRTC only supports binary mode */
++ spin_lock_irq(&rtc_lock);
++ t->time.tm_sec = vrtc_cmos_read(RTC_SECONDS_ALARM);
++ t->time.tm_min = vrtc_cmos_read(RTC_MINUTES_ALARM);
++ t->time.tm_hour = vrtc_cmos_read(RTC_HOURS_ALARM);
++
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ spin_unlock_irq(&rtc_lock);
++
++ t->enabled = !!(rtc_control & RTC_AIE);
++ t->pending = 0;
++
++ return 0;
++}
++
++static void mrst_checkintr(struct mrst_rtc *mrst, unsigned char rtc_control)
++{
++ unsigned char rtc_intr;
++
++ /*
++ * NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
++ * allegedly some older rtcs need that to handle irqs properly
++ */
++ rtc_intr = vrtc_cmos_read(RTC_INTR_FLAGS);
++ rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
++ if (is_intr(rtc_intr))
++ rtc_update_irq(mrst->rtc, 1, rtc_intr);
++}
++
++static void mrst_irq_enable(struct mrst_rtc *mrst, unsigned char mask)
++{
++ unsigned char rtc_control;
++
++ /*
++ * Flush any pending IRQ status, notably for update irqs,
++ * before we enable new IRQs
++ */
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ mrst_checkintr(mrst, rtc_control);
++
++ rtc_control |= mask;
++ vrtc_cmos_write(rtc_control, RTC_CONTROL);
++
++ mrst_checkintr(mrst, rtc_control);
++}
++
++static void mrst_irq_disable(struct mrst_rtc *mrst, unsigned char mask)
++{
++ unsigned char rtc_control;
++
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ rtc_control &= ~mask;
++ vrtc_cmos_write(rtc_control, RTC_CONTROL);
++ mrst_checkintr(mrst, rtc_control);
++}
++
++static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char hrs, min, sec;
++ int ret = 0;
++
++ if (!is_valid_irq(mrst->irq))
++ return -EIO;
++
++ hrs = t->time.tm_hour;
++ min = t->time.tm_min;
++ sec = t->time.tm_sec;
++
++ spin_lock_irq(&rtc_lock);
++ /* Next rtc irq must not be from previous alarm setting */
++ mrst_irq_disable(mrst, RTC_AIE);
++
++ /* Update alarm */
++ vrtc_cmos_write(hrs, RTC_HOURS_ALARM);
++ vrtc_cmos_write(min, RTC_MINUTES_ALARM);
++ vrtc_cmos_write(sec, RTC_SECONDS_ALARM);
++
++ ret = lnw_ipc_single_cmd(IPC_VRTC_CMD, IPC_VRTC_SET_ALARM, 0, 0);
++ spin_unlock_irq(&rtc_lock);
++
++ if (ret)
++ return ret;
++
++ spin_lock_irq(&rtc_lock);
++ if (t->enabled)
++ mrst_irq_enable(mrst, RTC_AIE);
++
++ spin_unlock_irq(&rtc_lock);
++
++ return 0;
++}
++
++
++static int mrst_irq_set_state(struct device *dev, int enabled)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned long flags;
++
++ if (!is_valid_irq(mrst->irq))
++ return -ENXIO;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ if (enabled)
++ mrst_irq_enable(mrst, RTC_PIE);
++ else
++ mrst_irq_disable(mrst, RTC_PIE);
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ return 0;
++}
++
++#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
++
++/* Currently, the vRTC doesn't support UIE ON/OFF */
++static int
++mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned long flags;
++
++ switch (cmd) {
++ case RTC_AIE_OFF:
++ case RTC_AIE_ON:
++ if (!is_valid_irq(mrst->irq))
++ return -EINVAL;
++ break;
++ default:
++ /* PIE ON/OFF is handled by mrst_irq_set_state() */
++ return -ENOIOCTLCMD;
++ }
++
++ spin_lock_irqsave(&rtc_lock, flags);
++ switch (cmd) {
++ case RTC_AIE_OFF: /* alarm off */
++ mrst_irq_disable(mrst, RTC_AIE);
++ break;
++ case RTC_AIE_ON: /* alarm on */
++ mrst_irq_enable(mrst, RTC_AIE);
++ break;
++ }
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ return 0;
++}
++
++#else
++#define mrst_rtc_ioctl NULL
++#endif
++
++#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
++
++static int mrst_procfs(struct device *dev, struct seq_file *seq)
++{
++ unsigned char rtc_control, valid;
++
++ spin_lock_irq(&rtc_lock);
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ valid = vrtc_cmos_read(RTC_VALID);
++ spin_unlock_irq(&rtc_lock);
++
++ return seq_printf(seq,
++ "periodic_IRQ\t: %s\n"
++ "square_wave\t: %s\n"
++ "BCD\t\t: %s\n"
++ "DST_enable\t: %s\n"
++ "periodic_freq\t: daily\n",
++ (rtc_control & RTC_PIE) ? "yes" : "no",
++ (rtc_control & RTC_SQWE) ? "yes" : "no",
++ (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
++ (rtc_control & RTC_DST_EN) ? "yes" : "no");
++}
++
++#else
++#define mrst_procfs NULL
++#endif
++
++static const struct rtc_class_ops mrst_rtc_ops = {
++ .ioctl = mrst_rtc_ioctl,
++ .read_time = mrst_read_time,
++ .set_time = mrst_set_time,
++ .read_alarm = mrst_read_alarm,
++ .set_alarm = mrst_set_alarm,
++ .proc = mrst_procfs,
++ .irq_set_freq = NULL,
++ .irq_set_state = mrst_irq_set_state,
++};
++
++static struct mrst_rtc mrst_rtc;
++
++/*
++ * When vRTC IRQ is captured by SCU FW, FW will clear the AIE bit in
++ * Reg B, so no need for this driver to clear it
++ */
++static irqreturn_t mrst_interrupt(int irq, void *p)
++{
++ u8 irqstat;
++
++ spin_lock(&rtc_lock);
++ /* This read will clear all IRQ flags inside Reg C */
++ irqstat = vrtc_cmos_read(RTC_INTR_FLAGS);
++ spin_unlock(&rtc_lock);
++
++ irqstat &= RTC_IRQMASK | RTC_IRQF;
++ if (is_intr(irqstat)) {
++ rtc_update_irq(p, 1, irqstat);
++ return IRQ_HANDLED;
++ } else {
++ printk(KERN_ERR "vRTC: error in IRQ handler\n");
++ return IRQ_NONE;
++ }
++}
++
++static int __init
++vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
++{
++ int retval = 0;
++ unsigned char rtc_control;
++
++ /* There can be only one ... */
++ if (mrst_rtc.dev)
++ return -EBUSY;
++
++ if (!iomem)
++ return -ENODEV;
++
++ iomem = request_mem_region(iomem->start,
++ iomem->end + 1 - iomem->start,
++ driver_name);
++ if (!iomem) {
++ dev_dbg(dev, "i/o mem already in use.\n");
++ return -EBUSY;
++ }
++
++ mrst_rtc.irq = rtc_irq;
++ mrst_rtc.iomem = iomem;
++
++ mrst_rtc.day_alrm = 0;
++ mrst_rtc.mon_alrm = 0;
++ mrst_rtc.century = 0;
++ mrst_rtc.wake_on = NULL;
++ mrst_rtc.wake_off = NULL;
++
++ mrst_rtc.rtc = rtc_device_register(driver_name, dev,
++ &mrst_rtc_ops, THIS_MODULE);
++ if (IS_ERR(mrst_rtc.rtc)) {
++ retval = PTR_ERR(mrst_rtc.rtc);
++ goto cleanup0;
++ }
++
++ mrst_rtc.dev = dev;
++ dev_set_drvdata(dev, &mrst_rtc);
++ rename_region(iomem, dev_name(&mrst_rtc.rtc->dev));
++
++ spin_lock_irq(&rtc_lock);
++ mrst_irq_disable(&mrst_rtc, RTC_PIE | RTC_AIE);
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ spin_unlock_irq(&rtc_lock);
++
++ if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
++ dev_dbg(dev, "TODO: support more than 24-hr BCD mode \n");
++
++ if (is_valid_irq(rtc_irq)) {
++ irq_handler_t rtc_mrst_int_handler;
++ rtc_mrst_int_handler = mrst_interrupt;
++
++ retval = request_irq(rtc_irq, rtc_mrst_int_handler,
++ IRQF_DISABLED, dev_name(&mrst_rtc.rtc->dev),
++ mrst_rtc.rtc);
++ if (retval < 0) {
++ dev_dbg(dev, "IRQ %d is already in use, err %d\n",
++ rtc_irq, retval);
++ goto cleanup1;
++ }
++ }
++
++	pr_info("vRTC driver for Moorestown is initialized\n");
++ return 0;
++
++cleanup1:
++ mrst_rtc.dev = NULL;
++ rtc_device_unregister(mrst_rtc.rtc);
++cleanup0:
++ release_region(iomem->start, iomem->end + 1 - iomem->start);
++	pr_warning("vRTC driver for Moorestown initialization Failed!!\n");
++ return retval;
++}
++
++static void rtc_mrst_do_shutdown(void)
++{
++ spin_lock_irq(&rtc_lock);
++ mrst_irq_disable(&mrst_rtc, RTC_IRQMASK);
++ spin_unlock_irq(&rtc_lock);
++}
++
++static void __exit rtc_mrst_do_remove(struct device *dev)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ struct resource *iomem;
++
++ rtc_mrst_do_shutdown();
++
++ if (is_valid_irq(mrst->irq))
++ free_irq(mrst->irq, mrst->rtc);
++
++ rtc_device_unregister(mrst->rtc);
++ mrst->rtc = NULL;
++
++ iomem = mrst->iomem;
++ release_region(iomem->start, iomem->end + 1 - iomem->start);
++ mrst->iomem = NULL;
++
++ mrst->dev = NULL;
++ dev_set_drvdata(dev, NULL);
++}
++
++#ifdef CONFIG_PM
++
++static int mrst_suspend(struct device *dev, pm_message_t mesg)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char tmp;
++
++ /* Only the alarm might be a wakeup event source */
++ spin_lock_irq(&rtc_lock);
++ mrst->suspend_ctrl = tmp = vrtc_cmos_read(RTC_CONTROL);
++ if (tmp & (RTC_PIE | RTC_AIE)) {
++ unsigned char mask;
++
++ if (device_may_wakeup(dev))
++ mask = RTC_IRQMASK & ~RTC_AIE;
++ else
++ mask = RTC_IRQMASK;
++ tmp &= ~mask;
++ vrtc_cmos_write(tmp, RTC_CONTROL);
++
++ mrst_checkintr(mrst, tmp);
++ }
++ spin_unlock_irq(&rtc_lock);
++
++ if (tmp & RTC_AIE) {
++ mrst->enabled_wake = 1;
++ if (mrst->wake_on)
++ mrst->wake_on(dev);
++ else
++ enable_irq_wake(mrst->irq);
++ }
++
++ pr_debug("%s: suspend%s, ctrl %02x\n",
++ dev_name(&mrst_rtc.rtc->dev),
++ (tmp & RTC_AIE) ? ", alarm may wake" : "",
++ tmp);
++
++ return 0;
++}
++
++/*
++ * We want RTC alarms to wake us from e.g. ACPI G2/S5 "soft off", even
++ * after a detour through G3 "mechanical off", although the ACPI spec
++ * says wakeup should only work from G1/S4 "hibernate". To most users,
++ * distinctions between S4 and S5 are pointless. So when the hardware
++ * allows, don't draw that distinction.
++ */
++static inline int mrst_poweroff(struct device *dev)
++{
++ return mrst_suspend(dev, PMSG_HIBERNATE);
++}
++
++static int mrst_resume(struct device *dev)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char tmp = mrst->suspend_ctrl;
++
++ /* Re-enable any irqs previously active */
++ if (tmp & RTC_IRQMASK) {
++ unsigned char mask;
++
++ if (mrst->enabled_wake) {
++ if (mrst->wake_off)
++ mrst->wake_off(dev);
++ else
++ disable_irq_wake(mrst->irq);
++ mrst->enabled_wake = 0;
++ }
++
++ spin_lock_irq(&rtc_lock);
++ do {
++ vrtc_cmos_write(tmp, RTC_CONTROL);
++
++ mask = vrtc_cmos_read(RTC_INTR_FLAGS);
++ mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
++ if (!is_intr(mask))
++ break;
++
++ rtc_update_irq(mrst->rtc, 1, mask);
++ tmp &= ~RTC_AIE;
++ } while (mask & RTC_AIE);
++ spin_unlock_irq(&rtc_lock);
++ }
++
++ pr_debug("%s: resume, ctrl %02x\n",
++ dev_name(&mrst_rtc.rtc->dev),
++ tmp);
++
++ return 0;
++}
++
++#else
++#define mrst_suspend NULL
++#define mrst_resume NULL
++
++static inline int mrst_poweroff(struct device *dev)
++{
++ return -ENOSYS;
++}
++
++#endif
++
++
++/*----------------------------------------------------------------*/
++
++/* Platform setup should have set up an RTC device, when PNP is
++ * unavailable ... this could happen even on (older) PCs.
++ */
++
++static int __init vrtc_mrst_platform_probe(struct platform_device *pdev)
++{
++ return vrtc_mrst_do_probe(&pdev->dev,
++ platform_get_resource(pdev, IORESOURCE_MEM, 0),
++ platform_get_irq(pdev, 0));
++}
++
++static int __exit vrtc_mrst_platform_remove(struct platform_device *pdev)
++{
++ rtc_mrst_do_remove(&pdev->dev);
++ return 0;
++}
++
++static void vrtc_mrst_platform_shutdown(struct platform_device *pdev)
++{
++ if (system_state == SYSTEM_POWER_OFF && !mrst_poweroff(&pdev->dev))
++ return;
++
++ rtc_mrst_do_shutdown();
++}
++
++/* Work with hotplug and coldplug */
++MODULE_ALIAS("platform:vrtc_mrst");
++
++static struct platform_driver vrtc_mrst_platform_driver = {
++ .remove = __exit_p(vrtc_mrst_platform_remove),
++ .shutdown = vrtc_mrst_platform_shutdown,
++ .driver = {
++ .name = (char *) driver_name,
++ .suspend = mrst_suspend,
++ .resume = mrst_resume,
++ }
++};
++
++/*
++ * Moorestown platform has memory mapped virtual RTC device that emulates
++ * the programming interface of the RTC.
++ */
++
++static struct resource vrtc_resources[] = {
++ [0] = {
++ .flags = IORESOURCE_MEM,
++ },
++ [1] = {
++ .flags = IORESOURCE_IRQ,
++ }
++};
++
++static struct platform_device vrtc_device = {
++ .name = "rtc_mrst",
++ .id = -1,
++ .resource = vrtc_resources,
++ .num_resources = ARRAY_SIZE(vrtc_resources),
++};
++
++static int __init vrtc_mrst_init(void)
++{
++	/* iomem resource; resource->end is inclusive, hence the "- 1" */
++	vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
++	vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
++				MRST_VRTC_MAP_SZ - 1;
++	/* irq resource */
++	vrtc_resources[1].start = sfi_mrtc_array[0].irq;
++	vrtc_resources[1].end = sfi_mrtc_array[0].irq;
++
++	platform_device_register(&vrtc_device);
++	return platform_driver_probe(&vrtc_mrst_platform_driver,
++				     vrtc_mrst_platform_probe);
++}
++
++static void __exit vrtc_mrst_exit(void)
++{
++ platform_driver_unregister(&vrtc_mrst_platform_driver);
++ platform_device_unregister(&vrtc_device);
++}
++
++module_init(vrtc_mrst_init);
++module_exit(vrtc_mrst_exit);
++
++MODULE_AUTHOR("Jacob Pan; Feng Tang");
++MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
++MODULE_LICENSE("GPL");
+Index: linux-2.6.33/drivers/spi/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/Kconfig
++++ linux-2.6.33/drivers/spi/Kconfig
+@@ -302,6 +302,18 @@ config SPI_NUC900
+ select SPI_BITBANG
+ help
+ SPI driver for Nuvoton NUC900 series ARM SoCs
++config SPI_MRST
++ tristate "SPI controller driver for Intel Moorestown platform "
++ depends on SPI_MASTER && PCI && X86_MRST
++ help
++ This is the SPI controller master driver for Intel Moorestown platform
++
++config SPI_MRST_DMA
++ boolean "Enable DMA for MRST SPI0 controller"
++ default y
++ depends on SPI_MRST && INTEL_LNW_DMAC2
++ help
++ This has to be enabled after Moorestown DMAC2 driver is enabled
+
+ #
+ # Add new SPI master controllers in alphabetical order above this line
+Index: linux-2.6.33/drivers/spi/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/Makefile
++++ linux-2.6.33/drivers/spi/Makefile
+@@ -42,6 +42,7 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.
+ obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
+ obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
+ obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
++obj-$(CONFIG_SPI_MRST) += mrst_spi.o
+
+ # special build for s3c24xx spi driver with fiq support
+ spi_s3c24xx_hw-y := spi_s3c24xx.o
+Index: linux-2.6.33/drivers/spi/mrst_spi.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/spi/mrst_spi.c
+@@ -0,0 +1,1382 @@
++/*
++ * mrst_spi.c - Moorestown SPI controller driver (referring pxa2xx_spi.c)
++ *
++ * Copyright (C) Intel 2008 Feng Tang <feng.tang@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++/* Note:
++ *
++ * * FW will create a SPI device info block table, and the driver needs to
++ * parse it and use register_board_info to register the devices with the kernel
++ */
++
++#include <linux/delay.h>
++#include <linux/highmem.h>
++#include <linux/pci.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++
++#include <linux/spi/spi.h>
++#include <linux/spi/mrst_spi.h>
++
++#define MRST_MAX_DMA_LEN 2047
++#ifdef CONFIG_SPI_MRST_DMA
++#include <linux/lnw_dma.h>
++#endif
++
++#ifdef CONFIG_DEBUG_FS
++#include <linux/debugfs.h>
++#endif
++
++#define DRIVER_NAME "mrst_spi"
++
++#define START_STATE ((void *)0)
++#define RUNNING_STATE ((void *)1)
++#define DONE_STATE ((void *)2)
++#define ERROR_STATE ((void *)-1)
++
++#define QUEUE_RUNNING 0
++#define QUEUE_STOPPED 1
++
++#define MRST_SPI_DEASSERT 0
++#define MRST_SPI_ASSERT 1
++
++/* HW info for MRST CLk Control Unit, one 32b reg */
++#define MRST_SPI_CLK_BASE 100000000 /* 100m */
++#define MRST_CLK_SPI0_REG 0xff11d86c
++#define CLK_SPI_BDIV_OFFSET 0
++#define CLK_SPI_BDIV_MASK 0x00000007
++#define CLK_SPI_CDIV_OFFSET 9
++#define CLK_SPI_CDIV_MASK 0x00000e00
++#define CLK_SPI_CDIV_100M 0x0
++#define CLK_SPI_CDIV_50M 0x1
++#define CLK_SPI_CDIV_33M 0x2
++#define CLK_SPI_CDIV_25M 0x3
++#define CLK_SPI_DISABLE_OFFSET 8
++
++/* per controller struct */
++struct driver_data {
++ /* Driver model hookup */
++ struct pci_dev *pdev;
++ struct spi_master *master;
++
++ struct spi_device *devices;
++ struct spi_device *cur_dev;
++ enum mrst_ssi_type type;
++
++ /* phy and virtual register addresses */
++ void *paddr;
++ void *vaddr;
++ u32 iolen;
++ int irq;
++ dma_addr_t dma_addr;
++	u32 freq;		/* controller core clk frequency in Hz */
++
++ /* Driver message queue */
++ struct workqueue_struct *workqueue;
++ struct work_struct pump_messages;
++ spinlock_t lock;
++ struct list_head queue;
++ int busy;
++ int run;
++
++ /* Message Transfer pump */
++ struct tasklet_struct pump_transfers;
++
++ /* Current message transfer state info */
++ struct spi_message *cur_msg;
++ struct spi_transfer *cur_transfer;
++ struct chip_data *cur_chip;
++ struct chip_data *prev_chip;
++ size_t len;
++ void *tx;
++ void *tx_end;
++ void *rx;
++ void *rx_end;
++ int dma_mapped;
++ dma_addr_t rx_dma;
++ dma_addr_t tx_dma;
++ size_t rx_map_len;
++ size_t tx_map_len;
++ u8 n_bytes; /* current is a 1/2 bytes op */
++	u8 max_bits_per_word;	/* SPI0's maximum width is 16 bits */
++ u32 dma_width;
++ int cs_change;
++ int (*write)(struct driver_data *drv_data);
++ int (*read)(struct driver_data *drv_data);
++ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
++ void (*cs_control)(u32 command);
++
++#ifdef CONFIG_DEBUG_FS
++ struct dentry *debugfs;
++#endif
++
++ int dma_inited;
++
++#ifdef CONFIG_SPI_MRST_DMA
++ struct lnw_dma_slave dmas_tx;
++ struct lnw_dma_slave dmas_rx;
++ struct dma_chan *txchan;
++ struct dma_chan *rxchan;
++ int txdma_done;
++ int rxdma_done;
++
++ u64 tx_param;
++ u64 rx_param;
++ struct pci_dev *dma_dev;
++#endif
++};
++
++/* slave spi_dev related */
++struct chip_data {
++ /* cr0 and cr1 are only 16b valid */
++ u16 cr0;
++ u16 cr1;
++
++ u8 cs; /* chip select pin */
++ u8 n_bytes; /* current is a 1/2/4 byte op */
++ u8 tmode; /* TR/TO/RO/EEPROM */
++ u8 type; /* SPI/SSP/MicroWire */
++
++ u8 poll_mode; /* 1 means use poll mode */
++
++ u32 dma_width;
++ u32 rx_threshold;
++ u32 tx_threshold;
++ u8 enable_dma;
++ u8 bits_per_word;
++ u16 clk_div; /* baud rate divider */
++ u32 speed_hz; /* baud rate */
++ int (*write)(struct driver_data *drv_data);
++ int (*read)(struct driver_data *drv_data);
++ void (*cs_control)(u32 command);
++};
++
++#ifdef CONFIG_SPI_MRST_DMA
++static bool chan_filter(struct dma_chan *chan, void *param)
++{
++ struct driver_data *drv_data = param;
++ bool ret = false;
++
++ if (chan->device->dev == &drv_data->dma_dev->dev)
++ ret = true;
++ return ret;
++}
++
++static void mrst_spi_dma_init(struct driver_data *drv_data)
++{
++ struct lnw_dma_slave *rxs, *txs;
++ dma_cap_mask_t mask;
++ struct pci_dev *dmac2;
++
++ drv_data->txchan = NULL;
++ drv_data->rxchan = NULL;
++
++	/* MRST SPI0 controller only works with MRST DMA controller 2 */
++ dmac2 = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
++ if (!dmac2) {
++ printk(KERN_WARNING
++ "MRST SPI0: can't find DMAC2, dma init failed\n");
++ return;
++ } else
++ drv_data->dma_dev = dmac2;
++
++ /* 1. init rx channel */
++ rxs = &drv_data->dmas_rx;
++
++ rxs->dirn = DMA_FROM_DEVICE;
++ rxs->hs_mode = LNW_DMA_HW_HS;
++ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
++ rxs->src_width = LNW_DMA_WIDTH_16BIT;
++ rxs->dst_width = LNW_DMA_WIDTH_32BIT;
++ rxs->src_msize = LNW_DMA_MSIZE_16;
++ rxs->dst_msize = LNW_DMA_MSIZE_16;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ drv_data->rxchan = dma_request_channel(mask, chan_filter,
++ drv_data);
++ if (!drv_data->rxchan)
++ goto err_exit;
++ drv_data->rxchan->private = rxs;
++
++ /* 2. init tx channel */
++ txs = &drv_data->dmas_tx;
++
++ txs->dirn = DMA_TO_DEVICE;
++ txs->hs_mode = LNW_DMA_HW_HS;
++ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
++ txs->src_width = LNW_DMA_WIDTH_32BIT;
++ txs->dst_width = LNW_DMA_WIDTH_16BIT;
++ txs->src_msize = LNW_DMA_MSIZE_16;
++ txs->dst_msize = LNW_DMA_MSIZE_16;
++
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++
++ drv_data->txchan = dma_request_channel(mask, chan_filter,
++ drv_data);
++ if (!drv_data->txchan)
++ goto free_rxchan;
++ drv_data->txchan->private = txs;
++
++ /* set the dma done bit to 1 */
++ drv_data->dma_inited = 1;
++ drv_data->txdma_done = 1;
++ drv_data->rxdma_done = 1;
++
++ drv_data->tx_param = ((u64)(u32)drv_data << 32)
++ | (u32)(&drv_data->txdma_done);
++ drv_data->rx_param = ((u64)(u32)drv_data << 32)
++ | (u32)(&drv_data->rxdma_done);
++ return;
++
++free_rxchan:
++ dma_release_channel(drv_data->rxchan);
++err_exit:
++ pci_dev_put(dmac2);
++ return;
++}
++
++static void mrst_spi_dma_exit(struct driver_data *drv_data)
++{
++ dma_release_channel(drv_data->txchan);
++ dma_release_channel(drv_data->rxchan);
++ pci_dev_put(drv_data->dma_dev);
++}
++
++
++static inline void unmap_dma_buffers(struct driver_data *drv_data);
++static void transfer_complete(struct driver_data *drv_data);
++
++static void mrst_spi_dma_done(void *arg)
++{
++ u64 *param = arg;
++ struct driver_data *drv_data;
++ int *done;
++
++ drv_data = (struct driver_data *)(u32)(*param >> 32);
++ done = (int *)(u32)(*param & 0xffffffff);
++
++ *done = 1;
++ /* wait till both tx/rx channels are done */
++ if (!drv_data->txdma_done || !drv_data->rxdma_done)
++ return;
++
++ transfer_complete(drv_data);
++}
++#endif
++
++
++#ifdef CONFIG_DEBUG_FS
++static int spi_show_regs_open(struct inode *inode, struct file *file)
++{
++ file->private_data = inode->i_private;
++ return 0;
++}
++
++#define SPI_REGS_BUFSIZE 1024
++static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ char *buf;
++ u32 len = 0;
++ ssize_t ret;
++ struct driver_data *drv_data;
++ void *reg;
++
++ drv_data = (struct driver_data *)file->private_data;
++ reg = drv_data->vaddr;
++
++ buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
++ if (!buf)
++ return 0;
++
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "MRST SPI0 registers:\n");
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "=================================\n");
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "CTRL0: \t\t0x%08x\n", read_ctrl0(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "CTRL1: \t\t0x%08x\n", read_ctrl1(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "SSIENR: \t0x%08x\n", read_ssienr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "SER: \t\t0x%08x\n", read_ser(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "BAUDR: \t\t0x%08x\n", read_baudr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "TXFTLR: \t0x%08x\n", read_txftlr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "RXFTLR: \t0x%08x\n", read_rxftlr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "TXFLR: \t\t0x%08x\n", read_txflr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "RXFLR: \t\t0x%08x\n", read_rxflr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "SR: \t\t0x%08x\n", read_sr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "IMR: \t\t0x%08x\n", read_imr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "ISR: \t\t0x%08x\n", read_isr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "DMACR: \t\t0x%08x\n", read_dmacr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "DMATDLR: \t0x%08x\n", read_dmatdlr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "DMARDLR: \t0x%08x\n", read_dmardlr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "=================================\n");
++
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++ kfree(buf);
++ return ret;
++}
++
++static const struct file_operations mrst_spi_regs_ops = {
++ .owner = THIS_MODULE,
++ .open = spi_show_regs_open,
++ .read = spi_show_regs,
++};
++
++static int mrst_spi_debugfs_init(struct driver_data *drv_data)
++{
++ drv_data->debugfs = debugfs_create_dir("mrst_spi", NULL);
++ if (!drv_data->debugfs)
++ return -ENOMEM;
++
++ debugfs_create_file("registers", S_IFREG | S_IRUGO,
++ drv_data->debugfs, (void *)drv_data, &mrst_spi_regs_ops);
++ return 0;
++}
++
++static void mrst_spi_debugfs_remove(struct driver_data *drv_data)
++{
++ if (drv_data->debugfs)
++ debugfs_remove_recursive(drv_data->debugfs);
++}
++
++#else
++static inline int mrst_spi_debugfs_init(struct driver_data *drv_data)
++{
++	return 0;	/* debugfs disabled: report success so probe continues */
++}
++static inline void mrst_spi_debugfs_remove(struct driver_data *drv_data)
++{
++}
++#endif /* CONFIG_DEBUG_FS */
++
++static int flush(struct driver_data *drv_data)
++{
++ unsigned long limit = loops_per_jiffy << 1;
++ void *reg = drv_data->vaddr;
++
++ while (read_sr(reg) & SR_RF_NOT_EMPT) {
++ limit = loops_per_jiffy << 1;
++ while ((read_sr(reg) & SR_BUSY) && limit--)
++ ;
++ read_dr(reg);
++ }
++ return limit;
++}
++
++static void null_cs_control(u32 command)
++{
++}
++
++static int null_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++ u8 n_bytes = drv_data->n_bytes;
++
++ if (!(read_sr(reg) & SR_TF_NOT_FULL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_dr(0, reg);
++ drv_data->tx += n_bytes;
++ return 1;
++}
++
++static int null_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++ u8 n_bytes = drv_data->n_bytes;
++
++ while ((read_sr(reg) & SR_RF_NOT_EMPT)
++ && (drv_data->rx < drv_data->rx_end)) {
++ read_dr(reg);
++ drv_data->rx += n_bytes;
++ }
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u8_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++
++ if (!(read_sr(reg) & SR_TF_NOT_FULL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_dr(*(u8 *)(drv_data->tx), reg);
++ ++drv_data->tx;
++
++ while (read_sr(reg) & SR_BUSY)
++ ;
++ return 1;
++}
++
++static int u8_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++
++ while ((read_sr(reg) & SR_RF_NOT_EMPT)
++ && (drv_data->rx < drv_data->rx_end)) {
++ *(u8 *)(drv_data->rx) = read_dr(reg);
++ ++drv_data->rx;
++ }
++
++ while (read_sr(reg) & SR_BUSY)
++ ;
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u16_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++
++ if (!(read_sr(reg) & SR_TF_NOT_FULL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_dr(*(u16 *)(drv_data->tx), reg);
++ drv_data->tx += 2;
++ while (read_sr(reg) & SR_BUSY)
++ ;
++
++ return 1;
++}
++
++static int u16_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++ u16 temp;
++
++ while ((read_sr(reg) & SR_RF_NOT_EMPT)
++ && (drv_data->rx < drv_data->rx_end)) {
++ temp = read_dr(reg);
++ *(u16 *)(drv_data->rx) = temp;
++ drv_data->rx += 2;
++ }
++
++ while (read_sr(reg) & SR_BUSY)
++ ;
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static void *next_transfer(struct driver_data *drv_data)
++{
++ struct spi_message *msg = drv_data->cur_msg;
++ struct spi_transfer *trans = drv_data->cur_transfer;
++
++ /* Move to next transfer */
++ if (trans->transfer_list.next != &msg->transfers) {
++ drv_data->cur_transfer =
++ list_entry(trans->transfer_list.next,
++ struct spi_transfer,
++ transfer_list);
++ return RUNNING_STATE;
++ } else
++ return DONE_STATE;
++}
++
++/*
++ * Note: the protocol driver first prepares a dma-capable
++ * buffer; this function only needs to translate the
++ * virtual address to a physical one
++ */
++static int map_dma_buffers(struct driver_data *drv_data)
++{
++ if (!drv_data->cur_msg->is_dma_mapped || !drv_data->dma_inited
++ || !drv_data->cur_chip->enable_dma)
++ return 0;
++
++ if (drv_data->cur_transfer->tx_dma)
++ drv_data->tx_dma = drv_data->cur_transfer->tx_dma;
++
++ if (drv_data->cur_transfer->rx_dma)
++ drv_data->rx_dma = drv_data->cur_transfer->rx_dma;
++
++ return 1;
++}
++
++static inline void unmap_dma_buffers(struct driver_data *drv_data)
++{
++ if (!drv_data->dma_mapped)
++ return;
++ drv_data->dma_mapped = 0;
++}
++
++/* caller already set message->status; dma and pio irqs are blocked */
++static void giveback(struct driver_data *drv_data)
++{
++ struct spi_transfer *last_transfer;
++ unsigned long flags;
++ struct spi_message *msg;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++ msg = drv_data->cur_msg;
++ drv_data->cur_msg = NULL;
++ drv_data->cur_transfer = NULL;
++ drv_data->prev_chip = drv_data->cur_chip;
++ drv_data->cur_chip = NULL;
++ queue_work(drv_data->workqueue, &drv_data->pump_messages);
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++
++ last_transfer = list_entry(msg->transfers.prev,
++ struct spi_transfer,
++ transfer_list);
++
++ if (!last_transfer->cs_change)
++ drv_data->cs_control(MRST_SPI_DEASSERT);
++
++ msg->state = NULL;
++ if (msg->complete)
++ msg->complete(msg->context);
++}
++
++static void dma_transfer(struct driver_data *drv_data, int cs_change)
++{
++#ifdef CONFIG_SPI_MRST_DMA
++ void *reg = drv_data->vaddr;
++ struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
++ struct dma_chan *txchan, *rxchan;
++ enum dma_ctrl_flags flag;
++ u16 dmacr = 0;
++
++ /* 1. setup DMA related registers */
++ if (cs_change) {
++ mrst_spi_enable(reg, 0);
++
++ write_dmardlr(0xf, reg);
++ write_dmatdlr(0x10, reg);
++
++ if (drv_data->tx_dma)
++ dmacr |= 0x2;
++ if (drv_data->rx_dma)
++ dmacr |= 0x1;
++
++ write_dmacr(dmacr, reg);
++ mrst_spi_enable(reg, 1);
++ }
++
++ if (drv_data->tx_dma)
++ drv_data->txdma_done = 0;
++
++ if (drv_data->rx_dma)
++ drv_data->rxdma_done = 0;
++
++ /* 2. start the TX dma transfer */
++ txchan = drv_data->txchan;
++ rxchan = drv_data->rxchan;
++
++ flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
++
++ if (drv_data->tx_dma) {
++ txdesc = txchan->device->device_prep_dma_memcpy(txchan,
++ drv_data->dma_addr, drv_data->tx_dma,
++ drv_data->len, flag);
++
++ txdesc->callback = mrst_spi_dma_done;
++ txdesc->callback_param = &drv_data->tx_param;
++ }
++
++ /* 3. start the RX dma transfer */
++ if (drv_data->rx_dma) {
++ rxdesc = rxchan->device->device_prep_dma_memcpy(rxchan,
++ drv_data->rx_dma, drv_data->dma_addr,
++ drv_data->len, flag);
++
++ rxdesc->callback = mrst_spi_dma_done;
++ rxdesc->callback_param = &drv_data->rx_param;
++ }
++
++	/* rx must be started before tx due to SPI's full-duplex nature */
++ if (rxdesc)
++ rxdesc->tx_submit(rxdesc);
++ if (txdesc)
++ txdesc->tx_submit(txdesc);
++#endif
++}
++
++static void int_error_stop(struct driver_data *drv_data, const char *msg)
++{
++ void *reg = drv_data->vaddr;
++
++ /* Stop and reset hw */
++ flush(drv_data);
++ write_ssienr(0, reg);
++
++ dev_err(&drv_data->pdev->dev, "%s\n", msg);
++
++ drv_data->cur_msg->state = ERROR_STATE;
++ tasklet_schedule(&drv_data->pump_transfers);
++}
++
++static void transfer_complete(struct driver_data *drv_data)
++{
++	/* Update total bytes transferred; the count reflects actual bytes read */
++ drv_data->cur_msg->actual_length += drv_data->len;
++
++ /* Move to next transfer */
++ drv_data->cur_msg->state = next_transfer(drv_data);
++
++ /* handle end of message */
++ if (drv_data->cur_msg->state == DONE_STATE) {
++ drv_data->cur_msg->status = 0;
++ giveback(drv_data);
++ } else
++ tasklet_schedule(&drv_data->pump_transfers);
++}
++
++static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++ u32 irq_status, irq_mask = 0x3f;
++
++ irq_status = read_isr(reg) & irq_mask;
++
++ /* error handling */
++ if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
++ read_txoicr(reg);
++ read_rxoicr(reg);
++ read_rxuicr(reg);
++ int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
++ return IRQ_HANDLED;
++ }
++
++ /* INT comes from tx */
++ if (drv_data->tx && (irq_status & SPI_INT_TXEI))
++ while (drv_data->tx < drv_data->tx_end) {
++ drv_data->write(drv_data);
++
++ if (drv_data->tx == drv_data->tx_end) {
++ spi_mask_intr(reg, SPI_INT_TXEI);
++ transfer_complete(drv_data);
++ }
++ }
++
++ /* INT comes from rx */
++ if (drv_data->rx && (irq_status & SPI_INT_RXFI)) {
++ if (drv_data->read(drv_data))
++ transfer_complete(drv_data);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mrst_spi_irq(int irq, void *dev_id)
++{
++ struct driver_data *drv_data = dev_id;
++ void *reg = drv_data->vaddr;
++
++ if (!drv_data->cur_msg) {
++ spi_mask_intr(reg, SPI_INT_TXEI);
++ /* Never fail */
++ return IRQ_HANDLED;
++ }
++
++ return drv_data->transfer_handler(drv_data);
++}
++
++/* must be called inside pump_transfers() */
++static void poll_transfer(struct driver_data *drv_data)
++{
++ if (drv_data->tx)
++ while (drv_data->write(drv_data))
++ drv_data->read(drv_data);
++
++ drv_data->read(drv_data);
++ transfer_complete(drv_data);
++}
++
++static void pump_transfers(unsigned long data)
++{
++ struct driver_data *drv_data = (struct driver_data *)data;
++ struct spi_message *message = NULL;
++ struct spi_transfer *transfer = NULL;
++ struct spi_transfer *previous = NULL;
++ struct spi_device *spi = NULL;
++ struct chip_data *chip = NULL;
++ void *reg = drv_data->vaddr;
++ u8 bits = 0;
++ u8 imask = 0;
++ u8 cs_change = 0;
++ u16 rxint_level = 0;
++ u16 txint_level = 0;
++ u16 clk_div = 0;
++ u32 speed = 0;
++ u32 cr0 = 0;
++
++ /* get current state information */
++ message = drv_data->cur_msg;
++ transfer = drv_data->cur_transfer;
++ chip = drv_data->cur_chip;
++ spi = message->spi;
++
++ if (unlikely(!chip->clk_div)) {
++ /* default for 115200 UART device */
++ if (chip->speed_hz)
++ chip->clk_div = drv_data->freq / chip->speed_hz;
++ else
++ chip->clk_div = drv_data->freq / 115200;
++ }
++
++ /* handle for abort */
++ if (message->state == ERROR_STATE) {
++ message->status = -EIO;
++ goto early_exit;
++ }
++
++ /* handle end of message */
++ if (message->state == DONE_STATE) {
++ message->status = 0;
++ goto early_exit;
++ }
++
++ /* delay if requested at end of transfer*/
++ if (message->state == RUNNING_STATE) {
++ previous = list_entry(transfer->transfer_list.prev,
++ struct spi_transfer,
++ transfer_list);
++ if (previous->delay_usecs)
++ udelay(previous->delay_usecs);
++ }
++
++ drv_data->n_bytes = chip->n_bytes;
++ drv_data->dma_width = chip->dma_width;
++ drv_data->cs_control = chip->cs_control;
++
++ drv_data->rx_dma = transfer->rx_dma;
++ drv_data->tx_dma = transfer->tx_dma;
++ drv_data->tx = (void *)transfer->tx_buf;
++ drv_data->tx_end = drv_data->tx + transfer->len;
++ drv_data->rx = transfer->rx_buf;
++ drv_data->rx_end = drv_data->rx + transfer->len;
++ drv_data->write = drv_data->tx ? chip->write : null_writer;
++ drv_data->read = drv_data->rx ? chip->read : null_reader;
++ drv_data->cs_change = transfer->cs_change;
++ drv_data->len = drv_data->cur_transfer->len;
++ if (chip != drv_data->prev_chip)
++ cs_change = 1;
++
++ /* handle per transfer options for bpw and speed */
++ cr0 = chip->cr0;
++ if (transfer->speed_hz) {
++ speed = chip->speed_hz;
++
++ if (transfer->speed_hz != speed) {
++ speed = transfer->speed_hz;
++ if (speed > drv_data->freq) {
++ printk(KERN_ERR "MRST SPI0: unsupported"
++					" freq: %dHz\n", speed);
++ message->status = -EIO;
++ goto early_exit;
++ }
++
++ /* clk_div doesn't support odd number */
++ clk_div = (drv_data->freq + speed - 1) / speed;
++ clk_div = ((clk_div + 1) >> 1) << 1;
++
++ chip->speed_hz = speed;
++ chip->clk_div = clk_div;
++ }
++ }
++
++ if (transfer->bits_per_word) {
++ bits = transfer->bits_per_word;
++
++ switch (bits) {
++ case 8:
++ drv_data->n_bytes = 1;
++ drv_data->dma_width = 1;
++ drv_data->read = drv_data->read != null_reader ?
++ u8_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u8_writer : null_writer;
++ break;
++ case 16:
++ drv_data->n_bytes = 2;
++ drv_data->dma_width = 2;
++ drv_data->read = drv_data->read != null_reader ?
++ u16_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u16_writer : null_writer;
++ break;
++ default:
++ printk(KERN_ERR "MRST SPI0: unsupported bits:"
++ "%db\n", bits);
++ message->status = -EIO;
++ goto early_exit;
++ }
++
++ cr0 = (bits - 1)
++ | (chip->type << SPI_FRF_OFFSET)
++ | (spi->mode << SPI_MODE_OFFSET)
++ | (chip->tmode << SPI_TMOD_OFFSET);
++ }
++
++ message->state = RUNNING_STATE;
++
++ /* try to map dma buffer and do a dma transfer if successful */
++ drv_data->dma_mapped = 0;
++ if (drv_data->len && (drv_data->len <= MRST_MAX_DMA_LEN))
++ drv_data->dma_mapped = map_dma_buffers(drv_data);
++
++ if (!drv_data->dma_mapped && !chip->poll_mode) {
++ if (drv_data->rx) {
++ if (drv_data->len >= SPI_INT_THRESHOLD)
++ rxint_level = SPI_INT_THRESHOLD;
++ else
++ rxint_level = drv_data->len;
++ imask |= SPI_INT_RXFI;
++ }
++
++ if (drv_data->tx)
++ imask |= SPI_INT_TXEI;
++ drv_data->transfer_handler = interrupt_transfer;
++ }
++
++ /*
++ * reprogram registers only if
++ * 1. chip select changes
++	 * 2. clk_div changes
++ * 3. control value changes
++ */
++ if (read_ctrl0(reg) != cr0 || cs_change || clk_div) {
++ mrst_spi_enable(reg, 0);
++
++ if (read_ctrl0(reg) != cr0)
++ write_ctrl0(cr0, reg);
++
++ if (txint_level)
++ write_txftlr(txint_level, reg);
++
++ if (rxint_level)
++ write_rxftlr(rxint_level, reg);
++
++	/* set the interrupt mask; for poll mode just disable all int */
++ spi_mask_intr(reg, 0xff);
++ if (!chip->poll_mode)
++ spi_umask_intr(reg, imask);
++
++ spi_enable_clk(reg, clk_div ? clk_div : chip->clk_div);
++ spi_chip_sel(reg, spi->chip_select);
++ mrst_spi_enable(reg, 1);
++
++ if (cs_change)
++ drv_data->prev_chip = chip;
++ }
++
++ if (drv_data->dma_mapped)
++ dma_transfer(drv_data, cs_change);
++
++ if (chip->poll_mode)
++ poll_transfer(drv_data);
++
++ return;
++
++early_exit:
++ giveback(drv_data);
++ return;
++}
++
++static void pump_messages(struct work_struct *work)
++{
++ struct driver_data *drv_data =
++ container_of(work, struct driver_data, pump_messages);
++ unsigned long flags;
++
++ /* Lock queue and check for queue work */
++ spin_lock_irqsave(&drv_data->lock, flags);
++ if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
++ drv_data->busy = 0;
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return;
++ }
++
++ /* Make sure we are not already running a message */
++ if (drv_data->cur_msg) {
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return;
++ }
++
++ /* Extract head of queue */
++ drv_data->cur_msg = list_entry(drv_data->queue.next,
++ struct spi_message, queue);
++ list_del_init(&drv_data->cur_msg->queue);
++
++ /* Initial message state*/
++ drv_data->cur_msg->state = START_STATE;
++ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
++ struct spi_transfer,
++ transfer_list);
++ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
++
++ /* Mark as busy and launch transfers */
++ tasklet_schedule(&drv_data->pump_transfers);
++
++ drv_data->busy = 1;
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++}
++
++/* spi_device use this to queue in the their spi_msg */
++static int mrst_spi_transfer(struct spi_device *spi, struct spi_message *msg)
++{
++ struct driver_data *drv_data = spi_master_get_devdata(spi->master);
++ unsigned long flags;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++
++ if (drv_data->run == QUEUE_STOPPED) {
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return -ESHUTDOWN;
++ }
++
++ msg->actual_length = 0;
++ msg->status = -EINPROGRESS;
++ msg->state = START_STATE;
++
++ list_add_tail(&msg->queue, &drv_data->queue);
++
++ if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) {
++
++ if (drv_data->cur_transfer || drv_data->cur_msg)
++ queue_work(drv_data->workqueue,
++ &drv_data->pump_messages);
++ else {
++ /* if no other data transaction in air, just go */
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ pump_messages(&drv_data->pump_messages);
++ return 0;
++ }
++ }
++
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return 0;
++}
++
++/* this may be called twice for each spi dev */
++static int mrst_spi_setup(struct spi_device *spi)
++{
++ struct mrst_spi_chip *chip_info = NULL;
++ struct chip_data *chip;
++
++ if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
++ return -EINVAL;
++
++ /* Only alloc on first setup */
++ chip = spi_get_ctldata(spi);
++ if (!chip) {
++ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
++ if (!chip)
++ return -ENOMEM;
++
++ chip->cs_control = null_cs_control;
++ chip->enable_dma = 0;
++ }
++
++ /* protocol drivers may change the chip settings, so...
++ * if chip_info exists, use it */
++ chip_info = spi->controller_data;
++
++ /* chip_info doesn't always exist */
++ if (chip_info) {
++ if (chip_info->cs_control)
++ chip->cs_control = chip_info->cs_control;
++
++ chip->poll_mode = chip_info->poll_mode;
++ chip->type = chip_info->type;
++
++ chip->rx_threshold = 0;
++ chip->tx_threshold = 0;
++
++ chip->enable_dma = chip_info->enable_dma;
++ }
++
++ if (spi->bits_per_word <= 8) {
++ chip->n_bytes = 1;
++ chip->dma_width = 1;
++ chip->read = u8_reader;
++ chip->write = u8_writer;
++ } else if (spi->bits_per_word <= 16) {
++ chip->n_bytes = 2;
++ chip->dma_width = 2;
++ chip->read = u16_reader;
++ chip->write = u16_writer;
++ } else {
++ /* never take >16b case for MRST SPIC */
++ dev_err(&spi->dev, "invalid wordsize\n");
++ return -ENODEV;
++ }
++
++ chip->bits_per_word = spi->bits_per_word;
++ chip->speed_hz = spi->max_speed_hz;
++ chip->tmode = 0; /* Tx & Rx */
++ /* default SPI mode is SCPOL = 0, SCPH = 0 */
++ chip->cr0 = (chip->bits_per_word - 1)
++ | (chip->type << SPI_FRF_OFFSET)
++ | (spi->mode << SPI_MODE_OFFSET)
++ | (chip->tmode << SPI_TMOD_OFFSET);
++
++ spi_set_ctldata(spi, chip);
++ return 0;
++}
++
++static void mrst_spi_cleanup(struct spi_device *spi)
++{
++ struct chip_data *chip = spi_get_ctldata(spi);
++
++ kfree(chip);
++}
++
++static int __init init_queue(struct driver_data *drv_data)
++{
++ INIT_LIST_HEAD(&drv_data->queue);
++ spin_lock_init(&drv_data->lock);
++
++ drv_data->run = QUEUE_STOPPED;
++ drv_data->busy = 0;
++
++ tasklet_init(&drv_data->pump_transfers,
++ pump_transfers, (unsigned long)drv_data);
++
++ INIT_WORK(&drv_data->pump_messages, pump_messages);
++ drv_data->workqueue = create_singlethread_workqueue(
++ dev_name(drv_data->master->dev.parent));
++ if (drv_data->workqueue == NULL)
++ return -EBUSY;
++
++ return 0;
++}
++
++static int start_queue(struct driver_data *drv_data)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++
++ if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return -EBUSY;
++ }
++
++ drv_data->run = QUEUE_RUNNING;
++ drv_data->cur_msg = NULL;
++ drv_data->cur_transfer = NULL;
++ drv_data->cur_chip = NULL;
++ drv_data->prev_chip = NULL;
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++
++ queue_work(drv_data->workqueue, &drv_data->pump_messages);
++
++ return 0;
++}
++
++static int stop_queue(struct driver_data *drv_data)
++{
++ unsigned long flags;
++ unsigned limit = 500;
++ int status = 0;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++ drv_data->run = QUEUE_STOPPED;
++ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ msleep(10);
++ spin_lock_irqsave(&drv_data->lock, flags);
++ }
++
++ if (!list_empty(&drv_data->queue) || drv_data->busy)
++ status = -EBUSY;
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++
++ return status;
++}
++
++static int destroy_queue(struct driver_data *drv_data)
++{
++ int status;
++
++ status = stop_queue(drv_data);
++ if (status != 0)
++ return status;
++ destroy_workqueue(drv_data->workqueue);
++ return 0;
++}
++
++/* restart the spic, disable all interrupts, clean rx fifo */
++static void spi_hw_init(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++
++ mrst_spi_enable(reg, 0x0);
++ spi_mask_intr(reg, 0xff);
++ mrst_spi_enable(reg, 0x1);
++
++ flush(drv_data);
++}
++
++static int __devinit mrst_spi_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ int ret;
++ struct driver_data *drv_data;
++ struct spi_master *master;
++ struct device *dev = &pdev->dev;
++ u32 *clk_reg, clk_cdiv;
++ int pci_bar = 0;
++
++ BUG_ON(pdev == NULL);
++ BUG_ON(ent == NULL);
++
++ printk(KERN_INFO "MRST: found PCI SPI controller(ID: %04x:%04x)\n",
++ pdev->vendor, pdev->device);
++
++ ret = pci_enable_device(pdev);
++ if (ret)
++ return ret;
++
++ master = spi_alloc_master(dev, sizeof(struct driver_data));
++ if (!master) {
++ ret = -ENOMEM;
++ goto exit;
++ }
++
++ drv_data = spi_master_get_devdata(master);
++ drv_data->master = master;
++ drv_data->pdev = pdev;
++ drv_data->type = SSI_MOTO_SPI;
++ drv_data->prev_chip = NULL;
++
++ /* get basic io resource and map it */
++ drv_data->paddr = (void *)pci_resource_start(pdev, pci_bar);
++ drv_data->iolen = pci_resource_len(pdev, pci_bar);
++ drv_data->dma_addr = (dma_addr_t)(drv_data->paddr + 0x60);
++
++ ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
++ if (ret)
++ goto err_free_master;
++
++ drv_data->vaddr = ioremap_nocache((unsigned long)drv_data->paddr,
++ drv_data->iolen);
++ if (!drv_data->vaddr) {
++ ret = -ENOMEM;
++ goto err_free_pci;
++ }
++
++ clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
++ if (!clk_reg) {
++ ret = -ENOMEM;
++ goto err_iounmap;
++ }
++
++ /* get SPI controller operating freq info */
++ clk_cdiv = ((*clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
++ drv_data->freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
++ iounmap(clk_reg);
++
++ drv_data->irq = pdev->irq;
++ ret = request_irq(drv_data->irq, mrst_spi_irq, 0,
++ "mrst_spic0", drv_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can not get IRQ\n");
++ goto err_iounmap;
++ }
++
++ spin_lock_init(&drv_data->lock);
++
++ master->mode_bits = SPI_CPOL | SPI_CPHA;
++
++ master->bus_num = 0;
++ master->num_chipselect = 16;
++ master->cleanup = mrst_spi_cleanup;
++ master->setup = mrst_spi_setup;
++ master->transfer = mrst_spi_transfer;
++
++ drv_data->dma_inited = 0;
++#ifdef CONFIG_SPI_MRST_DMA
++ mrst_spi_dma_init(drv_data);
++#endif
++
++ /* basic HW init */
++ spi_hw_init(drv_data);
++
++ /* Initial and start queue */
++ ret = init_queue(drv_data);
++ if (ret) {
++ dev_err(&pdev->dev, "problem initializing queue\n");
++ goto err_diable_hw;
++ }
++ ret = start_queue(drv_data);
++ if (ret) {
++ dev_err(&pdev->dev, "problem starting queue\n");
++ goto err_diable_hw;
++ }
++
++ ret = spi_register_master(master);
++ if (ret) {
++ dev_err(&pdev->dev, "problem registering spi master\n");
++ goto err_queue_alloc;
++ }
++
++ /* PCI hook and SPI hook use the same drv data */
++ pci_set_drvdata(pdev, drv_data);
++ mrst_spi_debugfs_init(drv_data);
++
++ return 0;
++
++err_queue_alloc:
++ destroy_queue(drv_data);
++#ifdef CONFIG_SPI_MRST_DMA
++ mrst_spi_dma_exit(drv_data);
++#endif
++err_diable_hw:
++ mrst_spi_enable(drv_data->vaddr, 0);
++ free_irq(drv_data->irq, drv_data);
++err_iounmap:
++ iounmap(drv_data->vaddr);
++err_free_pci:
++ pci_release_region(pdev, pci_bar);
++err_free_master:
++ spi_master_put(master);
++exit:
++ pci_disable_device(pdev);
++ return ret;
++}
++
++static void __devexit mrst_spi_remove(struct pci_dev *pdev)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ void *reg;
++ int status = 0;
++
++ if (!drv_data)
++ return;
++
++ mrst_spi_debugfs_remove(drv_data);
++ pci_set_drvdata(pdev, NULL);
++
++ /* remove the queue */
++ status = destroy_queue(drv_data);
++ if (status != 0)
++ dev_err(&pdev->dev, "mrst_spi_remove: workqueue will not "
++ "complete, message memory not freed\n");
++
++#ifdef CONFIG_SPI_MRST_DMA
++ mrst_spi_dma_exit(drv_data);
++#endif
++
++ reg = drv_data->vaddr;
++ mrst_spi_enable(reg, 0);
++ spi_disable_clk(reg);
++
++ /* release IRQ */
++ free_irq(drv_data->irq, drv_data);
++
++ iounmap(drv_data->vaddr);
++ pci_release_region(pdev, 0);
++
++ /* disconnect from the SPI framework */
++ spi_unregister_master(drv_data->master);
++ pci_disable_device(pdev);
++}
++
++#ifdef CONFIG_PM
++static int mrst_spi_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ void *reg = drv_data->vaddr;
++ int status = 0;
++
++ status = stop_queue(drv_data);
++ if (status)
++ return status;
++
++ mrst_spi_enable(reg, 0);
++ spi_disable_clk(reg);
++ return status;
++}
++
++static int mrst_spi_resume(struct pci_dev *pdev)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ int status = 0;
++
++ spi_hw_init(drv_data);
++
++ /* Start the queue running */
++ status = start_queue(drv_data);
++ if (status)
++ dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
++ return status;
++}
++#else
++#define mrst_spi_suspend NULL
++#define mrst_spi_resume NULL
++#endif
++
++static const struct pci_device_id pci_ids[] __devinitdata = {
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
++ {},
++};
++
++static struct pci_driver mrst_spi_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pci_ids,
++ .probe = mrst_spi_probe,
++ .remove = __devexit_p(mrst_spi_remove),
++ .suspend = mrst_spi_suspend,
++ .resume = mrst_spi_resume,
++};
++
++static int __init mrst_spi_init(void)
++{
++ return pci_register_driver(&mrst_spi_driver);
++}
++
++static void __exit mrst_spi_exit(void)
++{
++ pci_unregister_driver(&mrst_spi_driver);
++}
++
++module_init(mrst_spi_init);
++module_exit(mrst_spi_exit);
++
++MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
++MODULE_DESCRIPTION("Intel Moorestown SPI controller driver");
++MODULE_LICENSE("GPL v2");
+Index: linux-2.6.33/include/linux/spi/mrst_spi.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/spi/mrst_spi.h
+@@ -0,0 +1,162 @@
++#ifndef MRST_SPI_HEADER_H
++#define MRST_SPI_HEADER_H
++#include <linux/io.h>
++
++/* bit fields in CTRLR0 */
++#define SPI_DFS_OFFSET 0
++
++#define SPI_FRF_OFFSET 4
++#define SPI_FRF_SPI 0x0
++#define SPI_FRF_SSP 0x1
++#define SPI_FRF_MICROWIRE 0x2
++#define SPI_FRF_RESV 0x3
++
++#define SPI_MODE_OFFSET 6
++#define SPI_SCPH_OFFSET 6
++#define SPI_SCOL_OFFSET 7
++#define SPI_TMOD_OFFSET 8
++#define SPI_TMOD_TR 0x0 /* xmit & recv */
++#define SPI_TMOD_TO 0x1 /* xmit only */
++#define SPI_TMOD_RO 0x2 /* recv only */
++#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
++
++#define SPI_SLVOE_OFFSET 10
++#define SPI_SRL_OFFSET 11
++#define SPI_CFS_OFFSET 12
++
++/* bit fields in SR, 7 bits */
++#define SR_MASK 0x7f /* cover 7 bits */
++#define SR_BUSY (1 << 0)
++#define SR_TF_NOT_FULL (1 << 1)
++#define SR_TF_EMPT (1 << 2)
++#define SR_RF_NOT_EMPT (1 << 3)
++#define SR_RF_FULL (1 << 4)
++#define SR_TX_ERR (1 << 5)
++#define SR_DCOL (1 << 6)
++
++/* bit fields in ISR, IMR, RISR, 7 bits */
++#define SPI_INT_TXEI (1 << 0)
++#define SPI_INT_TXOI (1 << 1)
++#define SPI_INT_RXUI (1 << 2)
++#define SPI_INT_RXOI (1 << 3)
++#define SPI_INT_RXFI (1 << 4)
++#define SPI_INT_MSTI (1 << 5)
++
++/* TX RX interrupt level threshold, max can be 256 */
++#define SPI_INT_THRESHOLD 32
++
++#define DEFINE_MRST_SPI_RW_REG(reg, off) \
++static inline u32 read_##reg(void *p) \
++{ return readl(p + (off)); } \
++static inline void write_##reg(u32 v, void *p) \
++{ writel(v, p + (off)); }
++
++#define DEFINE_MRST_SPI_RO_REG(reg, off) \
++static inline u32 read_##reg(void *p) \
++{ return readl(p + (off)); } \
++
++DEFINE_MRST_SPI_RW_REG(ctrl0, 0x00)
++DEFINE_MRST_SPI_RW_REG(ctrl1, 0x04)
++DEFINE_MRST_SPI_RW_REG(ssienr, 0x08)
++DEFINE_MRST_SPI_RW_REG(mwcr, 0x0c)
++DEFINE_MRST_SPI_RW_REG(ser, 0x10)
++DEFINE_MRST_SPI_RW_REG(baudr, 0x14)
++DEFINE_MRST_SPI_RW_REG(txftlr, 0x18)
++DEFINE_MRST_SPI_RW_REG(rxftlr, 0x1c)
++DEFINE_MRST_SPI_RO_REG(txflr, 0x20)
++DEFINE_MRST_SPI_RO_REG(rxflr, 0x24)
++DEFINE_MRST_SPI_RO_REG(sr, 0x28)
++DEFINE_MRST_SPI_RW_REG(imr, 0x2c)
++DEFINE_MRST_SPI_RO_REG(isr, 0x30)
++DEFINE_MRST_SPI_RO_REG(risr, 0x34)
++DEFINE_MRST_SPI_RO_REG(txoicr, 0x38)
++DEFINE_MRST_SPI_RO_REG(rxoicr, 0x3c)
++DEFINE_MRST_SPI_RO_REG(rxuicr, 0x40)
++DEFINE_MRST_SPI_RO_REG(msticr, 0x44)
++DEFINE_MRST_SPI_RO_REG(icr, 0x48)
++DEFINE_MRST_SPI_RW_REG(dmacr, 0x4c)
++DEFINE_MRST_SPI_RW_REG(dmatdlr, 0x50)
++DEFINE_MRST_SPI_RW_REG(dmardlr, 0x54)
++DEFINE_MRST_SPI_RO_REG(idr, 0x58)
++DEFINE_MRST_SPI_RO_REG(version, 0x5c)
++DEFINE_MRST_SPI_RW_REG(dr, 0x60)
++
++static inline void mrst_spi_enable(void *reg, int enable)
++{
++ if (enable)
++ write_ssienr(0x1, reg);
++ else
++ write_ssienr(0x0, reg);
++}
++
++static inline void spi_enable_clk(void *reg, u16 div)
++{
++ write_baudr(div, reg);
++}
++
++static inline void spi_chip_sel(void *reg, u16 cs)
++{
++ if (cs > 4)
++ return;
++ write_ser((1 << cs), reg);
++}
++
++static inline void spi_disable_clk(void *reg)
++{
++	/* setting the divider to 0 will disable the clock */
++ write_baudr(0, reg);
++}
++
++/* disable some INT */
++static inline void spi_mask_intr(void *reg, u32 mask)
++{
++ u32 imr;
++ imr = read_imr(reg) & ~mask;
++ write_imr(imr, reg);
++}
++
++/* enable INT */
++static inline void spi_umask_intr(void *reg, u32 mask)
++{
++ u32 imr;
++ imr = read_imr(reg) | mask;
++ write_imr(imr, reg);
++}
++
++enum mrst_ssi_type {
++ SSI_MOTO_SPI = 0,
++ SSI_TI_SSP,
++ SSI_NS_MICROWIRE,
++};
++
++/* usually will be controller_data for SPI slave devices */
++struct mrst_spi_chip {
++	u8 poll_mode;	/* 0 for controller polling mode */
++	u8 type;	/* SPI/SSP/Microwire */
++ u8 enable_dma;
++ void (*cs_control)(u32 command);
++};
++
++#define SPI_DIB_NAME_LEN 16
++#define SPI_DIB_SPEC_INFO_LEN 10
++
++#define MRST_GPE_IRQ_VIA_GPIO_BIT (1 << 15)
++/* SPI device info block related */
++struct spi_dib_header {
++ u32 signature;
++ u32 length;
++ u8 rev;
++ u8 checksum;
++ u8 dib[0];
++} __attribute__((packed));
++
++struct spi_dib {
++ u16 host_num;
++ u16 cs;
++ u16 irq;
++ char name[SPI_DIB_NAME_LEN];
++ u8 dev_data[SPI_DIB_SPEC_INFO_LEN];
++} __attribute__((packed));
++
++extern struct console early_mrst_console;
++#endif /* #ifndef MRST_SPI_HEADER_H */
+Index: linux-2.6.33/drivers/serial/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/serial/Kconfig
++++ linux-2.6.33/drivers/serial/Kconfig
+@@ -688,6 +688,27 @@ config SERIAL_SA1100_CONSOLE
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
++config SERIAL_MAX3110
++ tristate "SPI UART driver for Max3110"
++ depends on SPI_MRST
++ select SERIAL_CORE
++ select SERIAL_CORE_CONSOLE
++ help
++ This is the UART protocol driver for MAX3110 device on
++ Intel Moorestown platform
++
++config MRST_MAX3110
++ boolean "Add Max3110 support for Moorestown platform"
++ default y
++ depends on SERIAL_MAX3110
++
++config MRST_MAX3110_IRQ
++ boolean "Enable GPIO IRQ for Max3110 over Moorestown"
++ default n
++ depends on MRST_MAX3110 && GPIO_LANGWELL
++ help
++ This has to be enabled after Moorestown GPIO driver is loaded
++
+ config SERIAL_BFIN
+ tristate "Blackfin serial port support"
+ depends on BLACKFIN
+Index: linux-2.6.33/drivers/serial/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/serial/Makefile
++++ linux-2.6.33/drivers/serial/Makefile
+@@ -82,3 +82,4 @@ obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgd
+ obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
+ obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
+ obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
++obj-$(CONFIG_SERIAL_MAX3110) += max3110.o
+Index: linux-2.6.33/drivers/serial/max3110.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/serial/max3110.c
+@@ -0,0 +1,850 @@
++/*
++ * max3110.c - spi uart protocol driver for Maxim 3110 on Moorestown
++ *
++ * Copyright (C) Intel 2008 Feng Tang <feng.tang@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++/*
++ * Note:
++ * 1. From Max3110 spec, the Rx FIFO has 8 words, while the Tx FIFO only has
++ * 1 word. If SPI master controller doesn't support sclk frequency change,
++ * then the char need be sent out one by one with some delay
++ *
++ * 2. Currently only the RX available interrupt is used, no need for waiting TXE
++ * interrupt for a low speed UART device
++ */
++
++#include <linux/module.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/sysrq.h>
++#include <linux/platform_device.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial_core.h>
++#include <linux/serial_reg.h>
++
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <asm/atomic.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/mrst_spi.h>
++
++#include "max3110.h"
++
++#define PR_FMT "max3110: "
++
++struct uart_max3110 {
++ struct uart_port port;
++ struct spi_device *spi;
++ char *name;
++
++ wait_queue_head_t wq;
++ struct task_struct *main_thread;
++ struct task_struct *read_thread;
++ int mthread_up;
++ spinlock_t lock;
++
++ u32 baud;
++ u16 cur_conf;
++ u8 clock;
++ u8 parity, word_7bits;
++
++ atomic_t uart_tx_need;
++
++ /* console related */
++ struct circ_buf con_xmit;
++ atomic_t con_tx_need;
++
++ /* irq related */
++ u16 irq;
++ atomic_t irq_pending;
++};
++
++/* global data structure, may need be removed */
++struct uart_max3110 *pmax;
++static inline void receive_char(struct uart_max3110 *max, u8 ch);
++static void receive_chars(struct uart_max3110 *max,
++ unsigned char *str, int len);
++static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf);
++static void max3110_console_receive(struct uart_max3110 *max);
++
++int max3110_write_then_read(struct uart_max3110 *max,
++ const u8 *txbuf, u8 *rxbuf, unsigned len, int always_fast)
++{
++ struct spi_device *spi = max->spi;
++ struct spi_message message;
++ struct spi_transfer x;
++ int ret;
++
++ if (!txbuf || !rxbuf)
++ return -EINVAL;
++
++ spi_message_init(&message);
++ memset(&x, 0, sizeof x);
++ x.len = len;
++ x.tx_buf = txbuf;
++ x.rx_buf = rxbuf;
++ spi_message_add_tail(&x, &message);
++
++ if (always_fast)
++ x.speed_hz = 3125000;
++ else if (max->baud)
++ x.speed_hz = max->baud;
++
++ /* Do the i/o */
++ ret = spi_sync(spi, &message);
++ return ret;
++}
++
++/* Write a u16 to the device, and return one u16 read back */
++int max3110_out(struct uart_max3110 *max, const u16 out)
++{
++ u16 tmp;
++ int ret;
++
++ ret = max3110_write_then_read(max, (u8 *)&out, (u8 *)&tmp, 2, 1);
++ if (ret)
++ return ret;
++
++ /* If some valid data is read back */
++ if (tmp & MAX3110_READ_DATA_AVAILABLE)
++ receive_char(max, (tmp & 0xff));
++
++ return ret;
++}
++
++#define MAX_READ_LEN 20
++/*
++ * This is usually used to read data from SPIC RX FIFO, which doesn't
++ * need any delay like flushing character out. It returns how many
++ * valid bytes are read back
++ */
++static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf)
++{
++ u16 out[MAX_READ_LEN], in[MAX_READ_LEN];
++ u8 *pbuf, valid_str[MAX_READ_LEN];
++ int i, j, bytelen;
++
++ if (len > MAX_READ_LEN) {
++ pr_err(PR_FMT "read len %d is too large\n", len);
++ return 0;
++ }
++
++ bytelen = len * 2;
++ memset(out, 0, bytelen);
++ memset(in, 0, bytelen);
++
++ if (max3110_write_then_read(max, (u8 *)out, (u8 *)in, bytelen, 1))
++ return 0;
++
++	/* If the caller doesn't provide a buffer, then handle received chars here */
++ pbuf = buf ? buf : valid_str;
++
++ for (i = 0, j = 0; i < len; i++) {
++ if (in[i] & MAX3110_READ_DATA_AVAILABLE)
++ pbuf[j++] = (u8)(in[i] & 0xff);
++ }
++
++ if (j && (pbuf == valid_str))
++ receive_chars(max, valid_str, j);
++
++ return j;
++}
++
++static void serial_m3110_con_putchar(struct uart_port *port, int ch)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ struct circ_buf *xmit = &max->con_xmit;
++
++ if (uart_circ_chars_free(xmit)) {
++ xmit->buf[xmit->head] = (char)ch;
++ xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1);
++ }
++
++ if (!atomic_read(&max->con_tx_need)) {
++ atomic_set(&max->con_tx_need, 1);
++ wake_up_process(max->main_thread);
++ }
++}
++
++/*
++ * Print a string to the serial port trying not to disturb
++ * any possible real use of the port...
++ *
++ * The console_lock must be held when we get here.
++ */
++static void serial_m3110_con_write(struct console *co,
++ const char *s, unsigned int count)
++{
++ if (!pmax)
++ return;
++
++ uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
++}
++
++static int __init
++serial_m3110_con_setup(struct console *co, char *options)
++{
++ struct uart_max3110 *max = pmax;
++ int baud = 115200;
++ int bits = 8;
++ int parity = 'n';
++ int flow = 'n';
++
++ pr_info(PR_FMT "setting up console\n");
++
++ if (!max) {
++ pr_err(PR_FMT "pmax is NULL, return");
++ return -ENODEV;
++ }
++
++ if (options)
++ uart_parse_options(options, &baud, &parity, &bits, &flow);
++
++ return uart_set_options(&max->port, co, baud, parity, bits, flow);
++}
++
++static struct tty_driver *serial_m3110_con_device(struct console *co,
++ int *index)
++{
++ struct uart_driver *p = co->data;
++ *index = co->index;
++ return p->tty_driver;
++}
++
++static struct uart_driver serial_m3110_reg;
++static struct console serial_m3110_console = {
++ .name = "ttyS",
++ .write = serial_m3110_con_write,
++ .device = serial_m3110_con_device,
++ .setup = serial_m3110_con_setup,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++ .data = &serial_m3110_reg,
++};
++
++#define MRST_CONSOLE (&serial_m3110_console)
++
++static unsigned int serial_m3110_tx_empty(struct uart_port *port)
++{
++ return 1;
++}
++
++static void serial_m3110_stop_tx(struct uart_port *port)
++{
++ return;
++}
++
++/* stop_rx will be called in spin_lock env */
++static void serial_m3110_stop_rx(struct uart_port *port)
++{
++ return;
++}
++
++#define WORDS_PER_XFER 128
++static inline void send_circ_buf(struct uart_max3110 *max,
++ struct circ_buf *xmit)
++{
++ int len, left = 0;
++ u16 obuf[WORDS_PER_XFER], ibuf[WORDS_PER_XFER];
++ u8 valid_str[WORDS_PER_XFER];
++ int i, j;
++
++ while (!uart_circ_empty(xmit)) {
++ left = uart_circ_chars_pending(xmit);
++ while (left) {
++ len = (left >= WORDS_PER_XFER) ? WORDS_PER_XFER : left;
++
++ memset(obuf, 0, len * 2);
++ memset(ibuf, 0, len * 2);
++ for (i = 0; i < len; i++) {
++ obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG;
++ xmit->tail = (xmit->tail + 1) &
++ (UART_XMIT_SIZE - 1);
++ }
++ max3110_write_then_read(max, (u8 *)obuf,
++ (u8 *)ibuf, len * 2, 0);
++
++ for (i = 0, j = 0; i < len; i++) {
++ if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
++ valid_str[j++] = (u8)(ibuf[i] & 0xff);
++ }
++
++ if (j)
++ receive_chars(max, valid_str, j);
++
++ max->port.icount.tx += len;
++ left -= len;
++ }
++ }
++}
++
++static void transmit_char(struct uart_max3110 *max)
++{
++ struct uart_port *port = &max->port;
++ struct circ_buf *xmit = &port->state->xmit;
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
++ return;
++
++ send_circ_buf(max, xmit);
++
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(port);
++
++ if (uart_circ_empty(xmit))
++ serial_m3110_stop_tx(port);
++}
++
++/* This will be called by uart_write() and tty_write, can't
++ * go to sleep */
++static void serial_m3110_start_tx(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++
++ if (!atomic_read(&max->uart_tx_need)) {
++ atomic_set(&max->uart_tx_need, 1);
++ wake_up_process(max->main_thread);
++ }
++}
++
++static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
++{
++ struct uart_port *port = &max->port;
++ struct tty_struct *tty;
++ int usable;
++
++ /* If uart is not opened, just return */
++ if (!port->state)
++ return;
++
++ tty = port->state->port.tty;
++ if (!tty)
++ return; /* receive some char before the tty is opened */
++
++ while (len) {
++ usable = tty_buffer_request_room(tty, len);
++ if (usable) {
++ tty_insert_flip_string(tty, str, usable);
++ str += usable;
++ port->icount.rx += usable;
++ tty_flip_buffer_push(tty);
++ }
++ len -= usable;
++ }
++}
++
++static inline void receive_char(struct uart_max3110 *max, u8 ch)
++{
++ receive_chars(max, &ch, 1);
++}
++
++static void max3110_console_receive(struct uart_max3110 *max)
++{
++ int loop = 1, num, total = 0;
++ u8 recv_buf[512], *pbuf;
++
++ pbuf = recv_buf;
++ do {
++ num = max3110_read_multi(max, 8, pbuf);
++
++ if (num) {
++ loop = 10;
++ pbuf += num;
++ total += num;
++
++ if (total >= 500) {
++ receive_chars(max, recv_buf, total);
++ pbuf = recv_buf;
++ total = 0;
++ }
++ }
++ } while (--loop);
++
++ if (total)
++ receive_chars(max, recv_buf, total);
++}
++
++static int max3110_main_thread(void *_max)
++{
++ struct uart_max3110 *max = _max;
++ wait_queue_head_t *wq = &max->wq;
++ int ret = 0;
++ struct circ_buf *xmit = &max->con_xmit;
++
++ init_waitqueue_head(wq);
++ pr_info(PR_FMT "start main thread\n");
++
++ do {
++ wait_event_interruptible(*wq, (atomic_read(&max->irq_pending) ||
++ atomic_read(&max->con_tx_need) ||
++ atomic_read(&max->uart_tx_need)) ||
++ kthread_should_stop());
++ max->mthread_up = 1;
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ if (atomic_read(&max->irq_pending)) {
++ max3110_console_receive(max);
++ atomic_set(&max->irq_pending, 0);
++ }
++#endif
++
++ /* first handle console output */
++ if (atomic_read(&max->con_tx_need)) {
++ send_circ_buf(max, xmit);
++ atomic_set(&max->con_tx_need, 0);
++ }
++
++ /* handle uart output */
++ if (atomic_read(&max->uart_tx_need)) {
++ transmit_char(max);
++ atomic_set(&max->uart_tx_need, 0);
++ }
++ max->mthread_up = 0;
++ } while (!kthread_should_stop());
++
++ return ret;
++}
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++irqreturn_t static serial_m3110_irq(int irq, void *dev_id)
++{
++ struct uart_max3110 *max = dev_id;
++
++ /* max3110's irq is a falling edge, not level triggered,
++ * so no need to disable the irq */
++ if (!atomic_read(&max->irq_pending)) {
++ atomic_inc(&max->irq_pending);
++ wake_up_process(max->main_thread);
++ }
++ return IRQ_HANDLED;
++}
++#else
++/* if the RX IRQ is not used, a polling read thread is needed */
++static int max3110_read_thread(void *_max)
++{
++ struct uart_max3110 *max = _max;
++
++ pr_info(PR_FMT "start read thread\n");
++ do {
++ if (!max->mthread_up)
++ max3110_console_receive(max);
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(HZ / 20);
++ } while (!kthread_should_stop());
++
++ return 0;
++}
++#endif
++
++static int serial_m3110_startup(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ u16 config = 0;
++ int ret = 0;
++
++ if (port->line != 0)
++ pr_err(PR_FMT "uart port startup failed\n");
++
++ /* firstly disable all IRQ and config it to 115200, 8n1 */
++ config = WC_TAG | WC_FIFO_ENABLE
++ | WC_1_STOPBITS
++ | WC_8BIT_WORD
++ | WC_BAUD_DR2;
++ ret = max3110_out(max, config);
++
++ /* as we use thread to handle tx/rx, need set low latency */
++ port->state->port.tty->low_latency = 1;
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ ret = request_irq(max->irq, serial_m3110_irq,
++ IRQ_TYPE_EDGE_FALLING, "max3110", max);
++ if (ret)
++ return ret;
++
++ /* enable RX IRQ only */
++ config |= WC_RXA_IRQ_ENABLE;
++ max3110_out(max, config);
++#else
++ /* if IRQ is disabled, start a read thread for input data */
++ max->read_thread =
++ kthread_run(max3110_read_thread, max, "max3110_read");
++#endif
++
++ max->cur_conf = config;
++ return 0;
++}
++
++static void serial_m3110_shutdown(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ u16 config;
++
++ if (max->read_thread) {
++ kthread_stop(max->read_thread);
++ max->read_thread = NULL;
++ }
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ free_irq(max->irq, max);
++#endif
++
++ /* Disable interrupts from this port */
++ config = WC_TAG | WC_SW_SHDI;
++ max3110_out(max, config);
++}
++
++static void serial_m3110_release_port(struct uart_port *port)
++{
++}
++
++static int serial_m3110_request_port(struct uart_port *port)
++{
++ return 0;
++}
++
++static void serial_m3110_config_port(struct uart_port *port, int flags)
++{
++ /* give it fake type */
++ port->type = PORT_PXA;
++}
++
++static int
++serial_m3110_verify_port(struct uart_port *port, struct serial_struct *ser)
++{
++ /* we don't want the core code to modify any port params */
++ return -EINVAL;
++}
++
++
++static const char *serial_m3110_type(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ return max->name;
++}
++
++static void
++serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
++ struct ktermios *old)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ unsigned char cval;
++ unsigned int baud, parity = 0;
++ int clk_div = -1;
++ u16 new_conf = max->cur_conf;
++
++ switch (termios->c_cflag & CSIZE) {
++ case CS7:
++ cval = UART_LCR_WLEN7;
++ new_conf |= WC_7BIT_WORD;
++ break;
++ default:
++ case CS8:
++ cval = UART_LCR_WLEN8;
++ new_conf |= WC_8BIT_WORD;
++ break;
++ }
++
++ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
++
++ /* first calc the div for 1.8MHZ clock case */
++ switch (baud) {
++ case 300:
++ clk_div = WC_BAUD_DR384;
++ break;
++ case 600:
++ clk_div = WC_BAUD_DR192;
++ break;
++ case 1200:
++ clk_div = WC_BAUD_DR96;
++ break;
++ case 2400:
++ clk_div = WC_BAUD_DR48;
++ break;
++ case 4800:
++ clk_div = WC_BAUD_DR24;
++ break;
++ case 9600:
++ clk_div = WC_BAUD_DR12;
++ break;
++ case 19200:
++ clk_div = WC_BAUD_DR6;
++ break;
++ case 38400:
++ clk_div = WC_BAUD_DR3;
++ break;
++ case 57600:
++ clk_div = WC_BAUD_DR2;
++ break;
++ case 115200:
++ clk_div = WC_BAUD_DR1;
++ break;
++ default:
++ /* pick the previous baud rate */
++ baud = max->baud;
++ clk_div = max->cur_conf & WC_BAUD_DIV_MASK;
++ tty_termios_encode_baud_rate(termios, baud, baud);
++ }
++
++ if (max->clock & MAX3110_HIGH_CLK) {
++ clk_div += 1;
++ /* high clk version max3110 doesn't support B300 */
++ if (baud == 300)
++ baud = 600;
++ if (baud == 230400)
++ clk_div = WC_BAUD_DR1;
++ tty_termios_encode_baud_rate(termios, baud, baud);
++ }
++
++ new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div;
++ if (termios->c_cflag & CSTOPB)
++ new_conf |= WC_2_STOPBITS;
++ else
++ new_conf &= ~WC_2_STOPBITS;
++
++ if (termios->c_cflag & PARENB) {
++ new_conf |= WC_PARITY_ENABLE;
++ parity |= UART_LCR_PARITY;
++ } else
++ new_conf &= ~WC_PARITY_ENABLE;
++
++ if (!(termios->c_cflag & PARODD))
++ parity |= UART_LCR_EPAR;
++ max->parity = parity;
++
++ uart_update_timeout(port, termios->c_cflag, baud);
++
++ new_conf |= WC_TAG;
++ if (new_conf != max->cur_conf) {
++ max3110_out(max, new_conf);
++ max->cur_conf = new_conf;
++ max->baud = baud;
++ }
++}
++
++/* don't handle hw handshaking */
++static unsigned int serial_m3110_get_mctrl(struct uart_port *port)
++{
++ return TIOCM_DSR | TIOCM_CAR | TIOCM_DSR;
++}
++
++static void serial_m3110_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++}
++
++static void serial_m3110_break_ctl(struct uart_port *port, int break_state)
++{
++}
++
++static void serial_m3110_pm(struct uart_port *port, unsigned int state,
++ unsigned int oldstate)
++{
++}
++
++static void serial_m3110_enable_ms(struct uart_port *port)
++{
++}
++
++struct uart_ops serial_m3110_ops = {
++ .tx_empty = serial_m3110_tx_empty,
++ .set_mctrl = serial_m3110_set_mctrl,
++ .get_mctrl = serial_m3110_get_mctrl,
++ .stop_tx = serial_m3110_stop_tx,
++ .start_tx = serial_m3110_start_tx,
++ .stop_rx = serial_m3110_stop_rx,
++ .enable_ms = serial_m3110_enable_ms,
++ .break_ctl = serial_m3110_break_ctl,
++ .startup = serial_m3110_startup,
++ .shutdown = serial_m3110_shutdown,
++ .set_termios = serial_m3110_set_termios, /* must have */
++ .pm = serial_m3110_pm,
++ .type = serial_m3110_type,
++ .release_port = serial_m3110_release_port,
++ .request_port = serial_m3110_request_port,
++ .config_port = serial_m3110_config_port,
++ .verify_port = serial_m3110_verify_port,
++};
++
++static struct uart_driver serial_m3110_reg = {
++ .owner = THIS_MODULE,
++ .driver_name = "MRST serial",
++ .dev_name = "ttyS",
++ .major = TTY_MAJOR,
++ .minor = 64,
++ .nr = 1,
++ .cons = MRST_CONSOLE,
++};
++
++static int serial_m3110_suspend(struct spi_device *spi, pm_message_t state)
++{
++ return 0;
++}
++
++static int serial_m3110_resume(struct spi_device *spi)
++{
++ return 0;
++}
++
++#ifdef CONFIG_MRST_MAX3110
++static struct mrst_spi_chip spi0_uart = {
++ .poll_mode = 1,
++ .enable_dma = 0,
++ .type = SPI_FRF_SPI,
++};
++#endif
++
++static int serial_m3110_probe(struct spi_device *spi)
++{
++ struct uart_max3110 *max;
++ int ret;
++ unsigned char *buffer;
++
++ max = kzalloc(sizeof(*max), GFP_KERNEL);
++ if (!max)
++ return -ENOMEM;
++
++ /* set spi info */
++ spi->mode = SPI_MODE_0;
++ spi->bits_per_word = 16;
++#ifdef CONFIG_MRST_MAX3110
++ max->clock = MAX3110_HIGH_CLK;
++ spi->controller_data = &spi0_uart;
++#endif
++ spi_setup(spi);
++
++ max->port.type = PORT_PXA; /* need apply for a max3110 type */
++ max->port.fifosize = 2; /* only have 16b buffer */
++ max->port.ops = &serial_m3110_ops;
++ max->port.line = 0;
++ max->port.dev = &spi->dev;
++ max->port.uartclk = 115200;
++
++ max->spi = spi;
++ max->name = spi->modalias; /* use spi name as the name */
++ max->irq = (u16)spi->irq;
++
++ spin_lock_init(&max->lock);
++
++ max->word_7bits = 0;
++ max->parity = 0;
++ max->baud = 0;
++
++ max->cur_conf = 0;
++ atomic_set(&max->irq_pending, 0);
++
++ buffer = (unsigned char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ ret = -ENOMEM;
++ goto err_get_page;
++ }
++ max->con_xmit.buf = (unsigned char *)buffer;
++ max->con_xmit.head = max->con_xmit.tail = 0;
++
++ max->main_thread = kthread_run(max3110_main_thread,
++ max, "max3110_main");
++ if (IS_ERR(max->main_thread)) {
++ ret = PTR_ERR(max->main_thread);
++ goto err_kthread;
++ }
++
++ pmax = max;
++	/* give membase a pseudo value to pass serial_core's check */
++ max->port.membase = (void *)0xff110000;
++ uart_add_one_port(&serial_m3110_reg, &max->port);
++
++ return 0;
++
++err_kthread:
++ free_page((unsigned long)buffer);
++err_get_page:
++ pmax = NULL;
++ kfree(max);
++ return ret;
++}
++
++static int max3110_remove(struct spi_device *dev)
++{
++ struct uart_max3110 *max = pmax;
++
++ if (!pmax)
++ return 0;
++
++ pmax = NULL;
++ uart_remove_one_port(&serial_m3110_reg, &max->port);
++
++ free_page((unsigned long)max->con_xmit.buf);
++
++ if (max->main_thread)
++ kthread_stop(max->main_thread);
++
++ kfree(max);
++ return 0;
++}
++
++static struct spi_driver uart_max3110_driver = {
++ .driver = {
++ .name = "spi_max3111",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = serial_m3110_probe,
++ .remove = __devexit_p(max3110_remove),
++ .suspend = serial_m3110_suspend,
++ .resume = serial_m3110_resume,
++};
++
++
++int __init serial_m3110_init(void)
++{
++ int ret = 0;
++
++ ret = uart_register_driver(&serial_m3110_reg);
++ if (ret)
++ return ret;
++
++ ret = spi_register_driver(&uart_max3110_driver);
++ if (ret)
++ uart_unregister_driver(&serial_m3110_reg);
++
++ return ret;
++}
++
++void __exit serial_m3110_exit(void)
++{
++ spi_unregister_driver(&uart_max3110_driver);
++ uart_unregister_driver(&serial_m3110_reg);
++}
++
++module_init(serial_m3110_init);
++module_exit(serial_m3110_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("max3110-uart");
+Index: linux-2.6.33/drivers/serial/max3110.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/serial/max3110.h
+@@ -0,0 +1,59 @@
++#ifndef _MAX3110_HEAD_FILE_
++#define _MAX3110_HEAD_FILE_
++
++#define MAX3110_HIGH_CLK 0x1 /* 3.6864 MHZ */
++#define MAX3110_LOW_CLK 0x0 /* 1.8432 MHZ */
++
++/* status bits for all 4 MAX3110 operate modes */
++#define MAX3110_READ_DATA_AVAILABLE (1 << 15)
++#define MAX3110_WRITE_BUF_EMPTY (1 << 14)
++
++#define WC_TAG (3 << 14)
++#define RC_TAG (1 << 14)
++#define WD_TAG (2 << 14)
++#define RD_TAG (0 << 14)
++
++/* bits def for write configuration */
++#define WC_FIFO_ENABLE_MASK (1 << 13)
++#define WC_FIFO_ENABLE (0 << 13)
++
++#define WC_SW_SHDI (1 << 12)
++
++#define WC_IRQ_MASK (0xF << 8)
++#define WC_TXE_IRQ_ENABLE (1 << 11) /* TX empty irq */
++#define WC_RXA_IRQ_ENABLE	(1 << 10)	/* RX available irq */
++#define WC_PAR_HIGH_IRQ_ENABLE (1 << 9)
++#define WC_REC_ACT_IRQ_ENABLE (1 << 8)
++
++#define WC_IRDA_ENABLE (1 << 7)
++
++#define WC_STOPBITS_MASK (1 << 6)
++#define WC_2_STOPBITS (1 << 6)
++#define WC_1_STOPBITS (0 << 6)
++
++#define WC_PARITY_ENABLE_MASK (1 << 5)
++#define WC_PARITY_ENABLE (1 << 5)
++
++#define WC_WORDLEN_MASK (1 << 4)
++#define WC_7BIT_WORD (1 << 4)
++#define WC_8BIT_WORD (0 << 4)
++
++#define WC_BAUD_DIV_MASK (0xF)
++#define WC_BAUD_DR1 (0x0)
++#define WC_BAUD_DR2 (0x1)
++#define WC_BAUD_DR4 (0x2)
++#define WC_BAUD_DR8 (0x3)
++#define WC_BAUD_DR16 (0x4)
++#define WC_BAUD_DR32 (0x5)
++#define WC_BAUD_DR64 (0x6)
++#define WC_BAUD_DR128 (0x7)
++#define WC_BAUD_DR3 (0x8)
++#define WC_BAUD_DR6 (0x9)
++#define WC_BAUD_DR12 (0xA)
++#define WC_BAUD_DR24 (0xB)
++#define WC_BAUD_DR48 (0xC)
++#define WC_BAUD_DR96 (0xD)
++#define WC_BAUD_DR192 (0xE)
++#define WC_BAUD_DR384 (0xF)
++
++#endif
+Index: linux-2.6.33/arch/x86/Kconfig.debug
+===================================================================
+--- linux-2.6.33.orig/arch/x86/Kconfig.debug
++++ linux-2.6.33/arch/x86/Kconfig.debug
+@@ -43,6 +43,10 @@ config EARLY_PRINTK
+ with klogd/syslogd or the X server. You should normally N here,
+ unless you want to debug such a crash.
+
++config X86_MRST_EARLY_PRINTK
++ bool "Early printk for MRST platform support"
++ depends on EARLY_PRINTK && X86_MRST
++
+ config EARLY_PRINTK_DBGP
+ bool "Early printk via EHCI debug port"
+ default n
+Index: linux-2.6.33/arch/x86/kernel/early_printk.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/early_printk.c
++++ linux-2.6.33/arch/x86/kernel/early_printk.c
+@@ -14,6 +14,7 @@
+ #include <xen/hvc-console.h>
+ #include <asm/pci-direct.h>
+ #include <asm/fixmap.h>
++#include <linux/spi/mrst_spi.h>
+ #include <asm/pgtable.h>
+ #include <linux/usb/ehci_def.h>
+
+@@ -231,6 +232,10 @@ static int __init setup_early_printk(cha
+ if (!strncmp(buf, "xen", 3))
+ early_console_register(&xenboot_console, keep);
+ #endif
++#ifdef CONFIG_X86_MRST_EARLY_PRINTK
++ if (!strncmp(buf, "mrst", 4))
++ early_console_register(&early_mrst_console, keep);
++#endif
+ buf++;
+ }
+ return 0;
+Index: linux-2.6.33/arch/x86/kernel/mrst_earlyprintk.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/mrst_earlyprintk.c
+@@ -0,0 +1,177 @@
++/*
++ * mrst_earlyprintk.c - spi-uart early printk for Intel Moorestown platform
++ *
++ * Copyright (c) 2008 Intel Corporation
++ * Author: Feng Tang(feng.tang@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/spi/mrst_spi.h>
++
++#include <asm/fixmap.h>
++#include <asm/pgtable.h>
++
++#define MRST_SPI_TIMEOUT 0x200000
++#define MRST_REGBASE_SPI0 0xff128000
++#define MRST_CLK_SPI0_REG 0xff11d86c
++
++/* use SPI0 register for MRST x86 core */
++static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
++
++/* always contains an accessible address, start with 0 */
++static void *pspi;
++static u32 *pclk_spi0;
++static int mrst_spi_inited;
++
++/*
++ * One trick for the early printk is that it could be called
++ * before and after the real page table is enabled for kernel,
++ * so the PHY IO registers should be mapped twice. And a flag
++ * "real_pgt_is_up" is used as an indicator
++ */
++static int real_pgt_is_up;
++
++static void early_mrst_spi_init(void)
++{
++ u32 ctrlr0 = 0;
++ u32 spi0_cdiv;
++ static u32 freq; /* freq info only need be searched once */
++
++ if (pspi && mrst_spi_inited)
++ return;
++
++ if (!freq) {
++ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, MRST_CLK_SPI0_REG);
++ pclk_spi0 = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
++ (MRST_CLK_SPI0_REG & (PAGE_SIZE - 1)));
++
++ spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
++ freq = 100000000 / (spi0_cdiv + 1);
++ }
++
++ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, mrst_spi_paddr);
++ pspi = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
++ (mrst_spi_paddr & (PAGE_SIZE - 1)));
++
++ /* disable SPI controller */
++ write_ssienr(0x0, pspi);
++
++ /* set control param, 8 bits, transmit only mode */
++ ctrlr0 = read_ctrl0(pspi);
++
++ ctrlr0 &= 0xfcc0;
++ ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
++ | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
++ write_ctrl0(ctrlr0, pspi);
++
++ /* change the spi0 clk to comply with 115200 bps */
++ write_baudr(freq/115200, pspi);
++
++ /* disable all INT for early phase */
++ write_imr(0x0, pspi);
++
++ /* set the cs to max3110 */
++ write_ser(0x2, pspi);
++
++ /* enable the HW, the last step for HW init */
++ write_ssienr(0x1, pspi);
++
++ mrst_spi_inited = 1;
++}
++
++/* set the ratio rate, INT */
++static void max3110_write_config(void)
++{
++ u16 config;
++
++ /* 115200, TM not set, no parity, 8bit word */
++ config = 0xc001;
++ write_dr(config, pspi);
++}
++
++/* transfer char to an eligible word and send to max3110 */
++static void max3110_write_data(char c)
++{
++ u16 data;
++
++ data = 0x8000 | c;
++ write_dr(data, pspi);
++}
++
++/* slave select should be called in the read/write function */
++static int early_mrst_spi_putc(char c)
++{
++ unsigned int timeout;
++ u32 sr;
++
++ timeout = MRST_SPI_TIMEOUT;
++	/* early putc needs to make sure the TX FIFO is not full */
++ while (timeout--) {
++ sr = read_sr(pspi);
++ if (!(sr & SR_TF_NOT_FULL))
++ cpu_relax();
++ else
++ break;
++ }
++
++ if (timeout == 0xffffffff) {
++ printk(KERN_INFO "SPI: waiting timeout \n");
++ return -1;
++ }
++
++ max3110_write_data(c);
++ return 0;
++}
++
++/* early SPI only use polling mode */
++static void early_mrst_spi_write(struct console *con,
++ const char *str, unsigned n)
++{
++ int i;
++
++ if ((read_cr3() == __pa(swapper_pg_dir)) && !real_pgt_is_up) {
++ mrst_spi_inited = 0;
++ real_pgt_is_up = 1;
++ }
++
++ if (!mrst_spi_inited) {
++ early_mrst_spi_init();
++ max3110_write_config();
++ }
++
++ for (i = 0; i < n && *str; i++) {
++ if (*str == '\n')
++ early_mrst_spi_putc('\r');
++ early_mrst_spi_putc(*str);
++
++ str++;
++ }
++}
++
++struct console early_mrst_console = {
++ .name = "earlymrst",
++ .write = early_mrst_spi_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* a debug function */
++void mrst_early_printk(const char *fmt, ...)
++{
++ char buf[512];
++ int n;
++ va_list ap;
++
++ va_start(ap, fmt);
++ n = vscnprintf(buf, 512, fmt, ap);
++ va_end(ap);
++
++ early_mrst_console.write(&early_mrst_console, buf, n);
++}
+Index: linux-2.6.33/arch/x86/include/asm/ipc_defs.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/include/asm/ipc_defs.h
+@@ -0,0 +1,217 @@
++/*
++*ipc_defs.h - Header file defining data types and functions for ipc driver.
++*
++*Copyright (C) 2008 Intel Corp
++*Copyright (C) 2008 Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>
++*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++*
++*This program is free software; you can redistribute it and/or modify
++*it under the terms of the GNU General Public License as published by
++*the Free Software Foundation; version 2 of the License.
++*
++*This program is distributed in the hope that it will be useful, but
++*WITHOUT ANY WARRANTY; without even the implied warranty of
++*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++*General Public License for more details.
++ *
++*You should have received a copy of the GNU General Public License along
++*with this program; if not, write to the Free Software Foundation, Inc.,
++*59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++*
++*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++*
++*This driver implements core IPC kernel functions to read/write and execute
++*various commands supported by System controller firmware for Moorestown
++*platform.
++*/
++
++#ifndef __IPC_DEFS_H__
++#define __IPC_DEFS_H__
++
++#include <linux/init.h>
++#include <linux/module.h>
++
++#define E_INVALID_CMD -249
++#define E_READ_USER_CMD -250
++#define E_READ_USER_DATA -251
++#define E_WRITE_USER_DATA -252
++#define E_PMIC_MALLOC -253
++
++#define MAX_PMICREGS 5
++#define MAX_PMIC_MOD_REGS 4
++
++#ifndef FALSE
++#define FALSE 0
++#define TRUE 1
++#endif
++#define SUCCESS 0
++
++/*
++ * List of commands sent by calling host
++ * drivers to IPC_Driver
++*/
++
++/* CCA battery driver specific commands.
++ * These commands are shared across IPC driver
++ * and calling host driver
++ */
++
++#define IPC_WATCHDOG 0xA0
++#define IPC_PROGRAM_BUS_MASTER 0xA1
++#define DEVICE_FW_UPGRADE 0xA2
++#define GET_FW_VERSION 0xA3
++
++#define IPC_BATT_CCA_READ 0xB0
++#define IPC_BATT_CCA_WRITE 0xB1
++#define IPC_BATT_GET_PROP 0xB2
++
++#define IPC_PMIC_REGISTER_READ_NON_BLOCKING 0xEB
++#define IPC_READ32 0xEC
++#define IPC_WRITE32 0xED
++#define IPC_LPE_READ 0xEE
++#define IPC_LPE_WRITE 0xEF
++#define IPC_SEND_COMMAND 0xFA
++#define IPC_PMIC_REGISTER_READ 0xFB
++#define IPC_PMIC_REGISTER_READ_MODIFY 0xFC
++#define IPC_PMIC_REGISTER_WRITE 0xFD
++#define IPC_CHECK_STATUS 0xFE
++#define GET_SCU_FIRMWARE_VERSION 0xFF
++
++#define MAX_PMICREGS 5
++#define MAX_PMIC_MOD_REGS 4
++
++/* Adding the error code*/
++#define E_INVALID_PARAM -0xA0
++#define E_NUM_ENTRIES_OUT_OF_RANGE -0xA1
++#define E_CMD_FAILED -0xA2
++#define E_NO_INTERRUPT_ON_IOC -0xA3
++#define E_QUEUE_IS_FULL -0xA4
++
++/* VRTC IPC CMD ID and sub id */
++#define IPC_VRTC_CMD 0xFA
++#define IPC_VRTC_SET_TIME 0x01
++#define IPC_VRTC_SET_ALARM 0x02
++
++struct ipc_cmd_val {
++ /*
++ *More fields to be added for
++ *future enhancements
++ */
++ u32 ipc_cmd_data;
++};
++
++struct ipc_cmd_type {
++ u8 cmd;
++ u32 data;
++ u8 value;
++ u8 ioc;
++};
++
++/*
++ * Structures defined for battery PMIC driver
++ * This structure is used by the following commands
++ * IPC_BATT_CCA_READ and IPC_BATT_CCA_WRITE
++ */
++struct ipc_batt_cca_data {
++ int cca_val;
++};
++
++/*
++ * Structures defined for battery PMIC driver
++ * This structure is used by IPC_BATT_GET_PROP
++ */
++struct ipc_batt_prop_data {
++ u32 batt_value1;
++ u8 batt_value2[5];
++};
++
++struct ipc_reg_data {
++ u8 ioc;
++ u32 address;
++ u32 data;
++};
++
++struct ipc_cmd {
++ u8 cmd;
++ u32 data;
++};
++
++struct pmicmodreg {
++ u16 register_address;
++ u8 value;
++ u8 bit_map;
++};
++
++struct pmicreg {
++ u16 register_address;
++ u8 value;
++};
++
++struct ipc_pmic_reg_data {
++ bool ioc;
++ struct pmicreg pmic_reg_data[MAX_PMICREGS];
++ u8 num_entries;
++};
++
++struct ipc_pmic_mod_reg_data {
++ bool ioc;
++ struct pmicmodreg pmic_mod_reg_data[MAX_PMIC_MOD_REGS];
++ u8 num_entries;
++};
++
++/* Firmware ingredient version information.
++ * fw_data[0] = scu_rt_minor;
++ * fw_data[1] = scu_rt_major;
++ * fw_data[2] = scu_bs_minor;
++ * fw_data[3] = scu_bs_major;
++ * fw_data[4] = punit_minor;
++ * fw_data[5] = punit_major;
++ * fw_data[6] = x86_minor;
++ * fw_data[7] = x86_major;
++ * fw_data[8] = spectra_minor;
++ * fw_data[9] = spectra_major;
++ * fw_data[10] = val_hook_minor;
++ * fw_data[11] = val_hook_major;
++ * fw_data[12] = ifw_minor;
++ * fw_data[13] = ifw_major;
++ * fw_data[14] = rfu1;
++ * fw_data[15] = rfu2;
++*/
++struct watchdog_reg_data {
++ int payload1;
++ int payload2;
++ bool ioc;
++};
++
++struct ipc_io_bus_master_regs {
++ u32 ctrl_reg_addr;
++ u32 ctrl_reg_data;
++};
++
++struct ipc_non_blocking_pmic_read{
++ struct ipc_pmic_reg_data pmic_nb_read;
++ void *context;
++ int (*callback_host)(struct ipc_pmic_reg_data pmic_read_data,
++ void *context);
++};
++
++int ipc_check_status(void);
++int mrst_get_firmware_version(unsigned char *mrst_fw_ver_info);
++int ipc_config_cmd(struct ipc_cmd_type ipc_cmd,
++ u32 ipc_cmd_len, void *cmd_data);
++int ipc_pmic_register_write(struct ipc_pmic_reg_data *p_write_reg_data,
++ u8 ipc_blocking_flag);
++int ipc_pmic_register_read(struct ipc_pmic_reg_data *p_read_reg_data);
++int ipc_pmic_register_read_modify(struct ipc_pmic_mod_reg_data
++ *p_read_mod_reg_data);
++int mrst_ipc_read32(struct ipc_reg_data *p_reg_data);
++int mrst_ipc_write32(struct ipc_reg_data *p_reg_data);
++int ipc_set_watchdog(struct watchdog_reg_data *p_watchdog_data);
++int ipc_program_io_bus_master(struct ipc_io_bus_master_regs
++ *p_reg_data);
++int ipc_pmic_register_read_non_blocking(struct ipc_non_blocking_pmic_read
++ *p_nb_read);
++int ipc_device_fw_upgrade(u8 *cmd_data, u32 ipc_cmd_len);
++int lnw_ipc_single_cmd(u8 cmd_id, u8 sub_id, int size, int msi);
++
++#endif
+Index: linux-2.6.33/arch/x86/kernel/ipc_mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/ipc_mrst.c
+@@ -0,0 +1,1612 @@
++/*
++ * ipc_mrst.c: Driver for Langwell IPC1
++ *
++ * (C) Copyright 2008 Intel Corporation
++ * Author: Sreenidhi Gurudatt (sreenidhi.b.gurudatt@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * Langwell provides two IPC units to communicate with IA host. IPC1 is
++ * dedicated for IA. IPC commands results in LNW SCU interrupt. The
++ * initial implementation of this driver is platform specific. It will be
++ * converted to a PCI driver once SCU FW is in place.
++ * Log: Tested after submitting bugzilla patch - 24th December 08
++ * Log: Implemented Error Handling features and resolved IPC driver sighting
++ * PMIC Read/Write calls now take 80 to 200usecs - March 09 09.
++ * Log: Adding the IO BUS Master programming support - March 09 09.
++ */
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/sysdev.h>
++#include <linux/pm.h>
++#include <linux/pci.h>
++#include <asm/ipc_defs.h>
++#include <linux/workqueue.h>
++#include <linux/sched.h>
++
++#include "ipc_mrst.h"
++
++#ifndef CONFIG_PCI
++#error "This file is PCI bus glue.CONFIG_PCI must be defined."
++#endif
++
++/*virtual memory address for IPC base returned by IOREMAP().*/
++void __iomem *p_ipc_base;
++void __iomem *p_i2c_ser_bus;
++void __iomem *p_dfu_fw_base;
++void __iomem *p_dfu_mailbox_base;
++static unsigned char fw_ver_data[16];
++
++static wait_queue_head_t wait;
++static struct semaphore sema_ipc;
++static int scu_cmd_completed = FALSE;
++static bool non_blocking_read_flag = FALSE;
++static struct ipc_work_struct ipc_wq;
++static struct ipc_non_blocking_pmic_read pmic_read_que[MAX_NB_BUF_SIZE];
++static unsigned int cmd_id;
++static int (*callback)(struct ipc_pmic_reg_data pmic_read_data, void *context);
++static DEFINE_MUTEX(mrst_ipc_mutex);
++
++#ifdef LNW_IPC_DEBUG
++
++#define lnw_ipc_dbg(fmt, args...) \
++ do { printk(fmt, ## args); } while (0)
++#else
++#define lnw_ipc_dbg(fmt, args...) do { } while (0)
++#endif
++static const char ipc_name[] = "ipc_mrst";
++
++unsigned long lnw_ipc_address;
++static void __iomem *lnw_ipc_virt_address;
++static unsigned short cmdid_pool = 0xffff;
++static inline int lnw_ipc_set_mapping(struct pci_dev *dev)
++{
++ unsigned long cadr;
++ cadr = dev->resource[0].start;
++ cadr &= PCI_BASE_ADDRESS_MEM_MASK;
++ if (!cadr) {
++ printk(KERN_INFO "No PCI resource for IPC\n");
++ return -ENODEV;
++ }
++ lnw_ipc_virt_address = ioremap_nocache(cadr, 0x1000);
++ if (lnw_ipc_virt_address != NULL) {
++ dev_info(&dev->dev, "lnw ipc base found 0x%lup: 0x%p\n",
++ cadr, lnw_ipc_virt_address);
++ return 0;
++ }
++ printk(KERN_INFO "Failed map LNW IPC1 phy address at %lu\n", cadr);
++ return -ENODEV;
++}
++
++static inline void lnw_ipc_clear_mapping(void)
++{
++ iounmap(lnw_ipc_virt_address);
++ lnw_ipc_virt_address = NULL;
++}
++
++unsigned long lnw_ipc_readl(unsigned long a)
++{
++ return readl(lnw_ipc_virt_address + a);
++}
++
++static inline void lnw_ipc_writel(unsigned long d, unsigned long a)
++{
++ writel(d, lnw_ipc_virt_address + a);
++}
++
++static unsigned char lnw_ipc_assign_cmdid(void)
++{
++ unsigned char cmdid = 0;
++ unsigned short thebit;
++ thebit = cmdid_pool&(~cmdid_pool + 1);
++ printk(KERN_INFO "pool=0x%04x thebit=0x%04x\n",
++ cmdid_pool, thebit);
++ while (thebit >> cmdid)
++ cmdid++;
++ printk(KERN_INFO "Allocate IPC cmd ID %d\n", cmdid);
++ cmdid_pool &= ~thebit;
++ return cmdid;
++}
++
++int lnw_ipc_single_cmd(u8 cmd_id, u8 sub_id, int size, int msi)
++{
++ unsigned long cmdreg, stsreg, retry;
++
++ if (!lnw_ipc_virt_address) {
++ printk(KERN_ERR "No IPC mapping\n");
++ goto err_ipccmd;
++ }
++ if (size >= 16) {
++ printk(KERN_ERR "IPC message size too big %d\n", size);
++ goto err_ipccmd;
++ }
++
++ WARN_ON((msi != 0) && (msi != 1));
++
++ cmdreg = cmd_id
++ | (sub_id << 12)
++ | (size << 16)
++ | (msi << 8);
++
++ lnw_ipc_writel(cmdreg, LNW_IPC_CMD);
++
++ /* check status make sure the command is received by SCU */
++ retry = 1000;
++ stsreg = lnw_ipc_readl(LNW_IPC_STS);
++ if (stsreg & LNW_IPC_STS_ERR) {
++ lnw_ipc_dbg("IPC command ID %d error\n", cmd_id);
++ goto err_ipccmd;
++ }
++ while ((stsreg & LNW_IPC_STS_BUSY) && retry) {
++ lnw_ipc_dbg("IPC command ID %d busy\n", cmd_id);
++ stsreg = lnw_ipc_readl(LNW_IPC_STS);
++ udelay(10);
++ retry--;
++ }
++
++ if (!retry)
++ printk(KERN_ERR "IPC command ID %d failed/timeout", cmd_id);
++ else
++ lnw_ipc_dbg("IPC command ID %d completed\n", cmd_id);
++
++ return 0;
++
++err_ipccmd:
++ return -1;
++}
++EXPORT_SYMBOL(lnw_ipc_single_cmd);
++
++int lnw_ipc_send_cmd(unsigned char cmd, int size, int msi)
++{
++ unsigned long cmdreg, stsreg;
++ unsigned char cmdid, retry;
++
++ if (!lnw_ipc_virt_address) {
++ printk(KERN_ERR "No IPC mapping\n");
++ goto err_ipccmd;
++ }
++ if (size >= 16) {
++ printk(KERN_ERR "IPC message size too big %d\n", size);
++ goto err_ipccmd;
++ }
++
++ cmdid = lnw_ipc_assign_cmdid();
++ cmdreg = lnw_ipc_readl(LNW_IPC_CMD);
++ cmdreg |= cmdid << 12;
++ cmdreg |= size << 16;
++ if (msi)
++ cmdreg |= 1 << 8;
++ lnw_ipc_writel(cmdreg, LNW_IPC_CMD);
++ /* check status make sure the command is received by SCU */
++ retry = 10;
++ stsreg = lnw_ipc_readl(LNW_IPC_STS);
++ if (stsreg&LNW_IPC_STS_ERR) {
++ lnw_ipc_dbg("IPC command ID %d error\n", cmdid);
++ goto err_ipccmd;
++ }
++ while ((stsreg&LNW_IPC_STS_BUSY) || retry) {
++ lnw_ipc_dbg("IPC command ID %d busy\n", cmdid);
++ stsreg = lnw_ipc_readl(LNW_IPC_STS);
++ udelay(10);
++ retry--;
++ }
++ if (!retry)
++ lnw_ipc_dbg("IPC command ID %d failed/timeout\n", cmdid);
++ else
++ lnw_ipc_dbg("IPC command ID %d completed\n", cmdid);
++
++err_ipccmd:
++ return -1;
++}
++/*
++ * For IPC transfer modes except read DMA, there is no need for MSI,
++ * so the driver polls status after each IPC command is issued.
++ */
++static irqreturn_t ipc_irq(int irq, void *dev_id)
++{
++ union ipc_sts ipc_sts_reg;
++
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++
++ if (!ipc_sts_reg.ipc_sts_parts.busy) {
++ /*Call on NON Blocking flag being set.*/
++ if (non_blocking_read_flag == TRUE) {
++ schedule_work(&ipc_wq.ipc_work);
++ } else {
++ scu_cmd_completed = TRUE;
++ wake_up_interruptible(&wait);
++ }
++ }
++ return IRQ_HANDLED;
++}
++
++static const struct ipc_driver ipc_mrst_driver = {
++ .name = "MRST IPC Controller",
++ /*
++ * generic hardware linkage
++ */
++ .irq = ipc_irq,
++ .flags = 0,
++};
++
++static int ipc_mrst_pci_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ int err, retval, i;
++ lnw_ipc_dbg("Attempt to enable IPC irq 0x%x, pin %d\n",
++ dev->irq, dev->pin);
++ err = pci_enable_device(dev);
++ if (err) {
++ dev_err(&dev->dev, "Failed to enable MSRT IPC(%d)\n",
++ err);
++ goto exit;
++ }
++ retval = pci_request_regions(dev, "ipc_mrst");
++ if (retval)
++ dev_err(&dev->dev, "Failed to allocate resource\
++ for MRST IPC(%d)\n", retval);
++
++ init_ipc_driver();
++
++ /* 0 means cmd ID is in use */
++ cmdid_pool = 0xffff;
++ /* initialize mapping */
++ retval = lnw_ipc_set_mapping(dev);
++ if (retval)
++ goto exit;
++ /* clear buffer */
++ for (i = 0; i < LNW_IPC_RWBUF_SIZE; i = i + 4) {
++ lnw_ipc_writel(0, LNW_IPC_WBUF + i);
++ lnw_ipc_writel(0, LNW_IPC_RBUF + i);
++ }
++ retval = request_irq(dev->irq, ipc_irq, IRQF_SHARED,
++ "ipc_mrst", (void *)&ipc_mrst_driver);
++ if (retval) {
++ printk(KERN_ERR "ipc: cannot register ISR %p irq %d ret %d\n",
++ ipc_irq, dev->irq, retval);
++ return -EIO;
++ }
++exit:
++ return 0;
++}
++
++void ipc_mrst_pci_remove(struct pci_dev *pdev)
++{
++ pci_release_regions(pdev);
++}
++
++/* PCI driver selection metadata; PCI hotplugging uses this */
++static const struct pci_device_id pci_ids[] = {
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)}
++};
++
++MODULE_DEVICE_TABLE(pci, pci_ids);
++
++/* pci driver glue; this is a "new style" PCI driver module */
++static struct pci_driver ipc_mrst_pci_driver = {
++ .name = (char *)ipc_name,
++ .id_table = pci_ids,
++ .probe = ipc_mrst_pci_probe,
++ .remove = ipc_mrst_pci_remove,
++};
++
++static int __init ipc_mrst_init(void)
++{
++ int retval = 0;
++ lnw_ipc_dbg("%s\n", __func__);
++ retval = pci_register_driver(&ipc_mrst_pci_driver);
++ if (retval < 0) {
++ printk(KERN_CRIT "Failed to register %s\n",
++ ipc_mrst_pci_driver.name);
++ pci_unregister_driver(&ipc_mrst_pci_driver);
++ } else {
++ printk(KERN_CRIT "****Loaded %s driver version %s****\n",
++ ipc_mrst_pci_driver.name, MRST_IPC_DRIVER_VERSION);
++ cache_mrst_firmware_version();
++ }
++ return retval;
++}
++
++static void __exit ipc_mrst_exit(void)
++{
++ iounmap(p_ipc_base);
++ iounmap(p_i2c_ser_bus);
++ pci_unregister_driver(&ipc_mrst_pci_driver);
++ de_init_ipc_driver();
++}
++
++/*
++ * Steps to read PMIC Register (Pseudocode)
++ * 1) Construct the SCU FW command structure with normal read
++ * 2) Fill the IPC_WBUF with the p_reg_data
++ * 3) write the command to(Memory Mapped address) IPC_CMD register
++ * 4) Wait for an interrupt from SCUFirmware or do a timeout.
++*/
++int ipc_check_status(void)
++{
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: in <%s> -><%s> file line = <%d>\n",
++ __func__, __FILE__, __LINE__);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_check_status);
++
++int ipc_config_cmd(struct ipc_cmd_type cca_cmd, u32 ipc_cmd_len, void *cmd_data)
++{
++
++ union ipc_fw_cmd ipc_cca_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 ipc_wbuf;
++ u8 cbuf[MAX_NUM_ENTRIES] = { '\0' };
++ u32 rbuf_offset = 2;
++ u32 i = 0;
++
++ if ((&cca_cmd == NULL) || (cmd_data == NULL)) {
++ printk(KERN_INFO "Invalid arguments recieved:\
++ <%s> -> <%s> file line = <%d>\n", __func__, __FILE__, __LINE__);
++ return -EBUSY;
++ }
++
++ if (ipc_cmd_len < 4) {
++ printk(KERN_INFO
++ "ipc_send_config: Invalid input param (size) recieved \n");
++ return -EBUSY;
++ }
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
++ __func__, __FILE__, __LINE__);
++
++ switch (cca_cmd.cmd) {
++ case IPC_BATT_CCA_READ:
++ {
++ struct ipc_batt_cca_data *cca_data =
++ (struct ipc_batt_cca_data *)cmd_data;
++
++ lnw_ipc_dbg(KERN_INFO "Recieved IPC_BATT_CCA_READ\n");
++ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
++ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
++ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_READ;
++ ipc_cca_cmd.cmd_parts.size = 0;
++ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
++
++ lnw_ipc_dbg(KERN_INFO "ipc_cca_cmd.cmd_data = 0x%x\n",
++ ipc_cca_cmd.cmd_data);
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++
++ ipc_wbuf =
++ __raw_readl(p_ipc_base + IPC_RBUF);
++ cca_data->cca_val = ipc_wbuf;
++ lnw_ipc_dbg(KERN_INFO
++ "CCA Read at (0x%.8x) = 0x%.8x\n",
++ (u32) (p_ipc_base + IPC_RBUF), ipc_wbuf);
++ break;
++ }
++ case IPC_BATT_CCA_WRITE:
++
++ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
++ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
++ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_WRITE;
++ ipc_cca_cmd.cmd_parts.size = 0;
++ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
++
++ lnw_ipc_dbg(KERN_INFO "ipc_cca_cmd.cmd_data = 0x%x\n",
++ ipc_cca_cmd.cmd_data);
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(cca_cmd.data, ((p_ipc_base + IPC_WBUF) + 4));
++ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++
++ break;
++ case IPC_BATT_GET_PROP:
++ {
++ struct ipc_batt_prop_data *prop_data =
++ (struct ipc_batt_prop_data *)cmd_data;
++
++ lnw_ipc_dbg(KERN_CRIT "Recieved IPC_BATT_GET_PROP\n");
++
++ /* CCA Read Implementation here.*/
++ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
++ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
++ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_GET_PROP;
++ ipc_cca_cmd.cmd_parts.size = 0;
++ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
++
++ lnw_ipc_dbg(KERN_CRIT "ipc_cca_cmd.cmd_data = 0x%x\n",
++ ipc_cca_cmd.cmd_data);
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ if (ipc_cca_cmd.cmd_parts.ioc == 0) {
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ }
++
++ /* On wake-up fill the user buffer with IPC_RBUF data.*/
++ rbuf_offset = 0;
++ if ((ipc_cmd_len < 4) || (ipc_cmd_len > 9)) {
++ lnw_ipc_dbg(KERN_CRIT
++ "ipc_send_config: Invalid input param\
++ (size) recieved \n");
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ if (ipc_cmd_len >= 4) {
++ ipc_wbuf = __raw_readl(p_ipc_base + IPC_RBUF);
++ lnw_ipc_dbg(KERN_CRIT
++ "Read ipc_wbuf at (0x%.8x) = 0x%.8x\n",
++ (u32) (p_ipc_base + IPC_RBUF + rbuf_offset),
++ ipc_wbuf);
++ rbuf_offset += 4;
++ for (i = 0; i < (ipc_cmd_len - 4); i++) {
++ cbuf[i] =
++ __raw_readb((p_ipc_base + IPC_RBUF +
++ rbuf_offset));
++ prop_data->batt_value2[i] = cbuf[i];
++ lnw_ipc_dbg(KERN_CRIT
++ "Read cbuf[%d] at (0x%.8x) = 0x%.8x\n",
++ i,
++ (u32) (p_ipc_base + IPC_RBUF +
++ rbuf_offset), cbuf[i]);
++ rbuf_offset++;
++ }
++
++ }
++
++ break;
++ }
++ default:
++ printk(KERN_CRIT "Recieved unknown option\n");
++ up(&sema_ipc);
++ return -ENODEV;
++ }
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_config_cmd);
++
++int mrst_get_firmware_version(unsigned char *mrst_fw_ver_info)
++{
++ int i = 0;
++ mutex_lock(&mrst_ipc_mutex);
++
++ if (mrst_fw_ver_info == NULL) {
++ WARN_ON(1);
++ return -EINVAL;
++ }
++ for (i = 0; i < 16; i++)
++ mrst_fw_ver_info[i] = fw_ver_data[i];
++
++ mutex_unlock(&mrst_ipc_mutex);
++ return 0;
++}
++EXPORT_SYMBOL(mrst_get_firmware_version);
++
++int init_ipc_driver(void)
++{
++ init_waitqueue_head(&wait);
++
++ sema_init(&sema_ipc, MAX_INSTANCES_ALLOWED);
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver module busy\n");
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ INIT_WORK(&ipc_wq.ipc_work, mrst_pmic_read_handler);
++
++ /* Map the memory of ipc1 PMIC reg base */
++ p_ipc_base = ioremap_nocache(IPC_BASE_ADDRESS, IPC_MAX_ADDRESS);
++ if (p_ipc_base == NULL) {
++ printk(KERN_CRIT
++ "IPC Driver: unable to map the address of IPC 1 \n");
++ up(&sema_ipc);
++ return E_PMIC_MALLOC;
++ }
++
++ printk(KERN_CRIT "p_ipc_base = <0x%.8X>\
++ IPC_BASE_ADDRESS = <0x%.8X>\n", (u32) p_ipc_base, IPC_BASE_ADDRESS);
++ p_i2c_ser_bus = ioremap_nocache(I2C_SER_BUS, I2C_MAX_ADDRESS);
++ if (p_i2c_ser_bus == NULL) {
++ printk(KERN_CRIT
++ "IPC Driver: unable to map the address of IPC 1 \n");
++ up(&sema_ipc);
++ return E_PMIC_MALLOC;
++ }
++
++ printk(KERN_CRIT "p_i2c_ser_bus = <0x%.8X>\
++ I2C_SER_BUS = <0x%.8X>\n", (u32) p_i2c_ser_bus, I2C_SER_BUS);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++
++int de_init_ipc_driver(void)
++{
++ if (down_interruptible(&sema_ipc)) {
++ lnw_ipc_dbg(KERN_CRIT "IPC_Driver module busy\n");
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ lnw_ipc_dbg(KERN_CRIT
++ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
++ __func__, __FILE__, __LINE__);
++ IOUNMAP(p_ipc_base);
++ IOUNMAP(p_i2c_ser_bus);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++
++int ipc_pmic_register_read(struct ipc_pmic_reg_data *p_read_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 *ipc_wbuf;
++ u8 cbuf[IPC_BUF_LEN] = { '\0' };
++ u32 cnt = 0;
++ u32 i = 0;
++ u32 rbuf_offset = 2;
++ u8 temp_value = 0;
++ u64 time_to_wait = 0;
++
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (p_read_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic read\n");
++ return -E_INVALID_PARAM;
++ }
++ if (p_read_reg_data->num_entries > MAX_NUM_ENTRIES) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic read\n");
++ return -E_NUM_ENTRIES_OUT_OF_RANGE;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
++ ipc_cmd.cmd_parts.ioc = p_read_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ;
++ ipc_cmd.cmd_parts.size = 3 * (p_read_reg_data->num_entries);
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* command is set. Fill the IPC_BUF */
++ lnw_ipc_dbg(KERN_INFO "p_read_reg_data->num_entries <0x%X>\n",
++ p_read_reg_data->num_entries);
++
++ lnw_ipc_dbg(KERN_INFO "p_read_reg_data->register_address <0x%X>\n",
++ p_read_reg_data->pmic_reg_data[0].register_address);
++
++ for (i = 0; i < p_read_reg_data->num_entries; i++) {
++ cbuf[cnt] = p_read_reg_data->pmic_reg_data[i].register_address;
++ cbuf[(cnt) + 1] =
++ (p_read_reg_data->pmic_reg_data[i].register_address >> 8);
++ cbuf[(cnt) + 2] = p_read_reg_data->pmic_reg_data[i].value;
++ cnt = cnt + 3;
++ }
++
++ rbuf_offset = 0;
++ for (i = 0; i < p_read_reg_data->num_entries; i++) {
++ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
++ + rbuf_offset));
++ rbuf_offset += 4;
++ if (i >= 3)
++ break;
++ }
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ scu_cmd_completed = FALSE;
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /*wait for 10ms do not tie to kernel timer_ticks*/
++ time_to_wait = msecs_to_jiffies(IPC_TIMEOUT);
++
++ /* Wait for command completion from SCU firmware */
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, time_to_wait);
++
++ if (ipc_cmd.cmd_parts.ioc == 0) {
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "Timeout occured for ioc=0 and SCU is busy%d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ }
++ /* IPC driver expects interrupt when IOC is set to 1.*/
++ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
++ up(&sema_ipc);
++ return E_NO_INTERRUPT_ON_IOC;
++ }
++ rbuf_offset = 2;
++ for (i = 0; i < p_read_reg_data->num_entries; i++) {
++ temp_value = readb((p_ipc_base + IPC_RBUF + rbuf_offset));
++ p_read_reg_data->pmic_reg_data[i].value = temp_value;
++ rbuf_offset += 3;
++ }
++
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_pmic_register_read);
++
++int ipc_pmic_register_write(struct ipc_pmic_reg_data *p_write_reg_data,
++ u8 ipc_blocking_flag)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 *ipc_wbuf;
++ u8 cbuf[IPC_BUF_LEN] = { '\0' };
++ u32 cnt = 0;
++ u32 i = 0;
++ u32 rbuf_offset = 2;
++
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (p_write_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic write\n");
++ return -E_INVALID_PARAM;
++ }
++ if (p_write_reg_data->num_entries > MAX_NUM_ENTRIES) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic write\n");
++ return -E_NUM_ENTRIES_OUT_OF_RANGE;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
++ ipc_cmd.cmd_parts.ioc = p_write_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_WRITE;
++ ipc_cmd.cmd_parts.size = 3 * (p_write_reg_data->num_entries);
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* command is set. Fill the IPC_BUF */
++ lnw_ipc_dbg(KERN_INFO "p_write_reg_data->num_entries 0x%X>\n",
++ p_write_reg_data->num_entries);
++
++ lnw_ipc_dbg(KERN_INFO "p_write_reg_data->register_address 0x%X>\n",
++ p_write_reg_data->pmic_reg_data[0].register_address);
++ for (i = 0; i < p_write_reg_data->num_entries; i++) {
++ cbuf[cnt] = p_write_reg_data->pmic_reg_data[i].register_address;
++ cbuf[(cnt) + 1] =
++ (p_write_reg_data->pmic_reg_data[i].register_address >> 8);
++ cbuf[(cnt) + 2] = p_write_reg_data->pmic_reg_data[i].value;
++ cnt = cnt + 3;
++ }
++
++ rbuf_offset = 0;
++ for (i = 0; i < p_write_reg_data->num_entries; i++) {
++ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
++ + rbuf_offset));
++ rbuf_offset += 4;
++ if (i >= 3)
++ break;
++ }
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_pmic_register_write);
++
++int ipc_pmic_register_read_modify(struct ipc_pmic_mod_reg_data
++ *p_read_mod_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 *ipc_wbuf;
++ u8 cbuf[IPC_BUF_LEN] = { '\0' };
++ u32 cnt = 0;
++ u32 i = 0;
++ u32 rbuf_offset = 2;
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ if (p_read_mod_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input recieved pmic read modify\n");
++ up(&sema_ipc);
++ return -E_INVALID_PARAM;
++ }
++ if (p_read_mod_reg_data->num_entries > MAX_NUM_ENTRIES) {
++ printk(KERN_CRIT "Invalid Input recieved pmic read modify\n");
++ up(&sema_ipc);
++ return -E_NUM_ENTRIES_OUT_OF_RANGE;
++ }
++
++ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
++ ipc_cmd.cmd_parts.ioc = p_read_mod_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ_MODIFY;
++ ipc_cmd.cmd_parts.size = 3 * (p_read_mod_reg_data->num_entries);
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* command is set. Fill the IPC_BUF */
++ lnw_ipc_dbg(KERN_INFO "p_read_mod_reg_data->num_entries <0x%X> \n",
++ p_read_mod_reg_data->num_entries);
++
++ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
++ cbuf[cnt] =
++ p_read_mod_reg_data->pmic_mod_reg_data[i].register_address;
++ cbuf[(cnt) + 1] =
++ (p_read_mod_reg_data->pmic_mod_reg_data[i].
++ register_address >> 8);
++ cbuf[(cnt) + 2] =
++ p_read_mod_reg_data->pmic_mod_reg_data[i].value;
++ cbuf[(cnt) + 3] =
++ p_read_mod_reg_data->pmic_mod_reg_data[i].bit_map;
++ cnt = cnt + 4;
++ }
++
++ rbuf_offset = 0;
++ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
++ __raw_writel(ipc_wbuf[i],
++ ((p_ipc_base + IPC_WBUF) + rbuf_offset));
++ rbuf_offset += 4;
++ if (i >= 3)
++ break;
++ }
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ if (ipc_cmd.cmd_parts.ioc == 0) {
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ }
++
++ /* IPC driver expects interrupt when IOC is set to 1.*/
++ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
++ up(&sema_ipc);
++ return E_NO_INTERRUPT_ON_IOC;
++ }
++
++ /* On wake-up fill the user buffer with IPC_RBUF data.*/
++ rbuf_offset = 0;
++ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
++ ipc_wbuf[i] =
++ __raw_readl((p_ipc_base + IPC_RBUF + rbuf_offset));
++ rbuf_offset += 4;
++ }
++
++ rbuf_offset = 2;
++ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
++ p_read_mod_reg_data->pmic_mod_reg_data[i].value =
++ __raw_readb((p_ipc_base + IPC_RBUF + rbuf_offset));
++ rbuf_offset += 4;
++ }
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_pmic_register_read_modify);
++
++int ipc_pmic_register_read_non_blocking(
++ struct ipc_non_blocking_pmic_read *p_nb_read)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 *ipc_wbuf;
++ u8 cbuf[IPC_BUF_LEN] = { '\0' };
++ u32 cnt = 0;
++ u32 i = 0;
++ u32 rbuf_offset = 2;
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++ if (p_nb_read == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved\
++ in non blocking pmic read\n");
++ up(&sema_ipc);
++ return -E_INVALID_PARAM;
++ }
++ if (p_nb_read->pmic_nb_read.num_entries > MAX_NUM_ENTRIES) {
++ printk(KERN_CRIT "Invalid Number Of Entries\
++ - non blocking pmic read\n");
++ up(&sema_ipc);
++ return -E_NUM_ENTRIES_OUT_OF_RANGE;
++ }
++
++ if (cmd_id >= MAX_NB_BUF_SIZE) {
++ printk(KERN_CRIT "Queue is full!! cannot service request!\n");
++ up(&sema_ipc);
++ return -E_QUEUE_IS_FULL;
++ }
++
++
++ non_blocking_read_flag = TRUE;
++ /*Copy the contents to this global structure for future use*/
++ pmic_read_que[cmd_id] = *(p_nb_read);
++ ipc_wq.cmd_id = cmd_id++;
++ callback = p_nb_read->callback_host;
++ pmic_read_que[cmd_id].callback_host = p_nb_read->callback_host;
++
++ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
++ ipc_cmd.cmd_parts.ioc = 1;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ;
++ ipc_cmd.cmd_parts.size = 3 * (p_nb_read->pmic_nb_read.num_entries);
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* command is set. Fill the IPC_BUF */
++ lnw_ipc_dbg(KERN_INFO "pmic_nb_read.num_entries <0x%X>\n",
++ p_nb_read->pmic_nb_read.num_entries);
++
++ lnw_ipc_dbg(KERN_INFO "pmic_nb_read.register_address <0x%X>\n",
++ p_nb_read->pmic_nb_read.pmic_reg_data[0].register_address);
++
++ for (i = 0; i < p_nb_read->pmic_nb_read.num_entries; i++) {
++ cbuf[cnt] =
++ p_nb_read->pmic_nb_read.pmic_reg_data[i].register_address;
++ cbuf[(cnt) + 1] = (p_nb_read->pmic_nb_read.pmic_reg_data[i]\
++ .register_address >> 8);
++ cbuf[(cnt) + 2] =
++ p_nb_read->pmic_nb_read.pmic_reg_data[i].value;
++ cnt = cnt + 3;
++ }
++ rbuf_offset = 0;
++ for (i = 0; i < p_nb_read->pmic_nb_read.num_entries; i++) {
++ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
++ + rbuf_offset));
++ rbuf_offset += 4;
++ if (i >= 3)
++ break;
++ }
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data = __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++	/*Control returns after issuing the command here*/
++ /*Data is read asynchronously later*/
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_pmic_register_read_non_blocking);
++
++int mrst_ipc_read32(struct ipc_reg_data *p_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++
++ if (p_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved\
++ in mrst_ipc_read32\n");
++ return -E_INVALID_PARAM;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: Address = 0x%.8X\t: Data = 0x%.8X\n",
++ p_reg_data->address, p_reg_data->data);
++
++ ipc_cmd.cmd_parts.cmd = INDIRECT_READ;
++ ipc_cmd.cmd_parts.ioc = p_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = 0x00;
++ ipc_cmd.cmd_parts.size = 4;
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: IPC_CMD-> 0x%.8X\n", ipc_cmd.cmd_data);
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ /*
++ * Write the Address to IPC_SPTR
++ * Issue the command by writing to IPC_CMD
++ * Read the contents of IPC_RBUF to data
++ */
++
++ __raw_writel(p_reg_data->address, (p_ipc_base + IPC_SPTR));
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ if (ipc_cmd.cmd_parts.ioc == 0) {
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ }
++ /* IPC driver expects interrupt when IOC is set to 1.*/
++ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
++ up(&sema_ipc);
++ return E_NO_INTERRUPT_ON_IOC;
++ }
++
++ /* Command completed successfully Read the data */
++ p_reg_data->data =
++ __raw_readl(p_ipc_base + IPC_RBUF);
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: Data Recieved from IPC_RBUF = 0x%.8X\n",
++ p_reg_data->data);
++
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(mrst_ipc_read32);
++
++int mrst_ipc_write32(struct ipc_reg_data *p_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++
++ if (p_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved\
++ in mrst_ipc_write32\n");
++ return -E_INVALID_PARAM;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
++ __func__, __FILE__, __LINE__);
++
++ ipc_cmd.cmd_parts.cmd = INDIRECT_WRITE;
++ ipc_cmd.cmd_parts.ioc = p_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = 0x00;
++ ipc_cmd.cmd_parts.size = 4;
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(p_reg_data->address, (p_ipc_base + IPC_DPTR));
++ __raw_writel(p_reg_data->data, (p_ipc_base + IPC_WBUF));
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(mrst_ipc_write32);
++
++int ipc_set_watchdog(struct watchdog_reg_data *p_watchdog_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ u32 *ipc_wbuf;
++ u8 cbuf[16] = { '\0' };
++ u32 rbuf_offset = 2;
++ u32 retry = MAX_RETRY_CNT;
++ union ipc_sts ipc_sts_reg;
++
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (p_watchdog_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic read\n");
++ return -E_INVALID_PARAM;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ ipc_cmd.cmd_parts.cmd = IPC_SET_WATCHDOG_TIMER;
++ ipc_cmd.cmd_parts.ioc = p_watchdog_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.size = 2;
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ ipc_wbuf[0] = p_watchdog_reg_data->payload1;
++ printk(KERN_INFO "p_watchdog_data->payload1 <0x%X>\n",
++ ipc_wbuf[0]);
++ __raw_writel(ipc_wbuf[0], ((p_ipc_base + IPC_WBUF) + rbuf_offset));
++
++ ipc_wbuf[1] = p_watchdog_reg_data->payload2;
++ lnw_ipc_dbg(KERN_INFO "p_watchdog_data->payload2 <0x%X>\n",
++ ipc_wbuf[1]);
++ __raw_writel(ipc_wbuf[1], ((p_ipc_base + IPC_WBUF) + rbuf_offset));
++
++ lnw_ipc_dbg(KERN_INFO "ipc_cmd.cmd_data is <0x%X>\n",
++ ipc_cmd.cmd_data);
++ /*execute the command by writing to IPC_CMD registers*/
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware and return */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /* IPC driver expects interrupt when IOC is set to 1.*/
++ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
++ up(&sema_ipc);
++ return E_NO_INTERRUPT_ON_IOC;
++ }
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ lnw_ipc_dbg(KERN_CRIT "IPC Command status = 0x%x\n",
++ ipc_sts_reg.ipc_sts_data);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_set_watchdog);
++
++int ipc_program_io_bus_master(struct ipc_io_bus_master_regs *p_reg_data)
++{
++ u32 io_bus_master_cmd = 0;
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ if (p_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved in\
++ <ipc_program_io_bus_master>\n");
++ up(&sema_ipc);
++ return -E_INVALID_PARAM;
++ }
++ printk(KERN_CRIT "p_reg_data->ctrl_reg_addr = 0x%x\n",\
++ p_reg_data->ctrl_reg_addr);
++ printk(KERN_CRIT "p_reg_data->ctrl_reg_data = 0x%x\n",\
++ p_reg_data->ctrl_reg_data);
++
++ /* Read the first byte for command*/
++ io_bus_master_cmd = (p_reg_data->ctrl_reg_addr)&(0xFF000000);
++ io_bus_master_cmd = (io_bus_master_cmd >> 24);
++
++ if (io_bus_master_cmd == NOP_CMD) {
++ printk(KERN_CRIT "NOP_CMD = 0x%x\n", io_bus_master_cmd);
++ } else if (io_bus_master_cmd == READ_CMD) {
++ lnw_ipc_dbg(KERN_CRIT "Address %#xp = data = %#x\n",
++ (unsigned int)(p_i2c_ser_bus + CTRL_REG_ADDR),
++ p_reg_data->ctrl_reg_addr);
++ __raw_writel(p_reg_data->ctrl_reg_addr,
++ (p_i2c_ser_bus + CTRL_REG_ADDR));
++ udelay(1000);/*Write Not getting updated without delay*/
++ p_reg_data->ctrl_reg_data =
++ __raw_readl(p_i2c_ser_bus + CTRL_REG_DATA);
++ lnw_ipc_dbg(KERN_CRIT "Data = %#x\n",
++ p_reg_data->ctrl_reg_data);
++ } else if (io_bus_master_cmd == WRITE_CMD) {
++ printk(KERN_CRIT"WRITE_CMD = 0x%x\n", io_bus_master_cmd);
++
++ __raw_writel(p_reg_data->ctrl_reg_data,
++ (p_i2c_ser_bus + CTRL_REG_DATA));
++ udelay(1000);
++ __raw_writel(p_reg_data->ctrl_reg_addr,
++ (p_i2c_ser_bus + CTRL_REG_ADDR));
++ } else {
++ printk(KERN_CRIT "in INVALID_CMD = 0x%x\n", io_bus_master_cmd);
++ up(&sema_ipc);
++ return -E_INVALID_CMD;
++ }
++ up(&sema_ipc);
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_program_io_bus_master);
++
++/*Work QUEUE Handler function:
++ *This function gets invoked by queue.
++ */
++static void mrst_pmic_read_handler(struct work_struct *work)
++{
++ static int i;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 rbuf_offset = 2;
++
++ u8 pmic_data = 0;
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver non-blocking read handler\n");
++ } else {
++ non_blocking_read_flag = FALSE;
++ pmic_data = __raw_readb((p_ipc_base + IPC_RBUF + 2));
++
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ pmic_data = -1 /*Invalid data*/;
++ } else {
++ rbuf_offset = 2;
++ cmd_id--;
++ for (i = 0; i < pmic_read_que[cmd_id].
++ pmic_nb_read.num_entries; i++) {
++ pmic_read_que[cmd_id].pmic_nb_read.
++ pmic_reg_data[i].value =
++ __raw_readb((p_ipc_base + IPC_RBUF
++ + rbuf_offset));
++ rbuf_offset += 3;
++ }
++ }
++ }
++ up(&sema_ipc);
++ /*Call the call-back function.
++ *The host driver is responsible for reading valid data.
++ */
++ pmic_read_que[cmd_id].callback_host(pmic_read_que[cmd_id].pmic_nb_read,
++ pmic_read_que[cmd_id].context);
++}
++
++
++/**
++ * int ipc_device_fw_upgrade() - API to upgrade the Integrated Firmware Image
++ * for Intel(R) Moorestown platform.
++ * @u8 *mrst_fw_buf: Command data.
++ * @u32 mrst_fw_buf_len: length of the command to be sent.
++ *
++ * This function provides an interface to send an IPC coulomb counter
++ * command to SCU Firmware and receive a response. This is used by the
++ * PMIC battery driver on Moorestown platform.
++ */
++int ipc_device_fw_upgrade(u8 *mrst_fw_buf, u32 mrst_fw_buf_len)
++{
++ union ipc_fw_cmd ipc_dfu_cmd;
++ void __iomem *p_tmp_fw_base;
++ int retry_cnt = 0;
++
++ MailBox_t *pMailBox = NULL;
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_ERR "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ /* Map the memory of ipc1 PMIC reg base */
++ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, MIP_HEADER_SIZE);
++ p_tmp_fw_base = p_dfu_fw_base;
++ if (p_dfu_fw_base == NULL) {
++ up(&sema_ipc);
++ return E_PMIC_MALLOC;
++ }
++ p_dfu_mailbox_base = ioremap_nocache(DFU_MAILBOX_ADDR,
++ sizeof(MailBox_t));
++ if (p_dfu_mailbox_base == NULL) {
++ up(&sema_ipc);
++ return E_PMIC_MALLOC;
++ }
++
++ pMailBox = (MailBox_t*)p_dfu_mailbox_base;
++
++ ipc_dfu_cmd.cmd_data = FW_UPGRADE_READY_CMD;
++ writel(ipc_dfu_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /*IA initializes both IAFlag and SCUFlag to zero*/
++ pMailBox->SCUFlag = 0;
++ pMailBox->IAFlag = 0;
++
++ /*IA copies the 2KB MIP header to SRAM at 0xFFFC0000*/
++ memcpy((u8*)(p_dfu_fw_base), mrst_fw_buf, 0x800);
++ iounmap(p_tmp_fw_base);
++
++ /* IA sends "FW Update" IPC command (CMD_ID 0xFE; MSG_ID 0x02).
++ * Upon receiving this command, SCU will write the 2K MIP header
++ * from 0xFFFC0000 into NAND.
++ * SCU will write a status code into the Mailbox, and then set SCUFlag.
++ */
++
++ ipc_dfu_cmd.cmd_data = FW_UPGRADE_GO_CMD;
++ writel(ipc_dfu_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /*IA stalls until SCUFlag is set */
++ while (pMailBox->SCUFlag != 1)
++ udelay(100);
++
++ /* IA checks Mailbox status.
++ * If the status is 'BADN', then abort (bad NAND).
++ * If the status is 'TxLO', then continue.
++ */
++ while (pMailBox->Mailbox != TxLO)
++ udelay(10000);
++ udelay(10000);
++
++update_retry:
++ if (retry_cnt > 5)
++ goto exit_function;
++
++ if (pMailBox->Mailbox == TxLO) {
++ /* Map the memory of ipc1 PMIC reg base */
++ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, (128*1024));
++ p_tmp_fw_base = p_dfu_fw_base;
++ if (p_dfu_fw_base == NULL) {
++ up(&sema_ipc);
++ iounmap(p_dfu_mailbox_base);
++ return E_PMIC_MALLOC;
++ }
++
++ mrst_fw_buf = mrst_fw_buf+0x800;
++ memcpy((u8 *)(p_dfu_fw_base), mrst_fw_buf, 0x20000);
++ pMailBox->IAFlag = 0x1;
++ while (pMailBox->SCUFlag == 1)
++ udelay(100);
++
++ /* check for 'BADN' */
++ if (pMailBox->Mailbox == BADN) {
++ up(&sema_ipc);
++ iounmap(p_tmp_fw_base);
++ iounmap(p_dfu_mailbox_base);
++ return -1;
++ }
++
++ iounmap(p_tmp_fw_base);
++ } else {
++ up(&sema_ipc);
++ iounmap(p_dfu_mailbox_base);
++ return -1;
++ }
++
++ while (pMailBox->Mailbox != TxHI)
++ udelay(10000);
++ udelay(10000);
++
++ if (pMailBox->Mailbox == TxHI) {
++ /* Map the memory of ipc1 PMIC reg base */
++ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, (128*1024));
++ p_tmp_fw_base = p_dfu_fw_base;
++ if (p_dfu_fw_base == NULL) {
++ up(&sema_ipc);
++ iounmap(p_dfu_mailbox_base);
++ return E_PMIC_MALLOC;
++ }
++
++ mrst_fw_buf = mrst_fw_buf+0x20000;
++ memcpy((u8 *)(p_dfu_fw_base), mrst_fw_buf, 0x20000);
++ pMailBox->IAFlag = 0;
++ while (pMailBox->SCUFlag == 0)
++ udelay(100);
++
++ /* check for 'BADN' */
++ if (pMailBox->Mailbox == BADN) {
++ up(&sema_ipc);
++ iounmap(p_tmp_fw_base);
++ iounmap(p_dfu_mailbox_base);
++ return -1;
++ }
++
++ iounmap(p_tmp_fw_base);
++ } else {
++ up(&sema_ipc);
++ iounmap(p_dfu_mailbox_base);
++ return -1;
++ }
++
++ if (pMailBox->Mailbox == TxLO) {
++ ++retry_cnt;
++ goto update_retry;
++ }
++
++ if (pMailBox->Mailbox == DONE)
++ printk(KERN_INFO "Firmware update completed!\n");
++
++exit_function:
++ iounmap(p_dfu_mailbox_base);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_device_fw_upgrade);
++
++static int cache_mrst_firmware_version(void)
++{
++ union ipc_sts ipc_sts_reg;
++ int i = 0;
++
++ mutex_lock(&mrst_ipc_mutex);
++
++ /*execute the command by writing to IPC_CMD registers*/
++ writel(IPC_GET_FW_VERSION, (p_ipc_base + IPC_CMD));
++ udelay(1000);
++
++ ipc_sts_reg.ipc_sts_data = readl(p_ipc_base + IPC_STS);
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_ERR "IPC GetSCUFW Version Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_ERR "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ for (i = 0; i < 16 ; i++)
++ fw_ver_data[i] = readb(p_ipc_base + IPC_RBUF + i);
++ mutex_unlock(&mrst_ipc_mutex);
++ return 0;
++}
++
++MODULE_AUTHOR("Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>");
++MODULE_DESCRIPTION("Intel Moorestown IPC driver");
++MODULE_LICENSE("GPL");
++
++module_init(ipc_mrst_init);
++module_exit(ipc_mrst_exit);
+Index: linux-2.6.33/arch/x86/kernel/ipc_mrst.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/ipc_mrst.h
+@@ -0,0 +1,241 @@
++/*
++ * ipc_mrst.h: Driver for Langwell IPC1
++ *
++ * (C) Copyright 2008 Intel Corporation
++ * Author: Sreenidhi Gurudatt (sreenidhi.b.gurudatt@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * Langwell provides two IPC units to communicate with IA host. IPC1 is
++ * dedicated to the IA. IPC commands result in an LNW SCU interrupt. The
++ * initial implementation of this driver is platform specific. It will be
++ * converted to a PCI driver once SCU FW is in place.
++ */
++#ifndef __IPC_MRST_H__
++#define __IPC_MRST_H__
++
++#include <linux/interrupt.h>
++#include <linux/workqueue.h>
++
++#define MRST_IPC_DRIVER_VERSION "0.01.004"
++#define IPC_TIMEOUT 10 /*in msecs*/
++#define MAX_RETRY_CNT 10
++#define MAX_NB_BUF_SIZE 100
++#define IPC_BUF_LEN 16
++#define MAX_NUM_ENTRIES 5
++#define USLEEP_STS_TIMEOUT 10
++
++#define LNW_IPC1_BASE 0xff11c000
++#define LNW_IPC1_MMAP_SIZE 1024
++
++#define LNW_IPC1
++#define LNW_IPC_CMD 0x00
++#define LNW_IPC_STS 0x04
++#define LNW_IPC_DPTR 0x08
++#define LNW_IPC_WBUF 0x80
++#define LNW_IPC_RBUF 0x90
++#define LNW_IPC_RWBUF_SIZE 16
++
++/* IPC status register layout */
++#define LNW_IPC_STS_BUSY (1<<0)
++#define LNW_IPC_STS_ERR (1<<1)
++#define LNW_IPC_STS_CMDID (0xF<<4)
++#define LNW_IPC_STS_INITID (0xFF<<8)
++#define LNW_IPC_STS_ERR_CODE (0xFF<<16)
++
++/* IPC command register layout */
++#define LNW_IPC_CMD_CMD (0xFF<<0)
++#define LNW_IPC_CMD_MSI (1<<8)
++#define LNW_IPC_CMD_ID (0xF<<12)
++#define LNW_IPC_CMD_SIZE (0xFF<<16)
++
++#define FW_UPGRADE_READY_CMD 0x10FE
++#define FW_UPGRADE_GO_CMD 0x20FE
++#define DFU_MAILBOX_ADDR 0xFFFFDFF4
++#define IPC_CMD_GO_TO_DFU_MODE 0x0001
++#define IPC_CMD_UPDATE_FW 0x0002
++#define IPC_CMD_FORCE_UPDATE_FW 0x0003
++
++/*256K storage size for loading the FW image.*/
++#define MAX_FW_SIZE 262144
++#define MIP_HEADER_SIZE 2048
++#define DONE 0x444f4e45
++#define BADN 0x4241444E
++#define TxHI 0x54784849
++#define TxLO 0x54784c4f
++
++typedef struct {
++ volatile unsigned int Mailbox;
++ volatile unsigned int SCUFlag;
++ volatile unsigned int IAFlag;
++} MailBox_t;
++
++enum IPC_CMD {
++ NORMAL_WRITE, /*0x00 Normal Write */
++ MSG_WRITE, /*0x01 Message Write */
++ INDIRECT_READ, /*0x02 Indirect Read */
++ RSVD, /*0x03 Reserved */
++ READ_DMA, /*0x04 Read DMA */
++ INDIRECT_WRITE, /*0x05 Indirect write */
++};
++
++int lnw_ipc_send_cmd(unsigned char cmd, int size, int msi);
++
++struct ipc_driver {
++ const char *name;
++ irqreturn_t(*irq) (int irq, void *ipc);
++ int flags;
++};
++
++/*
++ * defines specific to ipc_driver and
++ * not exposed outside
++ */
++
++/*cmd_ID fields for CCA Read/Writes*/
++
++#define CCA_REG_WRITE 0x0000
++#define CCA_REG_READ 0x0001
++#define CCA_REG_GET_PROP 0x0002
++
++#define IPC_SET_WATCHDOG_TIMER 0xF8
++#define IPC_CCA_CMD_READ_WRITE 0xEF
++#define IPC_DEVICE_FIRMWARE_UPGRADE 0xFE
++#define IPC_PMIC_CMD_READ_WRITE 0xFF
++#define IPC_GET_FW_VERSION 0xF4
++
++/*cmd_ID fields for CCA Read/Writes*/
++#define PMIC_REG_WRITE 0x0000
++#define PMIC_REG_READ 0x0001
++#define PMIC_REG_READ_MODIFY 0x0002
++#define LPE_READ 0x0003
++#define LPE_WRITE 0x0004
++
++#define IPC_CMD_GO_TO_DFU_MODE 0x0001
++#define IPC_CMD_UPDATE_FW 0x0002
++#define IPC_CMD_FORCE_UPDATE_FW 0x0003
++
++#define NORMAL_WRITE 0x00
++#define MESSAGE_WRITE 0x01
++#define INDIRECT_READ 0x02
++#define INDIRECT_WRITE 0x05
++#define READ_DMA 0x04
++
++
++/* Used to override user option */
++#define IOC 1
++
++#define IPC_REG_ISR_FAILED 0xFF
++
++/*
++ * IO remap functions for PMIC Register reads
++ * and writes.
++ */
++
++#ifdef UNIT_TEST
++#define IOREMAP(x, y) \
++ kmalloc((y), GFP_KERNEL);
++
++#define IOUNMAP(x) \
++ kfree((x));
++
++#define IOREAD32(x) \
++ *(u32 *) (x);
++
++#define IOWRITE32(x, y) \
++ *(u32 *) (y) = x;
++#else
++
++#define IOREMAP(x, y) \
++ ioremap_nocache((x), (y));
++
++#define IOUNMAP(x) \
++ iounmap((x));
++
++#define IOREAD32(x) \
++ ioread32((x));
++
++#define IOWRITE32(x, y) \
++ iowrite32((x), (y));
++
++#endif
++
++/*********************************************
++ * Define IPC_Base_Address and offsets
++ ********************************************/
++#define IPC_BASE_ADDRESS 0xFF11C000
++#define I2C_SER_BUS 0xFF12B000
++#define DFU_LOAD_ADDR 0xFFFC0000
++/*256K storage size for loading the FW image.*/
++#define MAX_FW_SIZE 262144
++
++#define NOP_CMD 0x00
++#define WRITE_CMD 0x01
++#define READ_CMD 0x02
++
++/* IPC2 offset addresses */
++#define IPC_MAX_ADDRESS 0x100
++/* I2C offset addresses - Confirm this */
++#define I2C_MAX_ADDRESS 0x10
++/* Offsets for CTRL_REG_ADDR and CTRL_REG_DATA */
++#define CTRL_REG_ADDR 0x00
++#define CTRL_REG_DATA 0x04
++#define I2C_MAX_ADDRESS 0x10
++
++#define IPC_CMD 0x00
++#define IPC_STS 0x04
++#define IPC_SPTR 0x08
++#define IPC_DPTR 0x0C
++#define IPC_WBUF 0x80
++#define IPC_RBUF 0x90
++
++#define MAX_INSTANCES_ALLOWED 1
++
++union ipc_sts {
++ struct {
++ u32 busy:1;
++ u32 error:1;
++ u32 rfu1:2;
++ u32 cmd_id:4;
++ u32 initiator_id:8;
++ u32 error_code:8;
++ u32 rfu3:8;
++ } ipc_sts_parts;
++ u32 ipc_sts_data;
++};
++
++union ipc_fw_cmd {
++ struct {
++ u32 cmd:8;
++ u32 ioc:1;
++ u32 rfu1:3;
++ u32 cmd_ID:4;
++ u32 size:8;
++ u32 rfu2:8;
++ } cmd_parts;
++ u32 cmd_data;
++};
++
++struct ipc_intr {
++ u8 cmd;
++ u32 data;
++
++};
++
++struct ipc_work_struct{
++ struct work_struct ipc_work;
++ unsigned int cmd_id;
++};
++
++int ipc_process_interrupt(struct ipc_intr intr_data);
++int init_ipc_driver(void);
++int de_init_ipc_driver(void);
++static int cache_mrst_firmware_version(void);
++static void mrst_pmic_read_handler(struct work_struct *work);
++static DECLARE_DELAYED_WORK(mrst_ipc, mrst_pmic_read_handler);
++
++#endif
+Index: linux-2.6.33/drivers/input/keyboard/gpio_keys.c
+===================================================================
+--- linux-2.6.33.orig/drivers/input/keyboard/gpio_keys.c
++++ linux-2.6.33/drivers/input/keyboard/gpio_keys.c
+@@ -45,6 +45,9 @@ static void gpio_keys_report_event(struc
+ int state = (gpio_get_value(button->gpio) ? 1 : 0) ^ button->active_low;
+
+ input_event(input, type, button->code, !!state);
++	/* if the button has auto-repeat disabled, emit an immediate release */
++ if (state && test_bit(EV_REP, input->evbit) && button->norep)
++ input_event(input, type, button->code, 0);
+ input_sync(input);
+ }
+
+Index: linux-2.6.33/include/linux/gpio_keys.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/gpio_keys.h
++++ linux-2.6.33/include/linux/gpio_keys.h
+@@ -10,6 +10,7 @@ struct gpio_keys_button {
+ int type; /* input event type (EV_KEY, EV_SW) */
+ int wakeup; /* configure the button as a wake-up source */
+ int debounce_interval; /* debounce ticks interval in msecs */
++ unsigned int norep:1; /* more precise auto repeat control */
+ };
+
+ struct gpio_keys_platform_data {
+Index: linux-2.6.33/drivers/gpio/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/Kconfig
++++ linux-2.6.33/drivers/gpio/Kconfig
+@@ -224,6 +224,12 @@ config GPIO_TIMBERDALE
+
+ comment "SPI GPIO expanders:"
+
++config GPIO_LANGWELL_PMIC
++ bool "Intel Moorestown Platform Langwell GPIO support"
++ depends on SPI_MASTER
++ help
++ Say Y here to support Intel Moorestown platform GPIO.
++
+ config GPIO_MAX7301
+ tristate "Maxim MAX7301 GPIO expander"
+ depends on SPI_MASTER
+Index: linux-2.6.33/drivers/gpio/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/Makefile
++++ linux-2.6.33/drivers/gpio/Makefile
+@@ -7,6 +7,7 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o
+ obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
+ obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
+ obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
++obj-$(CONFIG_GPIO_LANGWELL_PMIC) += langwell_pmic_gpio.o
+ obj-$(CONFIG_GPIO_MAX7301) += max7301.o
+ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
+ obj-$(CONFIG_GPIO_MC33880) += mc33880.o
+Index: linux-2.6.33/drivers/gpio/langwell_pmic_gpio.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/gpio/langwell_pmic_gpio.c
+@@ -0,0 +1,331 @@
++/* Moorestown PMIC GPIO (access through SPI and IPC) driver
++ * Copyright (c) 2008 - 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Moorestown platform pmic chip
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/stddef.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/device.h>
++#include <linux/pci.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/langwell_pmic_gpio.h>
++#include <linux/gpio.h>
++#include <asm/ipc_defs.h>
++
++/* register offset that IPC driver should use
++ * 8 GPIO + 8 GPOSW + 8GPO
++ */
++enum pmic_gpio_register {
++ GPIO0 = 0xE0,
++ GPIO7 = 0xE7,
++ GPIOINT = 0xE8,
++ GPOSWCTL0 = 0xEC,
++ GPOSWCTL5 = 0xF1,
++ GPO = 0xF4,
++};
++
++/* bits definitions for GPIO & GPOSW */
++#define GPIO_DRV 0x01
++#define GPIO_DIR 0x02
++#define GPIO_DIN 0x04
++#define GPIO_DOU 0x08
++#define GPIO_INTCTL 0x30
++#define GPIO_DBC 0xc0
++
++#define GPOSW_DRV 0x01
++#define GPOSW_DOU 0x08
++#define GPOSW_RDRV 0x30
++
++/* to schedule ipc read_modify in work queue for irq context */
++#define MAX_IPC_QUEUE 16
++struct ipc_cmd_queue {
++ struct ipc_pmic_mod_reg_data cmd[MAX_IPC_QUEUE];
++ struct work_struct work;
++};
++
++struct pmic_gpio {
++ struct gpio_chip chip;
++ struct ipc_cmd_queue cmd_queue;
++ void *gpiointr;
++ int irq;
++ struct spi_device *spi;
++ unsigned irq_base;
++};
++
++static int ipc_read_char(u16 offset)
++{
++ struct ipc_pmic_reg_data tmp;
++ tmp.ioc = 0;
++ tmp.pmic_reg_data[0].register_address = offset;
++ tmp.num_entries = 1;
++ if (ipc_pmic_register_read(&tmp)) {
++ printk(KERN_ERR "%s: IPC read error\n", __func__);
++ return 0;
++ }
++ return tmp.pmic_reg_data[0].value;
++}
++
++static int ipc_modify_char(u16 offset, u8 value, u8 mask)
++{
++ struct ipc_pmic_mod_reg_data tmp;
++
++ tmp.ioc = 0;
++ tmp.pmic_mod_reg_data[0].register_address = offset;
++ tmp.pmic_mod_reg_data[0].value = value;
++ tmp.pmic_mod_reg_data[0].bit_map = mask;
++ tmp.num_entries = 1;
++ return ipc_pmic_register_read_modify(&tmp);
++}
++
++static int queue_ipc_modify_char(struct pmic_gpio *pg,
++ u16 offset, u8 value, u8 mask)
++{
++ struct ipc_pmic_mod_reg_data *tmp;
++ int i;
++
++ for (i = 0; i < MAX_IPC_QUEUE; i ++) {
++ tmp = &pg->cmd_queue.cmd[i];
++ if (tmp->num_entries)
++ continue;
++ tmp->ioc = 0;
++ tmp->pmic_mod_reg_data[0].register_address = offset;
++ tmp->pmic_mod_reg_data[0].value = value;
++ tmp->pmic_mod_reg_data[0].bit_map = mask;
++ tmp->num_entries=1;
++ return i;
++ }
++ return -1;
++}
++
++static void ipc_modify_char_work(struct work_struct *work)
++{
++ struct pmic_gpio *pg =
++ container_of(work, struct pmic_gpio, cmd_queue.work);
++ struct ipc_pmic_mod_reg_data *tmp;
++ int i;
++
++ for (i = 0; i < MAX_IPC_QUEUE; i ++) {
++ tmp = &pg->cmd_queue.cmd[i];
++ if (tmp->num_entries) {
++ ipc_pmic_register_read_modify(tmp);
++ tmp->num_entries = 0;
++ }
++ }
++}
++
++static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
++{
++ if (offset > 8) {
++ printk(KERN_ERR
++ "%s: only pin 0-7 support input\n", __func__);
++ return -1;/* we only have 8 GPIO can use as input */
++ }
++ return ipc_modify_char(GPIO0 + offset, GPIO_DIR, GPIO_DIR);
++}
++
++static int pmic_gpio_direction_output(struct gpio_chip *chip,
++ unsigned offset, int value)
++{
++ int rc = 0;
++
++ if (offset < 8)/* it is GPIO */
++ rc = ipc_modify_char(GPIO0 + offset,
++ GPIO_DRV | (value ? GPIO_DOU : 0),
++ GPIO_DRV | GPIO_DOU | GPIO_DIR);
++ else if (offset < 16)/* it is GPOSW */
++ rc = ipc_modify_char(GPOSWCTL0 + offset - 8,
++ GPOSW_DRV | (value ? GPOSW_DOU : 0),
++ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
++ else if (offset < 24)/* it is GPO */
++ rc = ipc_modify_char(GPO, value ? 1 << (offset - 16) : 0,
++ 1 << (offset - 16));
++
++ return rc;
++}
++
++static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
++{
++ /* we only have 8 GPIO can use as input */
++ if (offset > 8) {
++ printk(KERN_ERR
++ "%s: only pin 0-7 support input\n", __func__);
++ return -1;
++ }
++ return ipc_read_char(GPIO0 + offset) & GPIO_DIN;
++}
++
++static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
++{
++ if (offset < 8)/* it is GPIO */
++ ipc_modify_char(GPIO0 + offset,
++ GPIO_DRV | (value ? GPIO_DOU : 0),
++ GPIO_DRV | GPIO_DOU);
++ else if (offset < 16)/* it is GPOSW */
++ ipc_modify_char(GPOSWCTL0 + offset - 8,
++ GPOSW_DRV | (value ? GPOSW_DOU : 0),
++ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
++ else if (offset < 24)/* it is GPO */
++ ipc_modify_char(GPO, value ? 1 << (offset - 16) : 0,
++ 1 << (offset - 16));
++}
++
++static int pmic_irq_type(unsigned irq, unsigned type)
++{
++ struct pmic_gpio *pg = get_irq_chip_data(irq);
++ u32 gpio = irq - pg->irq_base;
++
++ if (gpio < 0 || gpio > pg->chip.ngpio)
++ return -EINVAL;
++
++ if (type & IRQ_TYPE_EDGE_RISING)
++ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x20, 0x20);
++ else
++ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x00, 0x20);
++
++ if (type & IRQ_TYPE_EDGE_FALLING)
++ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x10, 0x10);
++ else
++ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x00, 0x10);
++
++ schedule_work(&pg->cmd_queue.work);
++ return 0;
++};
++
++static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
++{
++ struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
++
++ return pg->irq_base + offset;
++}
++
++/* the gpiointr register is read-clear, so just do nothing. */
++static void pmic_irq_unmask(unsigned irq)
++{
++};
++
++static void pmic_irq_mask(unsigned irq)
++{
++};
++
++static struct irq_chip pmic_irqchip = {
++ .name = "PMIC-GPIO",
++ .mask = pmic_irq_mask,
++ .unmask = pmic_irq_unmask,
++ .set_type = pmic_irq_type,
++};
++
++static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
++{
++ struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
++ u8 intsts = *((u8 *)pg->gpiointr + 4);
++ int gpio;
++
++ for (gpio = 0; gpio < 8; gpio++) {
++ if (intsts & (1 << gpio)) {
++ pr_debug("pmic pin %d triggered\n", gpio);
++ generic_handle_irq(pg->irq_base + gpio);
++ }
++ }
++ desc->chip->eoi(irq);
++}
++
++static int __devinit pmic_gpio_probe(struct spi_device *spi)
++{
++ struct pmic_gpio *pg;
++ struct langwell_pmic_gpio_platform_data *pdata;
++ int retval;
++ int i;
++
++ printk(KERN_INFO "%s: PMIC GPIO driver loaded.\n", __func__);
++
++ pdata = spi->dev.platform_data;
++ if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
++ dev_dbg(&spi->dev, "incorrect or missing platform data\n");
++ return -EINVAL;
++ }
++
++ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
++ if (!pg)
++ return -ENOMEM;
++
++ dev_set_drvdata(&spi->dev, pg);
++
++ pg->irq = spi->irq;
++ /* setting up SRAM mapping for GPIOINT register */
++ pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
++ if (!pg->gpiointr) {
++ printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__);
++ retval = -EINVAL;
++ goto err2;
++ }
++ pg->irq_base = pdata->irq_base;
++ pg->chip.label = "langwell_pmic";
++ pg->chip.direction_input = pmic_gpio_direction_input;
++ pg->chip.direction_output = pmic_gpio_direction_output;
++ pg->chip.get = pmic_gpio_get;
++ pg->chip.set = pmic_gpio_set;
++ pg->chip.to_irq = pmic_gpio_to_irq;
++ pg->chip.base = pdata->gpio_base;
++ pg->chip.ngpio = 24;
++ pg->chip.can_sleep = 1;
++ pg->chip.dev = &spi->dev;
++ retval = gpiochip_add(&pg->chip);
++ if (retval) {
++ printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
++ goto err;
++ }
++ set_irq_data(pg->irq, pg);
++ set_irq_chained_handler(pg->irq, pmic_irq_handler);
++ for (i = 0; i < 8; i++) {
++ set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
++ handle_simple_irq, "demux");
++ set_irq_chip_data(i + pg->irq_base, pg);
++ }
++ INIT_WORK(&pg->cmd_queue.work, ipc_modify_char_work);
++ return 0;
++err:
++ iounmap(pg->gpiointr);
++err2:
++ kfree(pg);
++ return retval;
++}
++
++static struct spi_driver pmic_gpio_driver = {
++ .driver = {
++ .name = "pmic_gpio",
++ .owner = THIS_MODULE,
++ },
++ .probe = pmic_gpio_probe,
++};
++
++static int __init pmic_gpio_init(void)
++{
++ return spi_register_driver(&pmic_gpio_driver);
++}
++
++/* register after spi postcore initcall and before
++ * subsys initcalls that may rely on these GPIOs
++ */
++subsys_initcall(pmic_gpio_init);
+Index: linux-2.6.33/include/linux/spi/langwell_pmic_gpio.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/spi/langwell_pmic_gpio.h
+@@ -0,0 +1,15 @@
++#ifndef LINUX_SPI_LANGWELL_PMIC_H
++#define LINUX_SPI_LANGWELL_PMIC_H
++
++struct langwell_pmic_gpio_platform_data {
++ /* the first IRQ of the chip */
++ unsigned irq_base;
++ /* number assigned to the first GPIO */
++ unsigned gpio_base;
++ /* sram address for gpiointr register, the langwell chip will map
++ * the PMIC spi GPIO expander's GPIOINTR register in sram.
++ */
++ unsigned gpiointr;
++};
++
++#endif
+Index: linux-2.6.33/drivers/gpio/pca953x.c
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/pca953x.c
++++ linux-2.6.33/drivers/gpio/pca953x.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/gpio.h>
++#include <linux/interrupt.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c/pca953x.h>
+ #ifdef CONFIG_OF_GPIO
+@@ -50,6 +51,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
+
+ struct pca953x_chip {
+ unsigned gpio_start;
++ unsigned irq_base;
+ uint16_t reg_output;
+ uint16_t reg_direction;
+
+@@ -182,6 +184,13 @@ static void pca953x_gpio_set_value(struc
+ chip->reg_output = reg_val;
+ }
+
++static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
++{
++ struct pca953x_chip *chip = container_of(gc, struct pca953x_chip,
++ gpio_chip);
++ return chip->irq_base + offset;
++}
++
+ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
+ {
+ struct gpio_chip *gc;
+@@ -192,6 +201,7 @@ static void pca953x_setup_gpio(struct pc
+ gc->direction_output = pca953x_gpio_direction_output;
+ gc->get = pca953x_gpio_get_value;
+ gc->set = pca953x_gpio_set_value;
++ gc->to_irq = pca953x_gpio_to_irq;
+ gc->can_sleep = 1;
+
+ gc->base = chip->gpio_start;
+@@ -250,6 +260,39 @@ pca953x_get_alt_pdata(struct i2c_client
+ }
+ #endif
+
++static void pca953x_irq_unmask(unsigned irq)
++{
++}
++
++static void pca953x_irq_mask(unsigned irq)
++{
++}
++
++static struct irq_chip pca953x_irqchip = {
++ .name = "pca953x",
++ .mask = pca953x_irq_mask,
++ .unmask = pca953x_irq_unmask,
++};
++
++static void pca953x_irq_handler(unsigned irq, struct irq_desc *desc)
++{
++ struct pca953x_chip *chip = (struct pca953x_chip *)get_irq_data(irq);
++ int i;
++
++ if (desc->chip->ack)
++ desc->chip->ack(irq);
++ /* we must call all sub-irqs, since there is no way to read
++ * I2C gpio expander's status in irq context. The driver itself
++	 * would be responsible for checking whether the irq is meant for it.
++ */
++ for (i = 0; i < chip->gpio_chip.ngpio; i++)
++ if (chip->reg_direction & (1u << i))
++ generic_handle_irq(chip->irq_base + i);
++
++ if (desc->chip->unmask)
++ desc->chip->unmask(irq);
++}
++
+ static int __devinit pca953x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+@@ -283,6 +326,8 @@ static int __devinit pca953x_probe(struc
+
+ chip->names = pdata->names;
+
++ chip->irq_base = pdata->irq_base;
++
+ /* initialize cached registers from their original values.
+ * we can't share this chip with another i2c master.
+ */
+@@ -314,6 +359,21 @@ static int __devinit pca953x_probe(struc
+ }
+
+ i2c_set_clientdata(client, chip);
++
++ if (chip->irq_base != (unsigned)-1) {
++ int i;
++
++ set_irq_type(client->irq,
++ IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING);
++ set_irq_data(client->irq, chip);
++ for (i = 0; i < chip->gpio_chip.ngpio; i++) {
++ set_irq_chip(i + chip->irq_base, &pca953x_irqchip);
++ __set_irq_handler(i + chip->irq_base,
++ handle_simple_irq, 0, "demux");
++ set_irq_chip_data(i + chip->irq_base, chip);
++ }
++ set_irq_chained_handler(client->irq, pca953x_irq_handler);
++ }
+ return 0;
+
+ out_failed:
+Index: linux-2.6.33/include/linux/i2c/pca953x.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/i2c/pca953x.h
++++ linux-2.6.33/include/linux/i2c/pca953x.h
+@@ -1,6 +1,8 @@
+ /* platform data for the PCA9539 16-bit I/O expander driver */
+
+ struct pca953x_platform_data {
++ /* number of the first IRQ */
++ unsigned irq_base;
+ /* number of the first GPIO */
+ unsigned gpio_base;
+
+Index: linux-2.6.33/drivers/input/keyboard/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/input/keyboard/Kconfig
++++ linux-2.6.33/drivers/input/keyboard/Kconfig
+@@ -73,7 +73,7 @@ config KEYBOARD_ATKBD
+ default y
+ select SERIO
+ select SERIO_LIBPS2
+- select SERIO_I8042 if X86
++ select SERIO_I8042 if X86 && !X86_MRST
+ select SERIO_GSCPS2 if GSC
+ help
+ Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
+Index: linux-2.6.33/drivers/input/mouse/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/input/mouse/Kconfig
++++ linux-2.6.33/drivers/input/mouse/Kconfig
+@@ -17,7 +17,7 @@ config MOUSE_PS2
+ default y
+ select SERIO
+ select SERIO_LIBPS2
+- select SERIO_I8042 if X86
++ select SERIO_I8042 if X86 && !X86_MRST
+ select SERIO_GSCPS2 if GSC
+ help
+ Say Y here if you have a PS/2 mouse connected to your system. This
+Index: linux-2.6.33/kernel/time/tick-broadcast.c
+===================================================================
+--- linux-2.6.33.orig/kernel/time/tick-broadcast.c
++++ linux-2.6.33/kernel/time/tick-broadcast.c
+@@ -214,10 +214,13 @@ static void tick_do_broadcast_on_off(uns
+
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+
++ bc = tick_broadcast_device.evtdev;
++ if (!bc)
++ goto out;
++
+ cpu = smp_processor_id();
+ td = &per_cpu(tick_cpu_device, cpu);
+ dev = td->evtdev;
+- bc = tick_broadcast_device.evtdev;
+
+ /*
+ * Is the device not affected by the powerstate ?
+@@ -467,6 +470,9 @@ void tick_broadcast_oneshot_control(unsi
+ goto out;
+
+ bc = tick_broadcast_device.evtdev;
++ if (!bc)
++ goto out;
++
+ cpu = smp_processor_id();
+ td = &per_cpu(tick_cpu_device, cpu);
+ dev = td->evtdev;
+Index: linux-2.6.33/drivers/usb/core/hcd.h
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/core/hcd.h
++++ linux-2.6.33/drivers/usb/core/hcd.h
+@@ -104,6 +104,9 @@ struct usb_hcd {
+ unsigned wireless:1; /* Wireless USB HCD */
+ unsigned authorized_default:1;
+ unsigned has_tt:1; /* Integrated TT in root hub */
++ unsigned has_sram:1; /* Local SRAM for caching */
++ unsigned sram_no_payload:1; /* sram not for payload */
++ unsigned lpm_cap:1; /* LPM capable */
+
+ int irq; /* irq allocated */
+ void __iomem *regs; /* device memory/io */
+@@ -148,6 +151,13 @@ struct usb_hcd {
+ * (ohci 32, uhci 1024, ehci 256/512/1024).
+ */
+
++#ifdef CONFIG_USB_OTG
++ /* some otg HCDs need this to get USB_DEVICE_ADD and USB_DEVICE_REMOVE
++ * from root hub, we do not want to use USB notification chain, since
++	 * it would be overkill to use a high level notification.
++ */
++ void (*otg_notify) (struct usb_device *udev, unsigned action);
++#endif
+ /* The HC driver's private data is stored at the end of
+ * this structure.
+ */
+Index: linux-2.6.33/drivers/usb/core/hub.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/core/hub.c
++++ linux-2.6.33/drivers/usb/core/hub.c
+@@ -1563,6 +1563,24 @@ static void hub_free_dev(struct usb_devi
+ hcd->driver->free_dev(hcd, udev);
+ }
+
++#ifdef CONFIG_USB_OTG
++
++static void otg_notify(struct usb_device *udev, unsigned action)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ if (hcd->otg_notify)
++ hcd->otg_notify(udev, action);
++}
++
++#else
++
++static inline void otg_notify(struct usb_device *udev, unsigned action)
++{
++}
++
++#endif
++
+ /**
+ * usb_disconnect - disconnect a device (usbcore-internal)
+ * @pdev: pointer to device being disconnected
+@@ -1620,7 +1638,7 @@ void usb_disconnect(struct usb_device **
+ * notifier chain (used by usbfs and possibly others).
+ */
+ device_del(&udev->dev);
+-
++ otg_notify(udev, USB_DEVICE_REMOVE);
+ /* Free the device number and delete the parent's children[]
+ * (or root_hub) pointer.
+ */
+@@ -1833,6 +1851,7 @@ int usb_new_device(struct usb_device *ud
+ * notifier chain (used by usbfs and possibly others).
+ */
+ err = device_add(&udev->dev);
++ otg_notify(udev, USB_DEVICE_ADD);
+ if (err) {
+ dev_err(&udev->dev, "can't device_add, error %d\n", err);
+ goto fail;
+Index: linux-2.6.33/drivers/usb/core/usb.h
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/core/usb.h
++++ linux-2.6.33/drivers/usb/core/usb.h
+@@ -178,4 +178,3 @@ extern void usb_notify_add_device(struct
+ extern void usb_notify_remove_device(struct usb_device *udev);
+ extern void usb_notify_add_bus(struct usb_bus *ubus);
+ extern void usb_notify_remove_bus(struct usb_bus *ubus);
+-
+Index: linux-2.6.33/drivers/usb/host/ehci-hcd.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-hcd.c
++++ linux-2.6.33/drivers/usb/host/ehci-hcd.c
+@@ -35,6 +35,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/debugfs.h>
++#include <linux/uaccess.h>
+
+ #include "../core/hcd.h"
+
+@@ -43,6 +44,8 @@
+ #include <asm/irq.h>
+ #include <asm/system.h>
+ #include <asm/unaligned.h>
++#include <linux/usb/otg.h>
++#include <linux/usb/langwell_otg.h>
+
+ /*-------------------------------------------------------------------------*/
+
+@@ -101,6 +104,11 @@ static int ignore_oc = 0;
+ module_param (ignore_oc, bool, S_IRUGO);
+ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+
++/* for link power management(LPM) feature */
++static unsigned int hird;
++module_param(hird, int, S_IRUGO);
++MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
++
+ #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+ /*-------------------------------------------------------------------------*/
+@@ -305,6 +313,7 @@ static void end_unlink_async(struct ehci
+ static void ehci_work(struct ehci_hcd *ehci);
+
+ #include "ehci-hub.c"
++#include "ehci-lpm.c"
+ #include "ehci-mem.c"
+ #include "ehci-q.c"
+ #include "ehci-sched.c"
+@@ -501,7 +510,8 @@ static void ehci_stop (struct usb_hcd *h
+ ehci_work (ehci);
+ spin_unlock_irq (&ehci->lock);
+ ehci_mem_cleanup (ehci);
+-
++ if (hcd->has_sram)
++ sram_deinit(hcd);
+ #ifdef EHCI_STATS
+ ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
+ ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
+@@ -577,6 +587,17 @@ static int ehci_init(struct usb_hcd *hcd
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
+ temp = 1 << (16 + log2_irq_thresh);
++ if (HCC_32FRAME_PERIODIC_LIST(hcc_params))
++ ehci_dbg(ehci, "32 frame periodic list capable\n");
++ if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
++ ehci_dbg(ehci, "enable per-port change event %d\n", park);
++ temp |= CMD_PPCEE;
++ }
++ if (HCC_HW_PREFETCH(hcc_params)) {
++ ehci_dbg(ehci, "HW prefetch capable %d\n", park);
++ temp |= (CMD_ASPE | CMD_PSPE);
++ }
++
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+@@ -590,7 +611,7 @@ static int ehci_init(struct usb_hcd *hcd
+ temp |= CMD_PARK;
+ temp |= park << 8;
+ }
+- ehci_dbg(ehci, "park %d\n", park);
++ ehci_dbg(ehci, "park %d ", park);
+ }
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+@@ -603,6 +624,17 @@ static int ehci_init(struct usb_hcd *hcd
+ default: BUG();
+ }
+ }
++ if (HCC_LPM(hcc_params)) {
++ /* support link power management EHCI 1.1 addendum */
++ ehci_dbg(ehci, "lpm\n");
++ hcd->lpm_cap = 1;
++ if (hird > 0xf) {
++ ehci_dbg(ehci, "hird %d invalid, use default 0",
++ hird);
++ hird = 0;
++ }
++ temp |= hird << 24;
++ }
+ ehci->command = temp;
+
+ /* Accept arbitrarily long scatter-gather lists */
+@@ -840,6 +872,7 @@ static int ehci_urb_enqueue (
+ ) {
+ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
+ struct list_head qtd_list;
++ int status;
+
+ INIT_LIST_HEAD (&qtd_list);
+
+@@ -855,7 +888,16 @@ static int ehci_urb_enqueue (
+ default:
+ if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+- return submit_async(ehci, urb, &qtd_list, mem_flags);
++ status = submit_async(ehci, urb, &qtd_list, mem_flags);
++
++ /* check device LPM cap after set address */
++ if (usb_pipecontrol(urb->pipe)) {
++ if (((struct usb_ctrlrequest *)urb->setup_packet)
++ ->bRequest == USB_REQ_SET_ADDRESS &&
++ ehci_to_hcd(ehci)->lpm_cap)
++ ehci_lpm_check(ehci, urb->dev->portnum);
++ }
++ return status;
+
+ case PIPE_INTERRUPT:
+ if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
+@@ -1101,6 +1143,10 @@ MODULE_LICENSE ("GPL");
+ #ifdef CONFIG_PCI
+ #include "ehci-pci.c"
+ #define PCI_DRIVER ehci_pci_driver
++#ifdef CONFIG_USB_LANGWELL_OTG
++#include "ehci-langwell-pci.c"
++#define LNW_OTG_HOST_DRIVER ehci_otg_driver
++#endif
+ #endif
+
+ #ifdef CONFIG_USB_EHCI_FSL
+@@ -1213,8 +1259,19 @@ static int __init ehci_hcd_init(void)
+ if (retval < 0)
+ goto clean3;
+ #endif
++
++#ifdef LNW_OTG_HOST_DRIVER
++ retval = langwell_register_host(&LNW_OTG_HOST_DRIVER);
++ if (retval < 0)
++ goto clean4;
++#endif
+ return retval;
+
++#ifdef LNW_OTG_HOST_DRIVER
++clean4:
++ langwell_unregister_host(&LNW_OTG_HOST_DRIVER);
++#endif
++
+ #ifdef OF_PLATFORM_DRIVER
+ /* of_unregister_platform_driver(&OF_PLATFORM_DRIVER); */
+ clean3:
+@@ -1255,6 +1312,9 @@ static void __exit ehci_hcd_cleanup(void
+ #ifdef PS3_SYSTEM_BUS_DRIVER
+ ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+ #endif
++#ifdef LNW_OTG_HOST_DRIVER
++ langwell_unregister_host(&LNW_OTG_HOST_DRIVER);
++#endif
+ #ifdef DEBUG
+ debugfs_remove(ehci_debug_root);
+ #endif
+Index: linux-2.6.33/drivers/usb/host/ehci-hub.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-hub.c
++++ linux-2.6.33/drivers/usb/host/ehci-hub.c
+@@ -112,6 +112,7 @@ static int ehci_bus_suspend (struct usb_
+ int port;
+ int mask;
+ u32 __iomem *hostpc_reg = NULL;
++ int rc = 0;
+
+ ehci_dbg(ehci, "suspend root hub\n");
+
+@@ -228,13 +229,18 @@ static int ehci_bus_suspend (struct usb_
+ ehci_readl(ehci, &ehci->regs->intr_enable);
+
+ ehci->next_statechange = jiffies + msecs_to_jiffies(10);
++
++#ifdef CONFIG_USB_OTG
++ if (ehci->has_otg && ehci->otg_suspend)
++ rc = ehci->otg_suspend(hcd);
++#endif
+ spin_unlock_irq (&ehci->lock);
+
+ /* ehci_work() may have re-enabled the watchdog timer, which we do not
+ * want, and so we must delete any pending watchdog timer events.
+ */
+ del_timer_sync(&ehci->watchdog);
+- return 0;
++ return rc;
+ }
+
+
+@@ -246,6 +252,7 @@ static int ehci_bus_resume (struct usb_h
+ u32 power_okay;
+ int i;
+ u8 resume_needed = 0;
++ int rc = 0;
+
+ if (time_before (jiffies, ehci->next_statechange))
+ msleep(5);
+@@ -295,7 +302,11 @@ static int ehci_bus_resume (struct usb_h
+ i = HCS_N_PORTS (ehci->hcs_params);
+ while (i--) {
+ temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
+- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
++ temp &= ~(PORT_RWC_BITS | PORT_WKDISC_E | PORT_WKOC_E);
++ if (temp & PORT_CONNECT)
++ temp |= PORT_WKOC_E | PORT_WKDISC_E;
++ else
++ temp |= PORT_WKOC_E | PORT_WKCONN_E;
+ if (test_bit(i, &ehci->bus_suspended) &&
+ (temp & PORT_SUSPEND)) {
+ temp |= PORT_RESUME;
+@@ -340,9 +351,13 @@ static int ehci_bus_resume (struct usb_h
+ /* Now we can safely re-enable irqs */
+ ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+
++#ifdef CONFIG_USB_OTG
++ if (ehci->has_otg && ehci->otg_resume)
++ rc = ehci->otg_resume(hcd);
++#endif
+ spin_unlock_irq (&ehci->lock);
+ ehci_handover_companion_ports(ehci);
+- return 0;
++ return rc;
+ }
+
+ #else
+@@ -678,10 +693,20 @@ static int ehci_hub_control (
+ if (temp & PORT_SUSPEND) {
+ if ((temp & PORT_PE) == 0)
+ goto error;
+- /* resume signaling for 20 msec */
+- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
++ /* clear phy low power mode before resume */
++ if (hostpc_reg) {
++ temp1 = ehci_readl(ehci, hostpc_reg);
++ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
++ hostpc_reg);
++ mdelay(5);
++ }
++ /* after PORT_PE check, the port must be
++ connected, set correct wakeup bits */
++ temp &= ~PORT_WKCONN_E;
++ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_RESUME,
+ status_reg);
++ /* resume signaling for 20 msec */
+ ehci->reset_done [wIndex] = jiffies
+ + msecs_to_jiffies (20);
+ }
+@@ -696,6 +721,23 @@ static int ehci_hub_control (
+ status_reg);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
++ /*
++ * for connection change, we need to enable
++ * appropriate wake bits.
++ */
++ temp |= PORT_WKOC_E;
++ if (temp & PORT_CONNECT) {
++ temp |= PORT_WKDISC_E;
++ temp &= ~PORT_WKCONN_E;
++ } else {
++ temp &= ~PORT_WKDISC_E;
++ temp |= PORT_WKCONN_E;
++ }
++ if (ehci_to_hcd(ehci)->lpm_cap) {
++ /* clear PORTSC bits on disconnect */
++ temp &= ~PORT_LPM;
++ temp &= ~PORT_DEV_ADDR;
++ }
+ ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
+ status_reg);
+ break;
+Index: linux-2.6.33/drivers/usb/host/ehci-langwell-pci.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/usb/host/ehci-langwell-pci.c
+@@ -0,0 +1,195 @@
++/*
++ * Intel Moorestown Platform Langwell OTG EHCI Controller PCI Bus Glue.
++ *
++ * Copyright (c) 2008 - 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License 2 as published by the
++ * Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software Foundation,
++ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++static int usb_otg_suspend(struct usb_hcd *hcd)
++{
++ struct otg_transceiver *otg;
++ struct langwell_otg *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return -EINVAL;
++ }
++ iotg = container_of(otg, struct langwell_otg, otg);
++ printk(KERN_INFO "%s OTG HNP update suspend\n", __func__);
++ if (iotg->otg.default_a)
++ iotg->hsm.a_suspend_req = 1;
++ else
++ iotg->hsm.b_bus_req = 0;
++ langwell_update_transceiver();
++ otg_put_transceiver(otg);
++ return 0;
++}
++
++static int usb_otg_resume(struct usb_hcd *hcd)
++{
++ struct otg_transceiver *otg;
++ struct langwell_otg *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return -EINVAL;
++ }
++ iotg = container_of(otg, struct langwell_otg, otg);
++ printk(KERN_INFO "%s OTG HNP update resume\n", __func__);
++ if (iotg->otg.default_a) {
++ iotg->hsm.b_bus_resume = 1;
++ langwell_update_transceiver();
++ }
++ otg_put_transceiver(otg);
++ return 0;
++}
++
++/* the root hub will call this callback when device added/removed */
++static void otg_notify(struct usb_device *udev, unsigned action)
++{
++ struct otg_transceiver *otg;
++ struct langwell_otg *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return;
++ }
++ iotg = container_of(otg, struct langwell_otg, otg);
++
++ switch (action) {
++ case USB_DEVICE_ADD:
++ pr_debug("Notify OTG HNP add device\n");
++ if (iotg->otg.default_a == 1)
++ iotg->hsm.b_conn = 1;
++ else
++ iotg->hsm.a_conn = 1;
++ break;
++ case USB_DEVICE_REMOVE:
++ pr_debug("Notify OTG HNP delete device\n");
++ if (iotg->otg.default_a == 1)
++ iotg->hsm.b_conn = 0;
++ else
++ iotg->hsm.a_conn = 0;
++ break;
++ default:
++ otg_put_transceiver(otg);
++ return ;
++ }
++ if (spin_trylock(&iotg->wq_lock)) {
++ langwell_update_transceiver();
++ spin_unlock(&iotg->wq_lock);
++ }
++ otg_put_transceiver(otg);
++ return;
++}
++
++static int ehci_langwell_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ struct hc_driver *driver;
++ struct langwell_otg *iotg;
++ struct otg_transceiver *otg;
++ struct usb_hcd *hcd;
++ struct ehci_hcd *ehci;
++ int irq;
++ int retval;
++
++ pr_debug("initializing Langwell USB OTG Host Controller\n");
++
++ /* we need not call pci_enable_dev since otg transceiver already take
++ * the control of this device and this probe actaully gets called by
++ * otg transceiver driver with HNP protocol.
++ */
++ irq = pdev->irq;
++
++ if (!id)
++ return -EINVAL;
++ driver = (struct hc_driver *)id->driver_data;
++ if (!driver)
++ return -EINVAL;
++
++ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
++ if (!hcd) {
++ retval = -ENOMEM;
++ goto err1;
++ }
++
++ hcd->self.otg_port = 1;
++ ehci = hcd_to_ehci(hcd);
++ /* this will be called in ehci_bus_suspend and ehci_bus_resume */
++ ehci->otg_suspend = usb_otg_suspend;
++ ehci->otg_resume = usb_otg_resume;
++ /* this will be called by root hub code */
++ hcd->otg_notify = otg_notify;
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ retval = -EINVAL;
++ goto err1;
++ }
++ iotg = container_of(otg, struct langwell_otg, otg);
++ hcd->regs = iotg->regs;
++ hcd->rsrc_start = pci_resource_start(pdev, 0);
++ hcd->rsrc_len = pci_resource_len(pdev, 0);
++
++ if (hcd->regs == NULL) {
++ dev_dbg(&pdev->dev, "error mapping memory\n");
++ retval = -EFAULT;
++ goto err2;
++ }
++ retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
++ if (retval != 0)
++ goto err2;
++ retval = otg_set_host(otg, &hcd->self);
++ if (!otg->default_a)
++ hcd->self.is_b_host = 1;
++ otg_put_transceiver(otg);
++ return retval;
++
++err2:
++ usb_put_hcd(hcd);
++err1:
++ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
++ return retval;
++}
++
++void ehci_langwell_remove(struct pci_dev *dev)
++{
++ struct usb_hcd *hcd = pci_get_drvdata(dev);
++
++ if (!hcd)
++ return;
++ usb_remove_hcd(hcd);
++ usb_put_hcd(hcd);
++}
++
++/* Langwell OTG EHCI driver */
++static struct pci_driver ehci_otg_driver = {
++ .name = "ehci-langwell",
++ .id_table = pci_ids,
++
++ .probe = ehci_langwell_probe,
++ .remove = ehci_langwell_remove,
++
++#ifdef CONFIG_PM_SLEEP
++ .driver = {
++ .pm = &usb_hcd_pci_pm_ops
++ },
++#endif
++ .shutdown = usb_hcd_pci_shutdown,
++};
+Index: linux-2.6.33/drivers/usb/host/ehci-pci.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-pci.c
++++ linux-2.6.33/drivers/usb/host/ehci-pci.c
+@@ -41,6 +41,39 @@ static int ehci_pci_reinit(struct ehci_h
+ return 0;
+ }
+
++/* enable SRAM if sram detected */
++static void sram_init(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
++
++ if (!hcd->has_sram)
++ return;
++ ehci->sram_addr = pci_resource_start(pdev, 1);
++ ehci->sram_size = pci_resource_len(pdev, 1);
++ ehci_info(ehci, "Found HCD SRAM at %x size:%x\n",
++ ehci->sram_addr, ehci->sram_size);
++ if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
++ ehci_warn(ehci, "SRAM request failed\n");
++ hcd->has_sram = 0;
++ } else if (!dma_declare_coherent_memory(&pdev->dev, ehci->sram_addr,
++ ehci->sram_addr, ehci->sram_size, DMA_MEMORY_MAP)) {
++ ehci_warn(ehci, "SRAM DMA declare failed\n");
++ pci_release_region(pdev, 1);
++ hcd->has_sram = 0;
++ }
++}
++
++static void sram_deinit(struct usb_hcd *hcd)
++{
++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
++
++ if (!hcd->has_sram)
++ return;
++ dma_release_declared_memory(&pdev->dev);
++ pci_release_region(pdev, 1);
++}
++
+ /* called during probe() after chip reset completes */
+ static int ehci_pci_setup(struct usb_hcd *hcd)
+ {
+@@ -50,6 +83,7 @@ static int ehci_pci_setup(struct usb_hcd
+ u8 rev;
+ u32 temp;
+ int retval;
++ int force_otg_hc_mode = 0;
+
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_TOSHIBA_2:
+@@ -63,6 +97,26 @@ static int ehci_pci_setup(struct usb_hcd
+ #endif
+ }
+ break;
++ case PCI_VENDOR_ID_INTEL:
++ if (pdev->device == 0x0811) {
++ ehci_info(ehci, "Detected Langwell OTG HC\n");
++ hcd->has_tt = 1;
++ ehci->has_hostpc = 1;
++#ifdef CONFIG_USB_OTG
++ ehci->has_otg = 1;
++#endif
++ force_otg_hc_mode = 1;
++ hcd->has_sram = 1;
++ hcd->sram_no_payload = 1;
++ sram_init(hcd);
++ } else if (pdev->device == 0x0806) {
++ ehci_info(ehci, "Detected Langwell MPH\n");
++ hcd->has_tt = 1;
++ ehci->has_hostpc = 1;
++ hcd->has_sram = 1;
++ hcd->sram_no_payload = 1;
++ sram_init(hcd);
++ }
+ }
+
+ ehci->caps = hcd->regs;
+@@ -98,6 +152,8 @@ static int ehci_pci_setup(struct usb_hcd
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (force_otg_hc_mode)
++ ehci_reset(ehci);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+Index: linux-2.6.33/drivers/usb/host/ehci.h
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci.h
++++ linux-2.6.33/drivers/usb/host/ehci.h
+@@ -139,8 +139,15 @@ struct ehci_hcd { /* one per controlle
+ #define OHCI_HCCTRL_LEN 0x4
+ __hc32 *ohci_hcctrl_reg;
+ unsigned has_hostpc:1;
+-
++#ifdef CONFIG_USB_OTG
++ unsigned has_otg:1; /* if it is otg host*/
++ /* otg host has additional bus_suspend and bus_resume */
++ int (*otg_suspend)(struct usb_hcd *hcd);
++ int (*otg_resume)(struct usb_hcd *hcd);
++#endif
+ u8 sbrn; /* packed release number */
++ unsigned int sram_addr;
++ unsigned int sram_size;
+
+ /* irq statistics */
+ #ifdef EHCI_STATS
+@@ -156,6 +163,7 @@ struct ehci_hcd { /* one per controlle
+ struct dentry *debug_async;
+ struct dentry *debug_periodic;
+ struct dentry *debug_registers;
++ struct dentry *debug_lpm;
+ #endif
+ };
+
+@@ -719,5 +727,10 @@ static inline u32 hc32_to_cpup (const st
+ #endif /* DEBUG */
+
+ /*-------------------------------------------------------------------------*/
+-
++#ifdef CONFIG_PCI
++static void sram_deinit(struct usb_hcd *hcd);
++#else
++static void sram_deinit(struct usb_hcd *hcd) { return; };
++#endif
++static unsigned ehci_lpm_check(struct ehci_hcd *ehci, int port);
+ #endif /* __LINUX_EHCI_HCD_H */
+Index: linux-2.6.33/include/linux/usb.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/usb.h
++++ linux-2.6.33/include/linux/usb.h
+@@ -1582,6 +1582,7 @@ usb_maxpacket(struct usb_device *udev, i
+ #define USB_DEVICE_REMOVE 0x0002
+ #define USB_BUS_ADD 0x0003
+ #define USB_BUS_REMOVE 0x0004
++
+ extern void usb_register_notify(struct notifier_block *nb);
+ extern void usb_unregister_notify(struct notifier_block *nb);
+
+Index: linux-2.6.33/drivers/usb/core/buffer.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/core/buffer.c
++++ linux-2.6.33/drivers/usb/core/buffer.c
+@@ -115,6 +115,11 @@ void *hcd_buffer_alloc(
+ return kmalloc(size, mem_flags);
+ }
+
++ /* we won't use internal SRAM as data payload, we can't get
++ any benefits from it */
++ if (hcd->has_sram && hcd->sram_no_payload)
++ return dma_alloc_coherent(NULL, size, dma, mem_flags);
++
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ if (size <= pool_max [i])
+ return dma_pool_alloc(hcd->pool [i], mem_flags, dma);
+@@ -141,6 +146,11 @@ void hcd_buffer_free(
+ return;
+ }
+
++ if (hcd->has_sram && hcd->sram_no_payload) {
++ dma_free_coherent(NULL, size, addr, dma);
++ return;
++ }
++
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ if (size <= pool_max [i]) {
+ dma_pool_free(hcd->pool [i], addr, dma);
+Index: linux-2.6.33/drivers/usb/host/ehci-dbg.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-dbg.c
++++ linux-2.6.33/drivers/usb/host/ehci-dbg.c
+@@ -98,13 +98,18 @@ static void dbg_hcc_params (struct ehci_
+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
+ } else {
+ ehci_dbg (ehci,
+- "%s hcc_params %04x thresh %d uframes %s%s%s\n",
++ "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
+ label,
+ params,
+ HCC_ISOC_THRES(params),
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "",
+- HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
++ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
++ HCC_LPM(params) ? " LPM" : "",
++ HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
++ HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
++ HCC_32FRAME_PERIODIC_LIST(params) ?
++ " 32 peridic list" : "");
+ }
+ }
+ #else
+@@ -191,8 +196,9 @@ static int __maybe_unused
+ dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+ {
+ return scnprintf (buf, len,
+- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
++ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", status,
++ (status & STS_PPCE_MASK) ? " PPCE" : "",
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+@@ -210,8 +216,9 @@ static int __maybe_unused
+ dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+ {
+ return scnprintf (buf, len,
+- "%s%sintrenable %02x%s%s%s%s%s%s",
++ "%s%sintrenable %02x%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", enable,
++ (enable & STS_PPCE_MASK) ? " PPCE" : "",
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+@@ -228,9 +235,14 @@ static int
+ dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+ {
+ return scnprintf (buf, len,
+- "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
++ "%s%scmd %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s prd=%s%s %s",
+ label, label [0] ? " " : "", command,
+- (command & CMD_PARK) ? "park" : "(park)",
++ (command & CMD_HIRD) ? " HIRD" : "",
++ (command & CMD_PPCEE) ? " PPCEE" : "",
++ (command & CMD_FSP) ? " FSP" : "",
++ (command & CMD_ASPE) ? " ASPE" : "",
++ (command & CMD_PSPE) ? " PSPE" : "",
++ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT (command),
+ (command >> 16) & 0x3f,
+ (command & CMD_LRESET) ? " LReset" : "",
+@@ -257,11 +269,21 @@ dbg_port_buf (char *buf, unsigned len, c
+ }
+
+ return scnprintf (buf, len,
+- "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
++ "%s%sp:%d sts %06x %d %s%s%s%s%s%s sig=%s%s%s%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", port, status,
++ status>>25,/*device address */
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
++ " ACK" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
++ " NYET" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
++ " STALL" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
++ " ERR" : "",
+ (status & PORT_POWER) ? " POWER" : "",
+ (status & PORT_OWNER) ? " OWNER" : "",
+ sig,
++ (status & PORT_LPM) ? " LPM" : "",
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+@@ -330,6 +352,13 @@ static int debug_async_open(struct inode
+ static int debug_periodic_open(struct inode *, struct file *);
+ static int debug_registers_open(struct inode *, struct file *);
+ static int debug_async_open(struct inode *, struct file *);
++static int debug_lpm_open(struct inode *, struct file *);
++static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos);
++static ssize_t debug_lpm_write(struct file *file, const char __user *buffer,
++ size_t count, loff_t *ppos);
++static int debug_lpm_close(struct inode *inode, struct file *file);
++
+ static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+ static int debug_close(struct inode *, struct file *);
+
+@@ -351,6 +380,13 @@ static const struct file_operations debu
+ .read = debug_output,
+ .release = debug_close,
+ };
++static const struct file_operations debug_lpm_fops = {
++ .owner = THIS_MODULE,
++ .open = debug_lpm_open,
++ .read = debug_lpm_read,
++ .write = debug_lpm_write,
++ .release = debug_lpm_close,
++};
+
+ static struct dentry *ehci_debug_root;
+
+@@ -917,6 +953,94 @@ static int debug_registers_open(struct i
+ return file->private_data ? 0 : -ENOMEM;
+ }
+
++static int debug_lpm_open(struct inode *inode, struct file *file)
++{
++ file->private_data = inode->i_private;
++ return 0;
++}
++static int debug_lpm_close(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ /* TODO: show lpm stats */
++ return 0;
++}
++
++
++static
++ssize_t debug_lpm_write(struct file *file, const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct usb_hcd *hcd;
++ struct ehci_hcd *ehci;
++ char buf[50];
++ size_t len;
++ u32 temp;
++ unsigned long port;
++ u32 __iomem *portsc ;
++ u32 params;
++
++ hcd = bus_to_hcd(file->private_data);
++ ehci = hcd_to_ehci(hcd);
++
++ len = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++ buf[len] = '\0';
++ if (len > 0 && buf[len - 1] == '\n')
++ buf[len - 1] = '\0';
++
++ if (strncmp(buf, "enable", 5) == 0) {
++ if (strict_strtoul(buf + 7, 10, &port))
++ return -EINVAL;
++ params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (port > HCS_N_PORTS(params)) {
++ ehci_dbg(ehci, "ERR: LPM on bad port %lu\n", port);
++ return -ENODEV;
++ }
++ portsc = &ehci->regs->port_status[port-1];
++ temp = ehci_readl(ehci, portsc);
++ if (!(temp & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "LPM: no device attached\n");
++ return -ENODEV;
++ }
++ temp |= PORT_LPM;
++ ehci_writel(ehci, temp, portsc);
++ printk(KERN_INFO "force enable LPM for port %lu\n", port);
++ } else if (strncmp(buf, "hird=", 5) == 0) {
++ unsigned long hird;
++ if (strict_strtoul(buf + 5, 16, &hird))
++ return -EINVAL;
++ printk(KERN_INFO " setting hird %s %lu \n", buf + 6, hird);
++ temp = ehci_readl(ehci, &ehci->regs->command);
++ temp &= ~CMD_HIRD;
++ temp |= hird << 24;
++ ehci_writel(ehci, temp, &ehci->regs->command);
++ } else if (strncmp(buf, "disable", 7) == 0) {
++ if (strict_strtoul(buf + 8, 10, &port))
++ return -EINVAL;
++ params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (port > HCS_N_PORTS(params)) {
++ ehci_dbg(ehci, "ERR: LPM off bad port %lu\n", port);
++ return -ENODEV;
++ }
++ portsc = &ehci->regs->port_status[port-1];
++ temp = ehci_readl(ehci, portsc);
++ if (!(temp & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "ERR: no device attached\n");
++ return -ENODEV;
++ }
++ temp &= ~PORT_LPM;
++ ehci_writel(ehci, temp, portsc);
++ printk(KERN_INFO "disabled LPM for port %lu\n", port);
++ } else
++ return -EOPNOTSUPP;
++ return count;
++}
++
+ static inline void create_debug_files (struct ehci_hcd *ehci)
+ {
+ struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
+@@ -940,6 +1064,10 @@ static inline void create_debug_files (s
+ ehci->debug_registers = debugfs_create_file("registers", S_IRUGO,
+ ehci->debug_dir, bus,
+ &debug_registers_fops);
++
++ ehci->debug_registers = debugfs_create_file("lpm", S_IRUGO|S_IWUGO,
++ ehci->debug_dir, bus,
++ &debug_lpm_fops);
+ if (!ehci->debug_registers)
+ goto registers_error;
+ return;
+Index: linux-2.6.33/drivers/usb/host/ehci-lpm.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/usb/host/ehci-lpm.c
+@@ -0,0 +1,90 @@
++/*
++ *
++ * Author: Jacob Pan <jacob.jun.pan@intel.com>
++ *
++ * Copyright 2009- Intel Corp.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software Foundation,
++ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* this file is part of ehci-hcd.c */
++static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
++{
++ u32 __iomem portsc;
++
++ ehci_dbg(ehci, "set dev address %d for port %d \n", dev_addr, port_num);
++ if (port_num > HCS_N_PORTS(ehci->hcs_params)) {
++ ehci_dbg(ehci, "invalid port number %d \n", port_num);
++ return -ENODEV;
++ }
++ portsc = ehci_readl(ehci, &ehci->regs->port_status[port_num-1]);
++ portsc &= ~PORT_DEV_ADDR;
++ portsc |= dev_addr<<25;
++ ehci_writel(ehci, portsc, &ehci->regs->port_status[port_num-1]);
++ return 0;
++}
++
++/*
++ * this function is called to put a link into L1 state. the steps are:
++ * - verify HC supports LPM
++ * - make sure all pipe idle on the link
++ * - shutdown all qh on the pipe
++ * - send LPM packet
++ * - confirm device ack
++ */
++static unsigned ehci_lpm_check(struct ehci_hcd *ehci, int port)
++{
++ u32 __iomem *portsc ;
++ u32 val32;
++ int retval;
++
++ portsc = &ehci->regs->port_status[port-1];
++ val32 = ehci_readl(ehci, portsc);
++ if (!(val32 & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "LPM: no device attached\n");
++ return -ENODEV;
++ }
++ val32 |= PORT_LPM;
++ ehci_writel(ehci, val32, portsc);
++ mdelay(5);
++ val32 |= PORT_SUSPEND;
++ ehci_dbg(ehci, "Sending LPM 0x%08x to port %d\n", val32, port);
++ ehci_writel(ehci, val32, portsc);
++ /* wait for ACK */
++ mdelay(10);
++ retval = handshake(ehci, &ehci->regs->port_status[port-1], PORT_SSTS,
++ PORTSC_SUSPEND_STS_ACK, 125);
++ dbg_port(ehci, "LPM", port, val32);
++ if (retval != -ETIMEDOUT) {
++ ehci_dbg(ehci, "LPM: device ACK for LPM\n");
++ val32 |= PORT_LPM;
++ /*
++ * now device should be in L1 sleep, let's wake up the device
++ * so that we can complete enumeration.
++ */
++ ehci_writel(ehci, val32, portsc);
++ mdelay(10);
++ val32 |= PORT_RESUME;
++ ehci_writel(ehci, val32, portsc);
++ } else {
++ ehci_dbg(ehci, "LPM: device does not ACK, disable LPM %d\n",
++ retval);
++ val32 &= ~PORT_LPM;
++ retval = -ETIMEDOUT;
++ ehci_writel(ehci, val32, portsc);
++ }
++
++ return retval;
++}
+Index: linux-2.6.33/drivers/usb/host/ehci-q.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-q.c
++++ linux-2.6.33/drivers/usb/host/ehci-q.c
+@@ -643,6 +643,16 @@ qh_urb_transaction (
+ sizeof (struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
+
++ if (((struct usb_ctrlrequest *)urb->setup_packet)->bRequest
++ == USB_REQ_SET_ADDRESS) {
++ /* for LPM capable HC, set up device address*/
++ int dev_address = ((struct usb_ctrlrequest *)
++ (urb->setup_packet))->wValue;
++ if (ehci_to_hcd(ehci)->lpm_cap)
++ ehci_lpm_set_da(ehci, dev_address,
++ urb->dev->portnum);
++ }
++
+ /* ... and always at least one more pid */
+ token ^= QTD_TOGGLE;
+ qtd_prev = qtd;
+Index: linux-2.6.33/include/linux/usb/ehci_def.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/usb/ehci_def.h
++++ linux-2.6.33/include/linux/usb/ehci_def.h
+@@ -39,6 +39,12 @@ struct ehci_caps {
+ #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
++/* for 1.1 addendum */
++#define HCC_32FRAME_PERIODIC_LIST(p) ((p)&(1 << 19))
++#define HCC_PER_PORT_CHANGE_EVENT(p) ((p)&(1 << 18))
++#define HCC_LPM(p) ((p)&(1 << 17))
++#define HCC_HW_PREFETCH(p) ((p)&(1 << 16))
++
+ #define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
+ #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
+ #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
+@@ -54,6 +60,13 @@ struct ehci_regs {
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
++
++/* EHCI 1.1 addendum */
++#define CMD_HIRD (0xf<<24) /* host initiated resume duration */
++#define CMD_PPCEE (1<<15) /* per port change event enable */
++#define CMD_FSP (1<<14) /* fully synchronized prefetch */
++#define CMD_ASPE (1<<13) /* async schedule prefetch enable */
++#define CMD_PSPE (1<<12) /* periodic schedule prefetch enable */
+ /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+ #define CMD_PARK (1<<11) /* enable "park" on async qh */
+ #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+@@ -67,6 +80,7 @@ struct ehci_regs {
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
++#define STS_PPCE_MASK (0xff<<16) /* Per-Port change event 1-16 */
+ #define STS_ASS (1<<15) /* Async Schedule Status */
+ #define STS_PSS (1<<14) /* Periodic Schedule Status */
+ #define STS_RECL (1<<13) /* Reclamation */
+@@ -100,6 +114,14 @@ struct ehci_regs {
+
+ /* PORTSC: offset 0x44 */
+ u32 port_status [0]; /* up to N_PORTS */
++/* EHCI 1.1 addendum */
++#define PORTSC_SUSPEND_STS_ACK 0
++#define PORTSC_SUSPEND_STS_NYET 1
++#define PORTSC_SUSPEND_STS_STALL 2
++#define PORTSC_SUSPEND_STS_ERR 3
++
++#define PORT_DEV_ADDR (0x7f<<25) /* device address */
++#define PORT_SSTS (0x3<<23) /* suspend status */
+ /* 31:23 reserved */
+ #define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
+ #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
+@@ -115,6 +137,7 @@ struct ehci_regs {
+ #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+ /* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+ /* 9 reserved */
++#define PORT_LPM (1<<9) /* LPM transaction */
+ #define PORT_RESET (1<<8) /* reset port */
+ #define PORT_SUSPEND (1<<7) /* suspend port */
+ #define PORT_RESUME (1<<6) /* resume it */
+Index: linux-2.6.33/arch/x86/include/asm/i8259.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/i8259.h
++++ linux-2.6.33/arch/x86/include/asm/i8259.h
+@@ -26,11 +26,6 @@ extern unsigned int cached_irq_mask;
+
+ extern spinlock_t i8259A_lock;
+
+-extern void init_8259A(int auto_eoi);
+-extern void enable_8259A_irq(unsigned int irq);
+-extern void disable_8259A_irq(unsigned int irq);
+-extern unsigned int startup_8259A_irq(unsigned int irq);
+-
+ /* the PIC may need a careful delay on some platforms, hence specific calls */
+ static inline unsigned char inb_pic(unsigned int port)
+ {
+@@ -57,7 +52,17 @@ static inline void outb_pic(unsigned cha
+
+ extern struct irq_chip i8259A_chip;
+
+-extern void mask_8259A(void);
+-extern void unmask_8259A(void);
++struct legacy_pic {
++ int nr_legacy_irqs;
++ struct irq_chip *chip;
++ void (*mask_all)(void);
++ void (*restore_mask)(void);
++ void (*init)(int auto_eoi);
++ int (*irq_pending)(unsigned int irq);
++ void (*make_irq)(unsigned int irq);
++};
++
++extern struct legacy_pic *legacy_pic;
++extern struct legacy_pic null_legacy_pic;
+
+ #endif /* _ASM_X86_I8259_H */
+Index: linux-2.6.33/arch/x86/kernel/i8259.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/i8259.c
++++ linux-2.6.33/arch/x86/kernel/i8259.c
+@@ -34,6 +34,12 @@
+ static int i8259A_auto_eoi;
+ DEFINE_SPINLOCK(i8259A_lock);
+ static void mask_and_ack_8259A(unsigned int);
++static void mask_8259A(void);
++static void unmask_8259A(void);
++static void disable_8259A_irq(unsigned int irq);
++static void enable_8259A_irq(unsigned int irq);
++static void init_8259A(int auto_eoi);
++static int i8259A_irq_pending(unsigned int irq);
+
+ struct irq_chip i8259A_chip = {
+ .name = "XT-PIC",
+@@ -63,7 +69,7 @@ unsigned int cached_irq_mask = 0xffff;
+ */
+ unsigned long io_apic_irqs;
+
+-void disable_8259A_irq(unsigned int irq)
++static void disable_8259A_irq(unsigned int irq)
+ {
+ unsigned int mask = 1 << irq;
+ unsigned long flags;
+@@ -77,7 +83,7 @@ void disable_8259A_irq(unsigned int irq)
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+
+-void enable_8259A_irq(unsigned int irq)
++static void enable_8259A_irq(unsigned int irq)
+ {
+ unsigned int mask = ~(1 << irq);
+ unsigned long flags;
+@@ -91,7 +97,7 @@ void enable_8259A_irq(unsigned int irq)
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+
+-int i8259A_irq_pending(unsigned int irq)
++static int i8259A_irq_pending(unsigned int irq)
+ {
+ unsigned int mask = 1<<irq;
+ unsigned long flags;
+@@ -107,7 +113,7 @@ int i8259A_irq_pending(unsigned int irq)
+ return ret;
+ }
+
+-void make_8259A_irq(unsigned int irq)
++static void make_8259A_irq(unsigned int irq)
+ {
+ disable_irq_nosync(irq);
+ io_apic_irqs &= ~(1<<irq);
+@@ -281,7 +287,7 @@ static int __init i8259A_init_sysfs(void
+
+ device_initcall(i8259A_init_sysfs);
+
+-void mask_8259A(void)
++static void mask_8259A(void)
+ {
+ unsigned long flags;
+
+@@ -293,7 +299,7 @@ void mask_8259A(void)
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+
+-void unmask_8259A(void)
++static void unmask_8259A(void)
+ {
+ unsigned long flags;
+
+@@ -305,7 +311,7 @@ void unmask_8259A(void)
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+
+-void init_8259A(int auto_eoi)
++static void init_8259A(int auto_eoi)
+ {
+ unsigned long flags;
+
+@@ -358,3 +364,47 @@ void init_8259A(int auto_eoi)
+
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
++
++/*
++ * make i8259 a driver so that we can select pic functions at run time. the goal
++ * is to make x86 binary compatible among pc compatible and non-pc compatible
++ * platforms, such as x86 MID.
++ */
++
++static void __init legacy_pic_noop(void) { };
++static void __init legacy_pic_uint_noop(unsigned int unused) { };
++static void __init legacy_pic_int_noop(int unused) { };
++
++static struct irq_chip dummy_pic_chip = {
++ .name = "dummy pic",
++ .mask = legacy_pic_uint_noop,
++ .unmask = legacy_pic_uint_noop,
++ .disable = legacy_pic_uint_noop,
++ .mask_ack = legacy_pic_uint_noop,
++};
++static int legacy_pic_irq_pending_noop(unsigned int irq)
++{
++ return 0;
++}
++
++struct legacy_pic null_legacy_pic = {
++ .nr_legacy_irqs = 0,
++ .chip = &dummy_pic_chip,
++ .mask_all = legacy_pic_noop,
++ .restore_mask = legacy_pic_noop,
++ .init = legacy_pic_int_noop,
++ .irq_pending = legacy_pic_irq_pending_noop,
++ .make_irq = legacy_pic_uint_noop,
++};
++
++struct legacy_pic default_legacy_pic = {
++ .nr_legacy_irqs = NR_IRQS_LEGACY,
++ .chip = &i8259A_chip,
++ .mask_all = mask_8259A,
++ .restore_mask = unmask_8259A,
++ .init = init_8259A,
++ .irq_pending = i8259A_irq_pending,
++ .make_irq = make_8259A_irq,
++};
++
++struct legacy_pic *legacy_pic = &default_legacy_pic;
+Index: linux-2.6.33/arch/x86/include/asm/hw_irq.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/hw_irq.h
++++ linux-2.6.33/arch/x86/include/asm/hw_irq.h
+@@ -53,13 +53,6 @@ extern void threshold_interrupt(void);
+ extern void call_function_interrupt(void);
+ extern void call_function_single_interrupt(void);
+
+-/* PIC specific functions */
+-extern void disable_8259A_irq(unsigned int irq);
+-extern void enable_8259A_irq(unsigned int irq);
+-extern int i8259A_irq_pending(unsigned int irq);
+-extern void make_8259A_irq(unsigned int irq);
+-extern void init_8259A(int aeoi);
+-
+ /* IOAPIC */
+ #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
+ extern unsigned long io_apic_irqs;
+Index: linux-2.6.33/arch/x86/kernel/apic/nmi.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/apic/nmi.c
++++ linux-2.6.33/arch/x86/kernel/apic/nmi.c
+@@ -177,7 +177,7 @@ int __init check_nmi_watchdog(void)
+ error:
+ if (nmi_watchdog == NMI_IO_APIC) {
+ if (!timer_through_8259)
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ on_each_cpu(__acpi_nmi_disable, NULL, 1);
+ }
+
+Index: linux-2.6.33/arch/x86/kernel/irqinit.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/irqinit.c
++++ linux-2.6.33/arch/x86/kernel/irqinit.c
+@@ -123,7 +123,7 @@ void __init init_ISA_irqs(void)
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+ init_bsp_APIC();
+ #endif
+- init_8259A(0);
++ legacy_pic->init(0);
+
+ /*
+ * 16 old-style INTA-cycle interrupts:
+Index: linux-2.6.33/drivers/misc/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/misc/Makefile
++++ linux-2.6.33/drivers/misc/Makefile
+@@ -20,6 +20,7 @@ obj-$(CONFIG_SGI_XP) += sgi-xp/
+ obj-$(CONFIG_SGI_GRU) += sgi-gru/
+ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
+ obj-$(CONFIG_HP_ILO) += hpilo.o
++obj-$(CONFIG_MRST) += intel_mrst.o
+ obj-$(CONFIG_ISL29003) += isl29003.o
+ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
+ obj-$(CONFIG_DS1682) += ds1682.o
+Index: linux-2.6.33/drivers/misc/intel_mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/misc/intel_mrst.c
+@@ -0,0 +1,216 @@
++/*
++ * intel_mrst.c - Intel Moorestown Driver for misc functionality
++ *
++ * Copyright (C) 2009 Intel Corp
++ * Author: James Ausmus <james.ausmus@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver sets up initial PMIC register values for various voltage rails
++ * and GPIOs
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <linux/delay.h>
++#include <asm/ipc_defs.h>
++
++
++MODULE_AUTHOR("James Ausmus");
++MODULE_AUTHOR("German Monroy");
++MODULE_DESCRIPTION("Intel MRST platform specific driver");
++MODULE_LICENSE("GPL");
++
++static int intel_mrst_pmic_read(unsigned int reg, unsigned int *value)
++{
++ struct ipc_pmic_reg_data pmic_data = { 0 };
++ int ret = 0;
++
++ pmic_data.pmic_reg_data[0].register_address = reg;
++ pmic_data.num_entries = 1;
++ ret = ipc_pmic_register_read(&pmic_data);
++ if (ret)
++ printk(KERN_ERR "intel_mrst_pmic_read: unable to read "
++ "PMIC register 0x%03x\n", reg);
++ else
++ *value = pmic_data.pmic_reg_data[0].value;
++
++ return ret;
++}
++
++static int intel_mrst_pmic_write(unsigned int reg, unsigned int value)
++{
++ struct ipc_pmic_reg_data pmic_data = { 0 };
++ int ret = 0;
++
++ pmic_data.pmic_reg_data[0].register_address = reg;
++ pmic_data.pmic_reg_data[0].value = value;
++ pmic_data.num_entries = 1;
++ ret = ipc_pmic_register_write(&pmic_data, 0);
++ if (ret) {
++ printk(KERN_ERR "intel_mrst_pmic_write: register 0x%03x "
++ "failed ipc_pmic_register_write of value %02x, "
++ "retval %d\n", reg, value, ret);
++ } else {
++ printk(KERN_INFO "intel_mrst_pmic_write: register "
++ "0x%03x, now=0x%02x\n",
++ reg, value);
++ }
++
++ return ret;
++}
++
++static int intel_mrst_sdio_EVP_power_up(void)
++{
++ intel_mrst_pmic_write(0xF4, 0x25);
++ intel_mrst_pmic_write(0x21, 0x00);
++ intel_mrst_pmic_write(0x4a, 0x7f);
++ intel_mrst_pmic_write(0x4b, 0x7f);
++ intel_mrst_pmic_write(0x4c, 0x3f);
++
++ intel_mrst_pmic_write(0x3b, 0x3f);
++ intel_mrst_pmic_write(0x3c, 0x3f);
++ mdelay(1);
++ intel_mrst_pmic_write(0xF4, 0x05);
++ mdelay(12);
++ intel_mrst_pmic_write(0xF4, 0x21);
++
++ return 0;
++
++}
++
++static int intel_mrst_sdio_EVP_power_down(void)
++{
++ intel_mrst_pmic_write(0xF4, 0x25);
++ intel_mrst_pmic_write(0x21, 0x00);
++
++ intel_mrst_pmic_write(0x4b, 0x00);
++ intel_mrst_pmic_write(0x4c, 0x00);
++
++ intel_mrst_pmic_write(0x3b, 0x00);
++ intel_mrst_pmic_write(0x3c, 0x00);
++ intel_mrst_pmic_write(0x4a, 0x00);
++
++ return 0;
++}
++
++static int intel_mrst_sdio_8688_power_up(void)
++{
++ intel_mrst_pmic_write(0x37, 0x3f); /* Set VDDQ for Marvell 8688 */
++ intel_mrst_pmic_write(0x4a, 0x3f); /* Set GYMXIOCNT for Marvell 8688 */
++ intel_mrst_pmic_write(0x4e, 0x3f); /* Set GYMX33CNT for Marvell 8688 */
++
++ intel_mrst_pmic_write(0x3a, 0x27); /* Enables the V3p3_FLASH line,
++ which routes to VIO_X1 and VIO_X2
++ on the MRVL8688 */
++
++ intel_mrst_pmic_write(0x4b, 0x27); /* Enable V1p2_VWYMXA for MRVL8688 */
++ intel_mrst_pmic_write(0x4c, 0x27); /* Enable V1p8_VWYMXARF for
++ MRVL8688 */
++
++ return 0;
++}
++
++static int intel_mrst_bringup_8688_sdio2(void)
++{
++ unsigned int temp = 0;
++
++ /* Register 0xf4 has 2 GPIO lines connected to the MRVL 8688:
++ * bit 4: PDn
++ * bit 3: WiFi RESETn */
++
++ intel_mrst_pmic_read(0xf4, &temp);
++ temp = temp|0x8;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ temp = temp|0x10;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ return 0;
++}
++
++static int intel_mrst_bringup_EVP_sdio2_Option_spi(void)
++{
++ unsigned int temp = 0;
++
++ /* Register 0xf4 has 3 GPIO lines connected to the EVP:
++ * bit 0: RF_KILL_N
++ * bit 2: H2D_INT
++ * bit 5: SYS_RST_N
++ */
++
++ /* Register 0xf4 has 2 GPIO lines connected to the Option:
++ * bit 0: GPO_WWAN_DISABLE
++ * bit 5: GPO_WWAN_RESET
++ */
++
++ intel_mrst_pmic_read(0xf4, &temp);
++ temp = temp|0x21;
++ temp = temp & 0xFB;
++ intel_mrst_pmic_write(0xf4, temp); /* Set RF_KILL_N & SYS_RST_N to
++ High. H2D_INT to LOW */
++
++ intel_mrst_pmic_read(0xf4, &temp); /* Set SYS_RST_N to Low */
++ temp = temp & 0xDF;
++ mdelay(1);
++ intel_mrst_pmic_write(0xf4, temp);
++
++ mdelay(12); /* Try to generate a 12mS delay here if possible */
++ intel_mrst_pmic_read(0xf4, &temp); /* Set SYS_RST_N to High */
++ temp = temp | 0x20;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ return 0;
++}
++
++
++static int __init intel_mrst_module_init(void)
++{
++ int ret = 0;
++
++/* We only need the following PMIC register initializations if
++ * we are using the Marvell 8688 WLAN card on the SDIO2 port */
++
++#ifdef CONFIG_8688_RC
++
++ printk(KERN_INFO "intel_mrst_module_init: bringing up power for "
++ "8688 WLAN on SDIO2...\n");
++ ret = intel_mrst_bringup_8688_sdio2();
++
++#endif /* CONFIG_8688_RC */
++
++/* We only need the following PMIC register initializations if
++ * we are using the EVP on SDIO2 port or Option on SPI port */
++
++#if defined(CONFIG_EVP_SDIO2) || defined(CONFIG_SPI_MRST_GTM501)
++
++ printk(KERN_INFO "intel_mrst_module_init: bringing up power for "
++ "EvP on SDIO2 and Option on SPI...\n");
++ ret = intel_mrst_bringup_EVP_sdio2_Option_spi();
++
++#endif /* CONFIG_EVP_SDIO2 || CONFIG_SPI_MRST_GTM501 */
++ return ret;
++}
++
++static void __exit intel_mrst_module_exit(void)
++{
++}
++
++module_init(intel_mrst_module_init);
++module_exit(intel_mrst_module_exit);
+Index: linux-2.6.33/drivers/i2c/busses/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/i2c/busses/Kconfig
++++ linux-2.6.33/drivers/i2c/busses/Kconfig
+@@ -772,4 +772,14 @@ config SCx200_ACB
+ This support is also available as a module. If so, the module
+ will be called scx200_acb.
+
++config I2C_MRST
++ tristate "Intel Moorestown I2C Controller"
++ depends on PCI && GPIOLIB && GPIO_LANGWELL
++ default y
++ help
++ If you say yes to this option, support will be included for the Intel
++ Moorestown chipset I2C controller.
++ This driver can also be built as a module. If so, the module
++ will be called i2c-mrst.
++
+ endmenu
+Index: linux-2.6.33/drivers/i2c/busses/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/i2c/busses/Makefile
++++ linux-2.6.33/drivers/i2c/busses/Makefile
+@@ -72,6 +72,7 @@ obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
+ obj-$(CONFIG_I2C_STUB) += i2c-stub.o
+ obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
+ obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
++obj-$(CONFIG_I2C_MRST) += i2c-mrst.o
+
+ ifeq ($(CONFIG_I2C_DEBUG_BUS),y)
+ EXTRA_CFLAGS += -DDEBUG
+Index: linux-2.6.33/drivers/i2c/busses/i2c-mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/i2c/busses/i2c-mrst.c
+@@ -0,0 +1,953 @@
++/*
++ * Support for Moorestown Langwell I2C chip
++ *
++ * Copyright (c) 2009 Intel Corporation.
++ * Copyright (c) 2009 Synopsys. Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License, version
++ * 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT ANY
++ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
++ * details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc., 51
++ * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/stat.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/gpio.h>
++
++#include <linux/io.h>
++
++#include "i2c-mrst.h"
++
++#define MAX_T_POLL_COUNT 4000 /* FIXME */
++#define DEF_BAR 0
++#define VERSION "Version 0.5"
++
++#define mrst_i2c_read(reg) __raw_readl(reg)
++#define mrst_i2c_write(reg, val) __raw_writel((val), (reg))
++
++static int speed_mode = STANDARD;
++module_param(speed_mode, int, S_IRUGO);
++
++static int mrst_i2c_register_board_info(struct pci_dev *dev, int busnum)
++{
++ struct mrst_i2c_private *mrst = (struct mrst_i2c_private *)
++ pci_get_drvdata(dev);
++ int err;
++ unsigned short addr, irq, host;
++ char *name = NULL;
++ struct i2c_board_info *info = NULL;
++ unsigned int addr_off, irq_off, name_off, data_off, host_off;
++ unsigned int table_len, block_num, block = 0;
++ int i, j;
++ unsigned int start, len;
++ void __iomem *base = NULL, *ptr = NULL;
++
++ /* Determine the address of the I2C device info table area */
++ start = pci_resource_start(dev, 1);
++ len = pci_resource_len(dev, 1);
++ if (!start || len <= 0) {
++ err = -ENODEV;
++ return err;
++ }
++
++ err = pci_request_region(dev, 1, "mrst_i2c");
++ if (err) {
++ dev_err(&dev->dev, "Failed to request I2C region "
++ "0x%1x-0x%Lx\n", start,
++ (unsigned long long)pci_resource_end(dev, 1));
++ return err;
++ }
++
++ ptr = ioremap(start, len);
++ if (!ptr) {
++ dev_err(&dev->dev, "I/O memory remapping failed\n");
++ err = -ENOMEM;
++ goto err0;
++ }
++
++ if (len == 8) {
++ start = ioread32(ptr);
++ len = ioread32(ptr + 4);
++ iounmap(ptr);
++ dev_dbg(&dev->dev, "New FW: start 0x%x 0x%x\n", start, len);
++ base = ioremap(start, len);
++ } else {
++ dev_dbg(&dev->dev, "this is an old FW\n");
++ base = ptr;
++ }
++
++ /* Initialization */
++ name = kzalloc(sizeof(char) * NAME_LENGTH, GFP_KERNEL);
++ if (name == NULL) {
++ err = -ENOMEM;
++ goto err1;
++ }
++
++ info = kzalloc(sizeof(struct i2c_board_info), GFP_KERNEL);
++ if (info == NULL) {
++ dev_err(&dev->dev,
++ "Can't allocate interface for i2c_board_info\n");
++ err = -ENOMEM;
++ goto err2;
++ }
++
++ /* Get I2C info table length */
++ table_len = ioread32(base + I2C_INFO_TABLE_LENGTH);
++
++ /* Calculate the number of I2C device */
++ block_num = (table_len - HEAD_LENGTH)/BLOCK_LENGTH;
++ dev_dbg(&dev->dev, "the number of table is %d\n", block_num);
++ if (!block_num)
++ /* No I2C device info */
++ goto err3;
++
++ /* Initialize mrst_i2c_info array */
++ mrst->mrst_i2c_info = kzalloc(sizeof(struct i2c_board_info) *
++ block_num, GFP_KERNEL);
++ if (mrst->mrst_i2c_info == NULL) {
++ dev_err(&dev->dev,
++ "Can't allocate interface for i2c_board_info\n");
++ err = -ENOMEM;
++ goto err3;
++ }
++
++ mrst->data = kzalloc(sizeof(*mrst->data) * block_num, GFP_KERNEL);
++ if (mrst->data == NULL) {
++ dev_err(&dev->dev,
++ "Can't allocate interface for per device data\n");
++ err = -ENOMEM;
++ goto err4;
++ }
++
++ for (i = 0; i < block_num; i++) {
++ /* I2C device info block offsets */
++ host_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i;
++ addr_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_ADDR;
++ irq_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_IRQ;
++ name_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_NAME;
++ data_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_INFO;
++
++ /* Read PCI config table */
++ host = ioread16(base + host_off);
++ if (host != busnum)
++ continue;
++ addr = ioread16(base + addr_off);
++ irq = ioread16(base + irq_off);
++ for (j = 0; j < NAME_LENGTH; j++)
++ name[j] = ioread8(base + name_off + j);
++
++ for (j = 0; j < INFO_LENGTH; j++)
++ mrst->data[i][j] = ioread8(base + data_off + j);
++ dev_dbg(&dev->dev, "after read PCI config table: name = %s,"
++ " address = %x\n", name, addr);
++
++ /* Fill in i2c_board_info struct */
++ memcpy(info->type, name, NAME_LENGTH);
++ info->platform_data = mrst->data[i];
++ info->addr = addr;
++ info->irq = irq;
++
++ /* Add to mrst_i2c_info array */
++ memcpy(mrst->mrst_i2c_info + block, info,
++ sizeof(struct i2c_board_info));
++ block++;
++ }
++
++ /* Register i2c board info */
++ err = i2c_register_board_info(busnum, mrst->mrst_i2c_info, block);
++ goto err3;
++
++/* Clean up */
++err4:
++ kfree(mrst->mrst_i2c_info);
++err3:
++ kfree(info);
++err2:
++ kfree(name);
++err1:
++ iounmap(base);
++err0:
++ pci_release_region(dev, 1);
++ return err;
++}
++/* End update */
++
++/**
++ * mrst_i2c_disable - Disable I2C controller
++ * @adap: struct pointer to i2c_adapter
++ *
++ * Return Value:
++ * 0 success
++ * -EBUSY if device is busy
++ * -ETIMEDOUT if i2c cannot be disabled within the given time
++ *
++ * I2C bus state should be checked prior to disabling the hardware. If bus is
++ * not in idle state, an errno is returned. Write "0" to IC_ENABLE to disable
++ * I2C controller.
++ */
++static int mrst_i2c_disable(struct i2c_adapter *adap)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++
++ int count = 0;
++
++ /* Check if device is busy */
++ dev_dbg(&adap->dev, "mrst i2c disable\n");
++ while (mrst_i2c_read(i2c->base + IC_STATUS) & 0x1) {
++ dev_dbg(&adap->dev, "i2c is busy, count is %d\n", count);
++ if (count++ > 10000)
++ return -EBUSY;
++ }
++
++ /* Set IC_ENABLE to 0 */
++ mrst_i2c_write(i2c->base + IC_ENABLE, 0);
++
++ /* Disable all interrupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ return 0;
++}
++
++/**
++ * mrst_i2c_hwinit - Initiate the I2C hardware registers. This function will
++ * be called in mrst_i2c_probe() before device registration.
++ * @dev: pci device struct pointer
++ *
++ * Return Values:
++ * 0 success
++ * -EBUSY i2c cannot be disabled
++ * -ETIMEDOUT i2c cannot be disabled
++ * -EFAULT If APB data width is not 32-bit wide
++ *
++ * I2C should be disabled prior to other register operation. If failed, an
++ * errno is returned. Mask and Clear all interrupts, this should be done at
++ * first. Set common registers which will not be modified during normal
++ * transfers, including: control register, FIFO threshold and clock freq.
++ * Check APB data width at last.
++ */
++static int __devinit mrst_i2c_hwinit(struct pci_dev *dev)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)pci_get_drvdata(dev);
++ int err = 0;
++
++ /* Disable i2c first */
++ err = mrst_i2c_disable(i2c->adap);
++ if (err)
++ return err;
++
++ /* Disable all interrupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ /*
++ * Setup clock frequency and speed mode
++ * Enable restart condition,
++ * enable master FSM, disable slave FSM,
++ * use target address when initiating transfer
++ */
++ switch (speed_mode) {
++ case STANDARD:
++ mrst_i2c_write(i2c->base + IC_CON,
++ SLV_DIS | RESTART | STANDARD_MODE | MASTER_EN);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x75);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0x7c);
++ break;
++ case FAST:
++ mrst_i2c_write(i2c->base + IC_CON,
++ SLV_DIS | RESTART | FAST_MODE | MASTER_EN);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x15);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0x21);
++ break;
++ case HIGH:
++ mrst_i2c_write(i2c->base + IC_CON,
++ SLV_DIS | RESTART | HIGH_MODE | MASTER_EN);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x7);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0xE);
++ break;
++ default:
++ ;
++ }
++
++ /* Set transmit & receive FIFO thresholds */
++ mrst_i2c_write(i2c->base + IC_RX_TL, 0x3);
++ mrst_i2c_write(i2c->base + IC_TX_TL, 0x3);
++
++ mrst_i2c_write(i2c->base + IC_ENABLE, 1);
++
++ return err;
++}
++
++/**
++ * mrst_i2c_func - Return the supported three I2C operations.
++ * @adapter: i2c_adapter struct pointer
++ */
++static u32 mrst_i2c_func(struct i2c_adapter *adapter)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL;
++}
++
++/**
++ * mrst_i2c_invalid_address - To check if the address in i2c message is
++ * correct.
++ * @p: i2c_msg struct pointer
++ *
++ * Return Values:
++ * 0 if the address is valid
++ * 1 if the address is invalid
++ */
++static inline int mrst_i2c_invalid_address(const struct i2c_msg *p)
++{
++ int ret = ((p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN)
++ && (p->addr > 0x7f)));
++ return ret;
++}
++
++/**
++ * mrst_i2c_address_neq - To check if the addresses for different i2c messages
++ * are equal.
++ * @p1: first i2c_msg
++ * @p2: second i2c_msg
++ *
++ * Return Values:
++ * 0 if addresses are equal
++ * 1 if not equal
++ *
++ * Within a single transfer, I2C client may need to send its address more
++ * than one time. So a check for the address equation is needed.
++ */
++static inline int mrst_i2c_address_neq(const struct i2c_msg *p1,
++ const struct i2c_msg *p2)
++{
++ int ret = ((p1->addr != p2->addr) || ((p1->flags & (I2C_M_TEN))
++ != ((p2->flags) & (I2C_M_TEN))));
++ return ret;
++}
++
++/**
++ * mrst_i2c_abort - To handle transfer abortions and print error messages.
++ * @adap: i2c_adapter struct pointer
++ *
++ * By reading register IC_TX_ABRT_SOURCE, various transfer errors can be
++ * distingushed. At present, no circumstances have been found out that
++ * multiple errors would be occured simutaneously, so we simply use the
++ * register value directly.
++ *
++ * At last the error bits are cleared. (Note clear ABRT_SBYTE_NORSTRT bit need
++ * a few extra steps)
++ */
++static void mrst_i2c_abort(struct i2c_adapter *adap)
++{
++ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
++ i2c_get_adapdata(adap);
++
++ /* Read about source register */
++ int abort = mrst_i2c_read(i2c->base + IC_TX_ABRT_SOURCE);
++
++ dev_dbg(&adap->dev, "Abort: ");
++
++ /* Single transfer error check:
++ * According to databook, TX/RX FIFOs would be flushed when
++ * the abort interrupt occurred.
++ */
++ switch (abort) {
++ case (ABRT_MASTER_DIS):
++ dev_err(&adap->dev,
++ "initiate Master operation with Master mode"
++ "disabled.\n");
++
++ break;
++ case (ABRT_10B_RD_NORSTRT):
++ dev_err(&adap->dev,
++ "RESTART disabled and master sends READ cmd in 10-BIT"
++ "addressing.\n");
++ break;
++ case (ABRT_SBYTE_NORSTRT):
++ dev_err(&adap->dev,
++ "RESTART disabled and user is trying to send START"
++ "byte.\n");
++ /* Page 141 data book */
++ mrst_i2c_write(i2c->base + IC_TX_ABRT_SOURCE,
++ !(ABRT_SBYTE_NORSTRT));
++ mrst_i2c_write(i2c->base + IC_CON, RESTART);
++ mrst_i2c_write(i2c->base + IC_TAR, !(IC_TAR_SPECIAL));
++ break;
++ case (ABRT_SBYTE_ACKDET):
++ dev_err(&adap->dev,
++ "START byte was acknowledged.\n");
++ break;
++ case (ABRT_TXDATA_NOACK):
++ dev_err(&adap->dev,
++ "No acknowledge received from slave.\n");
++ break;
++ case (ABRT_10ADDR2_NOACK):
++ dev_err(&adap->dev,
++ "The 2nd address byte of 10-bit address not"
++ "acknowledged.\n");
++ break;
++ case (ABRT_10ADDR1_NOACK):
++ dev_dbg(&adap->dev,
++ "The 1st address byte of 10-bit address not"
++ "acknowledged.\n");
++ break;
++ case (ABRT_7B_ADDR_NOACK):
++ dev_err(&adap->dev,
++ "7-bit address not acknowledged.\n");
++ break;
++ default:
++ ;;
++ }
++
++ /* Clear TX_ABRT bit */
++ mrst_i2c_read(i2c->base + IC_CLR_TX_ABRT);
++}
++
++/**
++ * xfer_read - Internal function to implement master read transfer.
++ * @adap: i2c_adapter struct pointer
++ * @buf: buffer in i2c_msg
++ * @length: number of bytes to be read
++ *
++ * Return Values:
++ * 0 if the read transfer succeeds
++ * -ETIMEDOUT if cannot read the "raw" interrupt register
++ * -EINVAL if transfer abort occurred
++ *
++ * For every byte, a "READ" command will be loaded into IC_DATA_CMD prior to
++ * data transfer. The actual "read" operation will be performed if the RX_FULL
++ * interrupt occurs.
++ *
++ * Note there may be two interrupt signals captured, one should read
++ * IC_RAW_INTR_STAT to separate between errors and actual data.
++ */
++static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
++{
++ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
++ i2c_get_adapdata(adap);
++ uint32_t reg_val;
++ int i = length;
++ unsigned count = 0;
++ uint32_t bit_get = 1 << 3; /* receive fifo not empty */
++
++ while (i--)
++ mrst_i2c_write(i2c->base + IC_DATA_CMD, (uint16_t)0x100);
++
++ i = length;
++ while (i--) {
++ count = 0;
++ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
++ while ((reg_val & bit_get) == 0) {
++ reg_val = mrst_i2c_read(i2c->base + IC_RAW_INTR_STAT);
++ if ((reg_val & 0x40) == 0x40)
++ goto read_abrt;
++ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
++ if (count++ > MAX_T_POLL_COUNT)
++ goto read_loop;
++ }
++
++ reg_val = mrst_i2c_read(i2c->base + IC_DATA_CMD);
++ *buf++ = reg_val;
++ }
++
++ return 0;
++
++read_loop:
++ dev_err(&adap->dev, "Time out in read\n");
++ return -ETIMEDOUT;
++read_abrt:
++ dev_err(&adap->dev, "Abort from read\n");
++ mrst_i2c_abort(adap);
++ return -EINVAL;
++}
++
++/**
++ * xfer_write - Internal function to implement master write transfer.
++ * @adap: i2c_adapter struct pointer
++ * @buf: buffer in i2c_msg
++ * @length: number of bytes to be read
++ *
++ * Return Values:
++ * 0 if the write transfer succeeds
++ * -ETIMEDOUT if cannot read the "raw" interrupt register
++ * -EINVAL if transfer abort occurred
++ *
++ * For every byte, a "WRITE" command will be loaded into IC_DATA_CMD prior to
++ * data transfer. The actual "write" operation will be performed if the
++ * RX_FULL interrupt siganal is occured.
++ *
++ * Note there may be two interrupt signals captured, one should read
++ * IC_RAW_INTR_STAT to separate between errors and actual data.
++ */
++static int xfer_write(struct i2c_adapter *adap,
++ unsigned char *buf, int length)
++{
++ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
++ i2c_get_adapdata(adap);
++
++ int i;
++ uint32_t reg_val;
++ unsigned count = 0;
++ uint32_t bit_get = 1 << 2; /* transmit fifo completely empty */
++
++ for (i = 0; i < length; i++)
++ mrst_i2c_write(i2c->base + IC_DATA_CMD,
++ (uint16_t)(*(buf + i)));
++
++ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
++ while ((reg_val & bit_get) == 0) {
++ if (count++ > MAX_T_POLL_COUNT)
++ goto write_loop;
++ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
++ }
++
++ udelay(100);
++ reg_val = mrst_i2c_read(i2c->base + IC_RAW_INTR_STAT);
++ if ((reg_val & 0x40) == 0x40)
++ goto write_abrt;
++
++ return 0;
++
++write_loop:
++ dev_err(&adap->dev, "Time out in write\n");
++ return -ETIMEDOUT;
++write_abrt:
++ dev_err(&adap->dev, "Abort from write\n");
++ mrst_i2c_abort(adap);
++ return -EINVAL;
++}
++
++static int mrst_i2c_setup(struct i2c_adapter *adap, struct i2c_msg *pmsg)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++ int err;
++ uint32_t reg_val;
++ uint32_t bit_mask;
++
++ /* Disable device first */
++ err = mrst_i2c_disable(adap);
++ if (err) {
++ dev_err(&adap->dev,
++ "Cannot disable i2c controller, timeout!\n");
++ return -ETIMEDOUT;
++ }
++
++
++ reg_val = mrst_i2c_read(i2c->base + IC_ENABLE);
++ if (reg_val & 0x1) {
++ dev_dbg(&adap->dev, "i2c busy, can't setup\n");
++ return -EINVAL;
++ }
++
++ /* set the speed mode to standard */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ if ((reg_val & (1<<1 | 1<<2)) != 1<<1) {
++ dev_dbg(&adap->dev, "set standard mode\n");
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~0x6)) | 1<<1);
++ }
++
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ /* use 7-bit addressing */
++ if ((reg_val & (1<<4)) != 0x0) {
++ dev_dbg(&adap->dev, "set i2c 7 bit address mode\n");
++ mrst_i2c_write(i2c->base + IC_CON, reg_val & (~(1<<4)));
++ }
++
++ /*enable restart conditions */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ if ((reg_val & (1<<5)) != 1<<5) {
++ dev_dbg(&adap->dev, "enable restart conditions\n");
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~(1 << 5)))
++ | 1 << 5);
++ }
++
++ /* enable master FSM */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ dev_dbg(&adap->dev, "ic_con reg_val is 0x%x\n", reg_val);
++ if ((reg_val & (1<<6)) != 1<<6) {
++ dev_dbg(&adap->dev, "enable master FSM\n");
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~(1 << 6)))
++ | 1<<6);
++ dev_dbg(&adap->dev, "ic_con reg_val is 0x%x\n", reg_val);
++ }
++
++ /* use target address when initiating transfer */
++ reg_val = mrst_i2c_read(i2c->base + IC_TAR);
++ bit_mask = 1 << 11 | 1 << 10;
++
++ if ((reg_val & bit_mask) != 0x0) {
++ dev_dbg(&adap->dev, "WR: use target address when intiating"
++ "transfer, i2c_tx_target\n");
++ mrst_i2c_write(i2c->base + IC_TAR, reg_val & ~bit_mask);
++ }
++
++ /* set target address to the I2C slave address */
++ dev_dbg(&adap->dev, "set target address to the I2C slave address,"
++ "addr is %x\n", pmsg->addr);
++ mrst_i2c_write(i2c->base + IC_TAR, pmsg->addr
++ | (pmsg->flags & I2C_M_TEN ? IC_TAR_10BIT_ADDR : 0));
++
++ /* Enable I2C controller */
++ mrst_i2c_write(i2c->base + IC_ENABLE, ENABLE);
++
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++
++ return 0;
++}
++
++/**
++ * mrst_i2c_xfer - Main master transfer routine.
++ * @adap: i2c_adapter struct pointer
++ * @pmsg: i2c_msg struct pointer
++ * @num: number of i2c_msg
++ *
++ * Return Values:
++ * + number of messages transfered
++ * -ETIMEDOUT If cannot disable I2C controller or read IC_STATUS
++ * -EINVAL If the address in i2c_msg is invalid
++ *
++ * This function will be registered in i2c-core and exposed to external
++ * I2C clients.
++ * 1. Disable I2C controller
++ * 2. Unmask three interrupts: RX_FULL, TX_EMPTY, TX_ABRT
++ * 3. Check if address in i2c_msg is valid
++ * 4. Enable I2C controller
++ * 5. Perform real transfer (call xfer_read or xfer_write)
++ * 6. Wait until the current transfer is finished(check bus state)
++ * 7. Mask and clear all interrupts
++ */
++static int mrst_i2c_xfer(struct i2c_adapter *adap,
++ struct i2c_msg *pmsg,
++ int num)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++ int i, err;
++
++ dev_dbg(&adap->dev, "mrst_i2c_xfer, process %d msg(s)\n", num);
++ dev_dbg(&adap->dev, KERN_INFO "slave address is %x\n", pmsg->addr);
++
++ /* if number of messages equal 0*/
++ if (num == 0)
++ return 0;
++
++ /* Checked the sanity of passed messages. */
++ if (unlikely(mrst_i2c_invalid_address(&pmsg[0]))) {
++ dev_err(&adap->dev, "Invalid address 0x%03x (%d-bit)\n",
++ pmsg[0].addr, pmsg[0].flags & I2C_M_TEN ? 10 : 7);
++ return -EINVAL;
++ }
++ for (i = 0; i < num; i++) {
++ /* Message address equal? */
++ if (unlikely(mrst_i2c_address_neq(&pmsg[0], &pmsg[i]))) {
++ dev_err(&adap->dev, "Invalid address in msg[%d]\n", i);
++ return -EINVAL;
++ }
++ }
++
++ if (mrst_i2c_setup(adap, pmsg))
++ return -EINVAL;
++
++ for (i = 0; i < num; i++) {
++ dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
++ pmsg->flags & I2C_M_RD ? "read" : "writ",
++ pmsg->len, pmsg->len > 1 ? "s" : "",
++ pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
++
++
++ /* Read or Write */
++ if (pmsg->len && pmsg->buf) {
++ if (pmsg->flags & I2C_M_RD) {
++ dev_dbg(&adap->dev, "I2C_M_RD\n");
++ err = xfer_read(adap, pmsg->buf, pmsg->len);
++ } else {
++ dev_dbg(&adap->dev, "I2C_M_WR\n");
++ err = xfer_write(adap, pmsg->buf, pmsg->len);
++ }
++ if (err < 0)
++ goto err_1;
++ }
++ dev_dbg(&adap->dev, "msg[%d] transfer complete\n", i);
++ pmsg++; /* next message */
++ }
++ goto exit;
++
++err_1:
++ i = err;
++exit:
++ /* Mask interrupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ return i;
++}
++
++static int mrst_gpio_init(int sda, int scl)
++{
++ if (gpio_request(sda, "I2C_SDA"))
++ goto err_sda;
++
++ if (gpio_request(scl, "I2C_SCL"))
++ goto err_scl;
++
++ return 0;
++err_scl:
++ gpio_free(sda);
++err_sda:
++ return -1;
++}
++
++static struct pci_device_id mrst_i2c_ids[] = {
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0802)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0803)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0804)},
++ {0,}
++};
++MODULE_DEVICE_TABLE(pci, mrst_i2c_ids);
++
++static struct i2c_algorithm mrst_i2c_algorithm = {
++ .master_xfer = mrst_i2c_xfer,
++ .functionality = mrst_i2c_func,
++};
++
++static struct pci_driver mrst_i2c_driver = {
++ .name = "mrst_i2c",
++ .id_table = mrst_i2c_ids,
++ .probe = mrst_i2c_probe,
++ .remove = __devexit_p(mrst_i2c_remove),
++ .suspend = NULL,
++ .resume = NULL,
++};
++
++/**
++ * mrst_i2c_probe - I2C controller initialization routine
++ * @dev: pci device
++ * @id: device id
++ *
++ * Return Values:
++ * 0 success
++ * -ENODEV If cannot allocate pci resource
++ * -ENOMEM If the register base remapping failed, or
++ * if kzalloc failed
++ *
++ * Initialization steps:
++ * 1. Request for PCI resource
++ * 2. Remap the start address of PCI resource to register base
++ * 3. Request for device memory region
++ * 4. Fill in the struct members of mrst_i2c_private
++ * 5. Call mrst_i2c_hwinit() for hardware initialization
++ * 6. Register I2C adapter in i2c-core
++ */
++static int __devinit mrst_i2c_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ struct mrst_i2c_private *mrst;
++ struct i2c_adapter *adap;
++ unsigned int start, len;
++ int err, busnum = 0;
++ void __iomem *base = NULL;
++ int gpio_sda = 0, gpio_scl = 0;
++
++ err = pci_enable_device(dev);
++ if (err) {
++ dev_err(&dev->dev, "Failed to enable I2C PCI device (%d)\n",
++ err);
++ goto exit;
++ }
++
++ /* Determine the address of the I2C area */
++ start = pci_resource_start(dev, DEF_BAR);
++ len = pci_resource_len(dev, DEF_BAR);
++ if (!start || len <= 0) {
++ dev_err(&dev->dev, "Base address initialization failed\n");
++ err = -ENODEV;
++ goto exit;
++ }
++ dev_dbg(&dev->dev, "mrst i2c resource start %x, len=%d\n",
++ start, len);
++ err = pci_request_region(dev, DEF_BAR, mrst_i2c_driver.name);
++ if (err) {
++ dev_err(&dev->dev, "Failed to request I2C region "
++ "0x%1x-0x%Lx\n", start,
++ (unsigned long long)pci_resource_end(dev, DEF_BAR));
++ goto exit;
++ }
++
++ base = ioremap_nocache(start, len);
++ if (!base) {
++ dev_err(&dev->dev, "I/O memory remapping failed\n");
++ err = -ENOMEM;
++ goto fail0;
++ }
++
++ /* Allocate the per-device data structure, mrst_i2c_private */
++ mrst = kzalloc(sizeof(struct mrst_i2c_private), GFP_KERNEL);
++ if (mrst == NULL) {
++ dev_err(&dev->dev, "Can't allocate interface!\n");
++ err = -ENOMEM;
++ goto fail1;
++ }
++
++ adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
++ if (adap == NULL) {
++ dev_err(&dev->dev, "Can't allocate interface!\n");
++ err = -ENOMEM;
++ goto fail2;
++ }
++
++ /* Initialize struct members */
++ snprintf(adap->name, sizeof(adap->name), "mrst_i2c");
++ adap->owner = THIS_MODULE;
++ adap->algo = &mrst_i2c_algorithm;
++ adap->class = I2C_CLASS_HWMON;
++ adap->dev.parent = &dev->dev;
++ mrst->adap = adap;
++ mrst->base = base;
++ mrst->speed = speed_mode;
++
++ pci_set_drvdata(dev, mrst);
++ i2c_set_adapdata(adap, mrst);
++
++ /* Initialize i2c controller */
++ err = mrst_i2c_hwinit(dev);
++ if (err < 0) {
++ dev_err(&dev->dev, "I2C interface initialization failed\n");
++ goto fail3;
++ }
++
++ switch (id->device) {
++ case 0x0802:
++ dev_dbg(&adap->dev, KERN_INFO "I2C0\n");
++ gpio_sda = GPIO_I2C_0_SDA;
++ gpio_scl = GPIO_I2C_0_SCL;
++ adap->nr = busnum = 0;
++ break;
++ case 0x0803:
++ dev_dbg(&adap->dev, KERN_INFO "I2C1\n");
++ gpio_sda = GPIO_I2C_1_SDA;
++ gpio_scl = GPIO_I2C_1_SCL;
++ adap->nr = busnum = 1;
++ break;
++ case 0x0804:
++ dev_dbg(&adap->dev, KERN_INFO "I2C2\n");
++ gpio_sda = GPIO_I2C_2_SDA;
++ gpio_scl = GPIO_I2C_2_SCL;
++ adap->nr = busnum = 2;
++ break;
++ default:
++ ;
++ }
++
++ /* Config GPIO pin for I2C */
++ err = mrst_gpio_init(gpio_sda, gpio_scl);
++ if (err) {
++ dev_err(&dev->dev, "GPIO %s registration failed\n",
++ adap->name);
++ goto fail3;
++ }
++
++ /* Register i2c board info */
++ /*mrst_i2c_register_board_info(dev, busnum);*/
++
++ /* Adapter registration */
++ err = i2c_add_numbered_adapter(adap);
++ if (err) {
++ dev_err(&dev->dev, "Adapter %s registration failed\n",
++ adap->name);
++ goto fail3;
++ }
++
++ dev_dbg(&dev->dev, "MRST I2C bus %d driver bind success.\n", busnum);
++ return 0;
++
++fail3:
++ i2c_set_adapdata(adap, NULL);
++ pci_set_drvdata(dev, NULL);
++ kfree(adap);
++fail2:
++ kfree(mrst);
++fail1:
++ iounmap(base);
++fail0:
++ pci_release_region(dev, DEF_BAR);
++exit:
++ return err;
++}
++
++static void __devexit mrst_i2c_remove(struct pci_dev *dev)
++{
++ struct mrst_i2c_private *mrst = (struct mrst_i2c_private *)
++ pci_get_drvdata(dev);
++ if (i2c_del_adapter(mrst->adap))
++ dev_err(&dev->dev, "Failed to delete i2c adapter");
++
++ kfree(mrst->mrst_i2c_info);
++ kfree(mrst->data);
++
++ switch (dev->device) {
++ case 0x0802:
++ gpio_free(GPIO_I2C_0_SDA);
++ gpio_free(GPIO_I2C_0_SCL);
++ break;
++ case 0x0803:
++ gpio_free(GPIO_I2C_1_SDA);
++ gpio_free(GPIO_I2C_1_SCL);
++ break;
++ case 0x0804:
++ gpio_free(GPIO_I2C_2_SDA);
++ gpio_free(GPIO_I2C_2_SCL);
++ break;
++ default:
++ break;
++ }
++
++ pci_set_drvdata(dev, NULL);
++ iounmap(mrst->base);
++ kfree(mrst);
++ pci_release_region(dev, DEF_BAR);
++}
++
++static int __init mrst_i2c_init(void)
++{
++ printk(KERN_NOTICE "Moorestown I2C driver %s\n", VERSION);
++ return pci_register_driver(&mrst_i2c_driver);
++}
++
++static void __exit mrst_i2c_exit(void)
++{
++ pci_unregister_driver(&mrst_i2c_driver);
++}
++
++module_init(mrst_i2c_init);
++module_exit(mrst_i2c_exit);
++
++MODULE_AUTHOR("Ba Zheng <zheng.ba@intel.com>");
++MODULE_DESCRIPTION("I2C driver for Moorestown Platform");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(VERSION);
+Index: linux-2.6.33/drivers/i2c/busses/i2c-mrst.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/i2c/busses/i2c-mrst.h
+@@ -0,0 +1,282 @@
++#ifndef __I2C_MRST_H
++#define __I2C_MRST_H
++
++#include <linux/i2c.h>
++
++/* Update for 2.6.27 kernel by Wen */
++
++/* PCI config table macros */
++/* Offsets */
++#define I2C_INFO_TABLE_LENGTH 4
++#define I2C_INFO_DEV_BLOCK 10
++#define I2C_DEV_ADDR 2
++#define I2C_DEV_IRQ 4
++#define I2C_DEV_NAME 6
++#define I2C_DEV_INFO 22
++/* Length */
++#define HEAD_LENGTH 10
++#define BLOCK_LENGTH 32
++#define ADDR_LENGTH 2
++#define IRQ_LENGTH 2
++#define NAME_LENGTH 16
++#define INFO_LENGTH 10
++
++struct mrst_i2c_private {
++ struct i2c_adapter *adap;
++ /* Register base address */
++ void __iomem *base;
++ /* Speed mode */
++ int speed;
++ struct i2c_board_info *mrst_i2c_info;
++ char (*data)[INFO_LENGTH];
++};
++
++/* Speed mode macros */
++#define STANDARD 100
++#define FAST 25
++#define HIGH 3
++
++/* Control register */
++#define IC_CON 0x00
++#define SLV_DIS (1 << 6) /* Disable slave mode */
++#define RESTART (1 << 5) /* Send a Restart condition */
++#define ADDR_10BIT (1 << 4) /* 10-bit addressing */
++#define STANDARD_MODE (1 << 1) /* standard mode */
++#define FAST_MODE (2 << 1) /* fast mode */
++#define HIGH_MODE (3 << 1) /* high speed mode */
++#define MASTER_EN (1 << 0) /* Master mode */
++
++/* Target address register */
++#define IC_TAR 0x04
++#define IC_TAR_10BIT_ADDR (1 << 12) /* 10-bit addressing */
++#define IC_TAR_SPECIAL (1 << 11) /* Perform special I2C cmd */
++#define IC_TAR_GC_OR_START (1 << 10) /* 0: General Call Address */
++ /* 1: START BYTE */
++
++/* Slave Address Register */
++#define IC_SAR 0x08 /* Not used in Master mode */
++
++/* High Speed Master Mode Code Address Register */
++#define IC_HS_MADDR 0x0c
++
++/* Rx/Tx Data Buffer and Command Register */
++#define IC_DATA_CMD 0x10
++#define IC_RD (1 << 8) /* 1: Read 0: Write */
++
++/* Standard Speed Clock SCL High Count Register */
++#define IC_SS_SCL_HCNT 0x14
++
++/* Standard Speed Clock SCL Low Count Register */
++#define IC_SS_SCL_LCNT 0x18
++
++/* Fast Speed Clock SCL High Count Register */
++#define IC_FS_SCL_HCNT 0x1c
++
++/* Fast Speed Clock SCL Low Count Register */
++#define IC_FS_SCL_LCNT 0x20
++
++/* High Speed Clock SCL High Count Register */
++#define IC_HS_SCL_HCNT 0x24
++
++/* High Speed Clock SCL Low Count Register */
++#define IC_HS_SCL_LCNT 0x28
++
++/* Interrupt Status Register */
++#define IC_INTR_STAT 0x2c /* Read only */
++#define R_GEN_CALL (1 << 11)
++#define R_START_DET (1 << 10)
++#define R_STOP_DET (1 << 9)
++#define R_ACTIVITY (1 << 8)
++#define R_RX_DONE (1 << 7)
++#define R_TX_ABRT (1 << 6)
++#define R_RD_REQ (1 << 5)
++#define R_TX_EMPTY (1 << 4)
++#define R_TX_OVER (1 << 3)
++#define R_RX_FULL (1 << 2)
++#define R_RX_OVER (1 << 1)
++#define R_RX_UNDER (1 << 0)
++
++/* Interrupt Mask Register */
++#define IC_INTR_MASK 0x30 /* Read and Write */
++#define M_GEN_CALL (1 << 11)
++#define M_START_DET (1 << 10)
++#define M_STOP_DET (1 << 9)
++#define M_ACTIVITY (1 << 8)
++#define M_RX_DONE (1 << 7)
++#define M_TX_ABRT (1 << 6)
++#define M_RD_REQ (1 << 5)
++#define M_TX_EMPTY (1 << 4)
++#define M_TX_OVER (1 << 3)
++#define M_RX_FULL (1 << 2)
++#define M_RX_OVER (1 << 1)
++#define M_RX_UNDER (1 << 0)
++
++/* Raw Interrupt Status Register */
++#define IC_RAW_INTR_STAT 0x34 /* Read Only */
++#define GEN_CALL (1 << 11) /* General call */
++#define START_DET (1 << 10) /* (RE)START occurred */
++#define STOP_DET (1 << 9) /* STOP occurred */
++#define ACTIVITY (1 << 8) /* Bus busy */
++#define RX_DONE (1 << 7) /* Not used in Master mode */
++#define TX_ABRT (1 << 6) /* Transmit Abort */
++#define RD_REQ (1 << 5) /* Not used in Master mode */
++#define TX_EMPTY (1 << 4) /* TX FIFO <= threshold */
++#define TX_OVER (1 << 3) /* TX FIFO overflow */
++#define RX_FULL (1 << 2) /* RX FIFO >= threshold */
++#define RX_OVER (1 << 1) /* RX FIFO overflow */
++#define RX_UNDER (1 << 0) /* RX FIFO empty */
++
++/* Receive FIFO Threshold Register */
++#define IC_RX_TL 0x38
++
++/* Transmit FIFO Threshold Register */
++#define IC_TX_TL 0x3c
++
++/* Clear Combined and Individual Interrupt Register */
++#define IC_CLR_INTR 0x40
++#define CLR_INTR (1 << 0)
++
++/* Clear RX_UNDER Interrupt Register */
++#define IC_CLR_RX_UNDER 0x44
++#define CLR_RX_UNDER (1 << 0)
++
++/* Clear RX_OVER Interrupt Register */
++#define IC_CLR_RX_OVER 0x48
++#define CLR_RX_OVER (1 << 0)
++
++/* Clear TX_OVER Interrupt Register */
++#define IC_CLR_TX_OVER 0x4c
++#define CLR_TX_OVER (1 << 0)
++
++#define IC_CLR_RD_REQ 0x50
++
++/* Clear TX_ABRT Interrupt Register */
++#define IC_CLR_TX_ABRT 0x54
++#define CLR_TX_ABRT (1 << 0)
++
++#define IC_CLR_RX_DONE 0x58
++
++
++/* Clear ACTIVITY Interrupt Register */
++#define IC_CLR_ACTIVITY 0x5c
++#define CLR_ACTIVITY (1 << 0)
++
++/* Clear STOP_DET Interrupt Register */
++#define IC_CLR_STOP_DET 0x60
++#define CLR_STOP_DET (1 << 0)
++
++/* Clear START_DET Interrupt Register */
++#define IC_CLR_START_DET 0x64
++#define CLR_START_DET (1 << 0)
++
++/* Clear GEN_CALL Interrupt Register */
++#define IC_CLR_GEN_CALL 0x68
++#define CLR_GEN_CALL (1 << 0)
++
++/* Enable Register */
++#define IC_ENABLE 0x6c
++#define ENABLE (1 << 0)
++
++/* Status Register */
++#define IC_STATUS 0x70 /* Read Only */
++#define STAT_SLV_ACTIVITY (1 << 6) /* Slave not in idle */
++#define STAT_MST_ACTIVITY (1 << 5) /* Master not in idle */
++#define STAT_RFF (1 << 4) /* RX FIFO Full */
++#define STAT_RFNE (1 << 3) /* RX FIFO Not Empty */
++#define STAT_TFE (1 << 2) /* TX FIFO Empty */
++#define STAT_TFNF (1 << 1) /* TX FIFO Not Full */
++#define STAT_ACTIVITY (1 << 0) /* Activity Status */
++
++/* Transmit FIFO Level Register */
++#define IC_TXFLR 0x74 /* Read Only */
++#define TXFLR (1 << 0) /* TX FIFO level */
++
++/* Receive FIFO Level Register */
++#define IC_RXFLR 0x78 /* Read Only */
++#define RXFLR (1 << 0) /* RX FIFO level */
++
++/* Transmit Abort Source Register */
++#define IC_TX_ABRT_SOURCE 0x80
++#define ABRT_SLVRD_INTX (1 << 15)
++#define ABRT_SLV_ARBLOST (1 << 14)
++#define ABRT_SLVFLUSH_TXFIFO (1 << 13)
++#define ARB_LOST (1 << 12)
++#define ABRT_MASTER_DIS (1 << 11)
++#define ABRT_10B_RD_NORSTRT (1 << 10)
++#define ABRT_SBYTE_NORSTRT (1 << 9)
++#define ABRT_HS_NORSTRT (1 << 8)
++#define ABRT_SBYTE_ACKDET (1 << 7)
++#define ABRT_HS_ACKDET (1 << 6)
++#define ABRT_GCALL_READ (1 << 5)
++#define ABRT_GCALL_NOACK (1 << 4)
++#define ABRT_TXDATA_NOACK (1 << 3)
++#define ABRT_10ADDR2_NOACK (1 << 2)
++#define ABRT_10ADDR1_NOACK (1 << 1)
++#define ABRT_7B_ADDR_NOACK (1 << 0)
++
++/* Enable Status Register */
++#define IC_ENABLE_STATUS 0x9c
++#define IC_EN (1 << 0) /* I2C in an enabled state */
++
++/* Component Parameter Register 1 */
++#define IC_COMP_PARAM_1 0xf4
++#define APB_DATA_WIDTH (0x3 << 0)
++
++/* GPIO_PINS */
++#define GPIO_I2C_0_SDA 56
++#define GPIO_I2C_0_SCL 57
++
++#define GPIO_I2C_1_SDA 54
++#define GPIO_I2C_1_SCL 55
++
++#define GPIO_I2C_2_SDA 52
++#define GPIO_I2C_2_SCL 53
++
++/* added by xiaolin --begin */
++#define SS_MIN_SCL_HIGH 4000
++#define SS_MIN_SCL_LOW 4700
++#define FS_MIN_SCL_HIGH 600
++#define FS_MIN_SCL_LOW 1300
++#define HS_MIN_SCL_HIGH_100PF 60
++#define HS_MIN_SCL_LOW_100PF 120
++
++enum mrst_i2c_irq {
++ i2c_irq_none = 0x000,
++ i2c_irq_rx_under = 0x001,
++ i2c_irq_rx_over = 0x002,
++ i2c_irq_rx_full = 0x004,
++ i2c_irq_tx_over = 0x008,
++ i2c_irq_tx_empty = 0x010,
++ i2c_irq_rd_req = 0x020,
++ i2c_irq_tx_abrt = 0x040,
++ i2c_irq_rx_done = 0x080,
++ i2c_irq_activity = 0x100,
++ i2c_irq_stop_det = 0x200,
++ i2c_irq_start_det = 0x400,
++ i2c_irq_gen_call = 0x800,
++ i2c_irq_all = 0xfff
++};
++
++/* added by xiaolin --end */
++
++/* Function declarations */
++
++static int mrst_i2c_disable(struct i2c_adapter *);
++static int __devinit mrst_i2c_hwinit(struct pci_dev *);
++static u32 mrst_i2c_func(struct i2c_adapter *);
++static inline int mrst_i2c_invalid_address(const struct i2c_msg *);
++static inline int mrst_i2c_address_neq(const struct i2c_msg *,
++ const struct i2c_msg *);
++static int mrst_i2c_xfer(struct i2c_adapter *,
++ struct i2c_msg *,
++ int);
++static int __devinit mrst_i2c_probe(struct pci_dev *,
++ const struct pci_device_id *);
++static void __devexit mrst_i2c_remove(struct pci_dev *);
++static int __init mrst_i2c_init(void);
++static void __exit mrst_i2c_exit(void);
++static int xfer_read(struct i2c_adapter *,
++ unsigned char *, int);
++static int xfer_write(struct i2c_adapter *,
++ unsigned char *, int);
++#endif /* __I2C_MRST_H */
+Index: linux-2.6.33/drivers/i2c/i2c-boardinfo.c
+===================================================================
+--- linux-2.6.33.orig/drivers/i2c/i2c-boardinfo.c
++++ linux-2.6.33/drivers/i2c/i2c-boardinfo.c
+@@ -58,11 +58,13 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bu
+ * The board info passed can safely be __initdata, but be careful of embedded
+ * pointers (for platform_data, functions, etc) since that won't be copied.
+ */
+-int __init
++int
+ i2c_register_board_info(int busnum,
+ struct i2c_board_info const *info, unsigned len)
+ {
+ int status;
++ int flag = 0;
++ struct i2c_devinfo *devinfo;
+
+ down_write(&__i2c_board_lock);
+
+@@ -71,21 +73,32 @@ i2c_register_board_info(int busnum,
+ __i2c_first_dynamic_bus_num = busnum + 1;
+
+ for (status = 0; len; len--, info++) {
+- struct i2c_devinfo *devinfo;
+-
+- devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
+- if (!devinfo) {
+- pr_debug("i2c-core: can't register boardinfo!\n");
+- status = -ENOMEM;
+- break;
++ list_for_each_entry(devinfo, &__i2c_board_list, list) {
++ if (devinfo->busnum == busnum
++ && devinfo->board_info.addr == info->addr) {
++ flag = 1;
++ break;
++ }
+ }
+-
+- devinfo->busnum = busnum;
+- devinfo->board_info = *info;
+- list_add_tail(&devinfo->list, &__i2c_board_list);
++ if (flag != 1) {
++ struct i2c_devinfo *dev;
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev) {
++ pr_debug("i2c-core: can't register"
++ "boardinfo!\n");
++ status = -ENOMEM;
++ break;
++ }
++
++ dev->busnum = busnum;
++ dev->board_info = *info;
++ list_add_tail(&dev->list, &__i2c_board_list);
++ }
++ flag = 0;
+ }
+
+ up_write(&__i2c_board_lock);
+
+ return status;
+ }
++EXPORT_SYMBOL_GPL(i2c_register_board_info);
+Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Kconfig
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/cpu/cpufreq/Kconfig
++++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Kconfig
+@@ -10,6 +10,22 @@ if CPU_FREQ
+
+ comment "CPUFreq processor drivers"
+
++config X86_SFI_CPUFREQ
++ tristate "SFI Processor P-States driver"
++ depends on SFI_PROCESSOR_PM
++ select CPU_FREQ_TABLE
++ help
++ This driver adds a CPUFreq driver which utilizes the SFI
++ Processor Performance States.
++ This driver also supports Intel Enhanced Speedstep.
++
++ To compile this driver as a module, choose M here: the
++ module will be called sfi-cpufreq.
++
++ For details, take a look at <file:Documentation/cpu-freq/>.
++
++ If in doubt, say N.
++
+ config X86_ACPI_CPUFREQ
+ tristate "ACPI Processor P-States driver"
+ select CPU_FREQ_TABLE
+Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Makefile
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/cpu/cpufreq/Makefile
++++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_X86_GX_SUSPMOD) += gx-susp
+ obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
+ obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
+ obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
++obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
+ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
+ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
+ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
+Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c
+@@ -0,0 +1,655 @@
++/*
++ * sfi_cpufreq.c - sfi Processor P-States Driver
++ *
++ *
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Vishwesh M Rudramuni
++ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
++ */
++
++/*
++ * This sfi Processor P-States Driver re-uses most part of the code available
++ * in acpi cpufreq driver.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/sched.h>
++#include <linux/cpufreq.h>
++#include <linux/compiler.h>
++#include <linux/dmi.h>
++
++#include <linux/sfi.h>
++#include <linux/sfi_processor.h>
++
++#include <linux/io.h>
++#include <asm/msr.h>
++#include <asm/processor.h>
++#include <asm/cpufeature.h>
++#include <linux/delay.h>
++#include <linux/uaccess.h>
++
++#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
++ "sfi-cpufreq", msg)
++
++MODULE_AUTHOR("Vishwesh Rudramuni");
++MODULE_DESCRIPTION("SFI Processor P-States Driver");
++MODULE_LICENSE("GPL");
++#define SYSTEM_INTEL_MSR_CAPABLE 0x1
++#define INTEL_MSR_RANGE (0xffff)
++#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
++
++struct sfi_cpufreq_data {
++ struct sfi_processor_performance *sfi_data;
++ struct cpufreq_frequency_table *freq_table;
++ unsigned int max_freq;
++ unsigned int resume;
++ unsigned int cpu_feature;
++};
++
++static DEFINE_PER_CPU(struct sfi_cpufreq_data *, drv_data);
++
++/* sfi_perf_data is a pointer to percpu data. */
++static struct sfi_processor_performance *sfi_perf_data;
++
++static struct cpufreq_driver sfi_cpufreq_driver;
++
++static unsigned int sfi_pstate_strict;
++
++static int check_est_cpu(unsigned int cpuid)
++{
++ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
++
++ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
++ !cpu_has(cpu, X86_FEATURE_EST))
++ return 0;
++
++ return 1;
++}
++
++static unsigned extract_freq(u32 msr, struct sfi_cpufreq_data *data)
++{
++ int i;
++ struct sfi_processor_performance *perf;
++
++ msr &= INTEL_MSR_RANGE;
++ perf = data->sfi_data;
++
++ for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
++ if (msr == perf->states[data->freq_table[i].index].status)
++ return data->freq_table[i].frequency;
++ }
++ return data->freq_table[0].frequency;
++}
++
++
++struct msr_addr {
++ u32 reg;
++};
++
++
++struct drv_cmd {
++ unsigned int type;
++ cpumask_t mask;
++ u32 msr_reg;
++ u32 val;
++};
++
++static void do_drv_read(struct drv_cmd *cmd)
++{
++ u32 h;
++ rdmsr(cmd->msr_reg, cmd->val, h);
++}
++
++static void do_drv_write(struct drv_cmd *cmd)
++{
++ u32 lo, hi;
++
++ rdmsr(cmd->msr_reg, lo, hi);
++ lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
++ wrmsr(cmd->msr_reg, lo, hi);
++}
++
++static void drv_read(struct drv_cmd *cmd)
++{
++ cpumask_t saved_mask = current->cpus_allowed;
++ cmd->val = 0;
++
++ set_cpus_allowed(current, cmd->mask);
++ do_drv_read(cmd);
++ set_cpus_allowed(current, saved_mask);
++}
++
++static void drv_write(struct drv_cmd *cmd)
++{
++ cpumask_t saved_mask = current->cpus_allowed;
++ unsigned int i;
++
++ for_each_cpu_mask(i, cmd->mask) {
++ set_cpus_allowed(current, cpumask_of_cpu(i));
++ do_drv_write(cmd);
++ }
++
++ set_cpus_allowed(current, saved_mask);
++ return;
++}
++
++static u32 get_cur_val(cpumask_t mask)
++{
++ struct drv_cmd cmd;
++
++ if (unlikely(cpus_empty(mask)))
++ return 0;
++
++ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
++ cmd.msr_reg = MSR_IA32_PERF_STATUS;
++ cmd.mask = mask;
++
++ drv_read(&cmd);
++
++ dprintk("get_cur_val = %u\n", cmd.val);
++
++ return cmd.val;
++}
++
++/*
++ * Return the measured active (C0) frequency on this CPU since last call
++ * to this function.
++ * Input: cpu number
++ * Return: Average CPU frequency in terms of max frequency (zero on error)
++ *
++ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
++ * over a period of time, while CPU is in C0 state.
++ * IA32_MPERF counts at the rate of max advertised frequency
++ * IA32_APERF counts at the rate of actual CPU frequency
++ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
++ * no meaning should be associated with absolute values of these MSRs.
++ */
++static unsigned int get_measured_perf(struct cpufreq_policy *policy,
++ unsigned int cpu)
++{
++ union {
++ struct {
++ u32 lo;
++ u32 hi;
++ } split;
++ u64 whole;
++ } aperf_cur, mperf_cur;
++
++ cpumask_t saved_mask;
++ unsigned int perf_percent;
++ unsigned int retval;
++
++ saved_mask = current->cpus_allowed;
++ set_cpus_allowed(current, cpumask_of_cpu(cpu));
++ if (get_cpu() != cpu) {
++ /* We were not able to run on requested processor */
++ put_cpu();
++ return 0;
++ }
++
++ rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
++ rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
++
++ wrmsr(MSR_IA32_APERF, 0, 0);
++ wrmsr(MSR_IA32_MPERF, 0, 0);
++
++#ifdef __i386__
++ /*
++ * We dont want to do 64 bit divide with 32 bit kernel
++ * Get an approximate value. Return failure in case we cannot get
++ * an approximate value.
++ */
++ if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
++ int shift_count;
++ u32 h;
++
++ h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
++ shift_count = fls(h);
++
++ aperf_cur.whole >>= shift_count;
++ mperf_cur.whole >>= shift_count;
++ }
++
++ if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
++ int shift_count = 7;
++ aperf_cur.split.lo >>= shift_count;
++ mperf_cur.split.lo >>= shift_count;
++ }
++
++ if (aperf_cur.split.lo && mperf_cur.split.lo)
++ perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
++ else
++ perf_percent = 0;
++
++#else
++ if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
++ int shift_count = 7;
++ aperf_cur.whole >>= shift_count;
++ mperf_cur.whole >>= shift_count;
++ }
++
++ if (aperf_cur.whole && mperf_cur.whole)
++ perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
++ else
++ perf_percent = 0;
++
++#endif
++
++ retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
++
++ put_cpu();
++ set_cpus_allowed(current, saved_mask);
++
++ dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
++ return retval;
++}
++
++
++static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, cpu);
++ unsigned int freq;
++
++ unsigned int cached_freq;
++
++ dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
++
++ if (unlikely(data == NULL ||
++ data->sfi_data == NULL || data->freq_table == NULL)) {
++ return 0;
++ }
++ cached_freq = data->freq_table[data->sfi_data->state].frequency;
++ freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
++
++ if (freq != cached_freq) {
++ data->resume = 1;
++ return cached_freq;
++ }
++
++ dprintk("cur freq = %u\n", freq);
++
++ return freq;
++}
++
++static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
++ struct sfi_cpufreq_data *data)
++{
++ unsigned int cur_freq;
++ unsigned int i;
++
++ for (i = 0; i < 100; i++) {
++ cur_freq = extract_freq(get_cur_val(mask), data);
++ if (cur_freq == freq)
++ return 1;
++ udelay(10);
++ }
++ return 0;
++}
++
++static int sfi_cpufreq_target(struct cpufreq_policy *policy,
++ unsigned int target_freq, unsigned int relation)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++ struct sfi_processor_performance *perf;
++ struct cpufreq_freqs freqs;
++ cpumask_t online_policy_cpus;
++ struct drv_cmd cmd;
++ unsigned int next_state = 0; /* Index into freq_table */
++ unsigned int next_perf_state = 0; /* Index into perf table */
++ unsigned int i;
++ int result = 0;
++
++ dprintk("sfi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
++
++ if (unlikely(data == NULL ||
++ data->sfi_data == NULL || data->freq_table == NULL)) {
++ return -ENODEV;
++ }
++
++ perf = data->sfi_data;
++ result = cpufreq_frequency_table_target(policy,
++ data->freq_table,
++ target_freq,
++ relation, &next_state);
++ if (unlikely(result))
++ return -ENODEV;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ /* cpufreq holds the hotplug lock, so we are safe from here on */
++ cpus_and(online_policy_cpus, cpu_online_map, *policy->cpus);
++#else
++ online_policy_cpus = policy->cpus;
++#endif
++
++ next_perf_state = data->freq_table[next_state].index;
++ if (perf->state == next_perf_state) {
++ if (unlikely(data->resume)) {
++ dprintk("Called after resume, resetting to P%d\n",
++ next_perf_state);
++ data->resume = 0;
++ } else {
++ dprintk("Already at target state (P%d)\n",
++ next_perf_state);
++ return 0;
++ }
++ }
++
++ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
++ cmd.msr_reg = MSR_IA32_PERF_CTL;
++ cmd.val = (u32) perf->states[next_perf_state].control;
++
++ cpus_clear(cmd.mask);
++
++ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
++ cmd.mask = online_policy_cpus;
++ else
++ cpu_set(policy->cpu, cmd.mask);
++
++ freqs.old = perf->states[perf->state].core_frequency * 1000;
++ freqs.new = data->freq_table[next_state].frequency;
++ for_each_cpu_mask(i, cmd.mask) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
++ }
++
++ drv_write(&cmd);
++
++ if (sfi_pstate_strict) {
++ if (!check_freqs(cmd.mask, freqs.new, data)) {
++ dprintk("sfi_cpufreq_target failed (%d)\n",
++ policy->cpu);
++ return -EAGAIN;
++ }
++ }
++
++ for_each_cpu_mask(i, cmd.mask) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
++ }
++ perf->state = next_perf_state;
++
++ return result;
++}
++
++static int sfi_cpufreq_verify(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_verify\n");
++
++ return cpufreq_frequency_table_verify(policy, data->freq_table);
++}
++
++/*
++ * sfi_cpufreq_early_init - initialize SFI P-States library
++ *
++ * Initialize the SFI P-States library (drivers/acpi/processor_perflib.c)
++ * in order to determine correct frequency and voltage pairings. We can
++ * do _PDC and _PSD and find out the processor dependency for the
++ * actual init that will happen later...
++ */
++static int __init sfi_cpufreq_early_init(void)
++{
++ int i;
++ struct sfi_processor *pr;
++
++ dprintk("sfi_cpufreq_early_init\n");
++
++ sfi_perf_data = alloc_percpu(struct sfi_processor_performance);
++ if (!sfi_perf_data) {
++ dprintk("Memory allocation error for sfi_perf_data.\n");
++ return -ENOMEM;
++ }
++
++ for_each_possible_cpu(i) {
++ pr = per_cpu(sfi_processors, i);
++ if (!pr || !pr->performance)
++ continue;
++
++ /* Assume no coordination on any error parsing domain info */
++ cpus_clear(*pr->performance->shared_cpu_map);
++ cpu_set(i, *pr->performance->shared_cpu_map);
++ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
++ pr->performance = NULL; /* Will be set for real in register */
++ }
++
++ /* _PSD & _PDC is not supported in SFI.Its just a placeholder.
++ * sfi_processor_preregister_performance(sfi_perf_data);
++ * TBD: We need to study what we need to do here
++ */
++ return 0;
++}
++
++
++static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
++{
++ unsigned int i;
++ unsigned int valid_states = 0;
++ unsigned int cpu = policy->cpu;
++ struct sfi_cpufreq_data *data;
++ unsigned int result = 0;
++ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
++ struct sfi_processor_performance *perf;
++
++ dprintk("sfi_cpufreq_cpu_init\n");
++
++ data = kzalloc(sizeof(struct sfi_cpufreq_data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->sfi_data = per_cpu_ptr(sfi_perf_data, cpu);
++ per_cpu(drv_data, cpu) = data;
++
++ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
++ sfi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
++
++
++ result = sfi_processor_register_performance(data->sfi_data, cpu);
++ if (result)
++ goto err_free;
++
++ perf = data->sfi_data;
++ policy->shared_type = perf->shared_type;
++
++ /*
++ * Will let policy->cpus know about dependency only when software
++ * coordination is required.
++ */
++ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
++ policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
++ memcpy(policy->cpus, perf->shared_cpu_map
++ , sizeof(cpumask_var_t));
++ }
++
++ /* capability check */
++ if (perf->state_count <= 1) {
++ dprintk("No P-States\n");
++ result = -ENODEV;
++ goto err_unreg;
++ }
++
++ dprintk("HARDWARE addr space\n");
++ if (!check_est_cpu(cpu)) {
++ result = -ENODEV;
++ goto err_unreg;
++ }
++
++ data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
++ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
++ (perf->state_count+1), GFP_KERNEL);
++ if (!data->freq_table) {
++ result = -ENOMEM;
++ goto err_unreg;
++ }
++
++ /* detect transition latency */
++ policy->cpuinfo.transition_latency = 0;
++ for (i = 0; i < perf->state_count; i++) {
++ if ((perf->states[i].transition_latency * 1000) >
++ policy->cpuinfo.transition_latency)
++ policy->cpuinfo.transition_latency =
++ perf->states[i].transition_latency * 1000;
++ }
++
++ data->max_freq = perf->states[0].core_frequency * 1000;
++ /* table init */
++ for (i = 0; i < perf->state_count; i++) {
++ if (i > 0 && perf->states[i].core_frequency >=
++ data->freq_table[valid_states-1].frequency / 1000)
++ continue;
++
++ data->freq_table[valid_states].index = i;
++ data->freq_table[valid_states].frequency =
++ perf->states[i].core_frequency * 1000;
++ valid_states++;
++ }
++ data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
++ perf->state = 0;
++
++ result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
++ if (result)
++ goto err_freqfree;
++
++ sfi_cpufreq_driver.get = get_cur_freq_on_cpu;
++ policy->cur = get_cur_freq_on_cpu(cpu);
++
++ /* notify BIOS that we exist
++ * currently not being done.
++ */
++
++ /* Check for APERF/MPERF support in hardware */
++ if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
++ unsigned int ecx;
++ ecx = cpuid_ecx(6);
++ if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
++ sfi_cpufreq_driver.getavg = get_measured_perf;
++ }
++
++ dprintk("CPU%u - SFI performance management activated.\n", cpu);
++ for (i = 0; i < perf->state_count; i++)
++ dprintk(" %cP%d: %d MHz, %d uS\n",
++ (i == perf->state ? '*' : ' '), i,
++ (u32) perf->states[i].core_frequency,
++ (u32) perf->states[i].transition_latency);
++
++ cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
++
++ /*
++ * the first call to ->target() should result in us actually
++ * writing something to the appropriate registers.
++ */
++ data->resume = 1;
++
++ return result;
++
++err_freqfree:
++ kfree(data->freq_table);
++err_unreg:
++ sfi_processor_unregister_performance(perf, cpu);
++err_free:
++ kfree(data);
++ per_cpu(drv_data, cpu) = NULL;
++
++ return result;
++}
++
++static int sfi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_cpu_exit\n");
++
++ if (data) {
++ cpufreq_frequency_table_put_attr(policy->cpu);
++ per_cpu(drv_data, policy->cpu) = NULL;
++ /* acpi_processor_unregister_performance(data->acpi_data,
++ * policy->cpu);
++ * TBD: Need to study how do we do this
++ */
++ sfi_processor_unregister_performance(data->sfi_data,
++ policy->cpu);
++ kfree(data);
++ }
++
++ return 0;
++}
++
++static int sfi_cpufreq_resume(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_resume\n");
++
++ data->resume = 1;
++
++ return 0;
++}
++
++static struct freq_attr *sfi_cpufreq_attr[] = {
++ &cpufreq_freq_attr_scaling_available_freqs,
++ NULL,
++};
++
++static struct cpufreq_driver sfi_cpufreq_driver = {
++ .verify = sfi_cpufreq_verify,
++ .target = sfi_cpufreq_target,
++ .init = sfi_cpufreq_cpu_init,
++ .exit = sfi_cpufreq_cpu_exit,
++ .resume = sfi_cpufreq_resume,
++ .name = "sfi-cpufreq",
++ .owner = THIS_MODULE,
++ .attr = sfi_cpufreq_attr,
++};
++
++static int __init sfi_cpufreq_init(void)
++{
++ int ret;
++
++ dprintk("sfi_cpufreq_init\n");
++
++ ret = sfi_cpufreq_early_init();
++ if (ret)
++ return ret;
++
++ return cpufreq_register_driver(&sfi_cpufreq_driver);
++}
++
++static void __exit sfi_cpufreq_exit(void)
++{
++ dprintk("sfi_cpufreq_exit\n");
++
++ cpufreq_unregister_driver(&sfi_cpufreq_driver);
++
++ free_percpu(sfi_perf_data);
++
++ return;
++}
++
++module_param(sfi_pstate_strict, uint, 0644);
++MODULE_PARM_DESC(sfi_pstate_strict,
++ "value 0 or non-zero. non-zero -> strict sfi checks are "
++ "performed during frequency changes.");
++
++late_initcall(sfi_cpufreq_init);
++module_exit(sfi_cpufreq_exit);
++
++MODULE_ALIAS("sfi");
+Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_core.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_core.c
+@@ -0,0 +1,134 @@
++/*
++ * sfi_processor_core.c
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Sujith Thomas
++ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/sfi.h>
++#include <linux/cpu.h>
++#include <linux/sfi_processor.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Sujith Thomas");
++MODULE_DESCRIPTION("Processor enumeration based on SFI table.");
++
++DEFINE_PER_CPU(struct sfi_processor *, sfi_processors);
++
++int sfi_cstate_num;
++struct sfi_cstate_table_entry sfi_cstate_array[SFI_C_STATES_MAX];
++
++static int __init sfi_parse_idle(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_cstate_table_entry *pentry;
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sb) {
++ printk(KERN_WARNING "SFI: Unable to map IDLE\n");
++ return -ENODEV;
++ }
++
++ if (!sfi_cstate_num) {
++ sfi_cstate_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cstate_table_entry);
++ pentry = (struct sfi_cstate_table_entry *)sb->pentry;
++ totallen = sfi_cstate_num * sizeof(*pentry);
++ memcpy(sfi_cstate_array, pentry, totallen);
++ }
++
++ printk(KERN_INFO "SFI: IDLE C-state info (num = %d):\n",
++ sfi_cstate_num);
++ pentry = sfi_cstate_array;
++ for (totallen = 0; totallen < sfi_cstate_num; totallen++, pentry++) {
++ printk(KERN_INFO "Cstate[%d]: hint = 0x%08x, latency = %dms\n",
++ totallen, pentry->hint, pentry->latency);
++ }
++
++ return 0;
++}
++
++static int __init sfi_init_cpus(void/*struct sfi_table_header *table*/)
++{
++ struct sfi_processor *pr;
++ int i;
++ int result = 0;
++
++
++ for (i = 0; i < num_processors; i++) {
++ pr = kzalloc(sizeof(struct sfi_processor), GFP_KERNEL);
++ pr->id = early_per_cpu(x86_cpu_to_apicid, i);
++//sfi_cpu_array[i].apicid;
++ per_cpu(sfi_processors, pr->id) = pr;
++
++#ifdef CONFIG_SFI_CPUIDLE
++ result = sfi_processor_power_init(pr);
++#endif
++ }
++ return result;
++}
++
++static int __init sfi_processor_init(void)
++{
++ int result = 0;
++
++ sfi_table_parse(SFI_SIG_IDLE, NULL, NULL, sfi_parse_idle);
++
++#ifdef CONFIG_SFI_CPUIDLE
++ if (sfi_cstate_num > 0)
++ result = cpuidle_register_driver(&sfi_idle_driver);
++ if (result)
++ return result;
++#endif
++ result = sfi_init_cpus();
++#ifdef CONFIG_SFI_CPUIDLE
++ if (result)
++ cpuidle_unregister_driver(&sfi_idle_driver);
++
++#endif
++ return result;
++}
++
++static void __exit sfi_processor_exit(void)
++{
++ struct sfi_processor *pr;
++ int i;
++ for (i = 0; i < num_processors; i++) {
++ pr = per_cpu(sfi_processors, i);
++ if (pr) {
++#ifdef CONFIG_SFI_CPUIDLE
++ sfi_processor_power_exit(pr);
++#endif
++ kfree(pr);
++ per_cpu(sfi_processors, i) = NULL;
++ }
++ }
++
++#ifdef CONFIG_SFI_CPUIDLE
++ cpuidle_unregister_driver(&sfi_idle_driver);
++#endif
++
++}
++
++module_init(sfi_processor_init);
++module_exit(sfi_processor_exit);
+Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_idle.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_idle.c
+@@ -0,0 +1,490 @@
++/*
++ * sfi_processor_idle.c
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Sujith Thomas
++ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
++ * Author: Vishwesh Rudramuni
++ * Contact information: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
++ */
++
++#include <asm/processor.h>
++#include <linux/sfi_processor.h>
++#include <linux/sched.h>
++#include <linux/clockchips.h>
++#include <linux/sfi.h>
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++#include <linux/intel_mid.h>
++#endif
++
++static short mwait_supported[SFI_PROCESSOR_MAX_POWER];
++
++#define MWAIT_SUBSTATE_MASK (0xf)
++#define MWAIT_SUBSTATE_SIZE (4)
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++#define MID_S0I1_STATE 1
++#define MID_S0I3_STATE 3
++static int p1_c6;
++static int __init s0ix_latency_setup(char *str);
++static u32 s0ix_latency = 20000;
++__setup("s0ix_latency=", s0ix_latency_setup);
++#endif
++
++#define CPUID_MWAIT_LEAF (5)
++#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
++#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
++
++#define MWAIT_ECX_INTERRUPT_BREAK (0x1)
++
++static unsigned int latency_factor __read_mostly = 4;
++module_param(latency_factor, uint, 0644);
++
++static int sfi_idle_enter_bm(struct cpuidle_device *dev,
++ struct cpuidle_state *state);
++
++struct cpuidle_driver sfi_idle_driver = {
++ .name = "sfi_idle",
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Callers should disable interrupts before the call and enable
++ * interrupts after return.
++ */
++static void sfi_safe_halt(void)
++{
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we
++ * test NEED_RESCHED:
++ */
++ smp_mb();
++ if (!need_resched()) {
++ safe_halt();
++ local_irq_disable();
++ }
++ current_thread_info()->status |= TS_POLLING;
++}
++
++static int sfi_idle_enter_c1(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++ ktime_t t1, t2;
++ s64 diff = 0;
++
++ local_irq_disable();
++
++ t1 = ktime_get();
++ sfi_safe_halt();
++ t2 = ktime_get();
++
++ local_irq_enable();
++
++ diff = ktime_to_us(ktime_sub(t2, t1));
++
++ if (diff > INT_MAX)
++ diff = INT_MAX;
++
++ return (int)diff;
++}
++
++static int sfi_idle_enter_simple(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++ ktime_t t1, t2;
++ s64 diff = 0;
++ struct sfi_cstate_table_entry *data;
++
++ data = (struct sfi_cstate_table_entry *)cpuidle_get_statedata(state);
++ if (unlikely(!data))
++ return 0;
++
++
++ local_irq_disable();
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we test
++ * NEED_RESCHED:
++ */
++ smp_mb();
++
++ if (unlikely(need_resched())) {
++ current_thread_info()->status |= TS_POLLING;
++ local_irq_enable();
++ return 0;
++ }
++
++ t1 = ktime_get();
++ mwait_idle_with_hints(data->hint, MWAIT_ECX_INTERRUPT_BREAK);
++ t2 = ktime_get();
++
++ local_irq_enable();
++ current_thread_info()->status |= TS_POLLING;
++
++ diff = ktime_to_us(ktime_sub(t2, t1));
++ if (diff > INT_MAX)
++ diff = INT_MAX;
++
++ return (int)diff;
++}
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++static int __init s0ix_latency_setup(char *str)
++{
++ u32 latency;
++
++ latency = memparse(str, &str);
++ if (latency > 150)
++ s0ix_latency = latency;
++
++ printk(KERN_INFO "latency for c7 is %x\n", latency);
++ return 1;
++}
++
++static int s0i3_enter_bm(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++ ktime_t t1, t2;
++ s64 diff_us = 0;
++ s64 diff_ns = 0;
++ struct sfi_processor *pr;
++ struct cpuidle_state *next_state;
++ int pr_id;
++ int ret;
++
++ pr_id = smp_processor_id();
++
++ pr = __get_cpu_var(sfi_processors);
++ if (unlikely(!pr))
++ return 0;
++
++ switch (g_ospm_base->platform_sx_state) {
++ case MID_S0I3_STATE:
++ if (pr_id == 0) {
++ t1 = ktime_get();
++
++ /* Tell the scheduler that we
++ * are going deep-idle:
++ */
++ sched_clock_idle_sleep_event();
++
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
++ &pr->id);
++
++ mid_suspend_enter(MID_S0I3_STATE);
++
++ t2 = ktime_get();
++
++ diff_us = ktime_to_us(ktime_sub(t2, t1));
++ diff_ns = ktime_to_ns(ktime_sub(t2, t1));
++
++ /* Tell the scheduler how much
++ * we idled:
++ */
++ sched_clock_idle_wakeup_event(diff_ns);
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
++ &pr->id);
++
++ if (diff_us > INT_MAX)
++ diff_us = INT_MAX;
++
++ return (int)diff_us;
++
++ } else {
++ ret = sfi_idle_enter_c1(dev, state);
++ return ret;
++ }
++ break;
++ case MID_S0I1_STATE:
++ if ((pr_id == 0) && (p1_c6 == 1)) {
++ /* pmu_issue_command(s0i1) only for thread 0 rest
++ * fall through
++ */
++ mid_suspend_enter(MID_S0I1_STATE);
++ }
++ next_state = &dev->states[4];
++ ret = sfi_idle_enter_bm(dev, next_state);
++ return ret;
++ break;
++ default:
++ next_state = &dev->states[4];
++ ret = sfi_idle_enter_bm(dev, next_state);
++ dev->last_state = &dev->states[4];
++ return ret;
++ break;
++
++ }
++
++ return 0;
++
++}
++#endif
++
++static int sfi_idle_enter_bm(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++
++ ktime_t t1, t2;
++ s64 diff_us = 0;
++ s64 diff_ns = 0;
++ struct sfi_cstate_table_entry *data;
++ struct sfi_processor *pr;
++
++ pr = __get_cpu_var(sfi_processors);
++ if (unlikely(!pr))
++ return 0;
++
++ data = (struct sfi_cstate_table_entry *)cpuidle_get_statedata(state);
++ if (unlikely(!data))
++ return 0;
++
++ local_irq_disable();
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we test
++ * NEED_RESCHED:
++ */
++ smp_mb();
++
++ if (unlikely(need_resched())) {
++ current_thread_info()->status |= TS_POLLING;
++ local_irq_enable();
++ return 0;
++ }
++
++ t1 = ktime_get();
++
++ /* Tell the scheduler that we are going deep-idle: */
++ sched_clock_idle_sleep_event();
++
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &pr->id);
++
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ if ((smp_processor_id() == 1) && (data->hint == 0x52))
++ p1_c6 = 1;
++#endif
++
++ mwait_idle_with_hints(data->hint, MWAIT_ECX_INTERRUPT_BREAK);
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ if ((smp_processor_id() == 1) && (data->hint == 0x52))
++ p1_c6 = 0;
++#endif
++
++ t2 = ktime_get();
++
++ diff_us = ktime_to_us(ktime_sub(t2, t1));
++ diff_ns = ktime_to_ns(ktime_sub(t2, t1));
++
++ /* Tell the scheduler how much we idled: */
++ sched_clock_idle_wakeup_event(diff_ns);
++
++ local_irq_enable();
++ current_thread_info()->status |= TS_POLLING;
++
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &pr->id);
++
++ if (diff_us > INT_MAX)
++ diff_us = INT_MAX;
++
++ return (int)diff_us;
++
++}
++
++/**
++ * sfi_processor_setup_cpuidle - prepares and configures CPUIDLE
++ * @pr: the SFI processor
++ */
++static int sfi_processor_setup_cpuidle(struct sfi_processor *pr)
++{
++ int i;
++ int count = CPUIDLE_DRIVER_STATE_START;
++ struct cpuidle_state *state;
++ struct cpuidle_device *dev = &pr->power.dev;
++
++ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
++ dev->states[i].name[0] = '\0';
++ dev->states[i].desc[0] = '\0';
++ }
++
++ for (i = 1; i < SFI_PROCESSOR_MAX_POWER; i++) {
++
++		/* MWAIT not supported by processor */
++ if (!mwait_supported[i])
++ continue;
++
++ state = &dev->states[count];
++
++ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
++ snprintf(state->desc, CPUIDLE_DESC_LEN, "C%d", i);
++
++ state->exit_latency = pr->power.states[count].exit_latency;
++ state->target_residency = state->exit_latency * latency_factor;
++ state->power_usage = pr->power.states[count].power_usage;
++ state->flags = 0;
++ cpuidle_set_statedata(state, &pr->power.sfi_cstates[count]);
++
++ printk
++ (KERN_INFO "State details Name:%s, Desc:%s, \
++ exit_latency:%d,target_residency%d,power_usage%d,hint%d",
++ state->name, state->desc, state->exit_latency,
++ state->target_residency, state->power_usage,
++ pr->power.sfi_cstates[count].hint);
++
++ switch (i) {
++ case SFI_STATE_C1:
++ state->flags |= CPUIDLE_FLAG_SHALLOW;
++ state->enter = sfi_idle_enter_c1;
++ break;
++
++ case SFI_STATE_C2:
++ state->flags |= CPUIDLE_FLAG_BALANCED;
++ state->flags |= CPUIDLE_FLAG_TIME_VALID;
++ state->enter = sfi_idle_enter_simple;
++ break;
++
++ case SFI_STATE_C3:
++ case SFI_STATE_C4:
++ case SFI_STATE_C5:
++ case SFI_STATE_C6:
++ state->flags |= CPUIDLE_FLAG_DEEP;
++ state->flags |= CPUIDLE_FLAG_TIME_VALID;
++ state->flags |= CPUIDLE_FLAG_CHECK_BM;
++ state->enter = sfi_idle_enter_bm;
++ break;
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ case STATE_S0IX:
++ state->flags |= CPUIDLE_FLAG_DEEP;
++ state->flags |= CPUIDLE_FLAG_TIME_VALID;
++ state->flags |= CPUIDLE_FLAG_CHECK_BM;
++ state->enter = s0i3_enter_bm;
++ break;
++#endif
++ }
++
++ count++;
++ if (count == CPUIDLE_STATE_MAX)
++ break;
++ }
++
++ dev->state_count = count;
++ if (!count)
++ return -EINVAL;
++
++ return 0;
++}
++
++int sfi_cstate_probe(unsigned int hint)
++{
++ int retval;
++ unsigned int eax, ebx, ecx, edx;
++ unsigned int edx_part;
++ unsigned int cstate_type;
++ unsigned int num_cstate_subtype;
++
++ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
++
++ /* Check whether this particular CState is supported or not */
++ cstate_type = (hint >> MWAIT_SUBSTATE_SIZE) + 1;
++ edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
++ num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
++
++ retval = 0;
++ if (num_cstate_subtype < (hint & MWAIT_SUBSTATE_MASK)) {
++ retval = -1;
++ goto out;
++ }
++
++ /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
++ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
++ !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
++ retval = -1;
++ goto out;
++ }
++
++ if (!mwait_supported[cstate_type]) {
++ mwait_supported[cstate_type] = 1;
++ printk(KERN_DEBUG
++ "Monitor-Mwait will be used to enter C-%d state\n",
++ cstate_type);
++ }
++
++out:
++ return retval;
++}
++
++int sfi_processor_power_init(struct sfi_processor *pr)
++{
++
++ int totallen;
++ struct sfi_cstate_table_entry *pentry;
++ u32 sfi_max_states;
++
++ pentry = sfi_cstate_array;
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ sfi_max_states = SFI_PROCESSOR_MAX_POWER - 1;
++#else
++ sfi_max_states = SFI_PROCESSOR_MAX_POWER;
++#endif
++
++ for (totallen = 1; totallen <= sfi_cstate_num &&
++ totallen < sfi_max_states; totallen++, pentry++) {
++ pr->power.states[totallen].power_usage = 0;
++ pr->power.states[totallen].exit_latency = pentry->latency;
++
++ pr->power.sfi_cstates[totallen].hint = pentry->hint;
++ pr->power.sfi_cstates[totallen].latency = pentry->latency;
++
++ sfi_cstate_probe(pentry->hint);
++
++ printk(KERN_INFO "Cstate[%d]: hint = 0x%08x, latency = %dms\n",
++ totallen, pentry->hint, pentry->latency);
++ }
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++
++ p1_c6 = 0;
++
++ /* this initialization is for the S0i3 state */
++ pr->power.states[totallen].power_usage = 0;
++ pr->power.states[totallen].exit_latency = s0ix_latency;
++
++ pr->power.sfi_cstates[totallen].hint = 0;
++ pr->power.sfi_cstates[totallen].latency = s0ix_latency;
++
++ mwait_supported[STATE_S0IX] = 1;
++#endif
++
++ sfi_processor_setup_cpuidle(pr);
++ pr->power.dev.cpu = pr->id;
++ if (cpuidle_register_device(&pr->power.dev))
++ return -EIO;
++
++ return 0;
++}
++
++int sfi_processor_power_exit(struct sfi_processor *pr)
++{
++ cpuidle_unregister_device(&pr->power.dev);
++ return 0;
++}
+Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_perflib.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_perflib.c
+@@ -0,0 +1,185 @@
++/*
++ * sfi_processor_perflib.c - SFI Processor P-States Library
++ *
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Vishwesh M Rudramuni
++ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/cpufreq.h>
++#include <linux/sfi.h>
++#include <linux/sfi_processor.h>
++
++#define SFI_PROCESSOR_COMPONENT 0x01000000
++#define SFI_PROCESSOR_CLASS "processor"
++#define SFI_PROCESSOR_FILE_PERFORMANCE "performance"
++#define _COMPONENT SFI_PROCESSOR_COMPONENT
++
++static DEFINE_MUTEX(performance_mutex);
++
++/* Use cpufreq debug layer for _PPC changes. */
++#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
++ "cpufreq-core", msg)
++
++static void sfi_cpufreq_add_file(struct sfi_processor *pr)
++{
++ return;
++}
++static void sfi_cpufreq_remove_file(struct sfi_processor *pr)
++{
++ return;
++}
++
++struct sfi_cpufreq_table_entry sfi_cpufreq_array[SFI_PROCESSOR_MAX_POWER];
++EXPORT_SYMBOL_GPL(sfi_cpufreq_array);
++
++int sfi_cpufreq_num;
++EXPORT_SYMBOL_GPL(sfi_cpufreq_num);
++
++static int __init sfi_parse_freq(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_cpufreq_table_entry *pentry;
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sb) {
++ printk(KERN_WARNING "SFI: Unable to map FREQ\n");
++ return -ENODEV;
++ }
++
++ if (!sfi_cpufreq_num) {
++ sfi_cpufreq_num = SFI_GET_NUM_ENTRIES(sb,
++ struct sfi_cpufreq_table_entry);
++ pentry = (struct sfi_cpufreq_table_entry *)sb->pentry;
++ totallen = sfi_cpufreq_num * sizeof(*pentry);
++ memcpy(sfi_cpufreq_array, pentry, totallen);
++ }
++
++ printk(KERN_INFO "SFI: P state info (num = %d):\n", sfi_cpufreq_num);
++ pentry = sfi_cpufreq_array;
++ for (totallen = 0; totallen < sfi_cpufreq_num; totallen++, pentry++) {
++ printk(KERN_INFO "Pstate[%d]: freq = %dMHz latency = %dms"
++ " ctrl = 0x%08x\n", totallen, pentry->freq,
++ pentry->latency, pentry->ctrl_val);
++ }
++
++ return 0;
++}
++
++
++static int sfi_processor_get_performance_states(struct sfi_processor *pr)
++{
++ int result = 0;
++ int i;
++
++ sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
++
++
++ pr->performance->state_count = sfi_cpufreq_num;
++ pr->performance->states =
++ kmalloc(sizeof(struct sfi_processor_px) * sfi_cpufreq_num,
++ GFP_KERNEL);
++ if (!pr->performance->states)
++ result = -ENOMEM;
++
++ printk(KERN_INFO "Num p-states %d\n", sfi_cpufreq_num);
++
++ /* Populate the P-states info from the SFI table here */
++ for (i = 0; i < sfi_cpufreq_num; i++) {
++ pr->performance->states[i].core_frequency = \
++ sfi_cpufreq_array[i].freq;
++ pr->performance->states[i].transition_latency = \
++ sfi_cpufreq_array[i].latency;
++ pr->performance->states[i].control = \
++ sfi_cpufreq_array[i].ctrl_val;
++ printk(KERN_INFO "State [%d]: core_frequency[%d] \
++ transition_latency[%d] \
++ control[0x%x] status[0x%x]\n", i,
++ (u32) pr->performance->states[i].core_frequency,
++ (u32) pr->performance->states[i].transition_latency,
++ (u32) pr->performance->states[i].control,
++ (u32) pr->performance->states[i].status);
++ }
++
++ return result;
++}
++
++int
++sfi_processor_register_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu)
++{
++ struct sfi_processor *pr;
++
++ mutex_lock(&performance_mutex);
++
++ pr = per_cpu(sfi_processors, cpu);
++ if (!pr) {
++ mutex_unlock(&performance_mutex);
++ return -ENODEV;
++ }
++
++ if (pr->performance) {
++ mutex_unlock(&performance_mutex);
++ return -EBUSY;
++ }
++
++ WARN_ON(!performance);
++
++ pr->performance = performance;
++
++ sfi_processor_get_performance_states(pr);
++
++ sfi_cpufreq_add_file(pr);
++
++ mutex_unlock(&performance_mutex);
++ return 0;
++}
++EXPORT_SYMBOL(sfi_processor_register_performance);
++
++void
++sfi_processor_unregister_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu)
++{
++ struct sfi_processor *pr;
++
++
++ mutex_lock(&performance_mutex);
++
++ pr = per_cpu(sfi_processors, cpu);
++ if (!pr) {
++ mutex_unlock(&performance_mutex);
++ return;
++ }
++
++ if (pr->performance)
++ kfree(pr->performance->states);
++ pr->performance = NULL;
++
++ sfi_cpufreq_remove_file(pr);
++
++ mutex_unlock(&performance_mutex);
++
++ return;
++}
++EXPORT_SYMBOL(sfi_processor_unregister_performance);
+Index: linux-2.6.33/drivers/sfi/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/sfi/Kconfig
++++ linux-2.6.33/drivers/sfi/Kconfig
+@@ -15,3 +15,13 @@ menuconfig SFI
+ For more information, see http://simplefirmware.org
+
+ Say 'Y' here to enable the kernel to boot on SFI-only platforms.
++config SFI_PROCESSOR_PM
++ bool "SFI Processor Power Management"
++ depends on SFI && X86_LOCAL_APIC
++ default y
++
++config SFI_CPUIDLE
++ bool "SFI Processor C-State driver"
++ depends on SFI_PROCESSOR_PM && CPU_IDLE
++ default y
++
+Index: linux-2.6.33/include/linux/sfi_processor.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/sfi_processor.h
+@@ -0,0 +1,102 @@
++/*
++ * sfi_processor.h
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Sujith Thomas
++ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
++ */
++
++#ifndef __SFI_PROCESSOR_H__
++#define __SFI_PROCESSOR_H__
++#include <linux/sfi.h>
++#include <linux/cpuidle.h>
++
++#define SFI_PROCESSOR_MAX_POWER 7
++
++#define CPU_SFI_GET_NUM(ptable, entry) \
++ ((ptable->header.length - SFI_TBL_HEADER_LEN) / \
++ (sizeof(struct entry)))
++
++struct sfi_processor_power {
++ struct cpuidle_device dev;
++ u32 default_state;
++ int count;
++ struct cpuidle_state states[SFI_PROCESSOR_MAX_POWER];
++ struct sfi_cstate_table_entry sfi_cstates[SFI_PROCESSOR_MAX_POWER];
++};
++
++struct sfi_processor_flags {
++ u8 valid;
++ u8 power;
++};
++
++struct sfi_processor {
++ u32 id;
++ struct sfi_processor_flags flags;
++ struct sfi_processor_power power;
++ struct sfi_processor_performance *performance;
++};
++
++/* Performance management */
++struct sfi_processor_px {
++ u32 core_frequency; /* megahertz */
++ u32 transition_latency; /* microseconds */
++ u32 control; /* control value */
++ u32 status; /* success indicator */
++};
++
++struct sfi_processor_performance {
++ unsigned int state;
++ unsigned int state_count;
++ struct sfi_processor_px *states;
++ cpumask_var_t shared_cpu_map;
++ unsigned int shared_type;
++};
++
++#define SFI_STATE_C0 (u8) 0
++#define SFI_STATE_C1 (u8) 1
++#define SFI_STATE_C2 (u8) 2
++#define SFI_STATE_C3 (u8) 3
++#define SFI_STATE_C4 (u8) 4
++#define SFI_STATE_C5 (u8) 5
++#define SFI_STATE_C6 (u8) 6
++
++#define SFI_C_STATES_MAX SFI_STATE_C6
++#define SFI_C_STATE_COUNT 6
++
++extern struct cpuidle_driver sfi_idle_driver;
++
++/* for communication between multiple parts of the processor kernel module */
++DECLARE_PER_CPU(struct sfi_processor *, sfi_processors);
++
++int sfi_processor_power_init(struct sfi_processor *pr);
++int sfi_processor_power_exit(struct sfi_processor *pr);
++extern int sfi_processor_register_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu);
++extern void sfi_processor_unregister_performance(struct
++ sfi_processor_performance
++ *performance,
++ unsigned int cpu);
++extern struct sfi_cstate_table_entry sfi_cstate_array[SFI_C_STATES_MAX];
++extern int sfi_cstate_num;
++
++extern struct sfi_cstate_table_entry sfi_cstate_array[SFI_C_STATES_MAX];
++extern int sfi_cstate_num;
++
++#endif /*__SFI_PROCESSOR_H__*/
+Index: linux-2.6.33/include/linux/sfi.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/sfi.h
++++ linux-2.6.33/include/linux/sfi.h
+@@ -120,6 +120,13 @@ struct sfi_cstate_table_entry {
+ u32 latency; /* latency in ms */
+ } __packed;
+
++
++struct sfi_cpufreq_table_entry {
++ u32 freq;
++ u32 latency; /* transition latency in ms for this pstate */
++	u32 ctrl_val;	/* value to write to PERF_CTL to enter this state */
++}__packed;
++
+ struct sfi_apic_table_entry {
+ u64 phys_addr; /* phy base addr for APIC reg */
+ } __packed;