Diffstat (limited to 'meta/packages/linux')
-rw-r--r--  meta/packages/linux/linux-dummy.bb | 2
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0001-drm-remove-define-for-non-linux-systems.patch | 48
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0002-i915-remove-settable-use_mi_batchbuffer_start.patch | 60
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0003-i915-Ignore-X-server-provided-mmio-address.patch | 41
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0004-i915-Use-more-consistent-names-for-regs-and-store.patch | 2746
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch | 424
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch | 46
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0007-i915-Initialize-hardware-status-page-at-device-load.patch | 137
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch | 572
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0009-drm-fix-sysfs-error-path.patch | 23
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0010-i915-separate-suspend-resume-functions.patch | 1079
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0011-drm-vblank-rework.patch | 1534
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0012-Export-shmem_file_setup-for-DRM-GEM.patch | 25
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch | 24
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch | 5483
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0015-i915-Add-chip-set-ID-param.patch | 35
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch | 205
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0017-i915-Make-use-of-sarea_priv-conditional.patch | 147
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch | 44
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch | 32
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch | 23
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch | 23
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch | 58
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0023-drm-clean-up-many-sparse-warnings-in-i915.patch | 192
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0024-fastboot-create-a-asynchronous-initlevel.patch | 136
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch | 62
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch | 54
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch | 40
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0028-fastboot-sync-the-async-execution-before-late_initc.patch | 95
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0029-fastboot-make-fastboot-a-config-option.patch | 56
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch | 67
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch | 41
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch | 44
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch | 32
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0034-fastboot-fix-typo-in-init-Kconfig-text.patch | 29
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch | 161
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0036-warning-fix-init-do_mounts_md-c.patch | 82
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0037-init-initramfs.c-unused-function-when-compiling-wit.patch | 37
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch | 38
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch | 183
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch | 91
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0041-r8169-8101e.patch | 940
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/0042-intelfb-945gme.patch | 153
-rw-r--r--  meta/packages/linux/linux-moblin-2.6.27-rc6/defconfig-eee901 | 2407
-rw-r--r--  meta/packages/linux/linux-moblin.inc | 2
-rw-r--r--  meta/packages/linux/linux-moblin_2.6.27-rc1.bb | 2
-rw-r--r--  meta/packages/linux/linux-moblin_2.6.27-rc6.bb | 54
47 files changed, 17805 insertions, 4 deletions
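The new linux-moblin_2.6.27-rc6.bb recipe (54 lines in the diffstat above) is what pulls these patch files and the defconfig into the build: in Poky/OpenEmbedded of this era, a kernel recipe lists them as file:// entries in SRC_URI so they are fetched from the recipe directory and applied on top of the upstream tarball during do_patch. The fragment below is only an abridged, illustrative sketch of that pattern, not the literal recipe body from this commit; the tarball URL, PR value, and S layout shown are assumptions.

# Illustrative sketch only -- abridged; the real recipe lists all 42 patches.
# The tarball URL, PR value, and ${S} layout here are assumptions.
require linux-moblin.inc

PR = "r0"

SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/testing/linux-2.6.27-rc6.tar.bz2 \
           file://0001-drm-remove-define-for-non-linux-systems.patch;patch=1 \
           file://0002-i915-remove-settable-use_mi_batchbuffer_start.patch;patch=1 \
           file://0042-intelfb-945gme.patch;patch=1 \
           file://defconfig-eee901"

S = "${WORKDIR}/linux-2.6.27-rc6"

The PR bump in linux-dummy.bb below follows the usual convention: incrementing PR forces the package to be rebuilt and repackaged even though nothing else in the recipe changed.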
diff --git a/meta/packages/linux/linux-dummy.bb b/meta/packages/linux/linux-dummy.bb
index a3e3af4c6..8d0686a2d 100644
--- a/meta/packages/linux/linux-dummy.bb
+++ b/meta/packages/linux/linux-dummy.bb
@@ -9,7 +9,7 @@ PACKAGES_DYNAMIC += "kernel-image-*"
#COMPATIBLE_MACHINE = "your_machine"
-PR = "r0"
+PR = "r1"
SRC_URI = ""
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0001-drm-remove-define-for-non-linux-systems.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0001-drm-remove-define-for-non-linux-systems.patch
new file mode 100644
index 000000000..588c1af70
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0001-drm-remove-define-for-non-linux-systems.patch
@@ -0,0 +1,48 @@
+commit 2e6ec7cdc09f36be1cbe9aeaccfc45f307fc0060
+Author: Carlos R. Mafra <crmafra2@gmail.com>
+Date: Wed Jul 30 12:29:37 2008 -0700
+
+ drm: remove #define's for non-linux systems
+
+ There is no point in considering FreeBSD et al. in the linux kernel
+ source code.
+
+ Signed-off-by: Carlos R. Mafra <crmafra@gmail.com>
+ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/include/drm/drm.h b/include/drm/drm.h
+index 38d3c6b..0864c69 100644
+--- a/include/drm/drm.h
++++ b/include/drm/drm.h
+@@ -36,7 +36,6 @@
+ #ifndef _DRM_H_
+ #define _DRM_H_
+
+-#if defined(__linux__)
+ #if defined(__KERNEL__)
+ #endif
+ #include <asm/ioctl.h> /* For _IO* macros */
+@@ -46,22 +45,6 @@
+ #define DRM_IOC_WRITE _IOC_WRITE
+ #define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
+ #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+-#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+-#if defined(__FreeBSD__) && defined(IN_MODULE)
+-/* Prevent name collision when including sys/ioccom.h */
+-#undef ioctl
+-#include <sys/ioccom.h>
+-#define ioctl(a,b,c) xf86ioctl(a,b,c)
+-#else
+-#include <sys/ioccom.h>
+-#endif /* __FreeBSD__ && xf86ioctl */
+-#define DRM_IOCTL_NR(n) ((n) & 0xff)
+-#define DRM_IOC_VOID IOC_VOID
+-#define DRM_IOC_READ IOC_OUT
+-#define DRM_IOC_WRITE IOC_IN
+-#define DRM_IOC_READWRITE IOC_INOUT
+-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+-#endif
+
+ #define DRM_MAJOR 226
+ #define DRM_MAX_MINOR 15
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0002-i915-remove-settable-use_mi_batchbuffer_start.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0002-i915-remove-settable-use_mi_batchbuffer_start.patch
new file mode 100644
index 000000000..f3c41f7cb
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0002-i915-remove-settable-use_mi_batchbuffer_start.patch
@@ -0,0 +1,60 @@
+commit 91019197abbfde388d0b71b0fc8979a936c23fe3
+Author: Keith Packard <keithp@keithp.com>
+Date: Wed Jul 30 12:28:47 2008 -0700
+
+ i915: remove settable use_mi_batchbuffer_start
+
+ The driver can know what hardware requires MI_BATCH_BUFFER vs
+ MI_BATCH_BUFFER_START; there's no reason to let user mode configure this.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 8897434..24adbde 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -159,13 +159,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+ dev_priv->current_page = 0;
+ dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+
+- /* We are using separate values as placeholders for mechanisms for
+- * private backbuffer/depthbuffer usage.
+- */
+- dev_priv->use_mi_batchbuffer_start = 0;
+- if (IS_I965G(dev)) /* 965 doesn't support older method */
+- dev_priv->use_mi_batchbuffer_start = 1;
+-
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+ dev_priv->allow_batchbuffer = 1;
+@@ -486,7 +479,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+ return ret;
+ }
+
+- if (dev_priv->use_mi_batchbuffer_start) {
++ if (!IS_I830(dev) && !IS_845G(dev)) {
+ BEGIN_LP_RING(2);
+ if (IS_I965G(dev)) {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+@@ -697,8 +690,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
+
+ switch (param->param) {
+ case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+- if (!IS_I965G(dev))
+- dev_priv->use_mi_batchbuffer_start = param->value;
+ break;
+ case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+ dev_priv->tex_lru_log_granularity = param->value;
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index d7326d9..2d441d3 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -99,7 +99,6 @@ typedef struct drm_i915_private {
+ int front_offset;
+ int current_page;
+ int page_flipping;
+- int use_mi_batchbuffer_start;
+
+ wait_queue_head_t irq_queue;
+ atomic_t irq_received;
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0003-i915-Ignore-X-server-provided-mmio-address.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0003-i915-Ignore-X-server-provided-mmio-address.patch
new file mode 100644
index 000000000..9f7e0b4bc
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0003-i915-Ignore-X-server-provided-mmio-address.patch
@@ -0,0 +1,41 @@
+commit 20ae3cf7d4a9ae8d23bcffa67c9a34fc2640d217
+Author: Keith Packard <keithp@keithp.com>
+Date: Wed Jul 30 12:36:08 2008 -0700
+
+ i915: Ignore X server provided mmio address
+
+ It is already correctly detected by the kernel for use in suspend/resume.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 24adbde..01a869b 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -121,13 +121,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+ return -EINVAL;
+ }
+
+- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+- if (!dev_priv->mmio_map) {
+- i915_dma_cleanup(dev);
+- DRM_ERROR("can not find mmio map!\n");
+- return -EINVAL;
+- }
+-
+ dev_priv->sarea_priv = (drm_i915_sarea_t *)
+ ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+
+@@ -194,11 +187,6 @@ static int i915_dma_resume(struct drm_device * dev)
+ return -EINVAL;
+ }
+
+- if (!dev_priv->mmio_map) {
+- DRM_ERROR("can not find mmio map!\n");
+- return -EINVAL;
+- }
+-
+ if (dev_priv->ring.map.handle == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0004-i915-Use-more-consistent-names-for-regs-and-store.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0004-i915-Use-more-consistent-names-for-regs-and-store.patch
new file mode 100644
index 000000000..f7a310ea6
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0004-i915-Use-more-consistent-names-for-regs-and-store.patch
@@ -0,0 +1,2746 @@
+commit 573e91575687018b4307f53a50f4da0084dbdf3d
+Author: Jesse Barnes <jbarnes@virtuousgeek.org>
+Date: Tue Jul 29 11:54:06 2008 -0700
+
+ i915: Use more consistent names for regs, and store them in a separate file.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 01a869b..7be580b 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -40,11 +40,11 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+- u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
++ u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+- ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
++ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->Size;
+@@ -67,8 +67,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+
+- ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+- ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
++ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->Size;
+@@ -98,13 +98,13 @@ static int i915_dma_cleanup(struct drm_device * dev)
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ /* Need to rewrite hardware status page */
+- I915_WRITE(0x02080, 0x1ffff000);
++ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
+
+ if (dev_priv->status_gfx_addr) {
+ dev_priv->status_gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+- I915_WRITE(0x2080, 0x1ffff000);
++ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
+
+ return 0;
+@@ -170,7 +170,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+- I915_WRITE(0x02080, dev_priv->dma_status_page);
++ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ }
+ DRM_DEBUG("Enabled hardware status page\n");
+ return 0;
+@@ -201,9 +201,9 @@ static int i915_dma_resume(struct drm_device * dev)
+ DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+
+ if (dev_priv->status_gfx_addr != 0)
+- I915_WRITE(0x02080, dev_priv->status_gfx_addr);
++ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ else
+- I915_WRITE(0x02080, dev_priv->dma_status_page);
++ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ DRM_DEBUG("Enabled hardware status page\n");
+
+ return 0;
+@@ -402,8 +402,8 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+
+ BEGIN_LP_RING(4);
+- OUT_RING(CMD_STORE_DWORD_IDX);
+- OUT_RING(20);
++ OUT_RING(MI_STORE_DWORD_INDEX);
++ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+@@ -505,7 +505,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
+ i915_kernel_lost_context(dev);
+
+ BEGIN_LP_RING(2);
+- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
++ OUT_RING(MI_FLUSH | MI_READ_FLUSH);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+
+@@ -530,8 +530,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+
+ BEGIN_LP_RING(4);
+- OUT_RING(CMD_STORE_DWORD_IDX);
+- OUT_RING(20);
++ OUT_RING(MI_STORE_DWORD_INDEX);
++ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+@@ -728,8 +728,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+- I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+- DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
++ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
++ DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
+ dev_priv->status_gfx_addr);
+ DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+ return 0;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 93aed1c..6c99aab 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -279,13 +279,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+ dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+- dev_priv->saveDSPABASE = I915_READ(DSPABASE);
++ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+ if (IS_I965G(dev)) {
+ dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+- dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
++ dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+
+ /* Pipe & plane B info */
+ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+@@ -307,13 +307,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+ dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+- dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
++ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+ if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
+ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+- dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
++ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+ /* CRT state */
+ dev_priv->saveADPA = I915_READ(ADPA);
+@@ -328,9 +328,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+ dev_priv->saveLVDS = I915_READ(LVDS);
+ if (!IS_I830(dev) && !IS_845G(dev))
+ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+- dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
+- dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
+- dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
++ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
++ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
++ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+
+ /* FIXME: save TV & SDVO state */
+
+@@ -341,19 +341,19 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+
+ /* Interrupt state */
+- dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
+- dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
+- dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
++ dev_priv->saveIIR = I915_READ(IIR);
++ dev_priv->saveIER = I915_READ(IER);
++ dev_priv->saveIMR = I915_READ(IMR);
+
+ /* VGA state */
+- dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
+- dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
+- dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
++ dev_priv->saveVGA0 = I915_READ(VGA0);
++ dev_priv->saveVGA1 = I915_READ(VGA1);
++ dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+ /* Clock gating state */
+ dev_priv->saveD_STATE = I915_READ(D_STATE);
+- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
++ dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
+
+ /* Cache mode state */
+ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+@@ -363,7 +363,7 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+- dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
++ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+@@ -424,7 +424,7 @@ static int i915_resume(struct drm_device *dev)
+ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+- I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
++ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
+ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ if (IS_I965G(dev)) {
+ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+@@ -436,7 +436,7 @@ static int i915_resume(struct drm_device *dev)
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+- I915_WRITE(DSPABASE, I915_READ(DSPABASE));
++ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+@@ -466,7 +466,7 @@ static int i915_resume(struct drm_device *dev)
+ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+- I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
++ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
+ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ if (IS_I965G(dev)) {
+ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+@@ -478,7 +478,7 @@ static int i915_resume(struct drm_device *dev)
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+- I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
++ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+
+ /* CRT state */
+ I915_WRITE(ADPA, dev_priv->saveADPA);
+@@ -493,9 +493,9 @@ static int i915_resume(struct drm_device *dev)
+
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+- I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
+- I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
+- I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
++ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
++ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
++ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+
+ /* FIXME: restore TV & SDVO state */
+@@ -508,14 +508,14 @@ static int i915_resume(struct drm_device *dev)
+
+ /* VGA state */
+ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+- I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
+- I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
+- I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
++ I915_WRITE(VGA0, dev_priv->saveVGA0);
++ I915_WRITE(VGA1, dev_priv->saveVGA1);
++ I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+ udelay(150);
+
+ /* Clock gating state */
+ I915_WRITE (D_STATE, dev_priv->saveD_STATE);
+- I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
++ I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
+
+ /* Cache mode state */
+ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+@@ -524,7 +524,7 @@ static int i915_resume(struct drm_device *dev)
+ I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+
+ for (i = 0; i < 16; i++) {
+- I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
++ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
+ }
+ for (i = 0; i < 3; i++)
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 2d441d3..afb51a3 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -30,6 +30,8 @@
+ #ifndef _I915_DRV_H_
+ #define _I915_DRV_H_
+
++#include "i915_reg.h"
++
+ /* General customization:
+ */
+
+@@ -138,7 +140,7 @@ typedef struct drm_i915_private {
+ u32 saveDSPASTRIDE;
+ u32 saveDSPASIZE;
+ u32 saveDSPAPOS;
+- u32 saveDSPABASE;
++ u32 saveDSPAADDR;
+ u32 saveDSPASURF;
+ u32 saveDSPATILEOFF;
+ u32 savePFIT_PGM_RATIOS;
+@@ -159,24 +161,24 @@ typedef struct drm_i915_private {
+ u32 saveDSPBSTRIDE;
+ u32 saveDSPBSIZE;
+ u32 saveDSPBPOS;
+- u32 saveDSPBBASE;
++ u32 saveDSPBADDR;
+ u32 saveDSPBSURF;
+ u32 saveDSPBTILEOFF;
+- u32 saveVCLK_DIVISOR_VGA0;
+- u32 saveVCLK_DIVISOR_VGA1;
+- u32 saveVCLK_POST_DIV;
++ u32 saveVGA0;
++ u32 saveVGA1;
++ u32 saveVGA_PD;
+ u32 saveVGACNTRL;
+ u32 saveADPA;
+ u32 saveLVDS;
+- u32 saveLVDSPP_ON;
+- u32 saveLVDSPP_OFF;
++ u32 savePP_ON_DELAYS;
++ u32 savePP_OFF_DELAYS;
+ u32 saveDVOA;
+ u32 saveDVOB;
+ u32 saveDVOC;
+ u32 savePP_ON;
+ u32 savePP_OFF;
+ u32 savePP_CONTROL;
+- u32 savePP_CYCLE;
++ u32 savePP_DIVISOR;
+ u32 savePFIT_CONTROL;
+ u32 save_palette_a[256];
+ u32 save_palette_b[256];
+@@ -189,7 +191,7 @@ typedef struct drm_i915_private {
+ u32 saveIMR;
+ u32 saveCACHE_MODE_0;
+ u32 saveD_STATE;
+- u32 saveDSPCLK_GATE_D;
++ u32 saveCG_2D_DIS;
+ u32 saveMI_ARB_STATE;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+@@ -283,816 +285,26 @@ extern void i915_mem_release(struct drm_device * dev,
+ if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
+ dev_priv->ring.tail = outring; \
+ dev_priv->ring.space -= outcount * 4; \
+- I915_WRITE(LP_RING + RING_TAIL, outring); \
++ I915_WRITE(PRB0_TAIL, outring); \
+ } while(0)
+
+-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+-
+-/* Extended config space */
+-#define LBB 0xf4
+-
+-/* VGA stuff */
+-
+-#define VGA_ST01_MDA 0x3ba
+-#define VGA_ST01_CGA 0x3da
+-
+-#define VGA_MSR_WRITE 0x3c2
+-#define VGA_MSR_READ 0x3cc
+-#define VGA_MSR_MEM_EN (1<<1)
+-#define VGA_MSR_CGA_MODE (1<<0)
+-
+-#define VGA_SR_INDEX 0x3c4
+-#define VGA_SR_DATA 0x3c5
+-
+-#define VGA_AR_INDEX 0x3c0
+-#define VGA_AR_VID_EN (1<<5)
+-#define VGA_AR_DATA_WRITE 0x3c0
+-#define VGA_AR_DATA_READ 0x3c1
+-
+-#define VGA_GR_INDEX 0x3ce
+-#define VGA_GR_DATA 0x3cf
+-/* GR05 */
+-#define VGA_GR_MEM_READ_MODE_SHIFT 3
+-#define VGA_GR_MEM_READ_MODE_PLANE 1
+-/* GR06 */
+-#define VGA_GR_MEM_MODE_MASK 0xc
+-#define VGA_GR_MEM_MODE_SHIFT 2
+-#define VGA_GR_MEM_A0000_AFFFF 0
+-#define VGA_GR_MEM_A0000_BFFFF 1
+-#define VGA_GR_MEM_B0000_B7FFF 2
+-#define VGA_GR_MEM_B0000_BFFFF 3
+-
+-#define VGA_DACMASK 0x3c6
+-#define VGA_DACRX 0x3c7
+-#define VGA_DACWX 0x3c8
+-#define VGA_DACDATA 0x3c9
+-
+-#define VGA_CR_INDEX_MDA 0x3b4
+-#define VGA_CR_DATA_MDA 0x3b5
+-#define VGA_CR_INDEX_CGA 0x3d4
+-#define VGA_CR_DATA_CGA 0x3d5
+-
+-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
+-#define CMD_REPORT_HEAD (7<<23)
+-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
+-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
+-
+-#define INST_PARSER_CLIENT 0x00000000
+-#define INST_OP_FLUSH 0x02000000
+-#define INST_FLUSH_MAP_CACHE 0x00000001
+-
+-#define BB1_START_ADDR_MASK (~0x7)
+-#define BB1_PROTECTED (1<<0)
+-#define BB1_UNPROTECTED (0<<0)
+-#define BB2_END_ADDR_MASK (~0x7)
+-
+-/* Framebuffer compression */
+-#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
+-#define FBC_LL_BASE 0x03204 /* 4k page aligned */
+-#define FBC_CONTROL 0x03208
+-#define FBC_CTL_EN (1<<31)
+-#define FBC_CTL_PERIODIC (1<<30)
+-#define FBC_CTL_INTERVAL_SHIFT (16)
+-#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+-#define FBC_CTL_STRIDE_SHIFT (5)
+-#define FBC_CTL_FENCENO (1<<0)
+-#define FBC_COMMAND 0x0320c
+-#define FBC_CMD_COMPRESS (1<<0)
+-#define FBC_STATUS 0x03210
+-#define FBC_STAT_COMPRESSING (1<<31)
+-#define FBC_STAT_COMPRESSED (1<<30)
+-#define FBC_STAT_MODIFIED (1<<29)
+-#define FBC_STAT_CURRENT_LINE (1<<0)
+-#define FBC_CONTROL2 0x03214
+-#define FBC_CTL_FENCE_DBL (0<<4)
+-#define FBC_CTL_IDLE_IMM (0<<2)
+-#define FBC_CTL_IDLE_FULL (1<<2)
+-#define FBC_CTL_IDLE_LINE (2<<2)
+-#define FBC_CTL_IDLE_DEBUG (3<<2)
+-#define FBC_CTL_CPU_FENCE (1<<1)
+-#define FBC_CTL_PLANEA (0<<0)
+-#define FBC_CTL_PLANEB (1<<0)
+-#define FBC_FENCE_OFF 0x0321b
+-
+-#define FBC_LL_SIZE (1536)
+-#define FBC_LL_PAD (32)
+-
+-/* Interrupt bits:
+- */
+-#define USER_INT_FLAG (1<<1)
+-#define VSYNC_PIPEB_FLAG (1<<5)
+-#define VSYNC_PIPEA_FLAG (1<<7)
+-#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
+-
+-#define I915REG_HWSTAM 0x02098
+-#define I915REG_INT_IDENTITY_R 0x020a4
+-#define I915REG_INT_MASK_R 0x020a8
+-#define I915REG_INT_ENABLE_R 0x020a0
+-
+-#define I915REG_PIPEASTAT 0x70024
+-#define I915REG_PIPEBSTAT 0x71024
+-
+-#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+-#define I915_VBLANK_CLEAR (1UL<<1)
+-
+-#define SRX_INDEX 0x3c4
+-#define SRX_DATA 0x3c5
+-#define SR01 1
+-#define SR01_SCREEN_OFF (1<<5)
+-
+-#define PPCR 0x61204
+-#define PPCR_ON (1<<0)
+-
+-#define DVOB 0x61140
+-#define DVOB_ON (1<<31)
+-#define DVOC 0x61160
+-#define DVOC_ON (1<<31)
+-#define LVDS 0x61180
+-#define LVDS_ON (1<<31)
+-
+-#define ADPA 0x61100
+-#define ADPA_DPMS_MASK (~(3<<10))
+-#define ADPA_DPMS_ON (0<<10)
+-#define ADPA_DPMS_SUSPEND (1<<10)
+-#define ADPA_DPMS_STANDBY (2<<10)
+-#define ADPA_DPMS_OFF (3<<10)
+-
+-#define NOPID 0x2094
+-#define LP_RING 0x2030
+-#define HP_RING 0x2040
+-/* The binner has its own ring buffer:
+- */
+-#define HWB_RING 0x2400
+-
+-#define RING_TAIL 0x00
+-#define TAIL_ADDR 0x001FFFF8
+-#define RING_HEAD 0x04
+-#define HEAD_WRAP_COUNT 0xFFE00000
+-#define HEAD_WRAP_ONE 0x00200000
+-#define HEAD_ADDR 0x001FFFFC
+-#define RING_START 0x08
+-#define START_ADDR 0x0xFFFFF000
+-#define RING_LEN 0x0C
+-#define RING_NR_PAGES 0x001FF000
+-#define RING_REPORT_MASK 0x00000006
+-#define RING_REPORT_64K 0x00000002
+-#define RING_REPORT_128K 0x00000004
+-#define RING_NO_REPORT 0x00000000
+-#define RING_VALID_MASK 0x00000001
+-#define RING_VALID 0x00000001
+-#define RING_INVALID 0x00000000
+-
+-/* Instruction parser error reg:
+- */
+-#define IPEIR 0x2088
+-
+-/* Scratch pad debug 0 reg:
+- */
+-#define SCPD0 0x209c
+-
+-/* Error status reg:
+- */
+-#define ESR 0x20b8
+-
+-/* Secondary DMA fetch address debug reg:
+- */
+-#define DMA_FADD_S 0x20d4
+-
+-/* Memory Interface Arbitration State
+- */
+-#define MI_ARB_STATE 0x20e4
+-
+-/* Cache mode 0 reg.
+- * - Manipulating render cache behaviour is central
+- * to the concept of zone rendering, tuning this reg can help avoid
+- * unnecessary render cache reads and even writes (for z/stencil)
+- * at beginning and end of scene.
+- *
+- * - To change a bit, write to this reg with a mask bit set and the
+- * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
+- */
+-#define Cache_Mode_0 0x2120
+-#define CACHE_MODE_0 0x2120
+-#define CM0_MASK_SHIFT 16
+-#define CM0_IZ_OPT_DISABLE (1<<6)
+-#define CM0_ZR_OPT_DISABLE (1<<5)
+-#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+-#define CM0_COLOR_EVICT_DISABLE (1<<3)
+-#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+-#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+-
+-
+-/* Graphics flush control. A CPU write flushes the GWB of all writes.
+- * The data is discarded.
+- */
+-#define GFX_FLSH_CNTL 0x2170
+-
+-/* Binner control. Defines the location of the bin pointer list:
+- */
+-#define BINCTL 0x2420
+-#define BC_MASK (1 << 9)
+-
+-/* Binned scene info.
+- */
+-#define BINSCENE 0x2428
+-#define BS_OP_LOAD (1 << 8)
+-#define BS_MASK (1 << 22)
+-
+-/* Bin command parser debug reg:
+- */
+-#define BCPD 0x2480
+-
+-/* Bin memory control debug reg:
+- */
+-#define BMCD 0x2484
+-
+-/* Bin data cache debug reg:
+- */
+-#define BDCD 0x2488
+-
+-/* Binner pointer cache debug reg:
+- */
+-#define BPCD 0x248c
+-
+-/* Binner scratch pad debug reg:
+- */
+-#define BINSKPD 0x24f0
+-
+-/* HWB scratch pad debug reg:
+- */
+-#define HWBSKPD 0x24f4
+-
+-/* Binner memory pool reg:
+- */
+-#define BMP_BUFFER 0x2430
+-#define BMP_PAGE_SIZE_4K (0 << 10)
+-#define BMP_BUFFER_SIZE_SHIFT 1
+-#define BMP_ENABLE (1 << 0)
+-
+-/* Get/put memory from the binner memory pool:
+- */
+-#define BMP_GET 0x2438
+-#define BMP_PUT 0x2440
+-#define BMP_OFFSET_SHIFT 5
+-
+-/* 3D state packets:
+- */
+-#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
+-
+-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+-#define SC_UPDATE_SCISSOR (0x1<<1)
+-#define SC_ENABLE_MASK (0x1<<0)
+-#define SC_ENABLE (0x1<<0)
+-
+-#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+-
+-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+-#define SCI_YMIN_MASK (0xffff<<16)
+-#define SCI_XMIN_MASK (0xffff<<0)
+-#define SCI_YMAX_MASK (0xffff<<16)
+-#define SCI_XMAX_MASK (0xffff<<0)
+-
+-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+-
+-#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+-
+-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+-#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
+-#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
+-
+-#define MI_BATCH_BUFFER ((0x30<<23)|1)
+-#define MI_BATCH_BUFFER_START (0x31<<23)
+-#define MI_BATCH_BUFFER_END (0xA<<23)
+-#define MI_BATCH_NON_SECURE (1)
+-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+-
+-#define MI_WAIT_FOR_EVENT ((0x3<<23))
+-#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+-
+-#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
+-
+-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+-#define ASYNC_FLIP (1<<22)
+-#define DISPLAY_PLANE_A (0<<20)
+-#define DISPLAY_PLANE_B (1<<20)
+-
+-/* Display regs */
+-#define DSPACNTR 0x70180
+-#define DSPBCNTR 0x71180
+-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+-
+-/* Define the region of interest for the binner:
+- */
+-#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
+-
+-#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+-
+-#define CMD_MI_FLUSH (0x04 << 23)
+-#define MI_NO_WRITE_FLUSH (1 << 2)
+-#define MI_READ_FLUSH (1 << 0)
+-#define MI_EXE_FLUSH (1 << 1)
+-#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+-#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+-
+-#define BREADCRUMB_BITS 31
+-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+-
+-#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+-
+-#define BLC_PWM_CTL 0x61254
+-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+-
+-#define BLC_PWM_CTL2 0x61250
+ /**
+- * This is the most significant 15 bits of the number of backlight cycles in a
+- * complete cycle of the modulated backlight control.
++ * Reads a dword out of the status page, which is written to from the command
++ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
++ * MI_STORE_DATA_IMM.
+ *
+- * The actual value is this field multiplied by two.
+- */
+-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+-#define BLM_LEGACY_MODE (1 << 16)
+-/**
+- * This is the number of cycles out of the backlight modulation cycle for which
+- * the backlight is on.
++ * The following dwords have a reserved meaning:
++ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
++ * 4: ring 0 head pointer
++ * 5: ring 1 head pointer (915-class)
++ * 6: ring 2 head pointer (915-class)
+ *
+- * This field must be no greater than the number of cycles in the complete
+- * backlight modulation cycle.
+- */
+-#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+-#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+-
+-#define I915_GCFGC 0xf0
+-#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
+-#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+-#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
+-#define I915_DISPLAY_CLOCK_MASK (7 << 4)
+-
+-#define I855_HPLLCC 0xc0
+-#define I855_CLOCK_CONTROL_MASK (3 << 0)
+-#define I855_CLOCK_133_200 (0 << 0)
+-#define I855_CLOCK_100_200 (1 << 0)
+-#define I855_CLOCK_100_133 (2 << 0)
+-#define I855_CLOCK_166_250 (3 << 0)
+-
+-/* p317, 319
++ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+-#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
+-#define VCLK2_VCO_N 0x600a
+-#define VCLK2_VCO_DIV_SEL 0x6012
+-
+-#define VCLK_DIVISOR_VGA0 0x6000
+-#define VCLK_DIVISOR_VGA1 0x6004
+-#define VCLK_POST_DIV 0x6010
+-/** Selects a post divisor of 4 instead of 2. */
+-# define VGA1_PD_P2_DIV_4 (1 << 15)
+-/** Overrides the p2 post divisor field */
+-# define VGA1_PD_P1_DIV_2 (1 << 13)
+-# define VGA1_PD_P1_SHIFT 8
+-/** P1 value is 2 greater than this field */
+-# define VGA1_PD_P1_MASK (0x1f << 8)
+-/** Selects a post divisor of 4 instead of 2. */
+-# define VGA0_PD_P2_DIV_4 (1 << 7)
+-/** Overrides the p2 post divisor field */
+-# define VGA0_PD_P1_DIV_2 (1 << 5)
+-# define VGA0_PD_P1_SHIFT 0
+-/** P1 value is 2 greater than this field */
+-# define VGA0_PD_P1_MASK (0x1f << 0)
+-
+-/* PCI D state control register */
+-#define D_STATE 0x6104
+-#define DSPCLK_GATE_D 0x6200
+-
+-/* I830 CRTC registers */
+-#define HTOTAL_A 0x60000
+-#define HBLANK_A 0x60004
+-#define HSYNC_A 0x60008
+-#define VTOTAL_A 0x6000c
+-#define VBLANK_A 0x60010
+-#define VSYNC_A 0x60014
+-#define PIPEASRC 0x6001c
+-#define BCLRPAT_A 0x60020
+-#define VSYNCSHIFT_A 0x60028
+-
+-#define HTOTAL_B 0x61000
+-#define HBLANK_B 0x61004
+-#define HSYNC_B 0x61008
+-#define VTOTAL_B 0x6100c
+-#define VBLANK_B 0x61010
+-#define VSYNC_B 0x61014
+-#define PIPEBSRC 0x6101c
+-#define BCLRPAT_B 0x61020
+-#define VSYNCSHIFT_B 0x61028
+-
+-#define PP_STATUS 0x61200
+-# define PP_ON (1 << 31)
+-/**
+- * Indicates that all dependencies of the panel are on:
+- *
+- * - PLL enabled
+- * - pipe enabled
+- * - LVDS/DVOB/DVOC on
+- */
+-# define PP_READY (1 << 30)
+-# define PP_SEQUENCE_NONE (0 << 28)
+-# define PP_SEQUENCE_ON (1 << 28)
+-# define PP_SEQUENCE_OFF (2 << 28)
+-# define PP_SEQUENCE_MASK 0x30000000
+-#define PP_CONTROL 0x61204
+-# define POWER_TARGET_ON (1 << 0)
+-
+-#define LVDSPP_ON 0x61208
+-#define LVDSPP_OFF 0x6120c
+-#define PP_CYCLE 0x61210
+-
+-#define PFIT_CONTROL 0x61230
+-# define PFIT_ENABLE (1 << 31)
+-# define PFIT_PIPE_MASK (3 << 29)
+-# define PFIT_PIPE_SHIFT 29
+-# define VERT_INTERP_DISABLE (0 << 10)
+-# define VERT_INTERP_BILINEAR (1 << 10)
+-# define VERT_INTERP_MASK (3 << 10)
+-# define VERT_AUTO_SCALE (1 << 9)
+-# define HORIZ_INTERP_DISABLE (0 << 6)
+-# define HORIZ_INTERP_BILINEAR (1 << 6)
+-# define HORIZ_INTERP_MASK (3 << 6)
+-# define HORIZ_AUTO_SCALE (1 << 5)
+-# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+-
+-#define PFIT_PGM_RATIOS 0x61234
+-# define PFIT_VERT_SCALE_MASK 0xfff00000
+-# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+-
+-#define PFIT_AUTO_RATIOS 0x61238
+-
+-
+-#define DPLL_A 0x06014
+-#define DPLL_B 0x06018
+-# define DPLL_VCO_ENABLE (1 << 31)
+-# define DPLL_DVO_HIGH_SPEED (1 << 30)
+-# define DPLL_SYNCLOCK_ENABLE (1 << 29)
+-# define DPLL_VGA_MODE_DIS (1 << 28)
+-# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+-# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+-# define DPLL_MODE_MASK (3 << 26)
+-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+-# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+-# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+-# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+-# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+-/**
+- * The i830 generation, in DAC/serial mode, defines p1 as two plus this
+- * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+- */
+-# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+-/**
+- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+- * this field (only one bit may be set).
+- */
+-# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+-# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+-# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
+-# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+-# define PLL_REF_INPUT_DREFCLK (0 << 13)
+-# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+-# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+-# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+-# define PLL_REF_INPUT_MASK (3 << 13)
+-# define PLL_LOAD_PULSE_PHASE_SHIFT 9
+-/*
+- * Parallel to Serial Load Pulse phase selection.
+- * Selects the phase for the 10X DPLL clock for the PCIe
+- * digital display port. The range is 4 to 13; 10 or more
+- * is just a flip delay. The default is 6
+- */
+-# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+-# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+-
+-/**
+- * SDVO multiplier for 945G/GM. Not used on 965.
+- *
+- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+- */
+-# define SDVO_MULTIPLIER_MASK 0x000000ff
+-# define SDVO_MULTIPLIER_SHIFT_HIRES 4
+-# define SDVO_MULTIPLIER_SHIFT_VGA 0
+-
+-/** @defgroup DPLL_MD
+- * @{
+- */
+-/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+-#define DPLL_A_MD 0x0601c
+-/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+-#define DPLL_B_MD 0x06020
+-/**
+- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+- *
+- * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+- */
+-# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+-# define DPLL_MD_UDI_DIVIDER_SHIFT 24
+-/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+-# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+-# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+-/**
+- * SDVO/UDI pixel multiplier.
+- *
+- * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
+- * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+- * dummy bytes in the datastream at an increased clock rate, with both sides of
+- * the link knowing how many bytes are fill.
+- *
+- * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
+- * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
+- * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
+- * through an SDVO command.
+- *
+- * This register field has values of multiplication factor minus 1, with
+- * a maximum multiplier of 5 for SDVO.
+- */
+-# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+-# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
+-/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+- * This best be set to the default value (3) or the CRT won't work. No,
+- * I don't entirely understand what this does...
+- */
+-# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+-# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+-/** @} */
+-
+-#define DPLL_TEST 0x606c
+-# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+-# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+-# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+-# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+-# define DPLLB_TEST_N_BYPASS (1 << 19)
+-# define DPLLB_TEST_M_BYPASS (1 << 18)
+-# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+-# define DPLLA_TEST_N_BYPASS (1 << 3)
+-# define DPLLA_TEST_M_BYPASS (1 << 2)
+-# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+-
+-#define ADPA 0x61100
+-#define ADPA_DAC_ENABLE (1<<31)
+-#define ADPA_DAC_DISABLE 0
+-#define ADPA_PIPE_SELECT_MASK (1<<30)
+-#define ADPA_PIPE_A_SELECT 0
+-#define ADPA_PIPE_B_SELECT (1<<30)
+-#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+-#define ADPA_SETS_HVPOLARITY 0
+-#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+-#define ADPA_VSYNC_CNTL_ENABLE 0
+-#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+-#define ADPA_HSYNC_CNTL_ENABLE 0
+-#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+-#define ADPA_VSYNC_ACTIVE_LOW 0
+-#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+-#define ADPA_HSYNC_ACTIVE_LOW 0
+-
+-#define FPA0 0x06040
+-#define FPA1 0x06044
+-#define FPB0 0x06048
+-#define FPB1 0x0604c
+-# define FP_N_DIV_MASK 0x003f0000
+-# define FP_N_DIV_SHIFT 16
+-# define FP_M1_DIV_MASK 0x00003f00
+-# define FP_M1_DIV_SHIFT 8
+-# define FP_M2_DIV_MASK 0x0000003f
+-# define FP_M2_DIV_SHIFT 0
+-
+-
+-#define PORT_HOTPLUG_EN 0x61110
+-# define SDVOB_HOTPLUG_INT_EN (1 << 26)
+-# define SDVOC_HOTPLUG_INT_EN (1 << 25)
+-# define TV_HOTPLUG_INT_EN (1 << 18)
+-# define CRT_HOTPLUG_INT_EN (1 << 9)
+-# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+-
+-#define PORT_HOTPLUG_STAT 0x61114
+-# define CRT_HOTPLUG_INT_STATUS (1 << 11)
+-# define TV_HOTPLUG_INT_STATUS (1 << 10)
+-# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+-# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+-# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+-# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+-# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+-# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+-
+-#define SDVOB 0x61140
+-#define SDVOC 0x61160
+-#define SDVO_ENABLE (1 << 31)
+-#define SDVO_PIPE_B_SELECT (1 << 30)
+-#define SDVO_STALL_SELECT (1 << 29)
+-#define SDVO_INTERRUPT_ENABLE (1 << 26)
+-/**
+- * 915G/GM SDVO pixel multiplier.
+- *
+- * Programmed value is multiplier - 1, up to 5x.
+- *
+- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+- */
+-#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+-#define SDVO_PORT_MULTIPLY_SHIFT 23
+-#define SDVO_PHASE_SELECT_MASK (15 << 19)
+-#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+-#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+-#define SDVOC_GANG_MODE (1 << 16)
+-#define SDVO_BORDER_ENABLE (1 << 7)
+-#define SDVOB_PCIE_CONCURRENCY (1 << 3)
+-#define SDVO_DETECTED (1 << 2)
+-/* Bits to be preserved when writing */
+-#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
+-#define SDVOC_PRESERVE_MASK (1 << 17)
+-
+-/** @defgroup LVDS
+- * @{
+- */
+-/**
+- * This register controls the LVDS output enable, pipe selection, and data
+- * format selection.
+- *
+- * All of the clock/data pairs are force powered down by power sequencing.
+- */
+-#define LVDS 0x61180
+-/**
+- * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+- * the DPLL semantics change when the LVDS is assigned to that pipe.
+- */
+-# define LVDS_PORT_EN (1 << 31)
+-/** Selects pipe B for LVDS data. Must be set on pre-965. */
+-# define LVDS_PIPEB_SELECT (1 << 30)
+-
+-/**
+- * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+- * pixel.
+- */
+-# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+-# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+-# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+-/**
+- * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+- * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+- * on.
+- */
+-# define LVDS_A3_POWER_MASK (3 << 6)
+-# define LVDS_A3_POWER_DOWN (0 << 6)
+-# define LVDS_A3_POWER_UP (3 << 6)
+-/**
+- * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+- * is set.
+- */
+-# define LVDS_CLKB_POWER_MASK (3 << 4)
+-# define LVDS_CLKB_POWER_DOWN (0 << 4)
+-# define LVDS_CLKB_POWER_UP (3 << 4)
+-
+-/**
+- * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+- * setting for whether we are in dual-channel mode. The B3 pair will
+- * additionally only be powered up when LVDS_A3_POWER_UP is set.
+- */
+-# define LVDS_B0B3_POWER_MASK (3 << 2)
+-# define LVDS_B0B3_POWER_DOWN (0 << 2)
+-# define LVDS_B0B3_POWER_UP (3 << 2)
+-
+-#define PIPEACONF 0x70008
+-#define PIPEACONF_ENABLE (1<<31)
+-#define PIPEACONF_DISABLE 0
+-#define PIPEACONF_DOUBLE_WIDE (1<<30)
+-#define I965_PIPECONF_ACTIVE (1<<30)
+-#define PIPEACONF_SINGLE_WIDE 0
+-#define PIPEACONF_PIPE_UNLOCKED 0
+-#define PIPEACONF_PIPE_LOCKED (1<<25)
+-#define PIPEACONF_PALETTE 0
+-#define PIPEACONF_GAMMA (1<<24)
+-#define PIPECONF_FORCE_BORDER (1<<25)
+-#define PIPECONF_PROGRESSIVE (0 << 21)
+-#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+-#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+-
+-#define DSPARB 0x70030
+-#define DSPARB_CSTART_MASK (0x7f << 7)
+-#define DSPARB_CSTART_SHIFT 7
+-#define DSPARB_BSTART_MASK (0x7f)
+-#define DSPARB_BSTART_SHIFT 0
+-
+-#define PIPEBCONF 0x71008
+-#define PIPEBCONF_ENABLE (1<<31)
+-#define PIPEBCONF_DISABLE 0
+-#define PIPEBCONF_DOUBLE_WIDE (1<<30)
+-#define PIPEBCONF_DISABLE 0
+-#define PIPEBCONF_GAMMA (1<<24)
+-#define PIPEBCONF_PALETTE 0
+-
+-#define PIPEBGCMAXRED 0x71010
+-#define PIPEBGCMAXGREEN 0x71014
+-#define PIPEBGCMAXBLUE 0x71018
+-#define PIPEBSTAT 0x71024
+-#define PIPEBFRAMEHIGH 0x71040
+-#define PIPEBFRAMEPIXEL 0x71044
+-
+-#define DSPACNTR 0x70180
+-#define DSPBCNTR 0x71180
+-#define DISPLAY_PLANE_ENABLE (1<<31)
+-#define DISPLAY_PLANE_DISABLE 0
+-#define DISPPLANE_GAMMA_ENABLE (1<<30)
+-#define DISPPLANE_GAMMA_DISABLE 0
+-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+-#define DISPPLANE_8BPP (0x2<<26)
+-#define DISPPLANE_15_16BPP (0x4<<26)
+-#define DISPPLANE_16BPP (0x5<<26)
+-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
+-#define DISPPLANE_32BPP (0x7<<26)
+-#define DISPPLANE_STEREO_ENABLE (1<<25)
+-#define DISPPLANE_STEREO_DISABLE 0
+-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+-#define DISPPLANE_SEL_PIPE_A 0
+-#define DISPPLANE_SEL_PIPE_B (1<<24)
+-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+-#define DISPPLANE_SRC_KEY_DISABLE 0
+-#define DISPPLANE_LINE_DOUBLE (1<<20)
+-#define DISPPLANE_NO_LINE_DOUBLE 0
+-#define DISPPLANE_STEREO_POLARITY_FIRST 0
+-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
+-/* plane B only */
+-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+-#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+-#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
+-#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+-
+-#define DSPABASE 0x70184
+-#define DSPASTRIDE 0x70188
+-
+-#define DSPBBASE 0x71184
+-#define DSPBADDR DSPBBASE
+-#define DSPBSTRIDE 0x71188
+-
+-#define DSPAKEYVAL 0x70194
+-#define DSPAKEYMASK 0x70198
+-
+-#define DSPAPOS 0x7018C /* reserved */
+-#define DSPASIZE 0x70190
+-#define DSPBPOS 0x7118C
+-#define DSPBSIZE 0x71190
+-
+-#define DSPASURF 0x7019C
+-#define DSPATILEOFF 0x701A4
+-
+-#define DSPBSURF 0x7119C
+-#define DSPBTILEOFF 0x711A4
+-
+-#define VGACNTRL 0x71400
+-# define VGA_DISP_DISABLE (1 << 31)
+-# define VGA_2X_MODE (1 << 30)
+-# define VGA_PIPE_B_SELECT (1 << 29)
+-
+-/*
+- * Some BIOS scratch area registers. The 845 (and 830?) store the amount
+- * of video memory available to the BIOS in SWF1.
+- */
+-
+-#define SWF0 0x71410
+-
+-/*
+- * 855 scratch registers.
+- */
+-#define SWF10 0x70410
+-
+-#define SWF30 0x72414
+-
+-/*
+- * Overlay registers. These are overlay registers accessed via MMIO.
+- * Those loaded via the overlay register page are defined in i830_video.c.
+- */
+-#define OVADD 0x30000
+-
+-#define DOVSTA 0x30008
+-#define OC_BUF (0x3<<20)
++#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
++#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
+
+-#define OGAMC5 0x30010
+-#define OGAMC4 0x30014
+-#define OGAMC3 0x30018
+-#define OGAMC2 0x3001c
+-#define OGAMC1 0x30020
+-#define OGAMC0 0x30024
+-/*
+- * Palette registers
+- */
+-#define PALETTE_A 0x0a000
+-#define PALETTE_B 0x0a800
++extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+
+ #define IS_I830(dev) ((dev)->pci_device == 0x3577)
+ #define IS_845G(dev) ((dev)->pci_device == 0x2562)
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index df03611..4a2de78 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -31,10 +31,6 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+
+-#define USER_INT_FLAG (1<<1)
+-#define VSYNC_PIPEB_FLAG (1<<5)
+-#define VSYNC_PIPEA_FLAG (1<<7)
+-
+ #define MAX_NOPID ((u32)~0)
+
+ /**
+@@ -236,40 +232,43 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ u16 temp;
+ u32 pipea_stats, pipeb_stats;
+
+- pipea_stats = I915_READ(I915REG_PIPEASTAT);
+- pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
++ pipea_stats = I915_READ(PIPEASTAT);
++ pipeb_stats = I915_READ(PIPEBSTAT);
+
+- temp = I915_READ16(I915REG_INT_IDENTITY_R);
++ temp = I915_READ16(IIR);
+
+- temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
++ temp &= (I915_USER_INTERRUPT |
++ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
++ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT);
+
+ DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
+
+ if (temp == 0)
+ return IRQ_NONE;
+
+- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+- (void) I915_READ16(I915REG_INT_IDENTITY_R);
++ I915_WRITE16(IIR, temp);
++ (void) I915_READ16(IIR);
+ DRM_READMEMORYBARRIER();
+
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+- if (temp & USER_INT_FLAG)
++ if (temp & I915_USER_INTERRUPT)
+ DRM_WAKEUP(&dev_priv->irq_queue);
+
+- if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
++ if (temp & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
++ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
+ int vblank_pipe = dev_priv->vblank_pipe;
+
+ if ((vblank_pipe &
+ (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
+ == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
+- if (temp & VSYNC_PIPEA_FLAG)
++ if (temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
+ atomic_inc(&dev->vbl_received);
+- if (temp & VSYNC_PIPEB_FLAG)
++ if (temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+ atomic_inc(&dev->vbl_received2);
+- } else if (((temp & VSYNC_PIPEA_FLAG) &&
++ } else if (((temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
+ (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
+- ((temp & VSYNC_PIPEB_FLAG) &&
++ ((temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
+ (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
+ atomic_inc(&dev->vbl_received);
+
+@@ -278,12 +277,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+
+ if (dev_priv->swaps_pending > 0)
+ drm_locked_tasklet(dev, i915_vblank_tasklet);
+- I915_WRITE(I915REG_PIPEASTAT,
++ I915_WRITE(PIPEASTAT,
+ pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
+- I915_VBLANK_CLEAR);
+- I915_WRITE(I915REG_PIPEBSTAT,
++ PIPE_VBLANK_INTERRUPT_STATUS);
++ I915_WRITE(PIPEBSTAT,
+ pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
+- I915_VBLANK_CLEAR);
++ PIPE_VBLANK_INTERRUPT_STATUS);
+ }
+
+ return IRQ_HANDLED;
+@@ -304,12 +303,12 @@ static int i915_emit_irq(struct drm_device * dev)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+
+ BEGIN_LP_RING(6);
+- OUT_RING(CMD_STORE_DWORD_IDX);
+- OUT_RING(20);
++ OUT_RING(MI_STORE_DWORD_INDEX);
++ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ OUT_RING(0);
+- OUT_RING(GFX_OP_USER_INTERRUPT);
++ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+
+ return dev_priv->counter;
+@@ -421,11 +420,11 @@ static void i915_enable_interrupt (struct drm_device *dev)
+
+ flag = 0;
+ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+- flag |= VSYNC_PIPEA_FLAG;
++ flag |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+- flag |= VSYNC_PIPEB_FLAG;
++ flag |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+- I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
++ I915_WRITE16(IER, I915_USER_INTERRUPT | flag);
+ }
+
+ /* Set the vblank monitor pipe
+@@ -465,11 +464,11 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
+- flag = I915_READ(I915REG_INT_ENABLE_R);
++ flag = I915_READ(IER);
+ pipe->pipe = 0;
+- if (flag & VSYNC_PIPEA_FLAG)
++ if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
+ pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
+- if (flag & VSYNC_PIPEB_FLAG)
++ if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+ pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+@@ -587,9 +586,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+- I915_WRITE16(I915REG_HWSTAM, 0xfffe);
+- I915_WRITE16(I915REG_INT_MASK_R, 0x0);
+- I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
++ I915_WRITE16(HWSTAM, 0xfffe);
++ I915_WRITE16(IMR, 0x0);
++ I915_WRITE16(IER, 0x0);
+ }
+
+ void i915_driver_irq_postinstall(struct drm_device * dev)
+@@ -614,10 +613,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
+ if (!dev_priv)
+ return;
+
+- I915_WRITE16(I915REG_HWSTAM, 0xffff);
+- I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
+- I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
++ I915_WRITE16(HWSTAM, 0xffff);
++ I915_WRITE16(IMR, 0xffff);
++ I915_WRITE16(IER, 0x0);
+
+- temp = I915_READ16(I915REG_INT_IDENTITY_R);
+- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
++ temp = I915_READ16(IIR);
++ I915_WRITE16(IIR, temp);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+new file mode 100644
+index 0000000..477c64e
+--- /dev/null
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -0,0 +1,1405 @@
++/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _I915_REG_H_
++#define _I915_REG_H_
++
++/* MCH MMIO space */
++/** 915-945 and GM965 MCH register controlling DRAM channel access */
++#define DCC 0x200
++#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
++#define DCC_ADDRESSING_MODE_MASK (3 << 0)
++#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
++
++/** 965 MCH register controlling DRAM channel configuration */
++#define CHDECMISC 0x111
++#define CHDECMISC_FLEXMEMORY (1 << 1)
++
++/*
++ * The Bridge device's PCI config space has information about the
++ * fb aperture size and the amount of pre-reserved memory.
++ */
++#define INTEL_GMCH_CTRL 0x52
++#define INTEL_GMCH_ENABLED 0x4
++#define INTEL_GMCH_MEM_MASK 0x1
++#define INTEL_GMCH_MEM_64M 0x1
++#define INTEL_GMCH_MEM_128M 0
++
++#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
++#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
++
++#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
++#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
++
++/* PCI config space */
++
++#define HPLLCC 0xc0 /* 855 only */
++#define GC_CLOCK_CONTROL_MASK (3 << 0)
++#define GC_CLOCK_133_200 (0 << 0)
++#define GC_CLOCK_100_200 (1 << 0)
++#define GC_CLOCK_100_133 (2 << 0)
++#define GC_CLOCK_166_250 (3 << 0)
++#define GCFGC 0xf0 /* 915+ only */
++#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
++#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
++#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
++#define GC_DISPLAY_CLOCK_MASK (7 << 4)
++#define LBB 0xf4
++
++/* VGA stuff */
++
++#define VGA_ST01_MDA 0x3ba
++#define VGA_ST01_CGA 0x3da
++
++#define VGA_MSR_WRITE 0x3c2
++#define VGA_MSR_READ 0x3cc
++#define VGA_MSR_MEM_EN (1<<1)
++#define VGA_MSR_CGA_MODE (1<<0)
++
++#define VGA_SR_INDEX 0x3c4
++#define VGA_SR_DATA 0x3c5
++
++#define VGA_AR_INDEX 0x3c0
++#define VGA_AR_VID_EN (1<<5)
++#define VGA_AR_DATA_WRITE 0x3c0
++#define VGA_AR_DATA_READ 0x3c1
++
++#define VGA_GR_INDEX 0x3ce
++#define VGA_GR_DATA 0x3cf
++/* GR05 */
++#define VGA_GR_MEM_READ_MODE_SHIFT 3
++#define VGA_GR_MEM_READ_MODE_PLANE 1
++/* GR06 */
++#define VGA_GR_MEM_MODE_MASK 0xc
++#define VGA_GR_MEM_MODE_SHIFT 2
++#define VGA_GR_MEM_A0000_AFFFF 0
++#define VGA_GR_MEM_A0000_BFFFF 1
++#define VGA_GR_MEM_B0000_B7FFF 2
++#define VGA_GR_MEM_B0000_BFFFF 3
++
++#define VGA_DACMASK 0x3c6
++#define VGA_DACRX 0x3c7
++#define VGA_DACWX 0x3c8
++#define VGA_DACDATA 0x3c9
++
++#define VGA_CR_INDEX_MDA 0x3b4
++#define VGA_CR_DATA_MDA 0x3b5
++#define VGA_CR_INDEX_CGA 0x3d4
++#define VGA_CR_DATA_CGA 0x3d5
++
++/*
++ * Memory interface instructions used by the kernel
++ */
++#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
++
++#define MI_NOOP MI_INSTR(0, 0)
++#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
++#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
++#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
++#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
++#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
++#define MI_FLUSH MI_INSTR(0x04, 0)
++#define MI_READ_FLUSH (1 << 0)
++#define MI_EXE_FLUSH (1 << 1)
++#define MI_NO_WRITE_FLUSH (1 << 2)
++#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
++#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
++#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
++#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
++#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
++#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
++#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
++#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
++#define MI_STORE_DWORD_INDEX_SHIFT 2
++#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
++#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
++#define MI_BATCH_NON_SECURE (1)
++#define MI_BATCH_NON_SECURE_I965 (1<<8)
++#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
++
++/*
++ * 3D instructions used by the kernel
++ */
++#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
++
++#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
++#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
++#define SC_UPDATE_SCISSOR (0x1<<1)
++#define SC_ENABLE_MASK (0x1<<0)
++#define SC_ENABLE (0x1<<0)
++#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
++#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
++#define SCI_YMIN_MASK (0xffff<<16)
++#define SCI_XMIN_MASK (0xffff<<0)
++#define SCI_YMAX_MASK (0xffff<<16)
++#define SCI_XMAX_MASK (0xffff<<0)
++#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
++#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
++#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
++#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
++#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
++#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
++#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
++#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
++#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
++#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
++#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
++#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
++#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
++#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
++#define BLT_DEPTH_8 (0<<24)
++#define BLT_DEPTH_16_565 (1<<24)
++#define BLT_DEPTH_16_1555 (2<<24)
++#define BLT_DEPTH_32 (3<<24)
++#define BLT_ROP_GXCOPY (0xcc<<16)
++#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
++#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
++#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
++#define ASYNC_FLIP (1<<22)
++#define DISPLAY_PLANE_A (0<<20)
++#define DISPLAY_PLANE_B (1<<20)
++
++/*
++ * Instruction and interrupt control regs
++ */
++
++#define PRB0_TAIL 0x02030
++#define PRB0_HEAD 0x02034
++#define PRB0_START 0x02038
++#define PRB0_CTL 0x0203c
++#define TAIL_ADDR 0x001FFFF8
++#define HEAD_WRAP_COUNT 0xFFE00000
++#define HEAD_WRAP_ONE 0x00200000
++#define HEAD_ADDR 0x001FFFFC
++#define RING_NR_PAGES 0x001FF000
++#define RING_REPORT_MASK 0x00000006
++#define RING_REPORT_64K 0x00000002
++#define RING_REPORT_128K 0x00000004
++#define RING_NO_REPORT 0x00000000
++#define RING_VALID_MASK 0x00000001
++#define RING_VALID 0x00000001
++#define RING_INVALID 0x00000000
++#define PRB1_TAIL 0x02040 /* 915+ only */
++#define PRB1_HEAD 0x02044 /* 915+ only */
++#define PRB1_START 0x02048 /* 915+ only */
++#define PRB1_CTL 0x0204c /* 915+ only */
++#define ACTHD_I965 0x02074
++#define HWS_PGA 0x02080
++#define HWS_ADDRESS_MASK 0xfffff000
++#define HWS_START_ADDRESS_SHIFT 4
++#define IPEIR 0x02088
++#define NOPID 0x02094
++#define HWSTAM 0x02098
++#define SCPD0 0x0209c /* 915+ only */
++#define IER 0x020a0
++#define IIR 0x020a4
++#define IMR 0x020a8
++#define ISR 0x020ac
++#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
++#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
++#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
++#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
++#define I915_HWB_OOM_INTERRUPT (1<<13)
++#define I915_SYNC_STATUS_INTERRUPT (1<<12)
++#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
++#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
++#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
++#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
++#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
++#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
++#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
++#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
++#define I915_DEBUG_INTERRUPT (1<<2)
++#define I915_USER_INTERRUPT (1<<1)
++#define I915_ASLE_INTERRUPT (1<<0)
++#define EIR 0x020b0
++#define EMR 0x020b4
++#define ESR 0x020b8
++#define INSTPM 0x020c0
++#define ACTHD 0x020c8
++#define FW_BLC 0x020d8
++#define FW_BLC_SELF 0x020e0 /* 915+ only */
++#define MI_ARB_STATE 0x020e4 /* 915+ only */
++#define CACHE_MODE_0 0x02120 /* 915+ only */
++#define CM0_MASK_SHIFT 16
++#define CM0_IZ_OPT_DISABLE (1<<6)
++#define CM0_ZR_OPT_DISABLE (1<<5)
++#define CM0_DEPTH_EVICT_DISABLE (1<<4)
++#define CM0_COLOR_EVICT_DISABLE (1<<3)
++#define CM0_DEPTH_WRITE_DISABLE (1<<1)
++#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
++#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
++
++/*
++ * Framebuffer compression (915+ only)
++ */
++
++#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
++#define FBC_LL_BASE 0x03204 /* 4k page aligned */
++#define FBC_CONTROL 0x03208
++#define FBC_CTL_EN (1<<31)
++#define FBC_CTL_PERIODIC (1<<30)
++#define FBC_CTL_INTERVAL_SHIFT (16)
++#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
++#define FBC_CTL_STRIDE_SHIFT (5)
++#define FBC_CTL_FENCENO (1<<0)
++#define FBC_COMMAND 0x0320c
++#define FBC_CMD_COMPRESS (1<<0)
++#define FBC_STATUS 0x03210
++#define FBC_STAT_COMPRESSING (1<<31)
++#define FBC_STAT_COMPRESSED (1<<30)
++#define FBC_STAT_MODIFIED (1<<29)
++#define FBC_STAT_CURRENT_LINE (1<<0)
++#define FBC_CONTROL2 0x03214
++#define FBC_CTL_FENCE_DBL (0<<4)
++#define FBC_CTL_IDLE_IMM (0<<2)
++#define FBC_CTL_IDLE_FULL (1<<2)
++#define FBC_CTL_IDLE_LINE (2<<2)
++#define FBC_CTL_IDLE_DEBUG (3<<2)
++#define FBC_CTL_CPU_FENCE (1<<1)
++#define FBC_CTL_PLANEA (0<<0)
++#define FBC_CTL_PLANEB (1<<0)
++#define FBC_FENCE_OFF 0x0321b
++
++#define FBC_LL_SIZE (1536)
++
++/*
++ * GPIO regs
++ */
++#define GPIOA 0x5010
++#define GPIOB 0x5014
++#define GPIOC 0x5018
++#define GPIOD 0x501c
++#define GPIOE 0x5020
++#define GPIOF 0x5024
++#define GPIOG 0x5028
++#define GPIOH 0x502c
++# define GPIO_CLOCK_DIR_MASK (1 << 0)
++# define GPIO_CLOCK_DIR_IN (0 << 1)
++# define GPIO_CLOCK_DIR_OUT (1 << 1)
++# define GPIO_CLOCK_VAL_MASK (1 << 2)
++# define GPIO_CLOCK_VAL_OUT (1 << 3)
++# define GPIO_CLOCK_VAL_IN (1 << 4)
++# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
++# define GPIO_DATA_DIR_MASK (1 << 8)
++# define GPIO_DATA_DIR_IN (0 << 9)
++# define GPIO_DATA_DIR_OUT (1 << 9)
++# define GPIO_DATA_VAL_MASK (1 << 10)
++# define GPIO_DATA_VAL_OUT (1 << 11)
++# define GPIO_DATA_VAL_IN (1 << 12)
++# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
++
++/*
++ * Clock control & power management
++ */
++
++#define VGA0 0x6000
++#define VGA1 0x6004
++#define VGA_PD 0x6010
++#define VGA0_PD_P2_DIV_4 (1 << 7)
++#define VGA0_PD_P1_DIV_2 (1 << 5)
++#define VGA0_PD_P1_SHIFT 0
++#define VGA0_PD_P1_MASK (0x1f << 0)
++#define VGA1_PD_P2_DIV_4 (1 << 15)
++#define VGA1_PD_P1_DIV_2 (1 << 13)
++#define VGA1_PD_P1_SHIFT 8
++#define VGA1_PD_P1_MASK (0x1f << 8)
++#define DPLL_A 0x06014
++#define DPLL_B 0x06018
++#define DPLL_VCO_ENABLE (1 << 31)
++#define DPLL_DVO_HIGH_SPEED (1 << 30)
++#define DPLL_SYNCLOCK_ENABLE (1 << 29)
++#define DPLL_VGA_MODE_DIS (1 << 28)
++#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
++#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
++#define DPLL_MODE_MASK (3 << 26)
++#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
++#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
++#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
++#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
++#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
++#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
++
++#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
++#define I915_CRC_ERROR_ENABLE (1UL<<29)
++#define I915_CRC_DONE_ENABLE (1UL<<28)
++#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
++#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
++#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
++#define I915_DPST_EVENT_ENABLE (1UL<<23)
++#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
++#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
++#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
++#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
++#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
++#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
++#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
++#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
++#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
++#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
++#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
++#define I915_DPST_EVENT_STATUS (1UL<<7)
++#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
++#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
++#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
++#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
++#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
++#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
++
++#define SRX_INDEX 0x3c4
++#define SRX_DATA 0x3c5
++#define SR01 1
++#define SR01_SCREEN_OFF (1<<5)
++
++#define PPCR 0x61204
++#define PPCR_ON (1<<0)
++
++#define DVOB 0x61140
++#define DVOB_ON (1<<31)
++#define DVOC 0x61160
++#define DVOC_ON (1<<31)
++#define LVDS 0x61180
++#define LVDS_ON (1<<31)
++
++#define ADPA 0x61100
++#define ADPA_DPMS_MASK (~(3<<10))
++#define ADPA_DPMS_ON (0<<10)
++#define ADPA_DPMS_SUSPEND (1<<10)
++#define ADPA_DPMS_STANDBY (2<<10)
++#define ADPA_DPMS_OFF (3<<10)
++
++#define RING_TAIL 0x00
++#define TAIL_ADDR 0x001FFFF8
++#define RING_HEAD 0x04
++#define HEAD_WRAP_COUNT 0xFFE00000
++#define HEAD_WRAP_ONE 0x00200000
++#define HEAD_ADDR 0x001FFFFC
++#define RING_START 0x08
++#define START_ADDR 0xFFFFF000
++#define RING_LEN 0x0C
++#define RING_NR_PAGES 0x001FF000
++#define RING_REPORT_MASK 0x00000006
++#define RING_REPORT_64K 0x00000002
++#define RING_REPORT_128K 0x00000004
++#define RING_NO_REPORT 0x00000000
++#define RING_VALID_MASK 0x00000001
++#define RING_VALID 0x00000001
++#define RING_INVALID 0x00000000
++
++/* Scratch pad debug 0 reg:
++ */
++#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
++/*
++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
++ * this field (only one bit may be set).
++ */
++#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
++#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
++/* i830, required in DVO non-gang */
++#define PLL_P2_DIVIDE_BY_4 (1 << 23)
++#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
++#define PLL_REF_INPUT_DREFCLK (0 << 13)
++#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
++#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
++#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
++#define PLL_REF_INPUT_MASK (3 << 13)
++#define PLL_LOAD_PULSE_PHASE_SHIFT 9
++/*
++ * Parallel to Serial Load Pulse phase selection.
++ * Selects the phase for the 10X DPLL clock for the PCIe
++ * digital display port. The range is 4 to 13; 10 or more
++ * is just a flip delay. The default is 6
++ */
++#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
++#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
++/*
++ * SDVO multiplier for 945G/GM. Not used on 965.
++ */
++#define SDVO_MULTIPLIER_MASK 0x000000ff
++#define SDVO_MULTIPLIER_SHIFT_HIRES 4
++#define SDVO_MULTIPLIER_SHIFT_VGA 0
++#define DPLL_A_MD 0x0601c /* 965+ only */
++/*
++ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
++ *
++ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
++ */
++#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
++#define DPLL_MD_UDI_DIVIDER_SHIFT 24
++/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
++#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
++#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
++/*
++ * SDVO/UDI pixel multiplier.
++ *
++ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
++ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
++ * dummy bytes in the datastream at an increased clock rate, with both sides of
++ * the link knowing how many of the bytes are filler.
++ *
++ * So, for a mode with a dotclock of 65MHz, we would want to double the clock
++ * rate to 130MHz to get a bus rate of 1.30GHz. The DPLL clock rate would be
++ * set to 130MHz, and the SDVO multiplier set to 2x in this register and
++ * through an SDVO command.
++ *
++ * This register field has values of multiplication factor minus 1, with
++ * a maximum multiplier of 5 for SDVO.
++ */
++#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
++#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
++/*
++ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
++ * This is best set to the default value (3) or the CRT won't work. No,
++ * I don't entirely understand what this does...
++ */
++#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
++#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
++#define DPLL_B_MD 0x06020 /* 965+ only */
++#define FPA0 0x06040
++#define FPA1 0x06044
++#define FPB0 0x06048
++#define FPB1 0x0604c
++#define FP_N_DIV_MASK 0x003f0000
++#define FP_N_DIV_SHIFT 16
++#define FP_M1_DIV_MASK 0x00003f00
++#define FP_M1_DIV_SHIFT 8
++#define FP_M2_DIV_MASK 0x0000003f
++#define FP_M2_DIV_SHIFT 0
++#define DPLL_TEST 0x606c
++#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
++#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
++#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
++#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
++#define DPLLB_TEST_N_BYPASS (1 << 19)
++#define DPLLB_TEST_M_BYPASS (1 << 18)
++#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
++#define DPLLA_TEST_N_BYPASS (1 << 3)
++#define DPLLA_TEST_M_BYPASS (1 << 2)
++#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
++#define D_STATE 0x6104
++#define CG_2D_DIS 0x6200
++#define CG_3D_DIS 0x6204
++
++/*
++ * Palette regs
++ */
++
++#define PALETTE_A 0x0a000
++#define PALETTE_B 0x0a800
++
++/*
++ * Overlay regs
++ */
++
++#define OVADD 0x30000
++#define DOVSTA 0x30008
++#define OC_BUF (0x3<<20)
++#define OGAMC5 0x30010
++#define OGAMC4 0x30014
++#define OGAMC3 0x30018
++#define OGAMC2 0x3001c
++#define OGAMC1 0x30020
++#define OGAMC0 0x30024
++
++/*
++ * Display engine regs
++ */
++
++/* Pipe A timing regs */
++#define HTOTAL_A 0x60000
++#define HBLANK_A 0x60004
++#define HSYNC_A 0x60008
++#define VTOTAL_A 0x6000c
++#define VBLANK_A 0x60010
++#define VSYNC_A 0x60014
++#define PIPEASRC 0x6001c
++#define BCLRPAT_A 0x60020
++
++/* Pipe B timing regs */
++#define HTOTAL_B 0x61000
++#define HBLANK_B 0x61004
++#define HSYNC_B 0x61008
++#define VTOTAL_B 0x6100c
++#define VBLANK_B 0x61010
++#define VSYNC_B 0x61014
++#define PIPEBSRC 0x6101c
++#define BCLRPAT_B 0x61020
++
++/* VGA port control */
++#define ADPA 0x61100
++#define ADPA_DAC_ENABLE (1<<31)
++#define ADPA_DAC_DISABLE 0
++#define ADPA_PIPE_SELECT_MASK (1<<30)
++#define ADPA_PIPE_A_SELECT 0
++#define ADPA_PIPE_B_SELECT (1<<30)
++#define ADPA_USE_VGA_HVPOLARITY (1<<15)
++#define ADPA_SETS_HVPOLARITY 0
++#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
++#define ADPA_VSYNC_CNTL_ENABLE 0
++#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
++#define ADPA_HSYNC_CNTL_ENABLE 0
++#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
++#define ADPA_VSYNC_ACTIVE_LOW 0
++#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
++#define ADPA_HSYNC_ACTIVE_LOW 0
++#define ADPA_DPMS_MASK (~(3<<10))
++#define ADPA_DPMS_ON (0<<10)
++#define ADPA_DPMS_SUSPEND (1<<10)
++#define ADPA_DPMS_STANDBY (2<<10)
++#define ADPA_DPMS_OFF (3<<10)
++
++/* Hotplug control (945+ only) */
++#define PORT_HOTPLUG_EN 0x61110
++#define SDVOB_HOTPLUG_INT_EN (1 << 26)
++#define SDVOC_HOTPLUG_INT_EN (1 << 25)
++#define TV_HOTPLUG_INT_EN (1 << 18)
++#define CRT_HOTPLUG_INT_EN (1 << 9)
++#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
++
++#define PORT_HOTPLUG_STAT 0x61114
++#define CRT_HOTPLUG_INT_STATUS (1 << 11)
++#define TV_HOTPLUG_INT_STATUS (1 << 10)
++#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
++#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
++#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
++#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
++#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
++#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
++
++/* SDVO port control */
++#define SDVOB 0x61140
++#define SDVOC 0x61160
++#define SDVO_ENABLE (1 << 31)
++#define SDVO_PIPE_B_SELECT (1 << 30)
++#define SDVO_STALL_SELECT (1 << 29)
++#define SDVO_INTERRUPT_ENABLE (1 << 26)
++/**
++ * 915G/GM SDVO pixel multiplier.
++ *
++ * Programmed value is multiplier - 1, up to 5x.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
++#define SDVO_PORT_MULTIPLY_SHIFT 23
++#define SDVO_PHASE_SELECT_MASK (15 << 19)
++#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
++#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
++#define SDVOC_GANG_MODE (1 << 16)
++#define SDVO_BORDER_ENABLE (1 << 7)
++#define SDVOB_PCIE_CONCURRENCY (1 << 3)
++#define SDVO_DETECTED (1 << 2)
++/* Bits to be preserved when writing */
++#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
++#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
++
++/* DVO port control */
++#define DVOA 0x61120
++#define DVOB 0x61140
++#define DVOC 0x61160
++#define DVO_ENABLE (1 << 31)
++#define DVO_PIPE_B_SELECT (1 << 30)
++#define DVO_PIPE_STALL_UNUSED (0 << 28)
++#define DVO_PIPE_STALL (1 << 28)
++#define DVO_PIPE_STALL_TV (2 << 28)
++#define DVO_PIPE_STALL_MASK (3 << 28)
++#define DVO_USE_VGA_SYNC (1 << 15)
++#define DVO_DATA_ORDER_I740 (0 << 14)
++#define DVO_DATA_ORDER_FP (1 << 14)
++#define DVO_VSYNC_DISABLE (1 << 11)
++#define DVO_HSYNC_DISABLE (1 << 10)
++#define DVO_VSYNC_TRISTATE (1 << 9)
++#define DVO_HSYNC_TRISTATE (1 << 8)
++#define DVO_BORDER_ENABLE (1 << 7)
++#define DVO_DATA_ORDER_GBRG (1 << 6)
++#define DVO_DATA_ORDER_RGGB (0 << 6)
++#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6)
++#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6)
++#define DVO_VSYNC_ACTIVE_HIGH (1 << 4)
++#define DVO_HSYNC_ACTIVE_HIGH (1 << 3)
++#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
++#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
++#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
++#define DVO_PRESERVE_MASK (0x7<<24)
++#define DVOA_SRCDIM 0x61124
++#define DVOB_SRCDIM 0x61144
++#define DVOC_SRCDIM 0x61164
++#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
++#define DVO_SRCDIM_VERTICAL_SHIFT 0
++
++/* LVDS port control */
++#define LVDS 0x61180
++/*
++ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
++ * the DPLL semantics change when the LVDS is assigned to that pipe.
++ */
++#define LVDS_PORT_EN (1 << 31)
++/* Selects pipe B for LVDS data. Must be set on pre-965. */
++#define LVDS_PIPEB_SELECT (1 << 30)
++/*
++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
++ * pixel.
++ */
++#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
++#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
++#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
++/*
++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
++ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
++ * on.
++ */
++#define LVDS_A3_POWER_MASK (3 << 6)
++#define LVDS_A3_POWER_DOWN (0 << 6)
++#define LVDS_A3_POWER_UP (3 << 6)
++/*
++ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
++ * is set.
++ */
++#define LVDS_CLKB_POWER_MASK (3 << 4)
++#define LVDS_CLKB_POWER_DOWN (0 << 4)
++#define LVDS_CLKB_POWER_UP (3 << 4)
++/*
++ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
++ * setting for whether we are in dual-channel mode. The B3 pair will
++ * additionally only be powered up when LVDS_A3_POWER_UP is set.
++ */
++#define LVDS_B0B3_POWER_MASK (3 << 2)
++#define LVDS_B0B3_POWER_DOWN (0 << 2)
++#define LVDS_B0B3_POWER_UP (3 << 2)
++
++/* Panel power sequencing */
++#define PP_STATUS 0x61200
++#define PP_ON (1 << 31)
++/*
++ * Indicates that all dependencies of the panel are on:
++ *
++ * - PLL enabled
++ * - pipe enabled
++ * - LVDS/DVOB/DVOC on
++ */
++#define PP_READY (1 << 30)
++#define PP_SEQUENCE_NONE (0 << 28)
++#define PP_SEQUENCE_ON (1 << 28)
++#define PP_SEQUENCE_OFF (2 << 28)
++#define PP_SEQUENCE_MASK 0x30000000
++#define PP_CONTROL 0x61204
++#define POWER_TARGET_ON (1 << 0)
++#define PP_ON_DELAYS 0x61208
++#define PP_OFF_DELAYS 0x6120c
++#define PP_DIVISOR 0x61210
++
++/* Panel fitting */
++#define PFIT_CONTROL 0x61230
++#define PFIT_ENABLE (1 << 31)
++#define PFIT_PIPE_MASK (3 << 29)
++#define PFIT_PIPE_SHIFT 29
++#define VERT_INTERP_DISABLE (0 << 10)
++#define VERT_INTERP_BILINEAR (1 << 10)
++#define VERT_INTERP_MASK (3 << 10)
++#define VERT_AUTO_SCALE (1 << 9)
++#define HORIZ_INTERP_DISABLE (0 << 6)
++#define HORIZ_INTERP_BILINEAR (1 << 6)
++#define HORIZ_INTERP_MASK (3 << 6)
++#define HORIZ_AUTO_SCALE (1 << 5)
++#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
++#define PFIT_PGM_RATIOS 0x61234
++#define PFIT_VERT_SCALE_MASK 0xfff00000
++#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
++#define PFIT_AUTO_RATIOS 0x61238
++
++/* Backlight control */
++#define BLC_PWM_CTL 0x61254
++#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
++#define BLC_PWM_CTL2 0x61250 /* 965+ only */
++/*
++ * This is the most significant 15 bits of the number of backlight cycles in a
++ * complete cycle of the modulated backlight control.
++ *
++ * The actual value is this field multiplied by two.
++ */
++#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
++#define BLM_LEGACY_MODE (1 << 16)
++/*
++ * This is the number of cycles out of the backlight modulation cycle for which
++ * the backlight is on.
++ *
++ * This field must be no greater than the number of cycles in the complete
++ * backlight modulation cycle.
++ */
++#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
++#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
++
++/* TV port control */
++#define TV_CTL 0x68000
++/** Enables the TV encoder */
++# define TV_ENC_ENABLE (1 << 31)
++/** Sources the TV encoder input from pipe B instead of A. */
++# define TV_ENC_PIPEB_SELECT (1 << 30)
++/** Outputs composite video (DAC A only) */
++# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
++/** Outputs SVideo video (DAC B/C) */
++# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
++/** Outputs Component video (DAC A/B/C) */
++# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
++/** Outputs Composite and SVideo (DAC A/B/C) */
++# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
++# define TV_TRILEVEL_SYNC (1 << 21)
++/** Enables slow sync generation (945GM only) */
++# define TV_SLOW_SYNC (1 << 20)
++/** Selects 4x oversampling for 480i and 576p */
++# define TV_OVERSAMPLE_4X (0 << 18)
++/** Selects 2x oversampling for 720p and 1080i */
++# define TV_OVERSAMPLE_2X (1 << 18)
++/** Selects no oversampling for 1080p */
++# define TV_OVERSAMPLE_NONE (2 << 18)
++/** Selects 8x oversampling */
++# define TV_OVERSAMPLE_8X (3 << 18)
++/** Selects progressive mode rather than interlaced */
++# define TV_PROGRESSIVE (1 << 17)
++/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
++# define TV_PAL_BURST (1 << 16)
++/** Field for setting delay of Y compared to C */
++# define TV_YC_SKEW_MASK (7 << 12)
++/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
++# define TV_ENC_SDP_FIX (1 << 11)
++/**
++ * Enables a fix for the 915GM only.
++ *
++ * Not sure what it does.
++ */
++# define TV_ENC_C0_FIX (1 << 10)
++/** Bits that must be preserved by software */
++# define TV_CTL_SAVE ((3 << 8) | (3 << 6))
++# define TV_FUSE_STATE_MASK (3 << 4)
++/** Read-only state that reports all features enabled */
++# define TV_FUSE_STATE_ENABLED (0 << 4)
++/** Read-only state that reports that Macrovision is disabled in hardware*/
++# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
++/** Read-only state that reports that TV-out is disabled in hardware. */
++# define TV_FUSE_STATE_DISABLED (2 << 4)
++/** Normal operation */
++# define TV_TEST_MODE_NORMAL (0 << 0)
++/** Encoder test pattern 1 - combo pattern */
++# define TV_TEST_MODE_PATTERN_1 (1 << 0)
++/** Encoder test pattern 2 - full screen vertical 75% color bars */
++# define TV_TEST_MODE_PATTERN_2 (2 << 0)
++/** Encoder test pattern 3 - full screen horizontal 75% color bars */
++# define TV_TEST_MODE_PATTERN_3 (3 << 0)
++/** Encoder test pattern 4 - random noise */
++# define TV_TEST_MODE_PATTERN_4 (4 << 0)
++/** Encoder test pattern 5 - linear color ramps */
++# define TV_TEST_MODE_PATTERN_5 (5 << 0)
++/**
++ * This test mode forces the DACs to 50% of full output.
++ *
++ * This is used for load detection in combination with TVDAC_SENSE_MASK
++ */
++# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
++# define TV_TEST_MODE_MASK (7 << 0)
++
++#define TV_DAC 0x68004
++/**
++ * Reports that DAC state change logic has reported change (RO).
++ *
++ * This gets cleared when TVDAC_STATE_CHG_EN is cleared.
++ */
++# define TVDAC_STATE_CHG (1 << 31)
++# define TVDAC_SENSE_MASK (7 << 28)
++/** Reports that DAC A voltage is above the detect threshold */
++# define TVDAC_A_SENSE (1 << 30)
++/** Reports that DAC B voltage is above the detect threshold */
++# define TVDAC_B_SENSE (1 << 29)
++/** Reports that DAC C voltage is above the detect threshold */
++# define TVDAC_C_SENSE (1 << 28)
++/**
++ * Enables DAC state detection logic, for load-based TV detection.
++ *
++ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
++ * to off, for load detection to work.
++ */
++# define TVDAC_STATE_CHG_EN (1 << 27)
++/** Sets the DAC A sense value to high */
++# define TVDAC_A_SENSE_CTL (1 << 26)
++/** Sets the DAC B sense value to high */
++# define TVDAC_B_SENSE_CTL (1 << 25)
++/** Sets the DAC C sense value to high */
++# define TVDAC_C_SENSE_CTL (1 << 24)
++/** Overrides the ENC_ENABLE and DAC voltage levels */
++# define DAC_CTL_OVERRIDE (1 << 7)
++/** Sets the slew rate. Must be preserved in software */
++# define ENC_TVDAC_SLEW_FAST (1 << 6)
++# define DAC_A_1_3_V (0 << 4)
++# define DAC_A_1_1_V (1 << 4)
++# define DAC_A_0_7_V (2 << 4)
++# define DAC_A_OFF (3 << 4)
++# define DAC_B_1_3_V (0 << 2)
++# define DAC_B_1_1_V (1 << 2)
++# define DAC_B_0_7_V (2 << 2)
++# define DAC_B_OFF (3 << 2)
++# define DAC_C_1_3_V (0 << 0)
++# define DAC_C_1_1_V (1 << 0)
++# define DAC_C_0_7_V (2 << 0)
++# define DAC_C_OFF (3 << 0)
++
++/**
++ * CSC coefficients are stored in a floating point format with 9 bits of
++ * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
++ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
++ * -1 (0x3) being the only legal negative value.
++ */
++#define TV_CSC_Y 0x68010
++# define TV_RY_MASK 0x07ff0000
++# define TV_RY_SHIFT 16
++# define TV_GY_MASK 0x00000fff
++# define TV_GY_SHIFT 0
++
++#define TV_CSC_Y2 0x68014
++# define TV_BY_MASK 0x07ff0000
++# define TV_BY_SHIFT 16
++/**
++ * Y attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AY_MASK 0x000003ff
++# define TV_AY_SHIFT 0
++
++#define TV_CSC_U 0x68018
++# define TV_RU_MASK 0x07ff0000
++# define TV_RU_SHIFT 16
++# define TV_GU_MASK 0x000007ff
++# define TV_GU_SHIFT 0
++
++#define TV_CSC_U2 0x6801c
++# define TV_BU_MASK 0x07ff0000
++# define TV_BU_SHIFT 16
++/**
++ * U attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AU_MASK 0x000003ff
++# define TV_AU_SHIFT 0
++
++#define TV_CSC_V 0x68020
++# define TV_RV_MASK 0x0fff0000
++# define TV_RV_SHIFT 16
++# define TV_GV_MASK 0x000007ff
++# define TV_GV_SHIFT 0
++
++#define TV_CSC_V2 0x68024
++# define TV_BV_MASK 0x07ff0000
++# define TV_BV_SHIFT 16
++/**
++ * V attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AV_MASK 0x000007ff
++# define TV_AV_SHIFT 0
++
++#define TV_CLR_KNOBS 0x68028
++/** 2s-complement brightness adjustment */
++# define TV_BRIGHTNESS_MASK 0xff000000
++# define TV_BRIGHTNESS_SHIFT 24
++/** Contrast adjustment, as a 2.6 unsigned floating point number */
++# define TV_CONTRAST_MASK 0x00ff0000
++# define TV_CONTRAST_SHIFT 16
++/** Saturation adjustment, as a 2.6 unsigned floating point number */
++# define TV_SATURATION_MASK 0x0000ff00
++# define TV_SATURATION_SHIFT 8
++/** Hue adjustment, as an integer phase angle in degrees */
++# define TV_HUE_MASK 0x000000ff
++# define TV_HUE_SHIFT 0
++
++#define TV_CLR_LEVEL 0x6802c
++/** Controls the DAC level for black */
++# define TV_BLACK_LEVEL_MASK 0x01ff0000
++# define TV_BLACK_LEVEL_SHIFT 16
++/** Controls the DAC level for blanking */
++# define TV_BLANK_LEVEL_MASK 0x000001ff
++# define TV_BLANK_LEVEL_SHIFT 0
++
++#define TV_H_CTL_1 0x68030
++/** Number of pixels in the hsync. */
++# define TV_HSYNC_END_MASK 0x1fff0000
++# define TV_HSYNC_END_SHIFT 16
++/** Total number of pixels minus one in the line (display and blanking). */
++# define TV_HTOTAL_MASK 0x00001fff
++# define TV_HTOTAL_SHIFT 0
++
++#define TV_H_CTL_2 0x68034
++/** Enables the colorburst (needed for non-component color) */
++# define TV_BURST_ENA (1 << 31)
++/** Offset of the colorburst from the start of hsync, in pixels minus one. */
++# define TV_HBURST_START_SHIFT 16
++# define TV_HBURST_START_MASK 0x1fff0000
++/** Length of the colorburst */
++# define TV_HBURST_LEN_SHIFT 0
++# define TV_HBURST_LEN_MASK 0x0001fff
++
++#define TV_H_CTL_3 0x68038
++/** End of hblank, measured in pixels minus one from start of hsync */
++# define TV_HBLANK_END_SHIFT 16
++# define TV_HBLANK_END_MASK 0x1fff0000
++/** Start of hblank, measured in pixels minus one from start of hsync */
++# define TV_HBLANK_START_SHIFT 0
++# define TV_HBLANK_START_MASK 0x0001fff
++
++#define TV_V_CTL_1 0x6803c
++/** XXX */
++# define TV_NBR_END_SHIFT 16
++# define TV_NBR_END_MASK 0x07ff0000
++/** XXX */
++# define TV_VI_END_F1_SHIFT 8
++# define TV_VI_END_F1_MASK 0x00003f00
++/** XXX */
++# define TV_VI_END_F2_SHIFT 0
++# define TV_VI_END_F2_MASK 0x0000003f
++
++#define TV_V_CTL_2 0x68040
++/** Length of vsync, in half lines */
++# define TV_VSYNC_LEN_MASK 0x07ff0000
++# define TV_VSYNC_LEN_SHIFT 16
++/** Offset of the start of vsync in field 1, measured in one less than the
++ * number of half lines.
++ */
++# define TV_VSYNC_START_F1_MASK 0x00007f00
++# define TV_VSYNC_START_F1_SHIFT 8
++/**
++ * Offset of the start of vsync in field 2, measured in one less than the
++ * number of half lines.
++ */
++# define TV_VSYNC_START_F2_MASK 0x0000007f
++# define TV_VSYNC_START_F2_SHIFT 0
++
++#define TV_V_CTL_3 0x68044
++/** Enables generation of the equalization signal */
++# define TV_EQUAL_ENA (1 << 31)
++/** Length of the equalization pulses, in half lines */
++# define TV_VEQ_LEN_MASK 0x007f0000
++# define TV_VEQ_LEN_SHIFT 16
++/** Offset of the start of equalization in field 1, measured in one less than
++ * the number of half lines.
++ */
++# define TV_VEQ_START_F1_MASK 0x0007f00
++# define TV_VEQ_START_F1_SHIFT 8
++/**
++ * Offset of the start of equalization in field 2, measured in one less than
++ * the number of half lines.
++ */
++# define TV_VEQ_START_F2_MASK 0x000007f
++# define TV_VEQ_START_F2_SHIFT 0
++
++#define TV_V_CTL_4 0x68048
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F1_MASK 0x003f0000
++# define TV_VBURST_START_F1_SHIFT 16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F1_MASK 0x000000ff
++# define TV_VBURST_END_F1_SHIFT 0
++
++#define TV_V_CTL_5 0x6804c
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F2_MASK 0x003f0000
++# define TV_VBURST_START_F2_SHIFT 16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F2_MASK 0x000000ff
++# define TV_VBURST_END_F2_SHIFT 0
++
++#define TV_V_CTL_6 0x68050
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F3_MASK 0x003f0000
++# define TV_VBURST_START_F3_SHIFT 16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F3_MASK 0x000000ff
++# define TV_VBURST_END_F3_SHIFT 0
++
++#define TV_V_CTL_7 0x68054
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F4_MASK 0x003f0000
++# define TV_VBURST_START_F4_SHIFT 16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F4_MASK 0x000000ff
++# define TV_VBURST_END_F4_SHIFT 0
++
++#define TV_SC_CTL_1 0x68060
++/** Turns on the first subcarrier phase generation DDA */
++# define TV_SC_DDA1_EN (1 << 31)
++/** Turns on the second subcarrier phase generation DDA */
++# define TV_SC_DDA2_EN (1 << 30)
++/** Turns on the third subcarrier phase generation DDA */
++# define TV_SC_DDA3_EN (1 << 29)
++/** Sets the subcarrier DDA to reset frequency every other field */
++# define TV_SC_RESET_EVERY_2 (0 << 24)
++/** Sets the subcarrier DDA to reset frequency every fourth field */
++# define TV_SC_RESET_EVERY_4 (1 << 24)
++/** Sets the subcarrier DDA to reset frequency every eighth field */
++# define TV_SC_RESET_EVERY_8 (2 << 24)
++/** Sets the subcarrier DDA to never reset the frequency */
++# define TV_SC_RESET_NEVER (3 << 24)
++/** Sets the peak amplitude of the colorburst.*/
++# define TV_BURST_LEVEL_MASK 0x00ff0000
++# define TV_BURST_LEVEL_SHIFT 16
++/** Sets the increment of the first subcarrier phase generation DDA */
++# define TV_SCDDA1_INC_MASK 0x00000fff
++# define TV_SCDDA1_INC_SHIFT 0
++
++#define TV_SC_CTL_2 0x68064
++/** Sets the rollover for the second subcarrier phase generation DDA */
++# define TV_SCDDA2_SIZE_MASK 0x7fff0000
++# define TV_SCDDA2_SIZE_SHIFT 16
++/** Sets the increment of the second subcarrier phase generation DDA */
++# define TV_SCDDA2_INC_MASK 0x00007fff
++# define TV_SCDDA2_INC_SHIFT 0
++
++#define TV_SC_CTL_3 0x68068
++/** Sets the rollover for the third subcarrier phase generation DDA */
++# define TV_SCDDA3_SIZE_MASK 0x7fff0000
++# define TV_SCDDA3_SIZE_SHIFT 16
++/** Sets the increment of the third subcarrier phase generation DDA */
++# define TV_SCDDA3_INC_MASK 0x00007fff
++# define TV_SCDDA3_INC_SHIFT 0
++
++#define TV_WIN_POS 0x68070
++/** X coordinate of the display from the start of horizontal active */
++# define TV_XPOS_MASK 0x1fff0000
++# define TV_XPOS_SHIFT 16
++/** Y coordinate of the display from the start of vertical active (NBR) */
++# define TV_YPOS_MASK 0x00000fff
++# define TV_YPOS_SHIFT 0
++
++#define TV_WIN_SIZE 0x68074
++/** Horizontal size of the display window, measured in pixels*/
++# define TV_XSIZE_MASK 0x1fff0000
++# define TV_XSIZE_SHIFT 16
++/**
++ * Vertical size of the display window, measured in pixels.
++ *
++ * Must be even for interlaced modes.
++ */
++# define TV_YSIZE_MASK 0x00000fff
++# define TV_YSIZE_SHIFT 0
++
++#define TV_FILTER_CTL_1 0x68080
++/**
++ * Enables automatic scaling calculation.
++ *
++ * If set, the rest of the registers are ignored, and the calculated values can
++ * be read back from the register.
++ */
++# define TV_AUTO_SCALE (1 << 31)
++/**
++ * Disables the vertical filter.
++ *
++ * This is required on modes more than 1024 pixels wide */
++# define TV_V_FILTER_BYPASS (1 << 29)
++/** Enables adaptive vertical filtering */
++# define TV_VADAPT (1 << 28)
++# define TV_VADAPT_MODE_MASK (3 << 26)
++/** Selects the least adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_LEAST (0 << 26)
++/** Selects the moderately adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_MODERATE (1 << 26)
++/** Selects the most adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_MOST (3 << 26)
++/**
++ * Sets the horizontal scaling factor.
++ *
++ * This should be the fractional part of the horizontal scaling factor divided
++ * by the oversampling rate. TV_HSCALE should be less than 1, and set to:
++ *
++ * (src width - 1) / ((oversample * dest width) - 1)
++ */
++# define TV_HSCALE_FRAC_MASK 0x00003fff
++# define TV_HSCALE_FRAC_SHIFT 0
++
++#define TV_FILTER_CTL_2 0x68084
++/**
++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
++ */
++# define TV_VSCALE_INT_MASK 0x00038000
++# define TV_VSCALE_INT_SHIFT 15
++/**
++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * \sa TV_VSCALE_INT_MASK
++ */
++# define TV_VSCALE_FRAC_MASK 0x00007fff
++# define TV_VSCALE_FRAC_SHIFT 0
++
++#define TV_FILTER_CTL_3 0x68088
++/**
++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
++ *
++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
++ */
++# define TV_VSCALE_IP_INT_MASK 0x00038000
++# define TV_VSCALE_IP_INT_SHIFT 15
++/**
++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
++ *
++ * \sa TV_VSCALE_IP_INT_MASK
++ */
++# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
++# define TV_VSCALE_IP_FRAC_SHIFT 0
++
++#define TV_CC_CONTROL 0x68090
++# define TV_CC_ENABLE (1 << 31)
++/**
++ * Specifies which field to send the CC data in.
++ *
++ * CC data is usually sent in field 0.
++ */
++# define TV_CC_FID_MASK (1 << 27)
++# define TV_CC_FID_SHIFT 27
++/** Sets the horizontal position of the CC data. Usually 135. */
++# define TV_CC_HOFF_MASK 0x03ff0000
++# define TV_CC_HOFF_SHIFT 16
++/** Sets the vertical position of the CC data. Usually 21 */
++# define TV_CC_LINE_MASK 0x0000003f
++# define TV_CC_LINE_SHIFT 0
++
++#define TV_CC_DATA 0x68094
++# define TV_CC_RDY (1 << 31)
++/** Second word of CC data to be transmitted. */
++# define TV_CC_DATA_2_MASK 0x007f0000
++# define TV_CC_DATA_2_SHIFT 16
++/** First word of CC data to be transmitted. */
++# define TV_CC_DATA_1_MASK 0x0000007f
++# define TV_CC_DATA_1_SHIFT 0
++
++#define TV_H_LUMA_0 0x68100
++#define TV_H_LUMA_59 0x681ec
++#define TV_H_CHROMA_0 0x68200
++#define TV_H_CHROMA_59 0x682ec
++#define TV_V_LUMA_0 0x68300
++#define TV_V_LUMA_42 0x683a8
++#define TV_V_CHROMA_0 0x68400
++#define TV_V_CHROMA_42 0x684a8
++
++/* Display & cursor control */
++
++/* Pipe A */
++#define PIPEADSL 0x70000
++#define PIPEACONF 0x70008
++#define PIPEACONF_ENABLE (1<<31)
++#define PIPEACONF_DISABLE 0
++#define PIPEACONF_DOUBLE_WIDE (1<<30)
++#define I965_PIPECONF_ACTIVE (1<<30)
++#define PIPEACONF_SINGLE_WIDE 0
++#define PIPEACONF_PIPE_UNLOCKED 0
++#define PIPEACONF_PIPE_LOCKED (1<<25)
++#define PIPEACONF_PALETTE 0
++#define PIPEACONF_GAMMA (1<<24)
++#define PIPECONF_FORCE_BORDER (1<<25)
++#define PIPECONF_PROGRESSIVE (0 << 21)
++#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
++#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
++#define PIPEASTAT 0x70024
++#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
++#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
++#define PIPE_CRC_DONE_ENABLE (1UL<<28)
++#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
++#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
++#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
++#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
++#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
++#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
++#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
++#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
++#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
++#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
++#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
++#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
++#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
++#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
++#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
++#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
++#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
++#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
++#define PIPE_DPST_EVENT_STATUS (1UL<<7)
++#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
++#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
++#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
++#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
++#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
++#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
++#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
++
++#define DSPARB 0x70030
++#define DSPARB_CSTART_MASK (0x7f << 7)
++#define DSPARB_CSTART_SHIFT 7
++#define DSPARB_BSTART_MASK (0x7f)
++#define DSPARB_BSTART_SHIFT 0
++/*
++ * The two pipe frame counter registers are not synchronized, so
++ * reading a stable value is somewhat tricky. The following code
++ * should work:
++ *
++ * do {
++ * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
++ *	 PIPE_FRAME_HIGH_SHIFT);
++ * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
++ * PIPE_FRAME_LOW_SHIFT);
++ * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
++ * PIPE_FRAME_HIGH_SHIFT);
++ * } while (high1 != high2);
++ * frame = (high1 << 8) | low1;
++ */
++#define PIPEAFRAMEHIGH 0x70040
++#define PIPE_FRAME_HIGH_MASK 0x0000ffff
++#define PIPE_FRAME_HIGH_SHIFT 0
++#define PIPEAFRAMEPIXEL 0x70044
++#define PIPE_FRAME_LOW_MASK 0xff000000
++#define PIPE_FRAME_LOW_SHIFT 24
++#define PIPE_PIXEL_MASK 0x00ffffff
++#define PIPE_PIXEL_SHIFT 0
++
++/* Cursor A & B regs */
++#define CURACNTR 0x70080
++#define CURSOR_MODE_DISABLE 0x00
++#define CURSOR_MODE_64_32B_AX 0x07
++#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define MCURSOR_GAMMA_ENABLE (1 << 26)
++#define CURABASE 0x70084
++#define CURAPOS 0x70088
++#define CURSOR_POS_MASK 0x007FF
++#define CURSOR_POS_SIGN 0x8000
++#define CURSOR_X_SHIFT 0
++#define CURSOR_Y_SHIFT 16
++#define CURBCNTR 0x700c0
++#define CURBBASE 0x700c4
++#define CURBPOS 0x700c8
++
++/* Display A control */
++#define DSPACNTR 0x70180
++#define DISPLAY_PLANE_ENABLE (1<<31)
++#define DISPLAY_PLANE_DISABLE 0
++#define DISPPLANE_GAMMA_ENABLE (1<<30)
++#define DISPPLANE_GAMMA_DISABLE 0
++#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
++#define DISPPLANE_8BPP (0x2<<26)
++#define DISPPLANE_15_16BPP (0x4<<26)
++#define DISPPLANE_16BPP (0x5<<26)
++#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
++#define DISPPLANE_32BPP (0x7<<26)
++#define DISPPLANE_STEREO_ENABLE (1<<25)
++#define DISPPLANE_STEREO_DISABLE 0
++#define DISPPLANE_SEL_PIPE_MASK (1<<24)
++#define DISPPLANE_SEL_PIPE_A 0
++#define DISPPLANE_SEL_PIPE_B (1<<24)
++#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
++#define DISPPLANE_SRC_KEY_DISABLE 0
++#define DISPPLANE_LINE_DOUBLE (1<<20)
++#define DISPPLANE_NO_LINE_DOUBLE 0
++#define DISPPLANE_STEREO_POLARITY_FIRST 0
++#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
++#define DSPAADDR 0x70184
++#define DSPASTRIDE 0x70188
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#define DSPASURF 0x7019C /* 965+ only */
++#define DSPATILEOFF 0x701A4 /* 965+ only */
++
++/* VBIOS flags */
++#define SWF00 0x71410
++#define SWF01 0x71414
++#define SWF02 0x71418
++#define SWF03 0x7141c
++#define SWF04 0x71420
++#define SWF05 0x71424
++#define SWF06 0x71428
++#define SWF10 0x70410
++#define SWF11 0x70414
++#define SWF14 0x71420
++#define SWF30 0x72414
++#define SWF31 0x72418
++#define SWF32 0x7241c
++
++/* Pipe B */
++#define PIPEBDSL 0x71000
++#define PIPEBCONF 0x71008
++#define PIPEBSTAT 0x71024
++#define PIPEBFRAMEHIGH 0x71040
++#define PIPEBFRAMEPIXEL 0x71044
++
++/* Display B control */
++#define DSPBCNTR 0x71180
++#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
++#define DISPPLANE_ALPHA_TRANS_DISABLE 0
++#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
++#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
++#define DSPBADDR 0x71184
++#define DSPBSTRIDE 0x71188
++#define DSPBPOS 0x7118C
++#define DSPBSIZE 0x71190
++#define DSPBSURF 0x7119C
++#define DSPBTILEOFF 0x711A4
++
++/* VBIOS regs */
++#define VGACNTRL 0x71400
++# define VGA_DISP_DISABLE (1 << 31)
++# define VGA_2X_MODE (1 << 30)
++# define VGA_PIPE_B_SELECT (1 << 29)
++
++#endif /* _I915_REG_H_ */
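
The frame-counter comment above (PIPEAFRAMEHIGH/PIPEAFRAMEPIXEL) describes a two-read loop for getting a stable value; written out as plain C it would look roughly like the sketch below. This is an illustration only, not part of the patch, and it assumes the driver's usual I915_READ accessor in place of the comment's INREG:

static u32 i915_read_pipea_frame_count(drm_i915_private_t *dev_priv)
{
	u32 high1, high2, low;

	/* Re-read the high half until it is stable across the low read. */
	do {
		high1 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
		low = (I915_READ(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT;
		high2 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
	} while (high1 != high2);

	/* Combine: the high field sits above the 8-bit low field. */
	return (high1 << 8) | low;
}
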
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch
new file mode 100644
index 000000000..9337475c3
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch
@@ -0,0 +1,424 @@
+commit 4f99970852559935b27bc634318f34c18c5fd143
+Author: Eric Anholt <eric@anholt.net>
+Date: Tue Jul 29 12:10:39 2008 -0700
+
+ i915: Add support for MSI and interrupt mitigation.
+
+ Previous attempts at interrupt mitigation had been foiled by i915_wait_irq's
+ failure to update the sarea seqno value when the status page indicated that
+ the seqno had already been passed. MSI support has been seen to cut CPU
+ costs by up to 40% in some workloads by avoiding other expensive interrupt
+ handlers for frequent graphics interrupts.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
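
Before the hunks themselves, the core of the handler-side change is the masked IIR read: when MSI is enabled, the handler blanks IMR, reads and acknowledges IIR, and then restores the cached mask so no edge is lost. The condensed excerpt below restates that sequence from the i915_irq.c hunk further down (the comments are added here as a reading aid and are not in the patch):

	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, ~0);            /* mask everything while handling */
	iir = I915_READ(IIR);
	if (iir == 0) {                         /* not our interrupt */
		if (dev->pdev->msi_enabled) {
			I915_WRITE(IMR, dev_priv->irq_mask_reg);
			(void) I915_READ(IMR);  /* flush the posted write */
		}
		return IRQ_NONE;
	}
	I915_WRITE(IIR, iir);                   /* acknowledge what we saw */
	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IIR);                  /* flush posted writes */
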
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 53f0e5a..61ed515 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
+ p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ return -EINVAL;
+
+- p->irq = dev->irq;
++ p->irq = dev->pdev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
+@@ -89,7 +89,7 @@ static int drm_irq_install(struct drm_device * dev)
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+- if (dev->irq == 0)
++ if (dev->pdev->irq == 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+@@ -107,7 +107,7 @@ static int drm_irq_install(struct drm_device * dev)
+ dev->irq_enabled = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+- DRM_DEBUG("irq=%d\n", dev->irq);
++ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+
+ if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
+ init_waitqueue_head(&dev->vbl_queue);
+@@ -127,8 +127,12 @@ static int drm_irq_install(struct drm_device * dev)
+ if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+ sh_flags = IRQF_SHARED;
+
+- ret = request_irq(dev->irq, dev->driver->irq_handler,
++ ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
+ sh_flags, dev->devname, dev);
++ /* Expose the device irq number to drivers that want to export it for
++ * whatever reason.
++ */
++ dev->irq = dev->pdev->irq;
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+@@ -164,11 +168,11 @@ int drm_irq_uninstall(struct drm_device * dev)
+ if (!irq_enabled)
+ return -EINVAL;
+
+- DRM_DEBUG("irq=%d\n", dev->irq);
++ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+
+ dev->driver->irq_uninstall(dev);
+
+- free_irq(dev->irq, dev);
++ free_irq(dev->pdev->irq, dev);
+
+ dev->locked_tasklet_func = NULL;
+
+@@ -201,7 +205,7 @@ int drm_control(struct drm_device *dev, void *data,
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+- ctl->irq != dev->irq)
++ ctl->irq != dev->pdev->irq)
+ return -EINVAL;
+ return drm_irq_install(dev);
+ case DRM_UNINST_HANDLER:
+@@ -239,7 +243,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
+ int ret = 0;
+ unsigned int flags, seq;
+
+- if ((!dev->irq) || (!dev->irq_enabled))
++ if ((!dev->pdev->irq) || (!dev->irq_enabled))
+ return -EINVAL;
+
+ if (vblwait->request.type &
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 7be580b..10bfb0c 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -84,7 +84,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
+ * may not have been called from userspace and after dev_private
+ * is freed, it's too late.
+ */
+- if (dev->irq)
++ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+ if (dev_priv->ring.virtual_start) {
+@@ -644,7 +644,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+- value = dev->irq ? 1 : 0;
++ value = dev->irq_enabled;
+ break;
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ value = dev_priv->allow_batchbuffer ? 1 : 0;
+@@ -763,6 +763,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+ _DRM_KERNEL | _DRM_DRIVER,
+ &dev_priv->mmio_map);
++
++
++ /* On the 945G/GM, the chipset reports the MSI capability on the
++ * integrated graphics even though the support isn't actually there
++ * according to the published specs. It doesn't appear to function
++ * correctly in testing on 945G.
++ * This may be a side effect of MSI having been made available for PEG
++ * and the registers being closely associated.
++ */
++ if (!IS_I945G(dev) && !IS_I945GM(dev))
++ pci_enable_msi(dev->pdev);
++
++ spin_lock_init(&dev_priv->user_irq_lock);
++
+ return ret;
+ }
+
+@@ -770,6 +784,9 @@ int i915_driver_unload(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
++ if (dev->pdev->msi_enabled)
++ pci_disable_msi(dev->pdev);
++
+ if (dev_priv->mmio_map)
+ drm_rmmap(dev, dev_priv->mmio_map);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index afb51a3..8daf0d8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -105,6 +105,12 @@ typedef struct drm_i915_private {
+ wait_queue_head_t irq_queue;
+ atomic_t irq_received;
+ atomic_t irq_emitted;
++ /** Protects user_irq_refcount and irq_mask_reg */
++ spinlock_t user_irq_lock;
++ /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
++ int user_irq_refcount;
++ /** Cached value of IMR to avoid reads in updating the bitfield */
++ u32 irq_mask_reg;
+
+ int tex_lru_log_granularity;
+ int allow_batchbuffer;
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 4a2de78..24d11ed 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -33,6 +33,31 @@
+
+ #define MAX_NOPID ((u32)~0)
+
++/** These are the interrupts used by the driver */
++#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
++ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
++ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
++
++static inline void
++i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
++{
++ if ((dev_priv->irq_mask_reg & mask) != 0) {
++ dev_priv->irq_mask_reg &= ~mask;
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ (void) I915_READ(IMR);
++ }
++}
++
++static inline void
++i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
++{
++ if ((dev_priv->irq_mask_reg & mask) != mask) {
++ dev_priv->irq_mask_reg |= mask;
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ (void) I915_READ(IMR);
++ }
++}
++
+ /**
+ * Emit blits for scheduled buffer swaps.
+ *
+@@ -229,46 +254,50 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ {
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+- u16 temp;
+ u32 pipea_stats, pipeb_stats;
++ u32 iir;
+
+ pipea_stats = I915_READ(PIPEASTAT);
+ pipeb_stats = I915_READ(PIPEBSTAT);
+
+- temp = I915_READ16(IIR);
+-
+- temp &= (I915_USER_INTERRUPT |
+- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT);
++ if (dev->pdev->msi_enabled)
++ I915_WRITE(IMR, ~0);
++ iir = I915_READ(IIR);
+
+- DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
++ DRM_DEBUG("iir=%08x\n", iir);
+
+- if (temp == 0)
++ if (iir == 0) {
++ if (dev->pdev->msi_enabled) {
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ (void) I915_READ(IMR);
++ }
+ return IRQ_NONE;
++ }
+
+- I915_WRITE16(IIR, temp);
+- (void) I915_READ16(IIR);
+- DRM_READMEMORYBARRIER();
++ I915_WRITE(IIR, iir);
++ if (dev->pdev->msi_enabled)
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ (void) I915_READ(IIR); /* Flush posted writes */
+
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+- if (temp & I915_USER_INTERRUPT)
++ if (iir & I915_USER_INTERRUPT)
+ DRM_WAKEUP(&dev_priv->irq_queue);
+
+- if (temp & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
++ if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
++ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
+ int vblank_pipe = dev_priv->vblank_pipe;
+
+ if ((vblank_pipe &
+ (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
+ == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
+- if (temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
++ if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
+ atomic_inc(&dev->vbl_received);
+- if (temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
++ if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+ atomic_inc(&dev->vbl_received2);
+- } else if (((temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
++ } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
+ (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
+- ((temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
++ ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
+ (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
+ atomic_inc(&dev->vbl_received);
+
+@@ -314,6 +343,27 @@ static int i915_emit_irq(struct drm_device * dev)
+ return dev_priv->counter;
+ }
+
++static void i915_user_irq_get(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++ spin_lock(&dev_priv->user_irq_lock);
++ if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
++ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
++ spin_unlock(&dev_priv->user_irq_lock);
++}
++
++static void i915_user_irq_put(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++ spin_lock(&dev_priv->user_irq_lock);
++ BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
++ if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
++ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
++ spin_unlock(&dev_priv->user_irq_lock);
++}
++
+ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+@@ -322,13 +372,17 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+ DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ READ_BREADCRUMB(dev_priv));
+
+- if (READ_BREADCRUMB(dev_priv) >= irq_nr)
++ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
++ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
++ }
+
+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
++ i915_user_irq_get(dev);
+ DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
++ i915_user_irq_put(dev);
+
+ if (ret == -EBUSY) {
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+@@ -413,20 +467,6 @@ int i915_irq_wait(struct drm_device *dev, void *data,
+ return i915_wait_irq(dev, irqwait->irq_seq);
+ }
+
+-static void i915_enable_interrupt (struct drm_device *dev)
+-{
+- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+- u16 flag;
+-
+- flag = 0;
+- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+- flag |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+- flag |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+-
+- I915_WRITE16(IER, I915_USER_INTERRUPT | flag);
+-}
+-
+ /* Set the vblank monitor pipe
+ */
+ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+@@ -434,6 +474,7 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
++ u32 enable_mask = 0, disable_mask = 0;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+@@ -445,9 +486,20 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
+- dev_priv->vblank_pipe = pipe->pipe;
++ if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
++ enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
++ else
++ disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
++
++ if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
++ enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
++ else
++ disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+- i915_enable_interrupt (dev);
++ i915_enable_irq(dev_priv, enable_mask);
++ i915_disable_irq(dev_priv, disable_mask);
++
++ dev_priv->vblank_pipe = pipe->pipe;
+
+ return 0;
+ }
+@@ -464,7 +516,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
+- flag = I915_READ(IER);
++ flag = I915_READ(IMR);
+ pipe->pipe = 0;
+ if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
+ pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
+@@ -586,9 +638,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+- I915_WRITE16(HWSTAM, 0xfffe);
+- I915_WRITE16(IMR, 0x0);
+- I915_WRITE16(IER, 0x0);
++ I915_WRITE(HWSTAM, 0xfffe);
++ I915_WRITE(IMR, 0x0);
++ I915_WRITE(IER, 0x0);
+ }
+
+ void i915_driver_irq_postinstall(struct drm_device * dev)
+@@ -601,7 +653,18 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
+
+ if (!dev_priv->vblank_pipe)
+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+- i915_enable_interrupt(dev);
++
++ /* Set initial unmasked IRQs to just the selected vblank pipes. */
++ dev_priv->irq_mask_reg = ~0;
++ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
++ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
++ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
++ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
++
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
++ (void) I915_READ(IER);
++
+ DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+ }
+
+@@ -613,10 +676,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
+ if (!dev_priv)
+ return;
+
+- I915_WRITE16(HWSTAM, 0xffff);
+- I915_WRITE16(IMR, 0xffff);
+- I915_WRITE16(IER, 0x0);
++ I915_WRITE(HWSTAM, 0xffff);
++ I915_WRITE(IMR, 0xffff);
++ I915_WRITE(IER, 0x0);
+
+- temp = I915_READ16(IIR);
+- I915_WRITE16(IIR, temp);
++ temp = I915_READ(IIR);
++ I915_WRITE(IIR, temp);
+ }
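
The interrupt-mitigation half of the patch above replaces blanket IER writes with a cached copy of the interrupt mask register: the shadow value is updated first, the hardware is written only when a bit actually changes, and a read-back flushes the posted write. A minimal standalone sketch of that shadow-register pattern follows; reg_write()/reg_read() and struct irq_state are illustrative stand-ins, not the driver's I915_WRITE/I915_READ macros or its dev_priv fields.

/* Cached-mask sketch: touch the hardware only when a bit changes. */
#include <stdint.h>
#include <stdio.h>

static uint32_t hw_imr;                        /* pretend MMIO register */

static void reg_write(uint32_t val)
{
	hw_imr = val;
	printf("IMR <- 0x%08x\n", (unsigned)val);
}

static uint32_t reg_read(void)                 /* posting read */
{
	return hw_imr;
}

struct irq_state {
	uint32_t irq_mask;                     /* shadow of IMR */
};

static void irq_enable(struct irq_state *s, uint32_t mask)
{
	if ((s->irq_mask & mask) != 0) {       /* some requested bits still masked */
		s->irq_mask &= ~mask;          /* 0 in IMR means unmasked */
		reg_write(s->irq_mask);
		(void)reg_read();              /* flush the posted write */
	}
}

static void irq_disable(struct irq_state *s, uint32_t mask)
{
	if ((s->irq_mask & mask) != mask) {    /* some requested bits still unmasked */
		s->irq_mask |= mask;
		reg_write(s->irq_mask);
		(void)reg_read();
	}
}

int main(void)
{
	struct irq_state s = { .irq_mask = ~0u };  /* everything masked at init */

	irq_enable(&s, 1u << 1);   /* unmask one source: writes the register */
	irq_enable(&s, 1u << 1);   /* already unmasked: no hardware access */
	irq_disable(&s, 1u << 1);  /* mask it again: writes the register */
	return 0;
}
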
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch
new file mode 100644
index 000000000..8736250f0
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch
@@ -0,0 +1,46 @@
+commit 1236e8610ab9c6f9f8297e60530bedb2640c7224
+Author: Keith Packard <keithp@keithp.com>
+Date: Wed Jul 30 12:21:20 2008 -0700
+
+ i915: Track progress inside of batchbuffers for determining wedgedness.
+
+ This avoids early termination for long-running commands.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 10bfb0c..4c72a01 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -40,11 +40,15 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
++ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
++ u32 last_acthd = I915_READ(acthd_reg);
++ u32 acthd;
+ u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ int i;
+
+- for (i = 0; i < 10000; i++) {
++ for (i = 0; i < 100000; i++) {
+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++ acthd = I915_READ(acthd_reg);
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->Size;
+@@ -55,8 +59,13 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+
+ if (ring->head != last_head)
+ i = 0;
++ if (acthd != last_acthd)
++ i = 0;
+
+ last_head = ring->head;
++ last_acthd = acthd;
++ msleep_interruptible(10);
++
+ }
+
+ return -EBUSY;
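
The hang check above treats the GPU as wedged only when neither the ring head nor ACTHD (the address the command streamer is currently executing) moves between polls, so a long batchbuffer that is still making progress keeps resetting the timeout. Below is a compilable sketch of that idea against a simulated register; read_acthd() and wait_for_ring() are invented names for illustration, and the real loop also watches the ring head, returns 0 as soon as enough ring space frees up, and sleeps via msleep_interruptible(10) between polls.

/* Progress-based timeout: give up only after max_idle polls with no movement. */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_acthd(void)               /* simulated ACTHD register */
{
	static uint32_t acthd;
	static int ticks;

	if (ticks++ < 30)
		acthd += 4;                    /* GPU still chewing on the batch */
	return acthd;
}

static int wait_for_ring(int max_idle)
{
	uint32_t last_acthd = read_acthd();
	int i;

	for (i = 0; i < max_idle; i++) {
		uint32_t acthd = read_acthd();

		/* Movement inside the batchbuffer restarts the timeout. */
		if (acthd != last_acthd)
			i = 0;
		last_acthd = acthd;
	}
	return -1;                             /* no progress: consider it wedged */
}

int main(void)
{
	printf("wait_for_ring -> %d (-1 once the simulated GPU stalls)\n",
	       wait_for_ring(10));
	return 0;
}
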
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0007-i915-Initialize-hardware-status-page-at-device-load.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0007-i915-Initialize-hardware-status-page-at-device-load.patch
new file mode 100644
index 000000000..79f068f42
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0007-i915-Initialize-hardware-status-page-at-device-load.patch
@@ -0,0 +1,137 @@
+commit 75fed4ae8454aa975c274b2585ec2287dd15773d
+Author: Keith Packard <keithp@keithp.com>
+Date: Wed Jul 30 13:03:43 2008 -0700
+
+ i915: Initialize hardware status page at device load when possible.
+
+ Some chips were unstable with repeated setup/teardown of the hardware status
+ page.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 4c72a01..b3c4ac9 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -71,6 +71,52 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+ return -EBUSY;
+ }
+
++/**
++ * Sets up the hardware status page for devices that need a physical address
++ * in the register.
++ */
++int i915_init_phys_hws(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ /* Program Hardware Status Page */
++ dev_priv->status_page_dmah =
++ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
++
++ if (!dev_priv->status_page_dmah) {
++ DRM_ERROR("Can not allocate hardware status page\n");
++ return -ENOMEM;
++ }
++ dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
++ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
++
++ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++
++ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
++ DRM_DEBUG("Enabled hardware status page\n");
++ return 0;
++}
++
++/**
++ * Frees the hardware status page, whether it's a physical address or a virtual
++ * address set up by the X Server.
++ */
++void i915_free_hws(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ if (dev_priv->status_page_dmah) {
++ drm_pci_free(dev, dev_priv->status_page_dmah);
++ dev_priv->status_page_dmah = NULL;
++ }
++
++ if (dev_priv->status_gfx_addr) {
++ dev_priv->status_gfx_addr = 0;
++ drm_core_ioremapfree(&dev_priv->hws_map, dev);
++ }
++
++ /* Need to rewrite hardware status page */
++ I915_WRITE(HWS_PGA, 0x1ffff000);
++}
++
+ void i915_kernel_lost_context(struct drm_device * dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -103,18 +149,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
+ dev_priv->ring.map.size = 0;
+ }
+
+- if (dev_priv->status_page_dmah) {
+- drm_pci_free(dev, dev_priv->status_page_dmah);
+- dev_priv->status_page_dmah = NULL;
+- /* Need to rewrite hardware status page */
+- I915_WRITE(HWS_PGA, 0x1ffff000);
+- }
+-
+- if (dev_priv->status_gfx_addr) {
+- dev_priv->status_gfx_addr = 0;
+- drm_core_ioremapfree(&dev_priv->hws_map, dev);
+- I915_WRITE(HWS_PGA, 0x1ffff000);
+- }
++ /* Clear the HWS virtual address at teardown */
++ if (I915_NEED_GFX_HWS(dev))
++ i915_free_hws(dev);
+
+ return 0;
+ }
+@@ -165,23 +202,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+ */
+ dev_priv->allow_batchbuffer = 1;
+
+- /* Program Hardware Status Page */
+- if (!I915_NEED_GFX_HWS(dev)) {
+- dev_priv->status_page_dmah =
+- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+-
+- if (!dev_priv->status_page_dmah) {
+- i915_dma_cleanup(dev);
+- DRM_ERROR("Can not allocate hardware status page\n");
+- return -ENOMEM;
+- }
+- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+-
+- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+- }
+- DRM_DEBUG("Enabled hardware status page\n");
+ return 0;
+ }
+
+@@ -773,6 +793,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ _DRM_KERNEL | _DRM_DRIVER,
+ &dev_priv->mmio_map);
+
++ /* Init HWS */
++ if (!I915_NEED_GFX_HWS(dev)) {
++ ret = i915_init_phys_hws(dev);
++ if (ret != 0)
++ return ret;
++ }
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+@@ -796,6 +822,8 @@ int i915_driver_unload(struct drm_device *dev)
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
++ i915_free_hws(dev);
++
+ if (dev_priv->mmio_map)
+ drm_rmmap(dev, dev_priv->mmio_map);
+
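
The restructuring above moves the physical hardware status page into a pair of symmetric helpers called once at driver load and once at unload, so repeated DMA init/cleanup cycles reuse the same page instead of reallocating it. The short sketch below shows that allocate-once/free-once shape in plain C; struct hws, hws_init() and hws_free() are placeholders, and malloc() merely stands in for the DMA-coherent allocation the driver performs with drm_pci_alloc() before programming HWS_PGA.

#include <stdlib.h>
#include <string.h>

struct hws {
	void *vaddr;                  /* CPU mapping of the status page */
	unsigned long busaddr;        /* what would be programmed into HWS_PGA */
};

static int hws_init(struct hws *h, size_t page_size)
{
	h->vaddr = malloc(page_size);          /* stand-in for the DMA-coherent alloc */
	if (!h->vaddr)
		return -1;
	memset(h->vaddr, 0, page_size);
	h->busaddr = (unsigned long)h->vaddr;  /* placeholder for the real bus address */
	return 0;
}

static void hws_free(struct hws *h)
{
	free(h->vaddr);
	h->vaddr = NULL;
	h->busaddr = 0;
}

int main(void)
{
	struct hws h;

	if (hws_init(&h, 4096) != 0)  /* once, at "driver load" */
		return 1;
	/* ... any number of DMA init/cleanup cycles reuse the same page ... */
	hws_free(&h);                 /* once, at "driver unload" */
	return 0;
}
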
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch
new file mode 100644
index 000000000..afa6f9634
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch
@@ -0,0 +1,572 @@
+commit 91c2ebb8e78aa64f4807399b506ec0090ae5f3d6
+Author: Matthew Garrett <mjg59@srcf.ucam.org>
+Date: Tue Aug 5 19:37:25 2008 +0100
+
+ Add Intel ACPI IGD OpRegion support
+
+ This adds the support necessary for allowing ACPI backlight control to
+ work on some newer Intel-based graphics systems. Tested on Thinkpad T61
+ and HP 2510p hardware.
+
+ Signed-off-by: Matthew Garrett <mjg@redhat.com>
+ Signed-off-by: Dave Airlie <airlied@linux.ie>
+
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index a9e6046..b032808 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -3,7 +3,7 @@
+ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ ccflags-y := -Iinclude/drm
+-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
++i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o
+
+ i915-$(CONFIG_COMPAT) += i915_ioc32.o
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index b3c4ac9..cead62f 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -810,6 +810,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ if (!IS_I945G(dev) && !IS_I945GM(dev))
+ pci_enable_msi(dev->pdev);
+
++ intel_opregion_init(dev);
++
+ spin_lock_init(&dev_priv->user_irq_lock);
+
+ return ret;
+@@ -827,6 +829,8 @@ int i915_driver_unload(struct drm_device *dev)
+ if (dev_priv->mmio_map)
+ drm_rmmap(dev, dev_priv->mmio_map);
+
++ intel_opregion_free(dev);
++
+ drm_free(dev->dev_private, sizeof(drm_i915_private_t),
+ DRM_MEM_DRIVER);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 6c99aab..d95eca2 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -371,6 +371,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+
+ i915_save_vga(dev);
+
++ intel_opregion_free(dev);
++
+ if (state.event == PM_EVENT_SUSPEND) {
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+@@ -532,6 +534,8 @@ static int i915_resume(struct drm_device *dev)
+
+ i915_restore_vga(dev);
+
++ intel_opregion_init(dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 8daf0d8..e4bd01c 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -82,6 +82,14 @@ typedef struct _drm_i915_vbl_swap {
+ unsigned int sequence;
+ } drm_i915_vbl_swap_t;
+
++struct intel_opregion {
++ struct opregion_header *header;
++ struct opregion_acpi *acpi;
++ struct opregion_swsci *swsci;
++ struct opregion_asle *asle;
++ int enabled;
++};
++
+ typedef struct drm_i915_private {
+ drm_local_map_t *sarea;
+ drm_local_map_t *mmio_map;
+@@ -122,6 +130,8 @@ typedef struct drm_i915_private {
+ drm_i915_vbl_swap_t vbl_swaps;
+ unsigned int swaps_pending;
+
++ struct intel_opregion opregion;
++
+ /* Register state */
+ u8 saveLBB;
+ u32 saveDSPACNTR;
+@@ -244,6 +254,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
++extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+
+ /* i915_mem.c */
+ extern int i915_mem_alloc(struct drm_device *dev, void *data,
+@@ -258,6 +269,12 @@ extern void i915_mem_takedown(struct mem_block **heap);
+ extern void i915_mem_release(struct drm_device * dev,
+ struct drm_file *file_priv, struct mem_block *heap);
+
++/* i915_opregion.c */
++extern int intel_opregion_init(struct drm_device *dev);
++extern void intel_opregion_free(struct drm_device *dev);
++extern void opregion_asle_intr(struct drm_device *dev);
++extern void opregion_enable_asle(struct drm_device *dev);
++
+ #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
+ #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+ #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 24d11ed..ae7d3a8 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -36,9 +36,11 @@
+ /** These are the interrupts used by the driver */
+ #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
+- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
++ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \
++ I915_ASLE_INTERRUPT | \
++ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
+-static inline void
++void
+ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+ {
+ if ((dev_priv->irq_mask_reg & mask) != 0) {
+@@ -274,6 +276,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ return IRQ_NONE;
+ }
+
++ I915_WRITE(PIPEASTAT, pipea_stats);
++ I915_WRITE(PIPEBSTAT, pipeb_stats);
++
+ I915_WRITE(IIR, iir);
+ if (dev->pdev->msi_enabled)
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+@@ -306,14 +311,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+
+ if (dev_priv->swaps_pending > 0)
+ drm_locked_tasklet(dev, i915_vblank_tasklet);
+- I915_WRITE(PIPEASTAT,
+- pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
+- PIPE_VBLANK_INTERRUPT_STATUS);
+- I915_WRITE(PIPEBSTAT,
+- pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
+- PIPE_VBLANK_INTERRUPT_STATUS);
+ }
+
++ if (iir & I915_ASLE_INTERRUPT)
++ opregion_asle_intr(dev);
++
++ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
++ opregion_asle_intr(dev);
++
+ return IRQ_HANDLED;
+ }
+
+@@ -661,10 +666,14 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
+ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
++ dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
++
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+ (void) I915_READ(IER);
+
++ opregion_enable_asle(dev);
++
+ DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
+new file mode 100644
+index 0000000..1787a0c
+--- /dev/null
++++ b/drivers/gpu/drm/i915/i915_opregion.c
+@@ -0,0 +1,371 @@
++/*
++ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
++ * Copyright 2008 Red Hat <mjg@redhat.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#include <linux/acpi.h>
++
++#include "drmP.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#define PCI_ASLE 0xe4
++#define PCI_LBPC 0xf4
++#define PCI_ASLS 0xfc
++
++#define OPREGION_SZ (8*1024)
++#define OPREGION_HEADER_OFFSET 0
++#define OPREGION_ACPI_OFFSET 0x100
++#define OPREGION_SWSCI_OFFSET 0x200
++#define OPREGION_ASLE_OFFSET 0x300
++#define OPREGION_VBT_OFFSET 0x1000
++
++#define OPREGION_SIGNATURE "IntelGraphicsMem"
++#define MBOX_ACPI (1<<0)
++#define MBOX_SWSCI (1<<1)
++#define MBOX_ASLE (1<<2)
++
++struct opregion_header {
++ u8 signature[16];
++ u32 size;
++ u32 opregion_ver;
++ u8 bios_ver[32];
++ u8 vbios_ver[16];
++ u8 driver_ver[16];
++ u32 mboxes;
++ u8 reserved[164];
++} __attribute__((packed));
++
++/* OpRegion mailbox #1: public ACPI methods */
++struct opregion_acpi {
++ u32 drdy; /* driver readiness */
++ u32 csts; /* notification status */
++ u32 cevt; /* current event */
++ u8 rsvd1[20];
++ u32 didl[8]; /* supported display devices ID list */
++ u32 cpdl[8]; /* currently presented display list */
++ u32 cadl[8]; /* currently active display list */
++ u32 nadl[8]; /* next active devices list */
++ u32 aslp; /* ASL sleep time-out */
++ u32 tidx; /* toggle table index */
++ u32 chpd; /* current hotplug enable indicator */
++ u32 clid; /* current lid state*/
++ u32 cdck; /* current docking state */
++ u32 sxsw; /* Sx state resume */
++ u32 evts; /* ASL supported events */
++ u32 cnot; /* current OS notification */
++ u32 nrdy; /* driver status */
++ u8 rsvd2[60];
++} __attribute__((packed));
++
++/* OpRegion mailbox #2: SWSCI */
++struct opregion_swsci {
++ u32 scic; /* SWSCI command|status|data */
++ u32 parm; /* command parameters */
++ u32 dslp; /* driver sleep time-out */
++ u8 rsvd[244];
++} __attribute__((packed));
++
++/* OpRegion mailbox #3: ASLE */
++struct opregion_asle {
++ u32 ardy; /* driver readiness */
++ u32 aslc; /* ASLE interrupt command */
++ u32 tche; /* technology enabled indicator */
++ u32 alsi; /* current ALS illuminance reading */
++ u32 bclp; /* backlight brightness to set */
++ u32 pfit; /* panel fitting state */
++ u32 cblv; /* current brightness level */
++ u16 bclm[20]; /* backlight level duty cycle mapping table */
++ u32 cpfm; /* current panel fitting mode */
++ u32 epfm; /* enabled panel fitting modes */
++ u8 plut[74]; /* panel LUT and identifier */
++ u32 pfmb; /* PWM freq and min brightness */
++ u8 rsvd[102];
++} __attribute__((packed));
++
++/* ASLE irq request bits */
++#define ASLE_SET_ALS_ILLUM (1 << 0)
++#define ASLE_SET_BACKLIGHT (1 << 1)
++#define ASLE_SET_PFIT (1 << 2)
++#define ASLE_SET_PWM_FREQ (1 << 3)
++#define ASLE_REQ_MSK 0xf
++
++/* response bits of ASLE irq request */
++#define ASLE_ALS_ILLUM_FAIL (2<<10)
++#define ASLE_BACKLIGHT_FAIL (2<<12)
++#define ASLE_PFIT_FAIL (2<<14)
++#define ASLE_PWM_FREQ_FAIL (2<<16)
++
++/* ASLE backlight brightness to set */
++#define ASLE_BCLP_VALID (1<<31)
++#define ASLE_BCLP_MSK (~(1<<31))
++
++/* ASLE panel fitting request */
++#define ASLE_PFIT_VALID (1<<31)
++#define ASLE_PFIT_CENTER (1<<0)
++#define ASLE_PFIT_STRETCH_TEXT (1<<1)
++#define ASLE_PFIT_STRETCH_GFX (1<<2)
++
++/* PWM frequency and minimum brightness */
++#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
++#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
++#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
++#define ASLE_PFMB_PWM_VALID (1<<31)
++
++#define ASLE_CBLV_VALID (1<<31)
++
++static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct opregion_asle *asle = dev_priv->opregion.asle;
++ u32 blc_pwm_ctl, blc_pwm_ctl2;
++
++ if (!(bclp & ASLE_BCLP_VALID))
++ return ASLE_BACKLIGHT_FAIL;
++
++ bclp &= ASLE_BCLP_MSK;
++ if (bclp < 0 || bclp > 255)
++ return ASLE_BACKLIGHT_FAIL;
++
++ blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
++ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
++ blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
++
++ if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
++ pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
++ else
++ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
++
++ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
++
++ return 0;
++}
++
++static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
++{
++ /* alsi is the current ALS reading in lux. 0 indicates below sensor
++ range, 0xffff indicates above sensor range. 1-0xfffe are valid */
++ return 0;
++}
++
++static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ if (pfmb & ASLE_PFMB_PWM_VALID) {
++ u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
++ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
++ blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
++ pwm = pwm >> 9;
++ /* FIXME - what do we do with the PWM? */
++ }
++ return 0;
++}
++
++static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
++{
++ /* Panel fitting is currently controlled by the X code, so this is a
++ noop until modesetting support works fully */
++ if (!(pfit & ASLE_PFIT_VALID))
++ return ASLE_PFIT_FAIL;
++ return 0;
++}
++
++void opregion_asle_intr(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct opregion_asle *asle = dev_priv->opregion.asle;
++ u32 asle_stat = 0;
++ u32 asle_req;
++
++ if (!asle)
++ return;
++
++ asle_req = asle->aslc & ASLE_REQ_MSK;
++
++ if (!asle_req) {
++ DRM_DEBUG("non asle set request??\n");
++ return;
++ }
++
++ if (asle_req & ASLE_SET_ALS_ILLUM)
++ asle_stat |= asle_set_als_illum(dev, asle->alsi);
++
++ if (asle_req & ASLE_SET_BACKLIGHT)
++ asle_stat |= asle_set_backlight(dev, asle->bclp);
++
++ if (asle_req & ASLE_SET_PFIT)
++ asle_stat |= asle_set_pfit(dev, asle->pfit);
++
++ if (asle_req & ASLE_SET_PWM_FREQ)
++ asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
++
++ asle->aslc = asle_stat;
++}
++
++#define ASLE_ALS_EN (1<<0)
++#define ASLE_BLC_EN (1<<1)
++#define ASLE_PFIT_EN (1<<2)
++#define ASLE_PFMB_EN (1<<3)
++
++void opregion_enable_asle(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct opregion_asle *asle = dev_priv->opregion.asle;
++
++ if (asle) {
++ u32 pipeb_stats = I915_READ(PIPEBSTAT);
++ if (IS_MOBILE(dev)) {
++ /* Many devices trigger events with a write to the
++ legacy backlight controller, so we need to ensure
++ that it's able to generate interrupts */
++ I915_WRITE(PIPEBSTAT, pipeb_stats |=
++ I915_LEGACY_BLC_EVENT_ENABLE);
++ i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT |
++ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
++ } else
++ i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
++
++ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
++ ASLE_PFMB_EN;
++ asle->ardy = 1;
++ }
++}
++
++#define ACPI_EV_DISPLAY_SWITCH (1<<0)
++#define ACPI_EV_LID (1<<1)
++#define ACPI_EV_DOCK (1<<2)
++
++static struct intel_opregion *system_opregion;
++
++int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
++ void *data)
++{
++ /* The only video events relevant to opregion are 0x80. These indicate
++ either a docking event, lid switch or display switch request. In
++ Linux, these are handled by the dock, button and video drivers.
++ We might want to fix the video driver to be opregion-aware in
++ future, but right now we just indicate to the firmware that the
++ request has been handled */
++
++ struct opregion_acpi *acpi;
++
++ if (!system_opregion)
++ return NOTIFY_DONE;
++
++ acpi = system_opregion->acpi;
++ acpi->csts = 0;
++
++ return NOTIFY_OK;
++}
++
++static struct notifier_block intel_opregion_notifier = {
++ .notifier_call = intel_opregion_video_event,
++};
++
++int intel_opregion_init(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++ void *base;
++ u32 asls, mboxes;
++ int err = 0;
++
++ pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
++ DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
++ if (asls == 0) {
++ DRM_DEBUG("ACPI OpRegion not supported!\n");
++ return -ENOTSUPP;
++ }
++
++ base = ioremap(asls, OPREGION_SZ);
++ if (!base)
++ return -ENOMEM;
++
++ opregion->header = base;
++ if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
++ DRM_DEBUG("opregion signature mismatch\n");
++ err = -EINVAL;
++ goto err_out;
++ }
++
++ mboxes = opregion->header->mboxes;
++ if (mboxes & MBOX_ACPI) {
++ DRM_DEBUG("Public ACPI methods supported\n");
++ opregion->acpi = base + OPREGION_ACPI_OFFSET;
++ } else {
++ DRM_DEBUG("Public ACPI methods not supported\n");
++ err = -ENOTSUPP;
++ goto err_out;
++ }
++ opregion->enabled = 1;
++
++ if (mboxes & MBOX_SWSCI) {
++ DRM_DEBUG("SWSCI supported\n");
++ opregion->swsci = base + OPREGION_SWSCI_OFFSET;
++ }
++ if (mboxes & MBOX_ASLE) {
++ DRM_DEBUG("ASLE supported\n");
++ opregion->asle = base + OPREGION_ASLE_OFFSET;
++ }
++
++ /* Notify BIOS we are ready to handle ACPI video ext notifs.
++ * Right now, all the events are handled by the ACPI video module.
++ * We don't actually need to do anything with them. */
++ opregion->acpi->csts = 0;
++ opregion->acpi->drdy = 1;
++
++ system_opregion = opregion;
++ register_acpi_notifier(&intel_opregion_notifier);
++
++ return 0;
++
++err_out:
++ iounmap(opregion->header);
++ opregion->header = NULL;
++ return err;
++}
++
++void intel_opregion_free(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++
++ if (!opregion->enabled)
++ return;
++
++ opregion->acpi->drdy = 0;
++
++ system_opregion = NULL;
++ unregister_acpi_notifier(&intel_opregion_notifier);
++
++ /* just clear all opregion memory pointers now */
++ iounmap(opregion->header);
++ opregion->header = NULL;
++ opregion->acpi = NULL;
++ opregion->swsci = NULL;
++ opregion->asle = NULL;
++
++ opregion->enabled = 0;
++}
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 477c64e..43ad2cb 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -740,6 +740,7 @@
+ #define BLC_PWM_CTL 0x61254
+ #define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+ #define BLC_PWM_CTL2 0x61250 /* 965+ only */
++#define BLM_COMBINATION_MODE (1 << 30)
+ /*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
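
OpRegion discovery in the patch above follows a simple recipe: read the region's physical address from PCI config space (offset 0xfc), ioremap 8 KB, verify the 16-byte "IntelGraphicsMem" signature, then derive the ACPI/SWSCI/ASLE mailbox pointers from fixed offsets advertised by the mboxes bitfield. The userspace sketch below parses a faked in-memory region the same way; opregion_parse() is an invented helper and the header struct is trimmed to the fields the sketch touches.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define OPREGION_SIGNATURE   "IntelGraphicsMem"
#define OPREGION_ACPI_OFFSET 0x100
#define OPREGION_ASLE_OFFSET 0x300
#define MBOX_ACPI (1u << 0)
#define MBOX_ASLE (1u << 2)

struct opregion_header {                 /* trimmed to what the sketch uses */
	uint8_t  signature[16];
	uint32_t size;
	uint32_t opregion_ver;
	uint8_t  bios_ver[32];
	uint8_t  vbios_ver[16];
	uint8_t  driver_ver[16];
	uint32_t mboxes;
} __attribute__((packed));

static int opregion_parse(uint8_t *base, uint8_t **acpi, uint8_t **asle)
{
	struct opregion_header *hdr = (struct opregion_header *)base;

	if (memcmp(hdr->signature, OPREGION_SIGNATURE, 16) != 0)
		return -1;                               /* not an OpRegion */

	*acpi = (hdr->mboxes & MBOX_ACPI) ? base + OPREGION_ACPI_OFFSET : NULL;
	*asle = (hdr->mboxes & MBOX_ASLE) ? base + OPREGION_ASLE_OFFSET : NULL;
	return 0;
}

int main(void)
{
	static uint8_t fake[0x2000];     /* stands in for the ioremap()ed region */
	struct opregion_header *hdr = (struct opregion_header *)fake;
	uint8_t *acpi, *asle;

	memcpy(hdr->signature, OPREGION_SIGNATURE, 16);
	hdr->mboxes = MBOX_ACPI | MBOX_ASLE;

	if (opregion_parse(fake, &acpi, &asle) == 0)
		printf("acpi mailbox at +0x%lx, asle at +0x%lx\n",
		       (unsigned long)(acpi - fake), (unsigned long)(asle - fake));
	return 0;
}
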
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0009-drm-fix-sysfs-error-path.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0009-drm-fix-sysfs-error-path.patch
new file mode 100644
index 000000000..8dea82480
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0009-drm-fix-sysfs-error-path.patch
@@ -0,0 +1,23 @@
+commit 2e9c9eedfe0be777c051a2198dddf459adcc407b
+Author: Dave Airlie <airlied@redhat.com>
+Date: Tue Sep 2 10:06:06 2008 +1000
+
+ drm: fix sysfs error path.
+
+ Pointed out by Roel Kluin on dri-devel.
+
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
+index af211a0..1611b9b 100644
+--- a/drivers/gpu/drm/drm_sysfs.c
++++ b/drivers/gpu/drm/drm_sysfs.c
+@@ -184,7 +184,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
+ err_out_files:
+ if (i > 0)
+ for (j = 0; j < i; j++)
+- device_remove_file(&minor->kdev, &device_attrs[i]);
++ device_remove_file(&minor->kdev, &device_attrs[j]);
+ device_unregister(&minor->kdev);
+ err_out:
+
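
The one-character change above (device_remove_file(..., &device_attrs[j]) instead of [i]) is the standard unwind-on-error pattern: when creation fails at index i, only the entries 0..i-1 that were actually created get removed, indexed by the inner loop variable. A tiny self-contained illustration, with create_entry()/remove_entry() as placeholders rather than the sysfs API:

#include <stdio.h>

#define N 4

static int create_entry(int idx)  { return idx == 2 ? -1 : 0; } /* fail at 2 */
static void remove_entry(int idx) { printf("removing entry %d\n", idx); }

static int add_all(void)
{
	int i, j;

	for (i = 0; i < N; i++) {
		if (create_entry(i) < 0)
			goto err_out_files;
	}
	return 0;

err_out_files:
	for (j = 0; j < i; j++)       /* j, not i: undo only what succeeded */
		remove_entry(j);
	return -1;
}

int main(void)
{
	if (add_all() < 0)
		printf("creation failed; partial entries were unwound\n");
	return 0;
}
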
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0010-i915-separate-suspend-resume-functions.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0010-i915-separate-suspend-resume-functions.patch
new file mode 100644
index 000000000..897d50c39
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0010-i915-separate-suspend-resume-functions.patch
@@ -0,0 +1,1079 @@
+commit a850828c640735fb410c782717c9eb7f8474e356
+Author: Jesse Barnes <jbarnes@virtuousgeek.org>
+Date: Mon Aug 25 15:11:06 2008 -0700
+
+ separate i915 suspend/resume functions into their own file
+
+ [Patch against drm-next. Consider this a trial balloon for our new Linux
+ development model.]
+
+ This is a big chunk of code. Separating it out makes it easier to change
+ without churn on the main i915_drv.c file (and there will be churn as we
+ fix bugs and add things like kernel mode setting). Also makes it easier
+ to share this file with BSD.
+
+ Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index b032808..c4bbda6 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -3,7 +3,8 @@
+ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ ccflags-y := -Iinclude/drm
+-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o
++i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
++ i915_suspend.o
+
+ i915-$(CONFIG_COMPAT) += i915_ioc32.o
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index d95eca2..eff66ed 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -38,211 +38,9 @@ static struct pci_device_id pciidlist[] = {
+ i915_PCI_IDS
+ };
+
+-enum pipe {
+- PIPE_A = 0,
+- PIPE_B,
+-};
+-
+-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+-
+- if (pipe == PIPE_A)
+- return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
+- else
+- return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+-}
+-
+-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+- u32 *array;
+- int i;
+-
+- if (!i915_pipe_enabled(dev, pipe))
+- return;
+-
+- if (pipe == PIPE_A)
+- array = dev_priv->save_palette_a;
+- else
+- array = dev_priv->save_palette_b;
+-
+- for(i = 0; i < 256; i++)
+- array[i] = I915_READ(reg + (i << 2));
+-}
+-
+-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+- u32 *array;
+- int i;
+-
+- if (!i915_pipe_enabled(dev, pipe))
+- return;
+-
+- if (pipe == PIPE_A)
+- array = dev_priv->save_palette_a;
+- else
+- array = dev_priv->save_palette_b;
+-
+- for(i = 0; i < 256; i++)
+- I915_WRITE(reg + (i << 2), array[i]);
+-}
+-
+-static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
+-{
+- outb(reg, index_port);
+- return inb(data_port);
+-}
+-
+-static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
+-{
+- inb(st01);
+- outb(palette_enable | reg, VGA_AR_INDEX);
+- return inb(VGA_AR_DATA_READ);
+-}
+-
+-static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
+-{
+- inb(st01);
+- outb(palette_enable | reg, VGA_AR_INDEX);
+- outb(val, VGA_AR_DATA_WRITE);
+-}
+-
+-static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
+-{
+- outb(reg, index_port);
+- outb(val, data_port);
+-}
+-
+-static void i915_save_vga(struct drm_device *dev)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- int i;
+- u16 cr_index, cr_data, st01;
+-
+- /* VGA color palette registers */
+- dev_priv->saveDACMASK = inb(VGA_DACMASK);
+- /* DACCRX automatically increments during read */
+- outb(0, VGA_DACRX);
+- /* Read 3 bytes of color data from each index */
+- for (i = 0; i < 256 * 3; i++)
+- dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
+-
+- /* MSR bits */
+- dev_priv->saveMSR = inb(VGA_MSR_READ);
+- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+- cr_index = VGA_CR_INDEX_CGA;
+- cr_data = VGA_CR_DATA_CGA;
+- st01 = VGA_ST01_CGA;
+- } else {
+- cr_index = VGA_CR_INDEX_MDA;
+- cr_data = VGA_CR_DATA_MDA;
+- st01 = VGA_ST01_MDA;
+- }
+-
+- /* CRT controller regs */
+- i915_write_indexed(cr_index, cr_data, 0x11,
+- i915_read_indexed(cr_index, cr_data, 0x11) &
+- (~0x80));
+- for (i = 0; i <= 0x24; i++)
+- dev_priv->saveCR[i] =
+- i915_read_indexed(cr_index, cr_data, i);
+- /* Make sure we don't turn off CR group 0 writes */
+- dev_priv->saveCR[0x11] &= ~0x80;
+-
+- /* Attribute controller registers */
+- inb(st01);
+- dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
+- for (i = 0; i <= 0x14; i++)
+- dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
+- inb(st01);
+- outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
+- inb(st01);
+-
+- /* Graphics controller registers */
+- for (i = 0; i < 9; i++)
+- dev_priv->saveGR[i] =
+- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
+-
+- dev_priv->saveGR[0x10] =
+- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+- dev_priv->saveGR[0x11] =
+- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+- dev_priv->saveGR[0x18] =
+- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+-
+- /* Sequencer registers */
+- for (i = 0; i < 8; i++)
+- dev_priv->saveSR[i] =
+- i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
+-}
+-
+-static void i915_restore_vga(struct drm_device *dev)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- int i;
+- u16 cr_index, cr_data, st01;
+-
+- /* MSR bits */
+- outb(dev_priv->saveMSR, VGA_MSR_WRITE);
+- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+- cr_index = VGA_CR_INDEX_CGA;
+- cr_data = VGA_CR_DATA_CGA;
+- st01 = VGA_ST01_CGA;
+- } else {
+- cr_index = VGA_CR_INDEX_MDA;
+- cr_data = VGA_CR_DATA_MDA;
+- st01 = VGA_ST01_MDA;
+- }
+-
+- /* Sequencer registers, don't write SR07 */
+- for (i = 0; i < 7; i++)
+- i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
+- dev_priv->saveSR[i]);
+-
+- /* CRT controller regs */
+- /* Enable CR group 0 writes */
+- i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+- for (i = 0; i <= 0x24; i++)
+- i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
+-
+- /* Graphics controller regs */
+- for (i = 0; i < 9; i++)
+- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
+- dev_priv->saveGR[i]);
+-
+- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+- dev_priv->saveGR[0x10]);
+- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+- dev_priv->saveGR[0x11]);
+- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+- dev_priv->saveGR[0x18]);
+-
+- /* Attribute controller registers */
+- inb(st01);
+- for (i = 0; i <= 0x14; i++)
+- i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
+- inb(st01); /* switch back to index mode */
+- outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
+- inb(st01);
+-
+- /* VGA color palette registers */
+- outb(dev_priv->saveDACMASK, VGA_DACMASK);
+- /* DACCRX automatically increments during read */
+- outb(0, VGA_DACWX);
+- /* Read 3 bytes of color data from each index */
+- for (i = 0; i < 256 * 3; i++)
+- outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
+-
+-}
+-
+ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- int i;
+
+ if (!dev || !dev_priv) {
+ printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
+@@ -254,122 +52,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+ return 0;
+
+ pci_save_state(dev->pdev);
+- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+-
+- /* Display arbitration control */
+- dev_priv->saveDSPARB = I915_READ(DSPARB);
+-
+- /* Pipe & plane A info */
+- dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+- dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+- dev_priv->saveFPA0 = I915_READ(FPA0);
+- dev_priv->saveFPA1 = I915_READ(FPA1);
+- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+- if (IS_I965G(dev))
+- dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+- dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+- dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+- dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
+- dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+- dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+- dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+-
+- dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
+- dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+- dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+- dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+- dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+- if (IS_I965G(dev)) {
+- dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+- dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+- }
+- i915_save_palette(dev, PIPE_A);
+- dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+-
+- /* Pipe & plane B info */
+- dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+- dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+- dev_priv->saveFPB0 = I915_READ(FPB0);
+- dev_priv->saveFPB1 = I915_READ(FPB1);
+- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+- if (IS_I965G(dev))
+- dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+- dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+- dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+- dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
+- dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+- dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+- dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+-
+- dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
+- dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+- dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+- dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+- dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+- if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
+- dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+- dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+- }
+- i915_save_palette(dev, PIPE_B);
+- dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+-
+- /* CRT state */
+- dev_priv->saveADPA = I915_READ(ADPA);
+
+- /* LVDS state */
+- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+- if (IS_I965G(dev))
+- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+- if (IS_MOBILE(dev) && !IS_I830(dev))
+- dev_priv->saveLVDS = I915_READ(LVDS);
+- if (!IS_I830(dev) && !IS_845G(dev))
+- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+-
+- /* FIXME: save TV & SDVO state */
+-
+- /* FBC state */
+- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+-
+- /* Interrupt state */
+- dev_priv->saveIIR = I915_READ(IIR);
+- dev_priv->saveIER = I915_READ(IER);
+- dev_priv->saveIMR = I915_READ(IMR);
+-
+- /* VGA state */
+- dev_priv->saveVGA0 = I915_READ(VGA0);
+- dev_priv->saveVGA1 = I915_READ(VGA1);
+- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+-
+- /* Clock gating state */
+- dev_priv->saveD_STATE = I915_READ(D_STATE);
+- dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
+-
+- /* Cache mode state */
+- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+-
+- /* Memory Arbitration state */
+- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+-
+- /* Scratch space */
+- for (i = 0; i < 16; i++) {
+- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+- }
+- for (i = 0; i < 3; i++)
+- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+-
+- i915_save_vga(dev);
++ i915_save_state(dev);
+
+ intel_opregion_free(dev);
+
+@@ -384,155 +68,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+
+ static int i915_resume(struct drm_device *dev)
+ {
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- int i;
+-
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ if (pci_enable_device(dev->pdev))
+ return -1;
+ pci_set_master(dev->pdev);
+
+- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+-
+- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+-
+- /* Pipe & plane A info */
+- /* Prime the clock */
+- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+- ~DPLL_VCO_ENABLE);
+- udelay(150);
+- }
+- I915_WRITE(FPA0, dev_priv->saveFPA0);
+- I915_WRITE(FPA1, dev_priv->saveFPA1);
+- /* Actually enable it */
+- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+- udelay(150);
+- if (IS_I965G(dev))
+- I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+- udelay(150);
+-
+- /* Restore mode */
+- I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+- I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+- I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+- I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+- I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+- I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+-
+- /* Restore plane info */
+- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+- I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
+- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+- if (IS_I965G(dev)) {
+- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+- I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+- }
+-
+- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+-
+- i915_restore_palette(dev, PIPE_A);
+- /* Enable the plane */
+- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+- I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+-
+- /* Pipe & plane B info */
+- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+- ~DPLL_VCO_ENABLE);
+- udelay(150);
+- }
+- I915_WRITE(FPB0, dev_priv->saveFPB0);
+- I915_WRITE(FPB1, dev_priv->saveFPB1);
+- /* Actually enable it */
+- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+- udelay(150);
+- if (IS_I965G(dev))
+- I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+- udelay(150);
+-
+- /* Restore mode */
+- I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+- I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+- I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+- I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+- I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+- I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+-
+- /* Restore plane info */
+- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+- I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
+- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+- if (IS_I965G(dev)) {
+- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+- I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+- }
+-
+- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+-
+- i915_restore_palette(dev, PIPE_B);
+- /* Enable the plane */
+- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+- I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+-
+- /* CRT state */
+- I915_WRITE(ADPA, dev_priv->saveADPA);
+-
+- /* LVDS state */
+- if (IS_I965G(dev))
+- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+- if (IS_MOBILE(dev) && !IS_I830(dev))
+- I915_WRITE(LVDS, dev_priv->saveLVDS);
+- if (!IS_I830(dev) && !IS_845G(dev))
+- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+-
+- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+-
+- /* FIXME: restore TV & SDVO state */
+-
+- /* FBC info */
+- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+-
+- /* VGA state */
+- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+- I915_WRITE(VGA0, dev_priv->saveVGA0);
+- I915_WRITE(VGA1, dev_priv->saveVGA1);
+- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+- udelay(150);
+-
+- /* Clock gating state */
+- I915_WRITE (D_STATE, dev_priv->saveD_STATE);
+- I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
+-
+- /* Cache mode state */
+- I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+-
+- /* Memory arbitration state */
+- I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+-
+- for (i = 0; i < 16; i++) {
+- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
+- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
+- }
+- for (i = 0; i < 3; i++)
+- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+-
+- i915_restore_vga(dev);
++ i915_restore_state(dev);
+
+ intel_opregion_init(dev);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index e4bd01c..a82b487 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -41,6 +41,11 @@
+ #define DRIVER_DESC "Intel Graphics"
+ #define DRIVER_DATE "20060119"
+
++enum pipe {
++ PIPE_A = 0,
++ PIPE_B,
++};
++
+ /* Interface history:
+ *
+ * 1.1: Original.
+@@ -269,6 +274,10 @@ extern void i915_mem_takedown(struct mem_block **heap);
+ extern void i915_mem_release(struct drm_device * dev,
+ struct drm_file *file_priv, struct mem_block *heap);
+
++/* i915_suspend.c */
++extern int i915_save_state(struct drm_device *dev);
++extern int i915_restore_state(struct drm_device *dev);
++
+ /* i915_opregion.c */
+ extern int intel_opregion_init(struct drm_device *dev);
+ extern void intel_opregion_free(struct drm_device *dev);
+@@ -279,6 +288,8 @@ extern void opregion_enable_asle(struct drm_device *dev);
+ #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+ #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+ #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
++#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg))
++#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
+
+ #define I915_VERBOSE 0
+
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+new file mode 100644
+index 0000000..e0c1fe4
+--- /dev/null
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -0,0 +1,509 @@
++/*
++ *
++ * Copyright 2008 (c) Intel Corporation
++ * Jesse Barnes <jbarnes@virtuousgeek.org>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ if (pipe == PIPE_A)
++ return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
++ else
++ return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
++}
++
++static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
++ u32 *array;
++ int i;
++
++ if (!i915_pipe_enabled(dev, pipe))
++ return;
++
++ if (pipe == PIPE_A)
++ array = dev_priv->save_palette_a;
++ else
++ array = dev_priv->save_palette_b;
++
++ for(i = 0; i < 256; i++)
++ array[i] = I915_READ(reg + (i << 2));
++}
++
++static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
++ u32 *array;
++ int i;
++
++ if (!i915_pipe_enabled(dev, pipe))
++ return;
++
++ if (pipe == PIPE_A)
++ array = dev_priv->save_palette_a;
++ else
++ array = dev_priv->save_palette_b;
++
++ for(i = 0; i < 256; i++)
++ I915_WRITE(reg + (i << 2), array[i]);
++}
++
++static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ I915_WRITE8(index_port, reg);
++ return I915_READ8(data_port);
++}
++
++static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ I915_READ8(st01);
++ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
++ return I915_READ8(VGA_AR_DATA_READ);
++}
++
++static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ I915_READ8(st01);
++ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
++ I915_WRITE8(VGA_AR_DATA_WRITE, val);
++}
++
++static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ I915_WRITE8(index_port, reg);
++ I915_WRITE8(data_port, val);
++}
++
++static void i915_save_vga(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int i;
++ u16 cr_index, cr_data, st01;
++
++ /* VGA color palette registers */
++ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
++ /* DACCRX automatically increments during read */
++ I915_WRITE8(VGA_DACRX, 0);
++ /* Read 3 bytes of color data from each index */
++ for (i = 0; i < 256 * 3; i++)
++ dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
++
++ /* MSR bits */
++ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
++ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
++ cr_index = VGA_CR_INDEX_CGA;
++ cr_data = VGA_CR_DATA_CGA;
++ st01 = VGA_ST01_CGA;
++ } else {
++ cr_index = VGA_CR_INDEX_MDA;
++ cr_data = VGA_CR_DATA_MDA;
++ st01 = VGA_ST01_MDA;
++ }
++
++ /* CRT controller regs */
++ i915_write_indexed(dev, cr_index, cr_data, 0x11,
++ i915_read_indexed(dev, cr_index, cr_data, 0x11) &
++ (~0x80));
++ for (i = 0; i <= 0x24; i++)
++ dev_priv->saveCR[i] =
++ i915_read_indexed(dev, cr_index, cr_data, i);
++ /* Make sure we don't turn off CR group 0 writes */
++ dev_priv->saveCR[0x11] &= ~0x80;
++
++ /* Attribute controller registers */
++ I915_READ8(st01);
++ dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
++ for (i = 0; i <= 0x14; i++)
++ dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
++ I915_READ8(st01);
++ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
++ I915_READ8(st01);
++
++ /* Graphics controller registers */
++ for (i = 0; i < 9; i++)
++ dev_priv->saveGR[i] =
++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
++
++ dev_priv->saveGR[0x10] =
++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
++ dev_priv->saveGR[0x11] =
++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
++ dev_priv->saveGR[0x18] =
++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
++
++ /* Sequencer registers */
++ for (i = 0; i < 8; i++)
++ dev_priv->saveSR[i] =
++ i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
++}
++
++static void i915_restore_vga(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int i;
++ u16 cr_index, cr_data, st01;
++
++ /* MSR bits */
++ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
++ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
++ cr_index = VGA_CR_INDEX_CGA;
++ cr_data = VGA_CR_DATA_CGA;
++ st01 = VGA_ST01_CGA;
++ } else {
++ cr_index = VGA_CR_INDEX_MDA;
++ cr_data = VGA_CR_DATA_MDA;
++ st01 = VGA_ST01_MDA;
++ }
++
++ /* Sequencer registers, don't write SR07 */
++ for (i = 0; i < 7; i++)
++ i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
++ dev_priv->saveSR[i]);
++
++ /* CRT controller regs */
++ /* Enable CR group 0 writes */
++ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
++ for (i = 0; i <= 0x24; i++)
++ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
++
++ /* Graphics controller regs */
++ for (i = 0; i < 9; i++)
++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
++ dev_priv->saveGR[i]);
++
++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
++ dev_priv->saveGR[0x10]);
++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
++ dev_priv->saveGR[0x11]);
++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
++ dev_priv->saveGR[0x18]);
++
++ /* Attribute controller registers */
++ I915_READ8(st01); /* switch back to index mode */
++ for (i = 0; i <= 0x14; i++)
++ i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
++ I915_READ8(st01); /* switch back to index mode */
++ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
++ I915_READ8(st01);
++
++ /* VGA color palette registers */
++ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
++ /* DACWX automatically increments during write */
++ I915_WRITE8(VGA_DACWX, 0);
++ /* Write 3 bytes of color data to each index */
++ for (i = 0; i < 256 * 3; i++)
++ I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
++
++}
++
++int i915_save_state(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int i;
++
++ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
++
++ /* Display arbitration control */
++ dev_priv->saveDSPARB = I915_READ(DSPARB);
++
++ /* Pipe & plane A info */
++ dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
++ dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
++ dev_priv->saveFPA0 = I915_READ(FPA0);
++ dev_priv->saveFPA1 = I915_READ(FPA1);
++ dev_priv->saveDPLL_A = I915_READ(DPLL_A);
++ if (IS_I965G(dev))
++ dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
++ dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
++ dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
++ dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
++ dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
++ dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
++ dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
++ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
++
++ dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
++ dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
++ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
++ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
++ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
++ if (IS_I965G(dev)) {
++ dev_priv->saveDSPASURF = I915_READ(DSPASURF);
++ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
++ }
++ i915_save_palette(dev, PIPE_A);
++ dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
++
++ /* Pipe & plane B info */
++ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
++ dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
++ dev_priv->saveFPB0 = I915_READ(FPB0);
++ dev_priv->saveFPB1 = I915_READ(FPB1);
++ dev_priv->saveDPLL_B = I915_READ(DPLL_B);
++ if (IS_I965G(dev))
++ dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
++ dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
++ dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
++ dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
++ dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
++ dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
++ dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
++ dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
++
++ dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
++ dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
++ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
++ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
++ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
++ if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
++ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
++ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
++ }
++ i915_save_palette(dev, PIPE_B);
++ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
++
++ /* CRT state */
++ dev_priv->saveADPA = I915_READ(ADPA);
++
++ /* LVDS state */
++ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
++ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
++ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
++ if (IS_I965G(dev))
++ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
++ if (IS_MOBILE(dev) && !IS_I830(dev))
++ dev_priv->saveLVDS = I915_READ(LVDS);
++ if (!IS_I830(dev) && !IS_845G(dev))
++ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
++ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
++ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
++ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
++
++ /* FIXME: save TV & SDVO state */
++
++ /* FBC state */
++ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
++ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
++ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
++ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
++
++ /* Interrupt state */
++ dev_priv->saveIIR = I915_READ(IIR);
++ dev_priv->saveIER = I915_READ(IER);
++ dev_priv->saveIMR = I915_READ(IMR);
++
++ /* VGA state */
++ dev_priv->saveVGA0 = I915_READ(VGA0);
++ dev_priv->saveVGA1 = I915_READ(VGA1);
++ dev_priv->saveVGA_PD = I915_READ(VGA_PD);
++ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
++
++ /* Clock gating state */
++ dev_priv->saveD_STATE = I915_READ(D_STATE);
++ dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
++
++ /* Cache mode state */
++ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
++
++ /* Memory Arbitration state */
++ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
++
++ /* Scratch space */
++ for (i = 0; i < 16; i++) {
++ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
++ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
++ }
++ for (i = 0; i < 3; i++)
++ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
++
++ i915_save_vga(dev);
++
++ return 0;
++}
++
++int i915_restore_state(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ int i;
++
++ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
++
++ I915_WRITE(DSPARB, dev_priv->saveDSPARB);
++
++ /* Pipe & plane A info */
++ /* Prime the clock */
++ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
++ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
++ ~DPLL_VCO_ENABLE);
++ DRM_UDELAY(150);
++ }
++ I915_WRITE(FPA0, dev_priv->saveFPA0);
++ I915_WRITE(FPA1, dev_priv->saveFPA1);
++ /* Actually enable it */
++ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
++ DRM_UDELAY(150);
++ if (IS_I965G(dev))
++ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
++ DRM_UDELAY(150);
++
++ /* Restore mode */
++ I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
++ I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
++ I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
++ I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
++ I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
++ I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
++ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
++
++ /* Restore plane info */
++ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
++ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
++ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
++ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
++ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
++ if (IS_I965G(dev)) {
++ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
++ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
++ }
++
++ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
++
++ i915_restore_palette(dev, PIPE_A);
++ /* Enable the plane */
++ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
++ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
++
++ /* Pipe & plane B info */
++ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
++ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
++ ~DPLL_VCO_ENABLE);
++ DRM_UDELAY(150);
++ }
++ I915_WRITE(FPB0, dev_priv->saveFPB0);
++ I915_WRITE(FPB1, dev_priv->saveFPB1);
++ /* Actually enable it */
++ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
++ DRM_UDELAY(150);
++ if (IS_I965G(dev))
++ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
++ DRM_UDELAY(150);
++
++ /* Restore mode */
++ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
++ I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
++ I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
++ I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
++ I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
++ I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
++ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
++
++ /* Restore plane info */
++ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
++ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
++ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
++ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
++ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
++ if (IS_I965G(dev)) {
++ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
++ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
++ }
++
++ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
++
++ i915_restore_palette(dev, PIPE_B);
++ /* Enable the plane */
++ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
++ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
++
++ /* CRT state */
++ I915_WRITE(ADPA, dev_priv->saveADPA);
++
++ /* LVDS state */
++ if (IS_I965G(dev))
++ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
++ if (IS_MOBILE(dev) && !IS_I830(dev))
++ I915_WRITE(LVDS, dev_priv->saveLVDS);
++ if (!IS_I830(dev) && !IS_845G(dev))
++ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
++
++ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
++ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
++ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
++ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
++ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
++ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
++
++ /* FIXME: restore TV & SDVO state */
++
++ /* FBC info */
++ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
++ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
++ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
++ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
++
++ /* VGA state */
++ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
++ I915_WRITE(VGA0, dev_priv->saveVGA0);
++ I915_WRITE(VGA1, dev_priv->saveVGA1);
++ I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
++ DRM_UDELAY(150);
++
++ /* Clock gating state */
++ I915_WRITE (D_STATE, dev_priv->saveD_STATE);
++ I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
++
++ /* Cache mode state */
++ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
++
++ /* Memory arbitration state */
++ I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
++
++ for (i = 0; i < 16; i++) {
++ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
++ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
++ }
++ for (i = 0; i < 3; i++)
++ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
++
++ i915_restore_vga(dev);
++
++ return 0;
++}
++
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0011-drm-vblank-rework.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0011-drm-vblank-rework.patch
new file mode 100644
index 000000000..6161a71f0
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0011-drm-vblank-rework.patch
@@ -0,0 +1,1534 @@
+commit 2aebb4e4e62d09b4a95be7be7c24a7f6528385b7
+Author: Jesse Barnes <jbarnes@virtuousgeek.org>
+Date: Tue Sep 30 12:14:26 2008 -0700
+
+ drm: Rework vblank-wait handling to allow interrupt reduction.
+
+ Previously, drivers supporting vblank interrupt waits would run the interrupt
+ all the time, or all the time that any 3d client was running, preventing the
+ CPU from sleeping for long when the system was otherwise idle. Now, interrupts
+ are disabled any time that no client is waiting on a vblank event. The new
+ method uses vblank counters on the chipsets when the interrupts are turned
+ off, rather than counting interrupts, so that we can continue to present
+ accurate vblank numbers.
+
+ Co-author: Michel Dänzer <michel@tungstengraphics.com>
+ Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+ Signed-off-by: Dave Airlie <airlied@redhat.com>
+
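A minimal sketch of how a driver is expected to wire the reworked interface, mirroring the i915 changes further down in this patch. The foo_* helpers are hypothetical placeholders; only drm_vblank_init(), drm_handle_vblank() and the three driver hooks come from the patch itself.

    /* Hypothetical driver glue for the reworked vblank API; the foo_*
     * symbols are placeholders, not real DRM or i915 functions. */
    static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
    {
            /* Report the hardware frame counter so the core can account
             * for vblanks that occurred while the interrupt was off. */
            return foo_read_frame_count(dev, crtc);
    }

    static int foo_enable_vblank(struct drm_device *dev, int crtc)
    {
            foo_unmask_vblank_irq(dev, crtc);
            return 0;
    }

    static void foo_disable_vblank(struct drm_device *dev, int crtc)
    {
            foo_mask_vblank_irq(dev, crtc);
    }

    static int foo_irq_postinstall(struct drm_device *dev)
    {
            /* Allocate the per-CRTC counters, wait queues and refcounts. */
            return drm_vblank_init(dev, 2);
    }

    static irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
    {
            struct drm_device *dev = (struct drm_device *) arg;
            int crtc;

            for (crtc = 0; crtc < 2; crtc++)
                    if (foo_vblank_fired(dev, crtc))
                            /* Bumps the "cooked" count and wakes waiters. */
                            drm_handle_vblank(dev, crtc);

            return IRQ_HANDLED;
    }

The three hooks then plug into struct drm_driver as .get_vblank_counter, .enable_vblank and .disable_vblank, which is exactly what the i915_drv.c hunk below does.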
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 452c2d8..fb45fe7 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -116,6 +116,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
+
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+
++ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
++
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ };
+
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 61ed515..d0c13d9 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -71,19 +71,131 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
+ return 0;
+ }
+
++static void vblank_disable_fn(unsigned long arg)
++{
++ struct drm_device *dev = (struct drm_device *)arg;
++ unsigned long irqflags;
++ int i;
++
++ if (!dev->vblank_disable_allowed)
++ return;
++
++ for (i = 0; i < dev->num_crtcs; i++) {
++ spin_lock_irqsave(&dev->vbl_lock, irqflags);
++ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
++ dev->vblank_enabled[i]) {
++ DRM_DEBUG("disabling vblank on crtc %d\n", i);
++ dev->last_vblank[i] =
++ dev->driver->get_vblank_counter(dev, i);
++ dev->driver->disable_vblank(dev, i);
++ dev->vblank_enabled[i] = 0;
++ }
++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++ }
++}
++
++static void drm_vblank_cleanup(struct drm_device *dev)
++{
++ /* Bail if the driver didn't call drm_vblank_init() */
++ if (dev->num_crtcs == 0)
++ return;
++
++ del_timer(&dev->vblank_disable_timer);
++
++ vblank_disable_fn((unsigned long)dev);
++
++ drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
++ DRM_MEM_DRIVER);
++ drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
++ DRM_MEM_DRIVER);
++ drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
++ dev->num_crtcs, DRM_MEM_DRIVER);
++ drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
++ dev->num_crtcs, DRM_MEM_DRIVER);
++ drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
++ dev->num_crtcs, DRM_MEM_DRIVER);
++ drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
++ DRM_MEM_DRIVER);
++ drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
++ dev->num_crtcs, DRM_MEM_DRIVER);
++
++ dev->num_crtcs = 0;
++}
++
++int drm_vblank_init(struct drm_device *dev, int num_crtcs)
++{
++ int i, ret = -ENOMEM;
++
++ setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
++ (unsigned long)dev);
++ spin_lock_init(&dev->vbl_lock);
++ atomic_set(&dev->vbl_signal_pending, 0);
++ dev->num_crtcs = num_crtcs;
++
++ dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
++ DRM_MEM_DRIVER);
++ if (!dev->vbl_queue)
++ goto err;
++
++ dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
++ DRM_MEM_DRIVER);
++ if (!dev->vbl_sigs)
++ goto err;
++
++ dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
++ DRM_MEM_DRIVER);
++ if (!dev->_vblank_count)
++ goto err;
++
++ dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
++ DRM_MEM_DRIVER);
++ if (!dev->vblank_refcount)
++ goto err;
++
++ dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
++ DRM_MEM_DRIVER);
++ if (!dev->vblank_enabled)
++ goto err;
++
++ dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
++ if (!dev->last_vblank)
++ goto err;
++
++ dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
++ DRM_MEM_DRIVER);
++ if (!dev->vblank_inmodeset)
++ goto err;
++
++ /* Zero per-crtc vblank stuff */
++ for (i = 0; i < num_crtcs; i++) {
++ init_waitqueue_head(&dev->vbl_queue[i]);
++ INIT_LIST_HEAD(&dev->vbl_sigs[i]);
++ atomic_set(&dev->_vblank_count[i], 0);
++ atomic_set(&dev->vblank_refcount[i], 0);
++ }
++
++ dev->vblank_disable_allowed = 0;
++
++ return 0;
++
++err:
++ drm_vblank_cleanup(dev);
++ return ret;
++}
++EXPORT_SYMBOL(drm_vblank_init);
++
+ /**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+- * \param irq IRQ number.
+ *
+- * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
++ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
+ * before and after the installation.
+ */
+-static int drm_irq_install(struct drm_device * dev)
++int drm_irq_install(struct drm_device *dev)
+ {
+- int ret;
++ int ret = 0;
+ unsigned long sh_flags = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+@@ -109,17 +221,6 @@ static int drm_irq_install(struct drm_device * dev)
+
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+
+- if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
+- init_waitqueue_head(&dev->vbl_queue);
+-
+- spin_lock_init(&dev->vbl_lock);
+-
+- INIT_LIST_HEAD(&dev->vbl_sigs);
+- INIT_LIST_HEAD(&dev->vbl_sigs2);
+-
+- dev->vbl_pending = 0;
+- }
+-
+ /* Before installing handler */
+ dev->driver->irq_preinstall(dev);
+
+@@ -141,10 +242,16 @@ static int drm_irq_install(struct drm_device * dev)
+ }
+
+ /* After installing handler */
+- dev->driver->irq_postinstall(dev);
++ ret = dev->driver->irq_postinstall(dev);
++ if (ret < 0) {
++ mutex_lock(&dev->struct_mutex);
++ dev->irq_enabled = 0;
++ mutex_unlock(&dev->struct_mutex);
++ }
+
+- return 0;
++ return ret;
+ }
++EXPORT_SYMBOL(drm_irq_install);
+
+ /**
+ * Uninstall the IRQ handler.
+@@ -174,11 +281,12 @@ int drm_irq_uninstall(struct drm_device * dev)
+
+ free_irq(dev->pdev->irq, dev);
+
++ drm_vblank_cleanup(dev);
++
+ dev->locked_tasklet_func = NULL;
+
+ return 0;
+ }
+-
+ EXPORT_SYMBOL(drm_irq_uninstall);
+
+ /**
+@@ -218,6 +326,174 @@ int drm_control(struct drm_device *dev, void *data,
+ }
+
+ /**
++ * drm_vblank_count - retrieve "cooked" vblank counter value
++ * @dev: DRM device
++ * @crtc: which counter to retrieve
++ *
++ * Fetches the "cooked" vblank count value that represents the number of
++ * vblank events since the system was booted, including lost events due to
++ * modesetting activity.
++ */
++u32 drm_vblank_count(struct drm_device *dev, int crtc)
++{
++ return atomic_read(&dev->_vblank_count[crtc]);
++}
++EXPORT_SYMBOL(drm_vblank_count);
++
++/**
++ * drm_update_vblank_count - update the master vblank counter
++ * @dev: DRM device
++ * @crtc: counter to update
++ *
++ * Call back into the driver to update the appropriate vblank counter
++ * (specified by @crtc). Deal with wraparound, if it occurred, and
++ * update the last read value so we can deal with wraparound on the next
++ * call if necessary.
++ *
++ * Only necessary when going from off->on, to account for frames we
++ * didn't get an interrupt for.
++ *
++ * Note: caller must hold dev->vbl_lock since this reads & writes
++ * device vblank fields.
++ */
++static void drm_update_vblank_count(struct drm_device *dev, int crtc)
++{
++ u32 cur_vblank, diff;
++
++ /*
++ * Interrupts were disabled prior to this call, so deal with counter
++ * wrap if needed.
++ * NOTE! It's possible we lost a full dev->max_vblank_count events
++ * here if the register is small or we had vblank interrupts off for
++ * a long time.
++ */
++ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
++ diff = cur_vblank - dev->last_vblank[crtc];
++ if (cur_vblank < dev->last_vblank[crtc]) {
++ diff += dev->max_vblank_count;
++
++ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
++ crtc, dev->last_vblank[crtc], cur_vblank, diff);
++ }
++
++ DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
++ crtc, diff);
++
++ atomic_add(diff, &dev->_vblank_count[crtc]);
++}
++
++/**
++ * drm_vblank_get - get a reference count on vblank events
++ * @dev: DRM device
++ * @crtc: which CRTC to own
++ *
++ * Acquire a reference count on vblank events to avoid having them disabled
++ * while in use.
++ *
++ * RETURNS
++ * Zero on success, nonzero on failure.
++ */
++int drm_vblank_get(struct drm_device *dev, int crtc)
++{
++ unsigned long irqflags;
++ int ret = 0;
++
++ spin_lock_irqsave(&dev->vbl_lock, irqflags);
++ /* Going from 0->1 means we have to enable interrupts again */
++ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
++ !dev->vblank_enabled[crtc]) {
++ ret = dev->driver->enable_vblank(dev, crtc);
++ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
++ if (ret)
++ atomic_dec(&dev->vblank_refcount[crtc]);
++ else {
++ dev->vblank_enabled[crtc] = 1;
++ drm_update_vblank_count(dev, crtc);
++ }
++ }
++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++
++ return ret;
++}
++EXPORT_SYMBOL(drm_vblank_get);
++
++/**
++ * drm_vblank_put - give up ownership of vblank events
++ * @dev: DRM device
++ * @crtc: which counter to give up
++ *
++ * Release ownership of a given vblank counter, turning off interrupts
++ * if possible.
++ */
++void drm_vblank_put(struct drm_device *dev, int crtc)
++{
++ /* Last user schedules interrupt disable */
++ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
++ mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
++}
++EXPORT_SYMBOL(drm_vblank_put);
++
++/**
++ * drm_modeset_ctl - handle vblank event counter changes across mode switch
++ * @DRM_IOCTL_ARGS: standard ioctl arguments
++ *
++ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
++ * ioctls around modesetting so that any lost vblank events are accounted for.
++ *
++ * Generally the counter will reset across mode sets. If interrupts are
++ * enabled around this call, we don't have to do anything since the counter
++ * will have already been incremented.
++ */
++int drm_modeset_ctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_modeset_ctl *modeset = data;
++ unsigned long irqflags;
++ int crtc, ret = 0;
++
++ /* If drm_vblank_init() hasn't been called yet, just no-op */
++ if (!dev->num_crtcs)
++ goto out;
++
++ crtc = modeset->crtc;
++ if (crtc >= dev->num_crtcs) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * To avoid all the problems that might happen if interrupts
++ * were enabled/disabled around or between these calls, we just
++ * have the kernel take a reference on the CRTC (just once though
++ * to avoid corrupting the count if multiple, mismatched calls occur),
++ * so that interrupts remain enabled in the interim.
++ */
++ switch (modeset->cmd) {
++ case _DRM_PRE_MODESET:
++ if (!dev->vblank_inmodeset[crtc]) {
++ dev->vblank_inmodeset[crtc] = 1;
++ drm_vblank_get(dev, crtc);
++ }
++ break;
++ case _DRM_POST_MODESET:
++ if (dev->vblank_inmodeset[crtc]) {
++ spin_lock_irqsave(&dev->vbl_lock, irqflags);
++ dev->vblank_disable_allowed = 1;
++ dev->vblank_inmodeset[crtc] = 0;
++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++ drm_vblank_put(dev, crtc);
++ }
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++out:
++ return ret;
++}
++
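A hedged user-space sketch of the calling convention described in the comment above; the helper name and include paths are illustrative only, while the ioctl number, struct drm_modeset_ctl and the _DRM_PRE_MODESET/_DRM_POST_MODESET commands are the ones this patch adds to drm.h.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Bracket a mode set so the kernel holds a vblank reference and the
     * counter stays consistent across the switch. */
    static void modeset_with_vblank_ctl(int drm_fd, uint32_t crtc)
    {
            struct drm_modeset_ctl ctl = { .crtc = crtc, .cmd = _DRM_PRE_MODESET };

            ioctl(drm_fd, DRM_IOCTL_MODESET_CTL, &ctl); /* kernel takes a vblank ref */

            /* ... program the new mode here ... */

            ctl.cmd = _DRM_POST_MODESET;
            ioctl(drm_fd, DRM_IOCTL_MODESET_CTL, &ctl); /* ref dropped, disable allowed */
    }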
++/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+@@ -236,12 +512,12 @@ int drm_control(struct drm_device *dev, void *data,
+ *
+ * If a signal is not requested, then calls vblank_wait().
+ */
+-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
++int drm_wait_vblank(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
+ {
+ union drm_wait_vblank *vblwait = data;
+- struct timeval now;
+ int ret = 0;
+- unsigned int flags, seq;
++ unsigned int flags, seq, crtc;
+
+ if ((!dev->pdev->irq) || (!dev->irq_enabled))
+ return -EINVAL;
+@@ -255,13 +531,17 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
+ }
+
+ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
++ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+
+- if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
+- DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
++ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
+
+- seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
+- : &dev->vbl_received);
++ ret = drm_vblank_get(dev, crtc);
++ if (ret) {
++ DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
++ return ret;
++ }
++ seq = drm_vblank_count(dev, crtc);
+
+ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+ case _DRM_VBLANK_RELATIVE:
+@@ -270,7 +550,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
+ case _DRM_VBLANK_ABSOLUTE:
+ break;
+ default:
+- return -EINVAL;
++ ret = -EINVAL;
++ goto done;
+ }
+
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+@@ -280,8 +561,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
+
+ if (flags & _DRM_VBLANK_SIGNAL) {
+ unsigned long irqflags;
+- struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+- ? &dev->vbl_sigs2 : &dev->vbl_sigs;
++ struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
+ struct drm_vbl_sig *vbl_sig;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+@@ -302,22 +582,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
+ }
+ }
+
+- if (dev->vbl_pending >= 100) {
++ if (atomic_read(&dev->vbl_signal_pending) >= 100) {
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+- return -EBUSY;
++ ret = -EBUSY;
++ goto done;
+ }
+
+- dev->vbl_pending++;
+-
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+- if (!
+- (vbl_sig =
+- drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
+- return -ENOMEM;
++ vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
++ DRM_MEM_DRIVER);
++ if (!vbl_sig) {
++ ret = -ENOMEM;
++ goto done;
++ }
++
++ ret = drm_vblank_get(dev, crtc);
++ if (ret) {
++ drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
++ DRM_MEM_DRIVER);
++ return ret;
+ }
+
+- memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
++ atomic_inc(&dev->vbl_signal_pending);
+
+ vbl_sig->sequence = vblwait->request.sequence;
+ vbl_sig->info.si_signo = vblwait->request.signal;
+@@ -331,20 +618,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
+
+ vblwait->reply.sequence = seq;
+ } else {
+- if (flags & _DRM_VBLANK_SECONDARY) {
+- if (dev->driver->vblank_wait2)
+- ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
+- } else if (dev->driver->vblank_wait)
+- ret =
+- dev->driver->vblank_wait(dev,
+- &vblwait->request.sequence);
+-
+- do_gettimeofday(&now);
+- vblwait->reply.tval_sec = now.tv_sec;
+- vblwait->reply.tval_usec = now.tv_usec;
++ DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
++ vblwait->request.sequence, crtc);
++ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
++ ((drm_vblank_count(dev, crtc)
++ - vblwait->request.sequence) <= (1 << 23)));
++
++ if (ret != -EINTR) {
++ struct timeval now;
++
++ do_gettimeofday(&now);
++
++ vblwait->reply.tval_sec = now.tv_sec;
++ vblwait->reply.tval_usec = now.tv_usec;
++ vblwait->reply.sequence = drm_vblank_count(dev, crtc);
++ DRM_DEBUG("returning %d to client\n",
++ vblwait->reply.sequence);
++ } else {
++ DRM_DEBUG("vblank wait interrupted by signal\n");
++ }
+ }
+
+- done:
++done:
++ drm_vblank_put(dev, crtc);
+ return ret;
+ }
+
+@@ -352,44 +648,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
+ * Send the VBLANK signals.
+ *
+ * \param dev DRM device.
++ * \param crtc CRTC where the vblank event occurred
+ *
+ * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
+ *
+ * If a signal is not requested, then calls vblank_wait().
+ */
+-void drm_vbl_send_signals(struct drm_device * dev)
++static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
+ {
++ struct drm_vbl_sig *vbl_sig, *tmp;
++ struct list_head *vbl_sigs;
++ unsigned int vbl_seq;
+ unsigned long flags;
+- int i;
+
+ spin_lock_irqsave(&dev->vbl_lock, flags);
+
+- for (i = 0; i < 2; i++) {
+- struct drm_vbl_sig *vbl_sig, *tmp;
+- struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+- unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
+- &dev->vbl_received);
++ vbl_sigs = &dev->vbl_sigs[crtc];
++ vbl_seq = drm_vblank_count(dev, crtc);
+
+- list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
+- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+- vbl_sig->info.si_code = vbl_seq;
+- send_sig_info(vbl_sig->info.si_signo,
+- &vbl_sig->info, vbl_sig->task);
++ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
++ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
++ vbl_sig->info.si_code = vbl_seq;
++ send_sig_info(vbl_sig->info.si_signo,
++ &vbl_sig->info, vbl_sig->task);
+
+- list_del(&vbl_sig->head);
+-
+- drm_free(vbl_sig, sizeof(*vbl_sig),
+- DRM_MEM_DRIVER);
++ list_del(&vbl_sig->head);
+
+- dev->vbl_pending--;
+- }
+- }
++ drm_free(vbl_sig, sizeof(*vbl_sig),
++ DRM_MEM_DRIVER);
++ atomic_dec(&dev->vbl_signal_pending);
++ drm_vblank_put(dev, crtc);
++ }
+ }
+
+ spin_unlock_irqrestore(&dev->vbl_lock, flags);
+ }
+
+-EXPORT_SYMBOL(drm_vbl_send_signals);
++/**
++ * drm_handle_vblank - handle a vblank event
++ * @dev: DRM device
++ * @crtc: where this event occurred
++ *
++ * Drivers should call this routine in their vblank interrupt handlers to
++ * update the vblank counter and send any signals that may be pending.
++ */
++void drm_handle_vblank(struct drm_device *dev, int crtc)
++{
++ atomic_inc(&dev->_vblank_count[crtc]);
++ DRM_WAKEUP(&dev->vbl_queue[crtc]);
++ drm_vbl_send_signals(dev, crtc);
++}
++EXPORT_SYMBOL(drm_handle_vblank);
+
+ /**
+ * Tasklet wrapper function.
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index cead62f..8609ec2 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -673,7 +673,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+- value = dev->irq_enabled;
++ value = dev->pdev->irq ? 1 : 0;
+ break;
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ value = dev_priv->allow_batchbuffer ? 1 : 0;
+@@ -808,7 +808,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ * and the registers being closely associated.
+ */
+ if (!IS_I945G(dev) && !IS_I945GM(dev))
+- pci_enable_msi(dev->pdev);
++ if (pci_enable_msi(dev->pdev))
++ DRM_ERROR("failed to enable MSI\n");
+
+ intel_opregion_init(dev);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index eff66ed..37af03f 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -85,10 +85,8 @@ static struct drm_driver driver = {
+ /* don't use mtrr's here, the Xserver or user space app should
+ * deal with them for intel hardware.
+ */
+- .driver_features =
+- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
+- DRIVER_IRQ_VBL2,
++ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+ .load = i915_driver_load,
+ .unload = i915_driver_unload,
+ .lastclose = i915_driver_lastclose,
+@@ -96,8 +94,9 @@ static struct drm_driver driver = {
+ .suspend = i915_suspend,
+ .resume = i915_resume,
+ .device_is_agp = i915_driver_device_is_agp,
+- .vblank_wait = i915_driver_vblank_wait,
+- .vblank_wait2 = i915_driver_vblank_wait2,
++ .get_vblank_counter = i915_get_vblank_counter,
++ .enable_vblank = i915_enable_vblank,
++ .disable_vblank = i915_disable_vblank,
+ .irq_preinstall = i915_driver_irq_preinstall,
+ .irq_postinstall = i915_driver_irq_postinstall,
+ .irq_uninstall = i915_driver_irq_uninstall,
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 71326ca..d1a02be 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -83,10 +83,15 @@ struct mem_block {
+ typedef struct _drm_i915_vbl_swap {
+ struct list_head head;
+ drm_drawable_t drw_id;
+- unsigned int pipe;
++ unsigned int plane;
+ unsigned int sequence;
+ } drm_i915_vbl_swap_t;
+
++struct opregion_header;
++struct opregion_acpi;
++struct opregion_swsci;
++struct opregion_asle;
++
+ struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+@@ -105,7 +110,7 @@ typedef struct drm_i915_private {
+ drm_dma_handle_t *status_page_dmah;
+ void *hw_status_page;
+ dma_addr_t dma_status_page;
+- unsigned long counter;
++ uint32_t counter;
+ unsigned int status_gfx_addr;
+ drm_local_map_t hws_map;
+
+@@ -247,16 +252,17 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
+ extern int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+-extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+-extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
+ extern void i915_driver_irq_preinstall(struct drm_device * dev);
+-extern void i915_driver_irq_postinstall(struct drm_device * dev);
++extern int i915_driver_irq_postinstall(struct drm_device *dev);
+ extern void i915_driver_irq_uninstall(struct drm_device * dev);
+ extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
++extern int i915_enable_vblank(struct drm_device *dev, int crtc);
++extern void i915_disable_vblank(struct drm_device *dev, int crtc);
++extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
+ extern int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+@@ -278,6 +284,10 @@ extern void i915_mem_release(struct drm_device * dev,
+ extern int i915_save_state(struct drm_device *dev);
+ extern int i915_restore_state(struct drm_device *dev);
+
++/* i915_suspend.c */
++extern int i915_save_state(struct drm_device *dev);
++extern int i915_restore_state(struct drm_device *dev);
++
+ /* i915_opregion.c */
+ extern int intel_opregion_init(struct drm_device *dev);
+ extern void intel_opregion_free(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index ae7d3a8..f875959 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -35,9 +35,8 @@
+
+ /** These are the interrupts used by the driver */
+ #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
+- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
+- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \
+ I915_ASLE_INTERRUPT | \
++ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
+ void
+@@ -61,6 +60,64 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+ }
+
+ /**
++ * i915_get_pipe - return the pipe associated with a given plane
++ * @dev: DRM device
++ * @plane: plane to look for
++ *
++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
++ * rather than a pipe number, since they may not always be equal. This routine
++ * maps the given @plane back to a pipe number.
++ */
++static int
++i915_get_pipe(struct drm_device *dev, int plane)
++{
++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++ u32 dspcntr;
++
++ dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
++
++ return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
++}
++
++/**
++ * i915_get_plane - return the plane associated with a given pipe
++ * @dev: DRM device
++ * @pipe: pipe to look for
++ *
++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
++ * rather than a pipe number, since they may not always be equal. This routine
++ * maps the given @pipe back to a plane number.
++ */
++static int
++i915_get_plane(struct drm_device *dev, int pipe)
++{
++ if (i915_get_pipe(dev, 0) == pipe)
++ return 0;
++ return 1;
++}
++
++/**
++ * i915_pipe_enabled - check if a pipe is enabled
++ * @dev: DRM device
++ * @pipe: pipe to check
++ *
++ * Reading certain registers when the pipe is disabled can hang the chip.
++ * Use this routine to make sure the PLL is running and the pipe is active
++ * before reading such registers if unsure.
++ */
++static int
++i915_pipe_enabled(struct drm_device *dev, int pipe)
++{
++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
++
++ if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
++ return 1;
++
++ return 0;
++}
++
++/**
+ * Emit blits for scheduled buffer swaps.
+ *
+ * This function will be called with the HW lock held.
+@@ -71,8 +128,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
+ unsigned long irqflags;
+ struct list_head *list, *tmp, hits, *hit;
+ int nhits, nrects, slice[2], upper[2], lower[2], i;
+- unsigned counter[2] = { atomic_read(&dev->vbl_received),
+- atomic_read(&dev->vbl_received2) };
++ unsigned counter[2];
+ struct drm_drawable_info *drw;
+ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ u32 cpp = dev_priv->cpp;
+@@ -94,6 +150,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
+ src_pitch >>= 2;
+ }
+
++ counter[0] = drm_vblank_count(dev, 0);
++ counter[1] = drm_vblank_count(dev, 1);
++
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&hits);
+@@ -106,12 +165,14 @@ static void i915_vblank_tasklet(struct drm_device *dev)
+ list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
+ drm_i915_vbl_swap_t *vbl_swap =
+ list_entry(list, drm_i915_vbl_swap_t, head);
++ int pipe = i915_get_pipe(dev, vbl_swap->plane);
+
+- if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
++ if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
+ continue;
+
+ list_del(list);
+ dev_priv->swaps_pending--;
++ drm_vblank_put(dev, pipe);
+
+ spin_unlock(&dev_priv->swaps_lock);
+ spin_lock(&dev->drw_lock);
+@@ -204,7 +265,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
+ drm_i915_vbl_swap_t *swap_hit =
+ list_entry(hit, drm_i915_vbl_swap_t, head);
+ struct drm_clip_rect *rect;
+- int num_rects, pipe;
++ int num_rects, plane;
+ unsigned short top, bottom;
+
+ drw = drm_get_drawable_info(dev, swap_hit->drw_id);
+@@ -213,9 +274,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
+ continue;
+
+ rect = drw->rects;
+- pipe = swap_hit->pipe;
+- top = upper[pipe];
+- bottom = lower[pipe];
++ plane = swap_hit->plane;
++ top = upper[plane];
++ bottom = lower[plane];
+
+ for (num_rects = drw->num_rects; num_rects--; rect++) {
+ int y1 = max(rect->y1, top);
+@@ -252,22 +313,54 @@ static void i915_vblank_tasklet(struct drm_device *dev)
+ }
+ }
+
++u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
++{
++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++ unsigned long high_frame;
++ unsigned long low_frame;
++ u32 high1, high2, low, count;
++ int pipe;
++
++ pipe = i915_get_pipe(dev, plane);
++ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
++ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
++
++ if (!i915_pipe_enabled(dev, pipe)) {
++ DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
++ return 0;
++ }
++
++ /*
++ * High & low register fields aren't synchronized, so make sure
++ * we get a low value that's stable across two reads of the high
++ * register.
++ */
++ do {
++ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++ PIPE_FRAME_HIGH_SHIFT);
++ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
++ PIPE_FRAME_LOW_SHIFT);
++ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++ PIPE_FRAME_HIGH_SHIFT);
++ } while (high1 != high2);
++
++ count = (high1 << 8) | low;
++
++ return count;
++}
++
+ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ {
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+- u32 pipea_stats, pipeb_stats;
+ u32 iir;
+-
+- pipea_stats = I915_READ(PIPEASTAT);
+- pipeb_stats = I915_READ(PIPEBSTAT);
++ u32 pipea_stats, pipeb_stats;
++ int vblank = 0;
+
+ if (dev->pdev->msi_enabled)
+ I915_WRITE(IMR, ~0);
+ iir = I915_READ(IIR);
+
+- DRM_DEBUG("iir=%08x\n", iir);
+-
+ if (iir == 0) {
+ if (dev->pdev->msi_enabled) {
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+@@ -276,48 +369,56 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ return IRQ_NONE;
+ }
+
+- I915_WRITE(PIPEASTAT, pipea_stats);
+- I915_WRITE(PIPEBSTAT, pipeb_stats);
+-
+- I915_WRITE(IIR, iir);
+- if (dev->pdev->msi_enabled)
+- I915_WRITE(IMR, dev_priv->irq_mask_reg);
+- (void) I915_READ(IIR); /* Flush posted writes */
+-
+- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+-
+- if (iir & I915_USER_INTERRUPT)
+- DRM_WAKEUP(&dev_priv->irq_queue);
+-
+- if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
+- int vblank_pipe = dev_priv->vblank_pipe;
+-
+- if ((vblank_pipe &
+- (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
+- == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
+- if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
+- atomic_inc(&dev->vbl_received);
+- if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+- atomic_inc(&dev->vbl_received2);
+- } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
+- (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
+- ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
+- (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
+- atomic_inc(&dev->vbl_received);
++ /*
++ * Clear the PIPE(A|B)STAT regs before the IIR otherwise
++ * we may get extra interrupts.
++ */
++ if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
++ pipea_stats = I915_READ(PIPEASTAT);
++ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
++ pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
++ PIPE_VBLANK_INTERRUPT_STATUS)) {
++ vblank++;
++ drm_handle_vblank(dev, i915_get_plane(dev, 0));
++ }
+
+- DRM_WAKEUP(&dev->vbl_queue);
+- drm_vbl_send_signals(dev);
++ I915_WRITE(PIPEASTAT, pipea_stats);
++ }
++ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
++ pipeb_stats = I915_READ(PIPEBSTAT);
++ /* Ack the event */
++ I915_WRITE(PIPEBSTAT, pipeb_stats);
++
++ /* The vblank interrupt gets enabled even if we didn't ask for
++ it, so make sure it's shut down again */
++ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
++ pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
++ PIPE_VBLANK_INTERRUPT_STATUS)) {
++ vblank++;
++ drm_handle_vblank(dev, i915_get_plane(dev, 1));
++ }
+
+- if (dev_priv->swaps_pending > 0)
+- drm_locked_tasklet(dev, i915_vblank_tasklet);
++ if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
++ opregion_asle_intr(dev);
++ I915_WRITE(PIPEBSTAT, pipeb_stats);
+ }
+
+ if (iir & I915_ASLE_INTERRUPT)
+ opregion_asle_intr(dev);
+
+- if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+- opregion_asle_intr(dev);
++ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++
++ if (dev->pdev->msi_enabled)
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ I915_WRITE(IIR, iir);
++ (void) I915_READ(IIR);
++
++ if (vblank && dev_priv->swaps_pending > 0)
++ drm_locked_tasklet(dev, i915_vblank_tasklet);
+
+ return IRQ_HANDLED;
+ }
+@@ -358,7 +459,7 @@ static void i915_user_irq_get(struct drm_device *dev)
+ spin_unlock(&dev_priv->user_irq_lock);
+ }
+
+-static void i915_user_irq_put(struct drm_device *dev)
++void i915_user_irq_put(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+@@ -395,41 +496,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+ }
+
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+- return ret;
+-}
+-
+-static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
+- atomic_t *counter)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- unsigned int cur_vblank;
+- int ret = 0;
+-
+- if (!dev_priv) {
+- DRM_ERROR("called with no initialization\n");
+- return -EINVAL;
+- }
+-
+- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+- (((cur_vblank = atomic_read(counter))
+- - *sequence) <= (1<<23)));
+-
+- *sequence = cur_vblank;
+
+ return ret;
+ }
+
+-
+-int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+-{
+- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
+-}
+-
+-int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+-{
+- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
+-}
+-
+ /* Needs the lock as it touches the ring.
+ */
+ int i915_irq_emit(struct drm_device *dev, void *data,
+@@ -472,40 +542,88 @@ int i915_irq_wait(struct drm_device *dev, void *data,
+ return i915_wait_irq(dev, irqwait->irq_seq);
+ }
+
++int i915_enable_vblank(struct drm_device *dev, int plane)
++{
++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++ int pipe = i915_get_pipe(dev, plane);
++ u32 pipestat_reg = 0;
++ u32 pipestat;
++
++ switch (pipe) {
++ case 0:
++ pipestat_reg = PIPEASTAT;
++ i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
++ break;
++ case 1:
++ pipestat_reg = PIPEBSTAT;
++ i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
++ break;
++ default:
++ DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
++ pipe);
++ break;
++ }
++
++ if (pipestat_reg) {
++ pipestat = I915_READ(pipestat_reg);
++ if (IS_I965G(dev))
++ pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
++ else
++ pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
++ /* Clear any stale interrupt status */
++ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
++ PIPE_VBLANK_INTERRUPT_STATUS);
++ I915_WRITE(pipestat_reg, pipestat);
++ }
++
++ return 0;
++}
++
++void i915_disable_vblank(struct drm_device *dev, int plane)
++{
++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++ int pipe = i915_get_pipe(dev, plane);
++ u32 pipestat_reg = 0;
++ u32 pipestat;
++
++ switch (pipe) {
++ case 0:
++ pipestat_reg = PIPEASTAT;
++ i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
++ break;
++ case 1:
++ pipestat_reg = PIPEBSTAT;
++ i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
++ break;
++ default:
++ DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
++ pipe);
++ break;
++ }
++
++ if (pipestat_reg) {
++ pipestat = I915_READ(pipestat_reg);
++ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ /* Clear any stale interrupt status */
++ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
++ PIPE_VBLANK_INTERRUPT_STATUS);
++ I915_WRITE(pipestat_reg, pipestat);
++ }
++}
++
+ /* Set the vblank monitor pipe
+ */
+ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- drm_i915_vblank_pipe_t *pipe = data;
+- u32 enable_mask = 0, disable_mask = 0;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+- if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
+- DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
+- return -EINVAL;
+- }
+-
+- if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
+- enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+- else
+- disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+-
+- if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
+- enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+- else
+- disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+-
+- i915_enable_irq(dev_priv, enable_mask);
+- i915_disable_irq(dev_priv, disable_mask);
+-
+- dev_priv->vblank_pipe = pipe->pipe;
+-
+ return 0;
+ }
+
+@@ -514,19 +632,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
+- u16 flag;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+- flag = I915_READ(IMR);
+- pipe->pipe = 0;
+- if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
+- pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
+- if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+- pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
++ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+ }
+@@ -540,9 +652,10 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_swap_t *swap = data;
+ drm_i915_vbl_swap_t *vbl_swap;
+- unsigned int pipe, seqtype, curseq;
++ unsigned int pipe, seqtype, curseq, plane;
+ unsigned long irqflags;
+ struct list_head *list;
++ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __func__);
+@@ -560,7 +673,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
+- pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
++ plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
++ pipe = i915_get_pipe(dev, plane);
+
+ seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
+
+@@ -579,7 +693,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+
+ spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+- curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
++ /*
++ * We take the ref here and put it when the swap actually completes
++ * in the tasklet.
++ */
++ ret = drm_vblank_get(dev, pipe);
++ if (ret)
++ return ret;
++ curseq = drm_vblank_count(dev, pipe);
+
+ if (seqtype == _DRM_VBLANK_RELATIVE)
+ swap->sequence += curseq;
+@@ -589,6 +710,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+ swap->sequence = curseq + 1;
+ } else {
+ DRM_DEBUG("Missed target sequence\n");
++ drm_vblank_put(dev, pipe);
+ return -EINVAL;
+ }
+ }
+@@ -599,7 +721,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+ vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+
+ if (vbl_swap->drw_id == swap->drawable &&
+- vbl_swap->pipe == pipe &&
++ vbl_swap->plane == plane &&
+ vbl_swap->sequence == swap->sequence) {
+ spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ DRM_DEBUG("Already scheduled\n");
+@@ -611,6 +733,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+
+ if (dev_priv->swaps_pending >= 100) {
+ DRM_DEBUG("Too many swaps queued\n");
++ drm_vblank_put(dev, pipe);
+ return -EBUSY;
+ }
+
+@@ -618,13 +741,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+
+ if (!vbl_swap) {
+ DRM_ERROR("Failed to allocate memory to queue swap\n");
++ drm_vblank_put(dev, pipe);
+ return -ENOMEM;
+ }
+
+ DRM_DEBUG("\n");
+
+ vbl_swap->drw_id = swap->drawable;
+- vbl_swap->pipe = pipe;
++ vbl_swap->plane = plane;
+ vbl_swap->sequence = swap->sequence;
+
+ spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+@@ -643,28 +767,32 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+- I915_WRITE(HWSTAM, 0xfffe);
+- I915_WRITE(IMR, 0x0);
++ I915_WRITE(HWSTAM, 0xeffe);
++ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ }
+
+-void i915_driver_irq_postinstall(struct drm_device * dev)
++int i915_driver_irq_postinstall(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++ int ret, num_pipes = 2;
+
+ spin_lock_init(&dev_priv->swaps_lock);
+ INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+ dev_priv->swaps_pending = 0;
+
+- if (!dev_priv->vblank_pipe)
+- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+-
+ /* Set initial unmasked IRQs to just the selected vblank pipes. */
+ dev_priv->irq_mask_reg = ~0;
+- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
++
++ ret = drm_vblank_init(dev, num_pipes);
++ if (ret)
++ return ret;
++
++ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
++ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
++ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
++
++ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+ dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
+
+@@ -673,22 +801,29 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
+ (void) I915_READ(IER);
+
+ opregion_enable_asle(dev);
+-
+ DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
++
++ return 0;
+ }
+
+ void i915_driver_irq_uninstall(struct drm_device * dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+- u16 temp;
++ u32 temp;
+
+ if (!dev_priv)
+ return;
+
+- I915_WRITE(HWSTAM, 0xffff);
+- I915_WRITE(IMR, 0xffff);
++ dev_priv->vblank_pipe = 0;
++
++ I915_WRITE(HWSTAM, 0xffffffff);
++ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
++ temp = I915_READ(PIPEASTAT);
++ I915_WRITE(PIPEASTAT, temp);
++ temp = I915_READ(PIPEBSTAT);
++ I915_WRITE(PIPEBSTAT, temp);
+ temp = I915_READ(IIR);
+ I915_WRITE(IIR, temp);
+ }
+diff --git a/include/drm/drm.h b/include/drm/drm.h
+index 0864c69..15e5503 100644
+--- a/include/drm/drm.h
++++ b/include/drm/drm.h
+@@ -454,6 +454,7 @@ struct drm_irq_busid {
+ enum drm_vblank_seq_type {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
++ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
+@@ -486,6 +487,19 @@ union drm_wait_vblank {
+ struct drm_wait_vblank_reply reply;
+ };
+
++#define _DRM_PRE_MODESET 1
++#define _DRM_POST_MODESET 2
++
++/**
++ * DRM_IOCTL_MODESET_CTL ioctl argument type
++ *
++ * \sa drmModesetCtl().
++ */
++struct drm_modeset_ctl {
++ uint32_t crtc;
++ uint32_t cmd;
++};
++
+ /**
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+@@ -570,6 +584,7 @@ struct drm_set_version {
+ #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+ #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+ #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
++#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+
+ #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+ #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index 1c1b13e..e79ce07 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -580,11 +580,54 @@ struct drm_driver {
+ int (*kernel_context_switch) (struct drm_device *dev, int old,
+ int new);
+ void (*kernel_context_switch_unlock) (struct drm_device *dev);
+- int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
+- int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
+ int (*dri_library_name) (struct drm_device *dev, char *buf);
+
+ /**
++ * get_vblank_counter - get raw hardware vblank counter
++ * @dev: DRM device
++ * @crtc: counter to fetch
++ *
++ * Driver callback for fetching a raw hardware vblank counter
++ * for @crtc. If a device doesn't have a hardware counter, the
++ * driver can simply return the value of drm_vblank_count and
++ * make the enable_vblank() and disable_vblank() hooks into no-ops,
++ * leaving interrupts enabled at all times.
++ *
++ * Wraparound handling and loss of events due to modesetting is dealt
++ * with in the DRM core code.
++ *
++ * RETURNS
++ * Raw vblank counter value.
++ */
++ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
++
++ /**
++ * enable_vblank - enable vblank interrupt events
++ * @dev: DRM device
++ * @crtc: which irq to enable
++ *
++ * Enable vblank interrupts for @crtc. If the device doesn't have
++ * a hardware vblank counter, this routine should be a no-op, since
++ * interrupts will have to stay on to keep the count accurate.
++ *
++ * RETURNS
++ * Zero on success, appropriate errno if the given @crtc's vblank
++ * interrupt cannot be enabled.
++ */
++ int (*enable_vblank) (struct drm_device *dev, int crtc);
++
++ /**
++ * disable_vblank - disable vblank interrupt events
++ * @dev: DRM device
++ * @crtc: which irq to disable
++ *
++ * Disable vblank interrupts for @crtc. If the device doesn't have
++ * a hardware vblank counter, this routine should be a no-op, since
++ * interrupts will have to stay on to keep the count accurate.
++ */
++ void (*disable_vblank) (struct drm_device *dev, int crtc);
++
++ /**
+ * Called by \c drm_device_is_agp. Typically used to determine if a
+ * card is really attached to AGP or not.
+ *
+@@ -601,7 +644,7 @@ struct drm_driver {
+
+ irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+ void (*irq_preinstall) (struct drm_device *dev);
+- void (*irq_postinstall) (struct drm_device *dev);
++ int (*irq_postinstall) (struct drm_device *dev);
+ void (*irq_uninstall) (struct drm_device *dev);
+ void (*reclaim_buffers) (struct drm_device *dev,
+ struct drm_file * file_priv);
+@@ -730,13 +773,28 @@ struct drm_device {
+ /** \name VBLANK IRQ support */
+ /*@{ */
+
+- wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
+- atomic_t vbl_received;
+- atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
++ /*
++ * At load time, disabling the vblank interrupt won't be allowed since
++ * old clients may not call the modeset ioctl and therefore misbehave.
++ * Once the modeset ioctl *has* been called though, we can safely
++ * disable them when unused.
++ */
++ int vblank_disable_allowed;
++
++ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
++ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+ spinlock_t vbl_lock;
+- struct list_head vbl_sigs; /**< signal list to send on VBLANK */
+- struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
+- unsigned int vbl_pending;
++ struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
++ atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
++ atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
++ u32 *last_vblank; /* protected by dev->vbl_lock, used */
++ /* for wraparound handling */
++ int *vblank_enabled; /* so we don't call enable more than
++ once per disable */
++ int *vblank_inmodeset; /* Display driver is setting mode */
++ struct timer_list vblank_disable_timer;
++
++ u32 max_vblank_count; /**< size of vblank counter register */
+ spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
+ void (*locked_tasklet_func)(struct drm_device *dev);
+
+@@ -757,6 +815,7 @@ struct drm_device {
+ struct pci_controller *hose;
+ #endif
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
++ int num_crtcs; /**< Number of CRTCs on this device */
+ void *dev_private; /**< device private data */
+ struct drm_sigdata sigdata; /**< For block_all_signals */
+ sigset_t sigmask;
+@@ -990,10 +1049,19 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev);
+ extern void drm_driver_irq_postinstall(struct drm_device *dev);
+ extern void drm_driver_irq_uninstall(struct drm_device *dev);
+
++extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+ extern int drm_wait_vblank(struct drm_device *dev, void *data,
+- struct drm_file *file_priv);
++ struct drm_file *filp);
+ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+-extern void drm_vbl_send_signals(struct drm_device *dev);
++extern void drm_locked_tasklet(struct drm_device *dev,
++ void(*func)(struct drm_device *));
++extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
++extern void drm_handle_vblank(struct drm_device *dev, int crtc);
++extern int drm_vblank_get(struct drm_device *dev, int crtc);
++extern void drm_vblank_put(struct drm_device *dev, int crtc);
++/* Modesetting support */
++extern int drm_modeset_ctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
+ extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
+
+ /* AGP/GART support (drm_agpsupport.h) */
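The drmP.h hunk above replaces the per-driver vblank_wait()/vblank_wait2() hooks with a per-CRTC counter model: the driver only reports a raw hardware counter and enables or disables the interrupt, while the core handles wraparound, reference counting and deferred disabling. A minimal sketch of how a driver might wire the new hooks and entry points follows; the MYDRV_* accessors, register names and the two-CRTC count are placeholder assumptions, not real hardware definitions.

#include "drmP.h"

/* Hedged sketch only: MYDRV_READ/MYDRV_WRITE, MY_PIPE_FRMCOUNT,
 * MY_IRQ_MASK, MY_IRQ_STATUS and MY_VBLANK_BIT are hypothetical
 * placeholders standing in for a driver's register accessors.
 */
static u32 mydrv_get_vblank_counter(struct drm_device *dev, int crtc)
{
        /* Raw hardware frame counter; wraparound is handled by the core. */
        return MYDRV_READ(dev, MY_PIPE_FRMCOUNT(crtc));
}

static int mydrv_enable_vblank(struct drm_device *dev, int crtc)
{
        MYDRV_WRITE(dev, MY_IRQ_MASK,
                    MYDRV_READ(dev, MY_IRQ_MASK) & ~MY_VBLANK_BIT(crtc));
        return 0;
}

static void mydrv_disable_vblank(struct drm_device *dev, int crtc)
{
        MYDRV_WRITE(dev, MY_IRQ_MASK,
                    MYDRV_READ(dev, MY_IRQ_MASK) | MY_VBLANK_BIT(crtc));
}

/* At load time the driver sizes the per-CRTC bookkeeping arrays. */
static int mydrv_load(struct drm_device *dev, unsigned long flags)
{
        return drm_vblank_init(dev, 2); /* e.g. two CRTCs */
}

/* The interrupt handler only notifies the core for each CRTC that fired. */
static irqreturn_t mydrv_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;

        if (MYDRV_READ(dev, MY_IRQ_STATUS) & MY_VBLANK_BIT(0))
                drm_handle_vblank(dev, 0);
        return IRQ_HANDLED;
}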
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0012-Export-shmem_file_setup-for-DRM-GEM.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0012-Export-shmem_file_setup-for-DRM-GEM.patch
new file mode 100644
index 000000000..642d89ba7
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0012-Export-shmem_file_setup-for-DRM-GEM.patch
@@ -0,0 +1,25 @@
+commit 48e13db26a25ebaf61f1fc28f612d6b35ddf1965
+Author: Keith Packard <keithp@keithp.com>
+Date: Fri Jun 20 00:08:06 2008 -0700
+
+ Export shmem_file_setup for DRM-GEM
+
+ GEM needs to create shmem files to back buffer objects. Though currently
+ creation of files for objects could have been driven from userland, the
+ modesetting work will require allocation of buffer objects before userland
+ is running, for boot-time message display.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 04fb4f1..515909d 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2582,6 +2582,7 @@ put_memory:
+ shmem_unacct_size(flags, size);
+ return ERR_PTR(error);
+ }
++EXPORT_SYMBOL(shmem_file_setup);
+
+ /**
+ * shmem_zero_setup - setup a shared anonymous mapping
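As the commit message above says, GEM gives every buffer object a swap-backed struct file, so an object's pages live in shmem and can be paged out like ordinary anonymous memory. A hedged sketch of the allocation pattern this export enables is given below; struct my_object and the function names are hypothetical, and the real user is drm_gem_object_alloc() in the GEM patch further down.

#include <linux/err.h>
#include <linux/file.h>
#include <linux/mm.h>

/* Hypothetical wrapper object; sketch only. */
struct my_object {
        struct file *filp;      /* shmem backing store */
        size_t size;
};

static int my_object_init(struct my_object *obj, size_t size)
{
        /* Unnamed, swap-backed file; pages are allocated lazily on use. */
        obj->filp = shmem_file_setup("drm mm object", size, 0);
        if (IS_ERR(obj->filp))
                return PTR_ERR(obj->filp);
        obj->size = size;
        return 0;
}

static void my_object_fini(struct my_object *obj)
{
        fput(obj->filp);        /* drops the file and its shmem pages */
}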
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch
new file mode 100644
index 000000000..cc90d4626
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch
@@ -0,0 +1,24 @@
+commit 25eaa97fc74b225e13cf11ed8d770192ddc9355d
+Author: Eric Anholt <eric@anholt.net>
+Date: Thu Aug 21 12:53:33 2008 -0700
+
+ Export kmap_atomic_pfn for DRM-GEM.
+
+ The driver would like to map IO space directly for copying data in when
+ appropriate, to avoid CPU cache flushing for streaming writes.
+ kmap_atomic_pfn lets us avoid IPIs associated with ioremap for this process.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index 165c871..d52e91d 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+
+ return (void*) vaddr;
+ }
++EXPORT_SYMBOL(kmap_atomic_pfn);
+
+ struct page *kmap_atomic_to_page(void *ptr)
+ {
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch
new file mode 100644
index 000000000..95cca5d0c
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch
@@ -0,0 +1,5483 @@
+commit c97398223c6a505fac2c783a624dc80e0aa5d5d0
+Author: Eric Anholt <eric@anholt.net>
+Date: Wed Jul 30 12:06:12 2008 -0700
+
+ drm: Add GEM ("graphics execution manager") to i915 driver.
+
+ GEM allows the creation of persistent buffer objects accessible by the
+ graphics device through new ioctls for managing execution of commands on the
+ device. The userland API is almost entirely driver-specific to ensure that
+ any driver building on this model can easily map the interface to individual
+ driver requirements.
+
+ GEM is used by the 2d driver for managing its internal state allocations and
+ will be used for pixmap storage to reduce memory consumption and enable
+ zero-copy GLX_EXT_texture_from_pixmap, and in the 3d driver is used to enable
+ GL_EXT_framebuffer_object and GL_ARB_pixel_buffer_object.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index e9f9a97..74da994 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -4,8 +4,9 @@
+
+ ccflags-y := -Iinclude/drm
+
+-drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
+- drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
++drm-y := drm_auth.o drm_bufs.o drm_cache.o \
++ drm_context.o drm_dma.o drm_drawable.o \
++ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
+diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
+index aefa5ac..2639be2 100644
+--- a/drivers/gpu/drm/drm_agpsupport.c
++++ b/drivers/gpu/drm/drm_agpsupport.c
+@@ -33,6 +33,7 @@
+
+ #include "drmP.h"
+ #include <linux/module.h>
++#include <asm/agp.h>
+
+ #if __OS_HAS_AGP
+
+@@ -452,4 +453,52 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
+ return agp_unbind_memory(handle);
+ }
+
+-#endif /* __OS_HAS_AGP */
++/**
++ * Binds a collection of pages into AGP memory at the given offset, returning
++ * the AGP memory structure containing them.
++ *
++ * No reference is held on the pages during this time -- it is up to the
++ * caller to handle that.
++ */
++DRM_AGP_MEM *
++drm_agp_bind_pages(struct drm_device *dev,
++ struct page **pages,
++ unsigned long num_pages,
++ uint32_t gtt_offset)
++{
++ DRM_AGP_MEM *mem;
++ int ret, i;
++
++ DRM_DEBUG("\n");
++
++ mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
++ AGP_USER_MEMORY);
++ if (mem == NULL) {
++ DRM_ERROR("Failed to allocate memory for %ld pages\n",
++ num_pages);
++ return NULL;
++ }
++
++ for (i = 0; i < num_pages; i++)
++ mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
++ mem->page_count = num_pages;
++
++ mem->is_flushed = true;
++ ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
++ if (ret != 0) {
++ DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
++ agp_free_memory(mem);
++ return NULL;
++ }
++
++ return mem;
++}
++EXPORT_SYMBOL(drm_agp_bind_pages);
++
++void drm_agp_chipset_flush(struct drm_device *dev)
++{
++ agp_flush_chipset(dev->agp->bridge);
++}
++EXPORT_SYMBOL(drm_agp_chipset_flush);
++
++#endif /* __OS_HAS_AGP */
+diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
+new file mode 100644
+index 0000000..9475f7d
+--- /dev/null
++++ b/drivers/gpu/drm/drm_cache.c
+@@ -0,0 +1,76 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++#if defined(CONFIG_X86)
++static void
++drm_clflush_page(struct page *page)
++{
++ uint8_t *page_virtual;
++ unsigned int i;
++
++ if (unlikely(page == NULL))
++ return;
++
++ page_virtual = kmap_atomic(page, KM_USER0);
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ clflush(page_virtual + i);
++ kunmap_atomic(page_virtual, KM_USER0);
++}
++#endif
++
++static void
++drm_clflush_ipi_handler(void *null)
++{
++ wbinvd();
++}
++
++void
++drm_clflush_pages(struct page *pages[], unsigned long num_pages)
++{
++
++#if defined(CONFIG_X86)
++ if (cpu_has_clflush) {
++ unsigned long i;
++
++ mb();
++ for (i = 0; i < num_pages; ++i)
++ drm_clflush_page(*pages++);
++ mb();
++
++ return;
++ }
++#endif
++
++ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
++ DRM_ERROR("Timed out waiting for cache flush.\n");
++}
++EXPORT_SYMBOL(drm_clflush_pages);
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index fb45fe7..96f416a 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -119,6 +119,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+ };
+
+ #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index dcf8b4d..0d46627 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
+
+ INIT_LIST_HEAD(&priv->lhead);
+
++ if (dev->driver->driver_features & DRIVER_GEM)
++ drm_gem_open(dev, priv);
++
+ if (dev->driver->open) {
+ ret = dev->driver->open(dev, priv);
+ if (ret < 0)
+@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
+ dev->driver->reclaim_buffers(dev, file_priv);
+ }
+
++ if (dev->driver->driver_features & DRIVER_GEM)
++ drm_gem_release(dev, file_priv);
++
+ drm_fasync(-1, filp, 0);
+
+ mutex_lock(&dev->ctxlist_mutex);
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+new file mode 100644
+index 0000000..434155b
+--- /dev/null
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -0,0 +1,420 @@
++/*
++ * Copyright © 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/uaccess.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/module.h>
++#include <linux/mman.h>
++#include <linux/pagemap.h>
++#include "drmP.h"
++
++/** @file drm_gem.c
++ *
++ * This file provides some of the base ioctls and library routines for
++ * the graphics memory manager implemented by each device driver.
++ *
++ * Because various devices have different requirements in terms of
++ * synchronization and migration strategies, implementing that is left up to
++ * the driver, and all that the general API provides should be generic --
++ * allocating objects, reading/writing data with the cpu, freeing objects.
++ * Even there, platform-dependent optimizations for reading/writing data with
++ * the CPU mean we'll likely hook those out to driver-specific calls. However,
++ * the DRI2 implementation wants to have at least allocate/mmap be generic.
++ *
++ * The goal was to have swap-backed object allocation managed through
++ * struct file. However, file descriptors as handles to a struct file have
++ * two major failings:
++ * - Process limits prevent more than 1024 or so being used at a time by
++ * default.
++ * - Inability to allocate high fds will aggravate the X Server's select()
++ * handling, and likely that of many GL client applications as well.
++ *
++ * This led to a plan of using our own integer IDs (called handles, following
++ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
++ * ioctls. The objects themselves will still include the struct file so
++ * that we can transition to fds if the required kernel infrastructure shows
++ * up at a later date, and as our interface with shmfs for memory allocation.
++ */
++
++/**
++ * Initialize the GEM device fields
++ */
++
++int
++drm_gem_init(struct drm_device *dev)
++{
++ spin_lock_init(&dev->object_name_lock);
++ idr_init(&dev->object_name_idr);
++ atomic_set(&dev->object_count, 0);
++ atomic_set(&dev->object_memory, 0);
++ atomic_set(&dev->pin_count, 0);
++ atomic_set(&dev->pin_memory, 0);
++ atomic_set(&dev->gtt_count, 0);
++ atomic_set(&dev->gtt_memory, 0);
++ return 0;
++}
++
++/**
++ * Allocate a GEM object of the specified size with shmfs backing store
++ */
++struct drm_gem_object *
++drm_gem_object_alloc(struct drm_device *dev, size_t size)
++{
++ struct drm_gem_object *obj;
++
++ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
++
++ obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
++
++ obj->dev = dev;
++ obj->filp = shmem_file_setup("drm mm object", size, 0);
++ if (IS_ERR(obj->filp)) {
++ kfree(obj);
++ return NULL;
++ }
++
++ kref_init(&obj->refcount);
++ kref_init(&obj->handlecount);
++ obj->size = size;
++ if (dev->driver->gem_init_object != NULL &&
++ dev->driver->gem_init_object(obj) != 0) {
++ fput(obj->filp);
++ kfree(obj);
++ return NULL;
++ }
++ atomic_inc(&dev->object_count);
++ atomic_add(obj->size, &dev->object_memory);
++ return obj;
++}
++EXPORT_SYMBOL(drm_gem_object_alloc);
++
++/**
++ * Removes the mapping from handle to filp for this object.
++ */
++static int
++drm_gem_handle_delete(struct drm_file *filp, int handle)
++{
++ struct drm_device *dev;
++ struct drm_gem_object *obj;
++
++ /* This is gross. The idr system doesn't let us try a delete and
++ * return an error code. It just spews if you fail at deleting.
++ * So, we have to grab a lock around finding the object and then
++ * doing the delete on it and dropping the refcount, or the user
++ * could race us to double-decrement the refcount and cause a
++ * use-after-free later. Given the frequency of our handle lookups,
++ * we may want to use ida for number allocation and a hash table
++ * for the pointers, anyway.
++ */
++ spin_lock(&filp->table_lock);
++
++ /* Check if we currently have a reference on the object */
++ obj = idr_find(&filp->object_idr, handle);
++ if (obj == NULL) {
++ spin_unlock(&filp->table_lock);
++ return -EINVAL;
++ }
++ dev = obj->dev;
++
++ /* Release reference and decrement refcount. */
++ idr_remove(&filp->object_idr, handle);
++ spin_unlock(&filp->table_lock);
++
++ mutex_lock(&dev->struct_mutex);
++ drm_gem_object_handle_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++/**
++ * Create a handle for this object. This adds a handle reference
++ * to the object, which includes a regular reference count. Callers
++ * will likely want to dereference the object afterwards.
++ */
++int
++drm_gem_handle_create(struct drm_file *file_priv,
++ struct drm_gem_object *obj,
++ int *handlep)
++{
++ int ret;
++
++ /*
++ * Get the user-visible handle using idr.
++ */
++again:
++ /* ensure there is space available to allocate a handle */
++ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
++ return -ENOMEM;
++
++ /* do the allocation under our spinlock */
++ spin_lock(&file_priv->table_lock);
++ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
++ spin_unlock(&file_priv->table_lock);
++ if (ret == -EAGAIN)
++ goto again;
++
++ if (ret != 0)
++ return ret;
++
++ drm_gem_object_handle_reference(obj);
++ return 0;
++}
++EXPORT_SYMBOL(drm_gem_handle_create);
++
++/** Returns a reference to the object named by the handle. */
++struct drm_gem_object *
++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
++ int handle)
++{
++ struct drm_gem_object *obj;
++
++ spin_lock(&filp->table_lock);
++
++ /* Check if we currently have a reference on the object */
++ obj = idr_find(&filp->object_idr, handle);
++ if (obj == NULL) {
++ spin_unlock(&filp->table_lock);
++ return NULL;
++ }
++
++ drm_gem_object_reference(obj);
++
++ spin_unlock(&filp->table_lock);
++
++ return obj;
++}
++EXPORT_SYMBOL(drm_gem_object_lookup);
++
++/**
++ * Releases the handle to an mm object.
++ */
++int
++drm_gem_close_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_gem_close *args = data;
++ int ret;
++
++ if (!(dev->driver->driver_features & DRIVER_GEM))
++ return -ENODEV;
++
++ ret = drm_gem_handle_delete(file_priv, args->handle);
++
++ return ret;
++}
++
++/**
++ * Create a global name for an object, returning the name.
++ *
++ * Note that the name does not hold a reference; when the object
++ * is freed, the name goes away.
++ */
++int
++drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_gem_flink *args = data;
++ struct drm_gem_object *obj;
++ int ret;
++
++ if (!(dev->driver->driver_features & DRIVER_GEM))
++ return -ENODEV;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EINVAL;
++
++again:
++ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
++ return -ENOMEM;
++
++ spin_lock(&dev->object_name_lock);
++ if (obj->name) {
++ spin_unlock(&dev->object_name_lock);
++ return -EEXIST;
++ }
++ ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
++ &obj->name);
++ spin_unlock(&dev->object_name_lock);
++ if (ret == -EAGAIN)
++ goto again;
++
++ if (ret != 0) {
++ mutex_lock(&dev->struct_mutex);
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++ }
++
++ /*
++ * Leave the reference from the lookup around as the
++ * name table now holds one
++ */
++ args->name = (uint64_t) obj->name;
++
++ return 0;
++}
++
++/**
++ * Open an object using the global name, returning a handle and the size.
++ *
++ * This handle (of course) holds a reference to the object, so the object
++ * will not go away until the handle is deleted.
++ */
++int
++drm_gem_open_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_gem_open *args = data;
++ struct drm_gem_object *obj;
++ int ret;
++ int handle;
++
++ if (!(dev->driver->driver_features & DRIVER_GEM))
++ return -ENODEV;
++
++ spin_lock(&dev->object_name_lock);
++ obj = idr_find(&dev->object_name_idr, (int) args->name);
++ if (obj)
++ drm_gem_object_reference(obj);
++ spin_unlock(&dev->object_name_lock);
++ if (!obj)
++ return -ENOENT;
++
++ ret = drm_gem_handle_create(file_priv, obj, &handle);
++ mutex_lock(&dev->struct_mutex);
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ if (ret)
++ return ret;
++
++ args->handle = handle;
++ args->size = obj->size;
++
++ return 0;
++}
++
++/**
++ * Called at device open time, sets up the structure for handling refcounting
++ * of mm objects.
++ */
++void
++drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
++{
++ idr_init(&file_private->object_idr);
++ spin_lock_init(&file_private->table_lock);
++}
++
++/**
++ * Called at device close to release the file's
++ * handle references on objects.
++ */
++static int
++drm_gem_object_release_handle(int id, void *ptr, void *data)
++{
++ struct drm_gem_object *obj = ptr;
++
++ drm_gem_object_handle_unreference(obj);
++
++ return 0;
++}
++
++/**
++ * Called at close time when the filp is going away.
++ *
++ * Releases any remaining references on objects by this filp.
++ */
++void
++drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
++{
++ mutex_lock(&dev->struct_mutex);
++ idr_for_each(&file_private->object_idr,
++ &drm_gem_object_release_handle, NULL);
++
++ idr_destroy(&file_private->object_idr);
++ mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * Called after the last reference to the object has been lost.
++ *
++ * Frees the object
++ */
++void
++drm_gem_object_free(struct kref *kref)
++{
++ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
++ struct drm_device *dev = obj->dev;
++
++ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
++
++ if (dev->driver->gem_free_object != NULL)
++ dev->driver->gem_free_object(obj);
++
++ fput(obj->filp);
++ atomic_dec(&dev->object_count);
++ atomic_sub(obj->size, &dev->object_memory);
++ kfree(obj);
++}
++EXPORT_SYMBOL(drm_gem_object_free);
++
++/**
++ * Called after the last handle to the object has been closed
++ *
++ * Removes any name for the object. Note that this must be
++ * called before drm_gem_object_free or we'll be touching
++ * freed memory
++ */
++void
++drm_gem_object_handle_free(struct kref *kref)
++{
++ struct drm_gem_object *obj = container_of(kref,
++ struct drm_gem_object,
++ handlecount);
++ struct drm_device *dev = obj->dev;
++
++ /* Remove any name for this object */
++ spin_lock(&dev->object_name_lock);
++ if (obj->name) {
++ idr_remove(&dev->object_name_idr, obj->name);
++ spin_unlock(&dev->object_name_lock);
++ /*
++ * The object name held a reference to this object, drop
++ * that now.
++ */
++ drm_gem_object_unreference(obj);
++ } else
++ spin_unlock(&dev->object_name_lock);
++
++}
++EXPORT_SYMBOL(drm_gem_object_handle_free);
++
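Together, the close/flink/open ioctls above let two clients of the same device share an object: the owner publishes a global name with flink, and the other client turns that name into its own per-file handle with open. A hedged userspace sketch follows; the header path and the exact uapi struct layouts are assumptions based on the structures referenced above.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>    /* assumed location of the GEM ioctl definitions */

/* Returns client B's handle for client A's object, or 0 on failure. */
static uint32_t share_gem_object(int fd_a, int fd_b, uint32_t handle_a)
{
        struct drm_gem_flink flink = { .handle = handle_a };
        struct drm_gem_open open_arg = { 0 };

        /* Client A: create a global name; the name table holds a reference. */
        if (ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink) != 0)
                return 0;

        /* Client B: look the object up by name and get a private handle. */
        open_arg.name = flink.name;
        if (ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg) != 0)
                return 0;

        /* open_arg.size also reports the object's size at this point. */
        return open_arg.handle;
}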
+diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
+index 0177012..803bc9e 100644
+--- a/drivers/gpu/drm/drm_memory.c
++++ b/drivers/gpu/drm/drm_memory.c
+@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+ {
+ return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+ }
++EXPORT_SYMBOL(drm_free_agp);
+
+ /** Wrapper around agp_bind_memory() */
+ int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
+ {
+ return drm_agp_unbind_memory(handle);
+ }
++EXPORT_SYMBOL(drm_unbind_agp);
+
+ #else /* __OS_HAS_AGP */
+ static inline void *agp_remap(unsigned long offset, unsigned long size,
+diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
+index dcff9e9..217ad7d 100644
+--- a/drivers/gpu/drm/drm_mm.c
++++ b/drivers/gpu/drm/drm_mm.c
+@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+
+ return child;
+ }
++EXPORT_SYMBOL(drm_mm_get_block);
+
+ /*
+ * Put a block. Merge with the previous and / or next block if they are free.
+@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
+ drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+ }
+ }
++EXPORT_SYMBOL(drm_mm_put_block);
+
+ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+ unsigned long size,
+@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
+
+ return (head->next->next == head);
+ }
++EXPORT_SYMBOL(drm_mm_search_free);
+
+ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+ {
+@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+
+ return drm_mm_create_tail_node(mm, start, size);
+ }
+-
++EXPORT_SYMBOL(drm_mm_init);
+
+ void drm_mm_takedown(struct drm_mm * mm)
+ {
+diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
+index 93b1e04..d490db4 100644
+--- a/drivers/gpu/drm/drm_proc.c
++++ b/drivers/gpu/drm/drm_proc.c
+@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
+ static int drm_bufs_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
++static int drm_gem_name_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++static int drm_gem_object_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
+ #if DRM_DEBUG_CODE
+ static int drm_vma_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
+@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
+ static struct drm_proc_list {
+ const char *name; /**< file name */
+ int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
++ u32 driver_features; /**< Required driver features for this entry */
+ } drm_proc_list[] = {
+- {"name", drm_name_info},
+- {"mem", drm_mem_info},
+- {"vm", drm_vm_info},
+- {"clients", drm_clients_info},
+- {"queues", drm_queues_info},
+- {"bufs", drm_bufs_info},
++ {"name", drm_name_info, 0},
++ {"mem", drm_mem_info, 0},
++ {"vm", drm_vm_info, 0},
++ {"clients", drm_clients_info, 0},
++ {"queues", drm_queues_info, 0},
++ {"bufs", drm_bufs_info, 0},
++ {"gem_names", drm_gem_name_info, DRIVER_GEM},
++ {"gem_objects", drm_gem_object_info, DRIVER_GEM},
+ #if DRM_DEBUG_CODE
+ {"vma", drm_vma_info},
+ #endif
+@@ -90,8 +97,9 @@ static struct drm_proc_list {
+ int drm_proc_init(struct drm_minor *minor, int minor_id,
+ struct proc_dir_entry *root)
+ {
++ struct drm_device *dev = minor->dev;
+ struct proc_dir_entry *ent;
+- int i, j;
++ int i, j, ret;
+ char name[64];
+
+ sprintf(name, "%d", minor_id);
+@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
+ }
+
+ for (i = 0; i < DRM_PROC_ENTRIES; i++) {
++ u32 features = drm_proc_list[i].driver_features;
++
++ if (features != 0 &&
++ (dev->driver->driver_features & features) != features)
++ continue;
++
+ ent = create_proc_entry(drm_proc_list[i].name,
+ S_IFREG | S_IRUGO, minor->dev_root);
+ if (!ent) {
+ DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
+ name, drm_proc_list[i].name);
+- for (j = 0; j < i; j++)
+- remove_proc_entry(drm_proc_list[i].name,
+- minor->dev_root);
+- remove_proc_entry(name, root);
+- minor->dev_root = NULL;
+- return -1;
++ ret = -1;
++ goto fail;
+ }
+ ent->read_proc = drm_proc_list[i].f;
+ ent->data = minor;
+ }
+
++ if (dev->driver->proc_init) {
++ ret = dev->driver->proc_init(minor);
++ if (ret) {
++ DRM_ERROR("DRM: Driver failed to initialize "
++ "/proc/dri.\n");
++ goto fail;
++ }
++ }
++
+ return 0;
++ fail:
++
++ for (j = 0; j < i; j++)
++ remove_proc_entry(drm_proc_list[j].name,
++ minor->dev_root);
++ remove_proc_entry(name, root);
++ minor->dev_root = NULL;
++ return ret;
+ }
+
+ /**
+@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
+ */
+ int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+ {
++ struct drm_device *dev = minor->dev;
+ int i;
+ char name[64];
+
+ if (!root || !minor->dev_root)
+ return 0;
+
++ if (dev->driver->proc_cleanup)
++ dev->driver->proc_cleanup(minor);
++
+ for (i = 0; i < DRM_PROC_ENTRIES; i++)
+ remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
+ sprintf(name, "%d", minor->index);
+@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
+ return ret;
+ }
+
++struct drm_gem_name_info_data {
++ int len;
++ char *buf;
++ int eof;
++};
++
++static int drm_gem_one_name_info(int id, void *ptr, void *data)
++{
++ struct drm_gem_object *obj = ptr;
++ struct drm_gem_name_info_data *nid = data;
++
++ DRM_INFO("name %d size %d\n", obj->name, obj->size);
++ if (nid->eof)
++ return 0;
++
++ nid->len += sprintf(&nid->buf[nid->len],
++ "%6d%9d%8d%9d\n",
++ obj->name, obj->size,
++ atomic_read(&obj->handlecount.refcount),
++ atomic_read(&obj->refcount.refcount));
++ if (nid->len > DRM_PROC_LIMIT) {
++ nid->eof = 1;
++ return 0;
++ }
++ return 0;
++}
++
++static int drm_gem_name_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ struct drm_gem_name_info_data nid;
++
++ if (offset > DRM_PROC_LIMIT) {
++ *eof = 1;
++ return 0;
++ }
++
++ nid.len = sprintf(buf, " name size handles refcount\n");
++ nid.buf = buf;
++ nid.eof = 0;
++ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
++
++ *start = &buf[offset];
++ *eof = 0;
++ if (nid.len > request + offset)
++ return request;
++ *eof = 1;
++ return nid.len - offset;
++}
++
++static int drm_gem_object_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ int len = 0;
++
++ if (offset > DRM_PROC_LIMIT) {
++ *eof = 1;
++ return 0;
++ }
++
++ *start = &buf[offset];
++ *eof = 0;
++ DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
++ DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
++ DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
++ DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
++ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
++ DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
+ #if DRM_DEBUG_CODE
+
+ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
+diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
+index c2f584f..82f4657 100644
+--- a/drivers/gpu/drm/drm_stub.c
++++ b/drivers/gpu/drm/drm_stub.c
+@@ -152,6 +152,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+ goto error_out_unreg;
+ }
+
++ if (driver->driver_features & DRIVER_GEM) {
++ retcode = drm_gem_init(dev);
++ if (retcode) {
++ DRM_ERROR("Cannot initialize graphics execution "
++ "manager (GEM)\n");
++ goto error_out_unreg;
++ }
++ }
++
+ return 0;
+
+ error_out_unreg:
+@@ -317,6 +326,7 @@ int drm_put_dev(struct drm_device * dev)
+ int drm_put_minor(struct drm_minor **minor_p)
+ {
+ struct drm_minor *minor = *minor_p;
++
+ DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+ if (minor->type == DRM_MINOR_LEGACY)
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index c4bbda6..5ba78e4 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -4,7 +4,11 @@
+
+ ccflags-y := -Iinclude/drm
+ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
+- i915_suspend.o
++ i915_suspend.o \
++ i915_gem.o \
++ i915_gem_debug.o \
++ i915_gem_proc.o \
++ i915_gem_tiling.o
+
+ i915-$(CONFIG_COMPAT) += i915_ioc32.o
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 8609ec2..3b5aa74 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -170,24 +170,31 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+ dev_priv->sarea_priv = (drm_i915_sarea_t *)
+ ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+
+- dev_priv->ring.Start = init->ring_start;
+- dev_priv->ring.End = init->ring_end;
+- dev_priv->ring.Size = init->ring_size;
+- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
++ if (init->ring_size != 0) {
++ if (dev_priv->ring.ring_obj != NULL) {
++ i915_dma_cleanup(dev);
++ DRM_ERROR("Client tried to initialize ringbuffer in "
++ "GEM mode\n");
++ return -EINVAL;
++ }
+
+- dev_priv->ring.map.offset = init->ring_start;
+- dev_priv->ring.map.size = init->ring_size;
+- dev_priv->ring.map.type = 0;
+- dev_priv->ring.map.flags = 0;
+- dev_priv->ring.map.mtrr = 0;
++ dev_priv->ring.Size = init->ring_size;
++ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+- drm_core_ioremap(&dev_priv->ring.map, dev);
++ dev_priv->ring.map.offset = init->ring_start;
++ dev_priv->ring.map.size = init->ring_size;
++ dev_priv->ring.map.type = 0;
++ dev_priv->ring.map.flags = 0;
++ dev_priv->ring.map.mtrr = 0;
+
+- if (dev_priv->ring.map.handle == NULL) {
+- i915_dma_cleanup(dev);
+- DRM_ERROR("can not ioremap virtual address for"
+- " ring buffer\n");
+- return -ENOMEM;
++ drm_core_ioremap(&dev_priv->ring.map, dev);
++
++ if (dev_priv->ring.map.handle == NULL) {
++ i915_dma_cleanup(dev);
++ DRM_ERROR("can not ioremap virtual address for"
++ " ring buffer\n");
++ return -ENOMEM;
++ }
+ }
+
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+@@ -377,9 +384,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
+ return 0;
+ }
+
+-static int i915_emit_box(struct drm_device * dev,
+- struct drm_clip_rect __user * boxes,
+- int i, int DR1, int DR4)
++int
++i915_emit_box(struct drm_device *dev,
++ struct drm_clip_rect __user *boxes,
++ int i, int DR1, int DR4)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_clip_rect box;
+@@ -681,6 +689,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ case I915_PARAM_LAST_DISPATCH:
+ value = READ_BREADCRUMB(dev_priv);
+ break;
++ case I915_PARAM_HAS_GEM:
++ value = 1;
++ break;
+ default:
+ DRM_ERROR("Unknown parameter %d\n", param->param);
+ return -EINVAL;
+@@ -784,6 +795,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ memset(dev_priv, 0, sizeof(drm_i915_private_t));
+
+ dev->dev_private = (void *)dev_priv;
++ dev_priv->dev = dev;
+
+ /* Add register map (needed for suspend/resume) */
+ base = drm_get_resource_start(dev, mmio_bar);
+@@ -793,6 +805,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ _DRM_KERNEL | _DRM_DRIVER,
+ &dev_priv->mmio_map);
+
++ i915_gem_load(dev);
++
+ /* Init HWS */
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = i915_init_phys_hws(dev);
+@@ -838,6 +852,25 @@ int i915_driver_unload(struct drm_device *dev)
+ return 0;
+ }
+
++int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++ struct drm_i915_file_private *i915_file_priv;
++
++ DRM_DEBUG("\n");
++ i915_file_priv = (struct drm_i915_file_private *)
++ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
++
++ if (!i915_file_priv)
++ return -ENOMEM;
++
++ file_priv->driver_priv = i915_file_priv;
++
++ i915_file_priv->mm.last_gem_seqno = 0;
++ i915_file_priv->mm.last_gem_throttle_seqno = 0;
++
++ return 0;
++}
++
+ void i915_driver_lastclose(struct drm_device * dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -845,6 +878,8 @@ void i915_driver_lastclose(struct drm_device * dev)
+ if (!dev_priv)
+ return;
+
++ i915_gem_lastclose(dev);
++
+ if (dev_priv->agp_heap)
+ i915_mem_takedown(&(dev_priv->agp_heap));
+
+@@ -857,6 +892,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+ i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+ }
+
++void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++
++ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
++}
++
+ struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+@@ -875,6 +917,22 @@ struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
+ DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
++ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
++ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
++ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
++ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
++ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
++ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
++ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
+ };
+
+ int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
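Before touching any of the new DRM_I915_GEM_* ioctls registered above, userspace can ask the driver whether GEM is present at all via the I915_PARAM_HAS_GEM getparam added earlier in this file. A hedged sketch follows; the header path and the drm_i915_getparam_t layout are assumptions about the i915 uapi header.

#include <sys/ioctl.h>
#include <drm/i915_drm.h>       /* assumed header providing the i915 ioctls */

/* Returns non-zero if the running i915 reports GEM support. */
static int i915_has_gem(int fd)
{
        drm_i915_getparam_t gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_GEM;
        gp.value = &value;

        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                return 0;
        return value;
}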
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 37af03f..a80ead2 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -85,12 +85,15 @@ static struct drm_driver driver = {
+ /* don't use mtrr's here, the Xserver or user space app should
+ * deal with them for intel hardware.
+ */
+- .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++ .driver_features =
++ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ .load = i915_driver_load,
+ .unload = i915_driver_unload,
++ .open = i915_driver_open,
+ .lastclose = i915_driver_lastclose,
+ .preclose = i915_driver_preclose,
++ .postclose = i915_driver_postclose,
+ .suspend = i915_suspend,
+ .resume = i915_resume,
+ .device_is_agp = i915_driver_device_is_agp,
+@@ -104,6 +107,10 @@ static struct drm_driver driver = {
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
++ .proc_init = i915_gem_proc_init,
++ .proc_cleanup = i915_gem_proc_cleanup,
++ .gem_init_object = i915_gem_init_object,
++ .gem_free_object = i915_gem_free_object,
+ .ioctls = i915_ioctls,
+ .fops = {
+ .owner = THIS_MODULE,
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index d1a02be..87b071a 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -39,7 +39,7 @@
+
+ #define DRIVER_NAME "i915"
+ #define DRIVER_DESC "Intel Graphics"
+-#define DRIVER_DATE "20060119"
++#define DRIVER_DATE "20080730"
+
+ enum pipe {
+ PIPE_A = 0,
+@@ -60,16 +60,23 @@ enum pipe {
+ #define DRIVER_MINOR 6
+ #define DRIVER_PATCHLEVEL 0
+
++#define WATCH_COHERENCY 0
++#define WATCH_BUF 0
++#define WATCH_EXEC 0
++#define WATCH_LRU 0
++#define WATCH_RELOC 0
++#define WATCH_INACTIVE 0
++#define WATCH_PWRITE 0
++
+ typedef struct _drm_i915_ring_buffer {
+ int tail_mask;
+- unsigned long Start;
+- unsigned long End;
+ unsigned long Size;
+ u8 *virtual_start;
+ int head;
+ int tail;
+ int space;
+ drm_local_map_t map;
++ struct drm_gem_object *ring_obj;
+ } drm_i915_ring_buffer_t;
+
+ struct mem_block {
+@@ -101,6 +108,8 @@ struct intel_opregion {
+ };
+
+ typedef struct drm_i915_private {
++ struct drm_device *dev;
++
+ drm_local_map_t *sarea;
+ drm_local_map_t *mmio_map;
+
+@@ -113,6 +122,7 @@ typedef struct drm_i915_private {
+ uint32_t counter;
+ unsigned int status_gfx_addr;
+ drm_local_map_t hws_map;
++ struct drm_gem_object *hws_obj;
+
+ unsigned int cpp;
+ int back_offset;
+@@ -122,7 +132,6 @@ typedef struct drm_i915_private {
+
+ wait_queue_head_t irq_queue;
+ atomic_t irq_received;
+- atomic_t irq_emitted;
+ /** Protects user_irq_refcount and irq_mask_reg */
+ spinlock_t user_irq_lock;
+ /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
+@@ -230,8 +239,174 @@ typedef struct drm_i915_private {
+ u8 saveDACMASK;
+ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
+ u8 saveCR[37];
++
++ struct {
++ struct drm_mm gtt_space;
++
++ /**
++ * List of objects currently involved in rendering from the
++ * ringbuffer.
++ *
++ * A reference is held on the buffer while on this list.
++ */
++ struct list_head active_list;
++
++ /**
++ * List of objects which are not in the ringbuffer but which
++ * still have a write_domain which needs to be flushed before
++ * unbinding.
++ *
++ * A reference is held on the buffer while on this list.
++ */
++ struct list_head flushing_list;
++
++ /**
++ * LRU list of objects which are not in the ringbuffer and
++ * are ready to unbind, but are still in the GTT.
++ *
++ * A reference is not held on the buffer while on this list,
++ * as merely being GTT-bound shouldn't prevent its being
++ * freed, and we'll pull it off the list in the free path.
++ */
++ struct list_head inactive_list;
++
++ /**
++ * List of breadcrumbs associated with GPU requests currently
++ * outstanding.
++ */
++ struct list_head request_list;
++
++ /**
++ * We leave the user IRQ off as much as possible,
++ * but this means that requests will finish and never
++ * be retired once the system goes idle. Set a timer to
++ * fire periodically while the ring is running. When it
++ * fires, go retire requests.
++ */
++ struct delayed_work retire_work;
++
++ uint32_t next_gem_seqno;
++
++ /**
++ * Waiting sequence number, if any
++ */
++ uint32_t waiting_gem_seqno;
++
++ /**
++ * Last seq seen at irq time
++ */
++ uint32_t irq_gem_seqno;
++
++ /**
++ * Flag if the X Server, and thus DRM, is not currently in
++ * control of the device.
++ *
++ * This is set between LeaveVT and EnterVT. It needs to be
++ * replaced with a semaphore. It also needs to be
++ * transitioned away from for kernel modesetting.
++ */
++ int suspended;
++
++ /**
++ * Flag if the hardware appears to be wedged.
++ *
++ * This is set when attempts to idle the device time out.
++ * It prevents command submission from occurring and makes
++ * every pending request fail.
++ */
++ int wedged;
++
++ /** Bit 6 swizzling required for X tiling */
++ uint32_t bit_6_swizzle_x;
++ /** Bit 6 swizzling required for Y tiling */
++ uint32_t bit_6_swizzle_y;
++ } mm;
+ } drm_i915_private_t;
+
++/** driver private structure attached to each drm_gem_object */
++struct drm_i915_gem_object {
++ struct drm_gem_object *obj;
++
++ /** Current space allocated to this object in the GTT, if any. */
++ struct drm_mm_node *gtt_space;
++
++ /** This object's place on the active/flushing/inactive lists */
++ struct list_head list;
++
++ /**
++ * This is set if the object is on the active or flushing lists
++ * (has pending rendering), and is not set if it's on inactive (ready
++ * to be unbound).
++ */
++ int active;
++
++ /**
++ * This is set if the object has been written to since last bound
++ * to the GTT
++ */
++ int dirty;
++
++ /** AGP memory structure for our GTT binding. */
++ DRM_AGP_MEM *agp_mem;
++
++ struct page **page_list;
++
++ /**
++ * Current offset of the object in GTT space.
++ *
++ * This is the same as gtt_space->start
++ */
++ uint32_t gtt_offset;
++
++ /** Boolean whether this object has a valid gtt offset. */
++ int gtt_bound;
++
++ /** How many users have pinned this object in GTT space */
++ int pin_count;
++
++ /** Breadcrumb of last rendering to the buffer. */
++ uint32_t last_rendering_seqno;
++
++ /** Current tiling mode for the object. */
++ uint32_t tiling_mode;
++
++ /**
++ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
++ * GEM_DOMAIN_CPU is not in the object's read domain.
++ */
++ uint8_t *page_cpu_valid;
++};
++
++/**
++ * Request queue structure.
++ *
++ * The request queue allows us to note sequence numbers that have been emitted
++ * and may be associated with active buffers to be retired.
++ *
++ * By keeping this list, we can avoid having to do questionable
++ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
++ * an emission time with seqnos for tracking how far ahead of the GPU we are.
++ */
++struct drm_i915_gem_request {
++ /** GEM sequence number associated with this request. */
++ uint32_t seqno;
++
++ /** Time at which this request was emitted, in jiffies. */
++ unsigned long emitted_jiffies;
++
++ /** Cache domains that were flushed at the start of the request. */
++ uint32_t flush_domains;
++
++ struct list_head list;
++};
++
++struct drm_i915_file_private {
++ struct {
++ uint32_t last_gem_seqno;
++ uint32_t last_gem_throttle_seqno;
++ } mm;
++};
++
+ extern struct drm_ioctl_desc i915_ioctls[];
+ extern int i915_max_ioctl;
+
+@@ -239,18 +414,26 @@ extern int i915_max_ioctl;
+ extern void i915_kernel_lost_context(struct drm_device * dev);
+ extern int i915_driver_load(struct drm_device *, unsigned long flags);
+ extern int i915_driver_unload(struct drm_device *);
++extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+ extern void i915_driver_lastclose(struct drm_device * dev);
+ extern void i915_driver_preclose(struct drm_device *dev,
+ struct drm_file *file_priv);
++extern void i915_driver_postclose(struct drm_device *dev,
++ struct drm_file *file_priv);
+ extern int i915_driver_device_is_agp(struct drm_device * dev);
+ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
++extern int i915_emit_box(struct drm_device *dev,
++ struct drm_clip_rect __user *boxes,
++ int i, int DR1, int DR4);
+
+ /* i915_irq.c */
+ extern int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
++void i915_user_irq_get(struct drm_device *dev);
++void i915_user_irq_put(struct drm_device *dev);
+
+ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
+ extern void i915_driver_irq_preinstall(struct drm_device * dev);
+@@ -279,6 +462,67 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+ extern void i915_mem_takedown(struct mem_block **heap);
+ extern void i915_mem_release(struct drm_device * dev,
+ struct drm_file *file_priv, struct mem_block *heap);
++/* i915_gem.c */
++int i915_gem_init_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_execbuffer(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_set_tiling(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int i915_gem_get_tiling(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++void i915_gem_load(struct drm_device *dev);
++int i915_gem_proc_init(struct drm_minor *minor);
++void i915_gem_proc_cleanup(struct drm_minor *minor);
++int i915_gem_init_object(struct drm_gem_object *obj);
++void i915_gem_free_object(struct drm_gem_object *obj);
++int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
++void i915_gem_object_unpin(struct drm_gem_object *obj);
++void i915_gem_lastclose(struct drm_device *dev);
++uint32_t i915_get_gem_seqno(struct drm_device *dev);
++void i915_gem_retire_requests(struct drm_device *dev);
++void i915_gem_retire_work_handler(struct work_struct *work);
++void i915_gem_clflush_object(struct drm_gem_object *obj);
++
++/* i915_gem_tiling.c */
++void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
++
++/* i915_gem_debug.c */
++void i915_gem_dump_object(struct drm_gem_object *obj, int len,
++ const char *where, uint32_t mark);
++#if WATCH_INACTIVE
++void i915_verify_inactive(struct drm_device *dev, char *file, int line);
++#else
++#define i915_verify_inactive(dev, file, line)
++#endif
++void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
++void i915_gem_dump_object(struct drm_gem_object *obj, int len,
++ const char *where, uint32_t mark);
++void i915_dump_lru(struct drm_device *dev, const char *where);
+
+ /* i915_suspend.c */
+ extern int i915_save_state(struct drm_device *dev);
+@@ -347,6 +591,7 @@ extern void opregion_enable_asle(struct drm_device *dev);
+ */
+ #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+ #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
++#define I915_GEM_HWS_INDEX 0x10
+
+ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+new file mode 100644
+index 0000000..90ae8a0
+--- /dev/null
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -0,0 +1,2497 @@
++/*
++ * Copyright © 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++#include <linux/swap.h>
++
++static int
++i915_gem_object_set_domain(struct drm_gem_object *obj,
++ uint32_t read_domains,
++ uint32_t write_domain);
++static int
++i915_gem_object_set_domain_range(struct drm_gem_object *obj,
++ uint64_t offset,
++ uint64_t size,
++ uint32_t read_domains,
++ uint32_t write_domain);
++static int
++i915_gem_set_domain(struct drm_gem_object *obj,
++ struct drm_file *file_priv,
++ uint32_t read_domains,
++ uint32_t write_domain);
++static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
++static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
++static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
++
++int
++i915_gem_init_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_init *args = data;
++
++ mutex_lock(&dev->struct_mutex);
++
++ if (args->gtt_start >= args->gtt_end ||
++ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
++ (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
++ }
++
++ drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
++ args->gtt_end - args->gtt_start);
++
++ dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++
++/**
++ * Creates a new mm object and returns a handle to it.
++ */
++int
++i915_gem_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_create *args = data;
++ struct drm_gem_object *obj;
++ int handle, ret;
++
++ args->size = roundup(args->size, PAGE_SIZE);
++
++ /* Allocate the new object */
++ obj = drm_gem_object_alloc(dev, args->size);
++ if (obj == NULL)
++ return -ENOMEM;
++
++ ret = drm_gem_handle_create(file_priv, obj, &handle);
++ mutex_lock(&dev->struct_mutex);
++ drm_gem_object_handle_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ if (ret)
++ return ret;
++
++ args->handle = handle;
++
++ return 0;
++}
++
++/**
++ * Reads data from the object referenced by handle.
++ *
++ * On error, the contents of *data are undefined.
++ */
++int
++i915_gem_pread_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_pread *args = data;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ ssize_t read;
++ loff_t offset;
++ int ret;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EBADF;
++ obj_priv = obj->driver_private;
++
++ /* Bounds check source.
++ *
++ * XXX: This could use review for overflow issues...
++ */
++ if (args->offset > obj->size || args->size > obj->size ||
++ args->offset + args->size > obj->size) {
++ drm_gem_object_unreference(obj);
++ return -EINVAL;
++ }
++
++ mutex_lock(&dev->struct_mutex);
++
++ ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
++ I915_GEM_DOMAIN_CPU, 0);
++ if (ret != 0) {
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++ }
++
++ offset = args->offset;
++
++ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
++ args->size, &offset);
++ if (read != args->size) {
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ if (read < 0)
++ return read;
++ else
++ return -EINVAL;
++ }
++
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++static int
++i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
++ struct drm_i915_gem_pwrite *args,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ ssize_t remain;
++ loff_t offset;
++ char __user *user_data;
++ char *vaddr;
++ int i, o, l;
++ int ret = 0;
++ unsigned long pfn;
++ unsigned long unwritten;
++
++ user_data = (char __user *) (uintptr_t) args->data_ptr;
++ remain = args->size;
++ if (!access_ok(VERIFY_READ, user_data, remain))
++ return -EFAULT;
++
++
++ mutex_lock(&dev->struct_mutex);
++ ret = i915_gem_object_pin(obj, 0);
++ if (ret) {
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++ }
++ ret = i915_gem_set_domain(obj, file_priv,
++ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
++ if (ret)
++ goto fail;
++
++ obj_priv = obj->driver_private;
++ offset = obj_priv->gtt_offset + args->offset;
++ obj_priv->dirty = 1;
++
++ while (remain > 0) {
++ /* Operation in this page
++ *
++ * i = page number
++ * o = offset within page
++ * l = bytes to copy
++ */
++ i = offset >> PAGE_SHIFT;
++ o = offset & (PAGE_SIZE-1);
++ l = remain;
++ if ((o + l) > PAGE_SIZE)
++ l = PAGE_SIZE - o;
++
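++ /* Page frame of the write destination within the GTT aperture at dev->agp->base. */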
++ pfn = (dev->agp->base >> PAGE_SHIFT) + i;
++
++#ifdef CONFIG_HIGHMEM
++ /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
++ */
++ vaddr = kmap_atomic_pfn(pfn, KM_USER0);
++#if WATCH_PWRITE
++ DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
++ i, o, l, pfn, vaddr);
++#endif
++ unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
++ user_data, l);
++ kunmap_atomic(vaddr, KM_USER0);
++
++ if (unwritten)
++#endif /* CONFIG_HIGHMEM */
++ {
++ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++#if WATCH_PWRITE
++ DRM_INFO("pwrite slow i %d o %d l %d "
++ "pfn %ld vaddr %p\n",
++ i, o, l, pfn, vaddr);
++#endif
++ if (vaddr == NULL) {
++ ret = -EFAULT;
++ goto fail;
++ }
++ unwritten = __copy_from_user(vaddr + o, user_data, l);
++#if WATCH_PWRITE
++ DRM_INFO("unwritten %ld\n", unwritten);
++#endif
++ iounmap(vaddr);
++ if (unwritten) {
++ ret = -EFAULT;
++ goto fail;
++ }
++ }
++
++ remain -= l;
++ user_data += l;
++ offset += l;
++ }
++#if WATCH_PWRITE && 1
++ i915_gem_clflush_object(obj);
++ i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
++ i915_gem_clflush_object(obj);
++#endif
++
++fail:
++ i915_gem_object_unpin(obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ return ret;
++}
++
++int
++i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
++ struct drm_i915_gem_pwrite *args,
++ struct drm_file *file_priv)
++{
++ int ret;
++ loff_t offset;
++ ssize_t written;
++
++ mutex_lock(&dev->struct_mutex);
++
++ ret = i915_gem_set_domain(obj, file_priv,
++ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
++ if (ret) {
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++ }
++
++ offset = args->offset;
++
++ written = vfs_write(obj->filp,
++ (char __user *)(uintptr_t) args->data_ptr,
++ args->size, &offset);
++ if (written != args->size) {
++ mutex_unlock(&dev->struct_mutex);
++ if (written < 0)
++ return written;
++ else
++ return -EINVAL;
++ }
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++/**
++ * Writes data to the object referenced by handle.
++ *
++ * On error, the contents of the buffer that were to be modified are undefined.
++ */
++int
++i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_pwrite *args = data;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ int ret = 0;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EBADF;
++ obj_priv = obj->driver_private;
++
++ /* Bounds check destination.
++ *
++ * XXX: This could use review for overflow issues...
++ */
++ if (args->offset > obj->size || args->size > obj->size ||
++ args->offset + args->size > obj->size) {
++ drm_gem_object_unreference(obj);
++ return -EINVAL;
++ }
++
++ /* We can only do the GTT pwrite on untiled buffers, as otherwise
++ * it would end up going through the fenced access, and we'll get
++ * different detiling behavior between reading and writing.
++ * pread/pwrite currently are reading and writing from the CPU
++ * perspective, requiring manual detiling by the client.
++ */
++ if (obj_priv->tiling_mode == I915_TILING_NONE &&
++ dev->gtt_total != 0)
++ ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
++ else
++ ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
++
++#if WATCH_PWRITE
++ if (ret)
++ DRM_INFO("pwrite failed %d\n", ret);
++#endif
++
++ drm_gem_object_unreference(obj);
++
++ return ret;
++}
++
++/**
++ * Called when user space prepares to use an object
++ */
++int
++i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_set_domain *args = data;
++ struct drm_gem_object *obj;
++ int ret;
++
++ if (!(dev->driver->driver_features & DRIVER_GEM))
++ return -ENODEV;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EBADF;
++
++ mutex_lock(&dev->struct_mutex);
++#if WATCH_BUF
++ DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
++ obj, obj->size, args->read_domains, args->write_domain);
++#endif
++ ret = i915_gem_set_domain(obj, file_priv,
++ args->read_domains, args->write_domain);
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++
++/**
++ * Called when user space has done writes to this buffer
++ */
++int
++i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_sw_finish *args = data;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ int ret = 0;
++
++ if (!(dev->driver->driver_features & DRIVER_GEM))
++ return -ENODEV;
++
++ mutex_lock(&dev->struct_mutex);
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EBADF;
++ }
++
++#if WATCH_BUF
++ DRM_INFO("%s: sw_finish %d (%p %d)\n",
++ __func__, args->handle, obj, obj->size);
++#endif
++ obj_priv = obj->driver_private;
++
++ /* Pinned buffers may be scanout, so flush the cache */
++ if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
++ i915_gem_clflush_object(obj);
++ drm_agp_chipset_flush(dev);
++ }
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++
++/**
++ * Maps the contents of an object, returning the address it is mapped
++ * into.
++ *
++ * While the mapping holds a reference on the contents of the object, it doesn't
++ * imply a ref on the object itself.
++ */
++int
++i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_mmap *args = data;
++ struct drm_gem_object *obj;
++ loff_t offset;
++ unsigned long addr;
++
++ if (!(dev->driver->driver_features & DRIVER_GEM))
++ return -ENODEV;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EBADF;
++
++ offset = args->offset;
++
++ down_write(&current->mm->mmap_sem);
++ addr = do_mmap(obj->filp, 0, args->size,
++ PROT_READ | PROT_WRITE, MAP_SHARED,
++ args->offset);
++ up_write(&current->mm->mmap_sem);
++ mutex_lock(&dev->struct_mutex);
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ if (IS_ERR((void *)addr))
++ return addr;
++
++ args->addr_ptr = (uint64_t) addr;
++
++ return 0;
++}
++
++static void
++i915_gem_object_free_page_list(struct drm_gem_object *obj)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int page_count = obj->size / PAGE_SIZE;
++ int i;
++
++ if (obj_priv->page_list == NULL)
++ return;
++
++
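++ /* Mark dirty pages so the shmem backing store writes them back before we drop our references. */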
++ for (i = 0; i < page_count; i++)
++ if (obj_priv->page_list[i] != NULL) {
++ if (obj_priv->dirty)
++ set_page_dirty(obj_priv->page_list[i]);
++ mark_page_accessed(obj_priv->page_list[i]);
++ page_cache_release(obj_priv->page_list[i]);
++ }
++ obj_priv->dirty = 0;
++
++ drm_free(obj_priv->page_list,
++ page_count * sizeof(struct page *),
++ DRM_MEM_DRIVER);
++ obj_priv->page_list = NULL;
++}
++
++static void
++i915_gem_object_move_to_active(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ /* Add a reference if we're newly entering the active list. */
++ if (!obj_priv->active) {
++ drm_gem_object_reference(obj);
++ obj_priv->active = 1;
++ }
++ /* Move from whatever list we were on to the tail of execution. */
++ list_move_tail(&obj_priv->list,
++ &dev_priv->mm.active_list);
++}
++
++
++static void
++i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
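++ /* Pinned objects are kept off the LRU lists; everything else moves to the inactive tail. */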
++ if (obj_priv->pin_count != 0)
++ list_del_init(&obj_priv->list);
++ else
++ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++
++ if (obj_priv->active) {
++ obj_priv->active = 0;
++ drm_gem_object_unreference(obj);
++ }
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++}
++
++/**
++ * Creates a new sequence number, emitting a write of it to the status page
++ * plus an interrupt, which will trigger i915_user_interrupt_handler.
++ *
++ * Must be called with struct_lock held.
++ *
++ * Returned sequence numbers are nonzero on success.
++ */
++static uint32_t
++i915_add_request(struct drm_device *dev, uint32_t flush_domains)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_request *request;
++ uint32_t seqno;
++ int was_empty;
++ RING_LOCALS;
++
++ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
++ if (request == NULL)
++ return 0;
++
++ /* Grab the seqno we're going to make this request be, and bump the
++ * next (skipping 0 so it can be the reserved no-seqno value).
++ */
++ seqno = dev_priv->mm.next_gem_seqno;
++ dev_priv->mm.next_gem_seqno++;
++ if (dev_priv->mm.next_gem_seqno == 0)
++ dev_priv->mm.next_gem_seqno++;
++
++ BEGIN_LP_RING(4);
++ OUT_RING(MI_STORE_DWORD_INDEX);
++ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
++ OUT_RING(seqno);
++
++ OUT_RING(MI_USER_INTERRUPT);
++ ADVANCE_LP_RING();
++
++ DRM_DEBUG("%d\n", seqno);
++
++ request->seqno = seqno;
++ request->emitted_jiffies = jiffies;
++ request->flush_domains = flush_domains;
++ was_empty = list_empty(&dev_priv->mm.request_list);
++ list_add_tail(&request->list, &dev_priv->mm.request_list);
++
++ if (was_empty)
++ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
++ return seqno;
++}
++
++/**
++ * Command execution barrier
++ *
++ * Ensures that all commands in the ring are finished
++ * before signalling the CPU
++ */
++uint32_t
++i915_retire_commands(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
++ uint32_t flush_domains = 0;
++ RING_LOCALS;
++
++ /* The sampler always gets flushed on i965 (sigh) */
++ if (IS_I965G(dev))
++ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
++ BEGIN_LP_RING(2);
++ OUT_RING(cmd);
++ OUT_RING(0); /* noop */
++ ADVANCE_LP_RING();
++ return flush_domains;
++}
++
++/**
++ * Moves buffers associated only with the given active seqno from the active
++ * to inactive list, potentially freeing them.
++ */
++static void
++i915_gem_retire_request(struct drm_device *dev,
++ struct drm_i915_gem_request *request)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ /* Move any buffers on the active list that are no longer referenced
++ * by the ringbuffer to the flushing/inactive lists as appropriate.
++ */
++ while (!list_empty(&dev_priv->mm.active_list)) {
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++
++ obj_priv = list_first_entry(&dev_priv->mm.active_list,
++ struct drm_i915_gem_object,
++ list);
++ obj = obj_priv->obj;
++
++ /* If the seqno being retired doesn't match the oldest in the
++ * list, then the oldest in the list must still be newer than
++ * this seqno.
++ */
++ if (obj_priv->last_rendering_seqno != request->seqno)
++ return;
++#if WATCH_LRU
++ DRM_INFO("%s: retire %d moves to inactive list %p\n",
++ __func__, request->seqno, obj);
++#endif
++
++ if (obj->write_domain != 0) {
++ list_move_tail(&obj_priv->list,
++ &dev_priv->mm.flushing_list);
++ } else {
++ i915_gem_object_move_to_inactive(obj);
++ }
++ }
++
++ if (request->flush_domains != 0) {
++ struct drm_i915_gem_object *obj_priv, *next;
++
++ /* Clear the write domain and activity from any buffers
++ * that are just waiting for a flush matching the one retired.
++ */
++ list_for_each_entry_safe(obj_priv, next,
++ &dev_priv->mm.flushing_list, list) {
++ struct drm_gem_object *obj = obj_priv->obj;
++
++ if (obj->write_domain & request->flush_domains) {
++ obj->write_domain = 0;
++ i915_gem_object_move_to_inactive(obj);
++ }
++ }
++
++ }
++}
++
++/**
++ * Returns true if seq1 is later than seq2.
++ */
++static int
++i915_seqno_passed(uint32_t seq1, uint32_t seq2)
++{
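++ /* Signed subtraction keeps this comparison correct across 32-bit seqno wraparound. */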
++ return (int32_t)(seq1 - seq2) >= 0;
++}
++
++uint32_t
++i915_get_gem_seqno(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
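++ /* The GPU writes each completed seqno to the hardware status page via MI_STORE_DWORD_INDEX. */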
++ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
++}
++
++/**
++ * This function clears the request list as sequence numbers are passed.
++ */
++void
++i915_gem_retire_requests(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ uint32_t seqno;
++
++ seqno = i915_get_gem_seqno(dev);
++
++ while (!list_empty(&dev_priv->mm.request_list)) {
++ struct drm_i915_gem_request *request;
++ uint32_t retiring_seqno;
++
++ request = list_first_entry(&dev_priv->mm.request_list,
++ struct drm_i915_gem_request,
++ list);
++ retiring_seqno = request->seqno;
++
++ if (i915_seqno_passed(seqno, retiring_seqno) ||
++ dev_priv->mm.wedged) {
++ i915_gem_retire_request(dev, request);
++
++ list_del(&request->list);
++ drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
++ } else
++ break;
++ }
++}
++
++void
++i915_gem_retire_work_handler(struct work_struct *work)
++{
++ drm_i915_private_t *dev_priv;
++ struct drm_device *dev;
++
++ dev_priv = container_of(work, drm_i915_private_t,
++ mm.retire_work.work);
++ dev = dev_priv->dev;
++
++ mutex_lock(&dev->struct_mutex);
++ i915_gem_retire_requests(dev);
++ if (!list_empty(&dev_priv->mm.request_list))
++ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
++ mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * Waits for a sequence number to be signaled, and cleans up the
++ * request and object lists appropriately for that event.
++ */
++int
++i915_wait_request(struct drm_device *dev, uint32_t seqno)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ int ret = 0;
++
++ BUG_ON(seqno == 0);
++
++ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
++ dev_priv->mm.waiting_gem_seqno = seqno;
++ i915_user_irq_get(dev);
++ ret = wait_event_interruptible(dev_priv->irq_queue,
++ i915_seqno_passed(i915_get_gem_seqno(dev),
++ seqno) ||
++ dev_priv->mm.wedged);
++ i915_user_irq_put(dev);
++ dev_priv->mm.waiting_gem_seqno = 0;
++ }
++ if (dev_priv->mm.wedged)
++ ret = -EIO;
++
++ if (ret && ret != -ERESTARTSYS)
++ DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
++ __func__, ret, seqno, i915_get_gem_seqno(dev));
++
++ /* Directly dispatch request retiring. While we have the work queue
++ * to handle this, the waiter on a request often wants an associated
++ * buffer to have made it to the inactive list, and we would need
++ * a separate wait queue to handle that.
++ */
++ if (ret == 0)
++ i915_gem_retire_requests(dev);
++
++ return ret;
++}
++
++static void
++i915_gem_flush(struct drm_device *dev,
++ uint32_t invalidate_domains,
++ uint32_t flush_domains)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ uint32_t cmd;
++ RING_LOCALS;
++
++#if WATCH_EXEC
++ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
++ invalidate_domains, flush_domains);
++#endif
++
++ if (flush_domains & I915_GEM_DOMAIN_CPU)
++ drm_agp_chipset_flush(dev);
++
++ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
++ I915_GEM_DOMAIN_GTT)) {
++ /*
++ * read/write caches:
++ *
++ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
++ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
++ * also flushed at 2d versus 3d pipeline switches.
++ *
++ * read-only caches:
++ *
++ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
++ * MI_READ_FLUSH is set, and is always flushed on 965.
++ *
++ * I915_GEM_DOMAIN_COMMAND may not exist?
++ *
++ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
++ * invalidated when MI_EXE_FLUSH is set.
++ *
++ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
++ * invalidated with every MI_FLUSH.
++ *
++ * TLBs:
++ *
++ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
++ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
++ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
++ * are flushed at any MI_FLUSH.
++ */
++
++ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
++ if ((invalidate_domains|flush_domains) &
++ I915_GEM_DOMAIN_RENDER)
++ cmd &= ~MI_NO_WRITE_FLUSH;
++ if (!IS_I965G(dev)) {
++ /*
++ * On the 965, the sampler cache always gets flushed
++ * and this bit is reserved.
++ */
++ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
++ cmd |= MI_READ_FLUSH;
++ }
++ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
++ cmd |= MI_EXE_FLUSH;
++
++#if WATCH_EXEC
++ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
++#endif
++ BEGIN_LP_RING(2);
++ OUT_RING(cmd);
++ OUT_RING(0); /* noop */
++ ADVANCE_LP_RING();
++ }
++}
++
++/**
++ * Ensures that all rendering to the object has completed and the object is
++ * safe to unbind from the GTT or access from the CPU.
++ */
++static int
++i915_gem_object_wait_rendering(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int ret;
++
++ /* If there are writes queued to the buffer, flush and
++ * create a new seqno to wait for.
++ */
++ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
++ uint32_t write_domain = obj->write_domain;
++#if WATCH_BUF
++ DRM_INFO("%s: flushing object %p from write domain %08x\n",
++ __func__, obj, write_domain);
++#endif
++ i915_gem_flush(dev, 0, write_domain);
++
++ i915_gem_object_move_to_active(obj);
++ obj_priv->last_rendering_seqno = i915_add_request(dev,
++ write_domain);
++ BUG_ON(obj_priv->last_rendering_seqno == 0);
++#if WATCH_LRU
++ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
++#endif
++ }
++
++ /* If there is rendering queued on the buffer being evicted, wait for
++ * it.
++ */
++ if (obj_priv->active) {
++#if WATCH_BUF
++ DRM_INFO("%s: object %p wait for seqno %08x\n",
++ __func__, obj, obj_priv->last_rendering_seqno);
++#endif
++ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
++ if (ret != 0)
++ return ret;
++ }
++
++ return 0;
++}
++
++/**
++ * Unbinds an object from the GTT aperture.
++ */
++static int
++i915_gem_object_unbind(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int ret = 0;
++
++#if WATCH_BUF
++ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
++ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
++#endif
++ if (obj_priv->gtt_space == NULL)
++ return 0;
++
++ if (obj_priv->pin_count != 0) {
++ DRM_ERROR("Attempting to unbind pinned buffer\n");
++ return -EINVAL;
++ }
++
++ /* Wait for any rendering to complete
++ */
++ ret = i915_gem_object_wait_rendering(obj);
++ if (ret) {
++ DRM_ERROR("wait_rendering failed: %d\n", ret);
++ return ret;
++ }
++
++ /* Move the object to the CPU domain to ensure that
++ * any possible CPU writes while it's not in the GTT
++ * are flushed when we go to remap it. This will
++ * also ensure that all pending GPU writes are finished
++ * before we unbind.
++ */
++ ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
++ I915_GEM_DOMAIN_CPU);
++ if (ret) {
++ DRM_ERROR("set_domain failed: %d\n", ret);
++ return ret;
++ }
++
++ if (obj_priv->agp_mem != NULL) {
++ drm_unbind_agp(obj_priv->agp_mem);
++ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
++ obj_priv->agp_mem = NULL;
++ }
++
++ BUG_ON(obj_priv->active);
++
++ i915_gem_object_free_page_list(obj);
++
++ if (obj_priv->gtt_space) {
++ atomic_dec(&dev->gtt_count);
++ atomic_sub(obj->size, &dev->gtt_memory);
++
++ drm_mm_put_block(obj_priv->gtt_space);
++ obj_priv->gtt_space = NULL;
++ }
++
++ /* Remove ourselves from the LRU list if present. */
++ if (!list_empty(&obj_priv->list))
++ list_del_init(&obj_priv->list);
++
++ return 0;
++}
++
++static int
++i915_gem_evict_something(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ int ret = 0;
++
++ for (;;) {
++ /* If there's an inactive buffer available now, grab it
++ * and be done.
++ */
++ if (!list_empty(&dev_priv->mm.inactive_list)) {
++ obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
++ struct drm_i915_gem_object,
++ list);
++ obj = obj_priv->obj;
++ BUG_ON(obj_priv->pin_count != 0);
++#if WATCH_LRU
++ DRM_INFO("%s: evicting %p\n", __func__, obj);
++#endif
++ BUG_ON(obj_priv->active);
++
++ /* Wait on the rendering and unbind the buffer. */
++ ret = i915_gem_object_unbind(obj);
++ break;
++ }
++
++ /* If we didn't get anything, but the ring is still processing
++ * things, wait for one of those things to finish and hopefully
++ * leave us a buffer to evict.
++ */
++ if (!list_empty(&dev_priv->mm.request_list)) {
++ struct drm_i915_gem_request *request;
++
++ request = list_first_entry(&dev_priv->mm.request_list,
++ struct drm_i915_gem_request,
++ list);
++
++ ret = i915_wait_request(dev, request->seqno);
++ if (ret)
++ break;
++
++ /* if waiting caused an object to become inactive,
++ * then loop around and wait for it. Otherwise, we
++ * assume that waiting freed and unbound something,
++ * so there should now be some space in the GTT
++ */
++ if (!list_empty(&dev_priv->mm.inactive_list))
++ continue;
++ break;
++ }
++
++ /* If we didn't have anything on the request list but there
++ * are buffers awaiting a flush, emit one and try again.
++ * When we wait on it, those buffers waiting for that flush
++ * will get moved to inactive.
++ */
++ if (!list_empty(&dev_priv->mm.flushing_list)) {
++ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
++ struct drm_i915_gem_object,
++ list);
++ obj = obj_priv->obj;
++
++ i915_gem_flush(dev,
++ obj->write_domain,
++ obj->write_domain);
++ i915_add_request(dev, obj->write_domain);
++
++ obj = NULL;
++ continue;
++ }
++
++ DRM_ERROR("inactive empty %d request empty %d "
++ "flushing empty %d\n",
++ list_empty(&dev_priv->mm.inactive_list),
++ list_empty(&dev_priv->mm.request_list),
++ list_empty(&dev_priv->mm.flushing_list));
++ /* If we didn't do any of the above, there's nothing to be done
++ * and we just can't fit it in.
++ */
++ return -ENOMEM;
++ }
++ return ret;
++}
++
++static int
++i915_gem_object_get_page_list(struct drm_gem_object *obj)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int page_count, i;
++ struct address_space *mapping;
++ struct inode *inode;
++ struct page *page;
++ int ret;
++
++ if (obj_priv->page_list)
++ return 0;
++
++ /* Get the list of pages out of our struct file. They'll be pinned
++ * at this point until we release them.
++ */
++ page_count = obj->size / PAGE_SIZE;
++ BUG_ON(obj_priv->page_list != NULL);
++ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
++ DRM_MEM_DRIVER);
++ if (obj_priv->page_list == NULL) {
++ DRM_ERROR("Faled to allocate page list\n");
++ return -ENOMEM;
++ }
++
++ inode = obj->filp->f_path.dentry->d_inode;
++ mapping = inode->i_mapping;
++ for (i = 0; i < page_count; i++) {
++ page = read_mapping_page(mapping, i, NULL);
++ if (IS_ERR(page)) {
++ ret = PTR_ERR(page);
++ DRM_ERROR("read_mapping_page failed: %d\n", ret);
++ i915_gem_object_free_page_list(obj);
++ return ret;
++ }
++ obj_priv->page_list[i] = page;
++ }
++ return 0;
++}
++
++/**
++ * Finds free space in the GTT aperture and binds the object there.
++ */
++static int
++i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
++{
++ struct drm_device *dev = obj->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ struct drm_mm_node *free_space;
++ int page_count, ret;
++
++ if (alignment == 0)
++ alignment = PAGE_SIZE;
++ if (alignment & (PAGE_SIZE - 1)) {
++ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
++ return -EINVAL;
++ }
++
++ search_free:
++ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
++ obj->size, alignment, 0);
++ if (free_space != NULL) {
++ obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
++ alignment);
++ if (obj_priv->gtt_space != NULL) {
++ obj_priv->gtt_space->private = obj;
++ obj_priv->gtt_offset = obj_priv->gtt_space->start;
++ }
++ }
++ if (obj_priv->gtt_space == NULL) {
++ /* If the gtt is empty and we're still having trouble
++ * fitting our object in, we're out of memory.
++ */
++#if WATCH_LRU
++ DRM_INFO("%s: GTT full, evicting something\n", __func__);
++#endif
++ if (list_empty(&dev_priv->mm.inactive_list) &&
++ list_empty(&dev_priv->mm.flushing_list) &&
++ list_empty(&dev_priv->mm.active_list)) {
++ DRM_ERROR("GTT full, but LRU list empty\n");
++ return -ENOMEM;
++ }
++
++ ret = i915_gem_evict_something(dev);
++ if (ret != 0) {
++ DRM_ERROR("Failed to evict a buffer %d\n", ret);
++ return ret;
++ }
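++ /* Something was evicted; retry the search for free GTT space. */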
++ goto search_free;
++ }
++
++#if WATCH_BUF
++ DRM_INFO("Binding object of size %d at 0x%08x\n",
++ obj->size, obj_priv->gtt_offset);
++#endif
++ ret = i915_gem_object_get_page_list(obj);
++ if (ret) {
++ drm_mm_put_block(obj_priv->gtt_space);
++ obj_priv->gtt_space = NULL;
++ return ret;
++ }
++
++ page_count = obj->size / PAGE_SIZE;
++ /* Create an AGP memory structure pointing at our pages, and bind it
++ * into the GTT.
++ */
++ obj_priv->agp_mem = drm_agp_bind_pages(dev,
++ obj_priv->page_list,
++ page_count,
++ obj_priv->gtt_offset);
++ if (obj_priv->agp_mem == NULL) {
++ i915_gem_object_free_page_list(obj);
++ drm_mm_put_block(obj_priv->gtt_space);
++ obj_priv->gtt_space = NULL;
++ return -ENOMEM;
++ }
++ atomic_inc(&dev->gtt_count);
++ atomic_add(obj->size, &dev->gtt_memory);
++
++ /* Assert that the object is not currently in any GPU domain. As it
++ * wasn't in the GTT, there shouldn't be any way it could have been in
++ * a GPU cache
++ */
++ BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++ BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++
++ return 0;
++}
++
++void
++i915_gem_clflush_object(struct drm_gem_object *obj)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ /* If we don't have a page list set up, then we're not pinned
++ * to GPU, and we can ignore the cache flush because it'll happen
++ * again at bind time.
++ */
++ if (obj_priv->page_list == NULL)
++ return;
++
++ drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
++}
++
++/*
++ * Set the next domain for the specified object. This
++ * may not actually perform the necessary flushing/invaliding though,
++ * as that may want to be batched with other set_domain operations
++ *
++ * This is (we hope) the only really tricky part of gem. The goal
++ * is fairly simple -- track which caches hold bits of the object
++ * and make sure they remain coherent. A few concrete examples may
++ * help to explain how it works. For shorthand, we use the notation
++ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
++ * a pair of read and write domain masks.
++ *
++ * Case 1: the batch buffer
++ *
++ * 1. Allocated
++ * 2. Written by CPU
++ * 3. Mapped to GTT
++ * 4. Read by GPU
++ * 5. Unmapped from GTT
++ * 6. Freed
++ *
++ * Let's take these a step at a time
++ *
++ * 1. Allocated
++ * Pages allocated from the kernel may still have
++ * cache contents, so we set them to (CPU, CPU) always.
++ * 2. Written by CPU (using pwrite)
++ * The pwrite function calls set_domain (CPU, CPU) and
++ * this function does nothing (as nothing changes)
++ * 3. Mapped by GTT
++ * This function asserts that the object is not
++ * currently in any GPU-based read or write domains
++ * 4. Read by GPU
++ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
++ * As write_domain is zero, this function adds in the
++ * current read domains (CPU+COMMAND, 0).
++ * flush_domains is set to CPU.
++ * invalidate_domains is set to COMMAND
++ * clflush is run to get data out of the CPU caches
++ * then i915_dev_set_domain calls i915_gem_flush to
++ * emit an MI_FLUSH and drm_agp_chipset_flush
++ * 5. Unmapped from GTT
++ * i915_gem_object_unbind calls set_domain (CPU, CPU)
++ * flush_domains and invalidate_domains end up both zero
++ * so no flushing/invalidating happens
++ * 6. Freed
++ * yay, done
++ *
++ * Case 2: The shared render buffer
++ *
++ * 1. Allocated
++ * 2. Mapped to GTT
++ * 3. Read/written by GPU
++ * 4. set_domain to (CPU,CPU)
++ * 5. Read/written by CPU
++ * 6. Read/written by GPU
++ *
++ * 1. Allocated
++ * Same as last example, (CPU, CPU)
++ * 2. Mapped to GTT
++ * Nothing changes (assertions find that it is not in the GPU)
++ * 3. Read/written by GPU
++ * execbuffer calls set_domain (RENDER, RENDER)
++ * flush_domains gets CPU
++ * invalidate_domains gets GPU
++ * clflush (obj)
++ * MI_FLUSH and drm_agp_chipset_flush
++ * 4. set_domain (CPU, CPU)
++ * flush_domains gets GPU
++ * invalidate_domains gets CPU
++ * wait_rendering (obj) to make sure all drawing is complete.
++ * This will include an MI_FLUSH to get the data from GPU
++ * to memory
++ * clflush (obj) to invalidate the CPU cache
++ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
++ * 5. Read/written by CPU
++ * cache lines are loaded and dirtied
++ * 6. Read/written by GPU
++ * Same as last GPU access
++ *
++ * Case 3: The constant buffer
++ *
++ * 1. Allocated
++ * 2. Written by CPU
++ * 3. Read by GPU
++ * 4. Updated (written) by CPU again
++ * 5. Read by GPU
++ *
++ * 1. Allocated
++ * (CPU, CPU)
++ * 2. Written by CPU
++ * (CPU, CPU)
++ * 3. Read by GPU
++ * (CPU+RENDER, 0)
++ * flush_domains = CPU
++ * invalidate_domains = RENDER
++ * clflush (obj)
++ * MI_FLUSH
++ * drm_agp_chipset_flush
++ * 4. Updated (written) by CPU again
++ * (CPU, CPU)
++ * flush_domains = 0 (no previous write domain)
++ * invalidate_domains = 0 (no new read domains)
++ * 5. Read by GPU
++ * (CPU+RENDER, 0)
++ * flush_domains = CPU
++ * invalidate_domains = RENDER
++ * clflush (obj)
++ * MI_FLUSH
++ * drm_agp_chipset_flush
++ */
++static int
++i915_gem_object_set_domain(struct drm_gem_object *obj,
++ uint32_t read_domains,
++ uint32_t write_domain)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ uint32_t invalidate_domains = 0;
++ uint32_t flush_domains = 0;
++ int ret;
++
++#if WATCH_BUF
++ DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
++ __func__, obj,
++ obj->read_domains, read_domains,
++ obj->write_domain, write_domain);
++#endif
++ /*
++ * If the object isn't moving to a new write domain,
++ * let the object stay in multiple read domains
++ */
++ if (write_domain == 0)
++ read_domains |= obj->read_domains;
++ else
++ obj_priv->dirty = 1;
++
++ /*
++ * Flush the current write domain if
++ * the new read domains don't match. Invalidate
++ * any read domains which differ from the old
++ * write domain
++ */
++ if (obj->write_domain && obj->write_domain != read_domains) {
++ flush_domains |= obj->write_domain;
++ invalidate_domains |= read_domains & ~obj->write_domain;
++ }
++ /*
++ * Invalidate any read caches which may have
++ * stale data. That is, any new read domains.
++ */
++ invalidate_domains |= read_domains & ~obj->read_domains;
++ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
++#if WATCH_BUF
++ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
++ __func__, flush_domains, invalidate_domains);
++#endif
++ /*
++ * If we're invalidating the CPU cache and flushing a GPU cache,
++ * then pause for rendering so that the GPU caches will be
++ * flushed before the cpu cache is invalidated
++ */
++ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
++ (flush_domains & ~(I915_GEM_DOMAIN_CPU |
++ I915_GEM_DOMAIN_GTT))) {
++ ret = i915_gem_object_wait_rendering(obj);
++ if (ret)
++ return ret;
++ }
++ i915_gem_clflush_object(obj);
++ }
++
++ if ((write_domain | flush_domains) != 0)
++ obj->write_domain = write_domain;
++
++ /* If we're invalidating the CPU domain, clear the per-page CPU
++ * domain list as well.
++ */
++ if (obj_priv->page_cpu_valid != NULL &&
++ (write_domain != 0 ||
++ read_domains & I915_GEM_DOMAIN_CPU)) {
++ drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
++ DRM_MEM_DRIVER);
++ obj_priv->page_cpu_valid = NULL;
++ }
++ obj->read_domains = read_domains;
++
++ dev->invalidate_domains |= invalidate_domains;
++ dev->flush_domains |= flush_domains;
++#if WATCH_BUF
++ DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
++ __func__,
++ obj->read_domains, obj->write_domain,
++ dev->invalidate_domains, dev->flush_domains);
++#endif
++ return 0;
++}
++
++/**
++ * Set the read/write domain on a range of the object.
++ *
++ * Currently only implemented for CPU reads, otherwise drops to normal
++ * i915_gem_object_set_domain().
++ */
++static int
++i915_gem_object_set_domain_range(struct drm_gem_object *obj,
++ uint64_t offset,
++ uint64_t size,
++ uint32_t read_domains,
++ uint32_t write_domain)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int ret, i;
++
++ if (obj->read_domains & I915_GEM_DOMAIN_CPU)
++ return 0;
++
++ if (read_domains != I915_GEM_DOMAIN_CPU ||
++ write_domain != 0)
++ return i915_gem_object_set_domain(obj,
++ read_domains, write_domain);
++
++ /* Wait on any GPU rendering to the object to be flushed. */
++ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
++ ret = i915_gem_object_wait_rendering(obj);
++ if (ret)
++ return ret;
++ }
++
++ if (obj_priv->page_cpu_valid == NULL) {
++ obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
++ DRM_MEM_DRIVER);
++ if (obj_priv->page_cpu_valid == NULL)
++ return -ENOMEM;
++ }
++
++ /* Flush the cache on any pages that are still invalid from the CPU's
++ * perspective.
++ */
++ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
++ if (obj_priv->page_cpu_valid[i])
++ continue;
++
++ drm_clflush_pages(obj_priv->page_list + i, 1);
++
++ obj_priv->page_cpu_valid[i] = 1;
++ }
++
++ return 0;
++}
++
++/**
++ * Once all of the objects have been set in the proper domain,
++ * perform the necessary flush and invalidate operations.
++ *
++ * Returns the write domains flushed, for use in flush tracking.
++ */
++static uint32_t
++i915_gem_dev_set_domain(struct drm_device *dev)
++{
++ uint32_t flush_domains = dev->flush_domains;
++
++ /*
++ * Now that all the buffers are synced to the proper domains,
++ * flush and invalidate the collected domains
++ */
++ if (dev->invalidate_domains | dev->flush_domains) {
++#if WATCH_EXEC
++ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
++ __func__,
++ dev->invalidate_domains,
++ dev->flush_domains);
++#endif
++ i915_gem_flush(dev,
++ dev->invalidate_domains,
++ dev->flush_domains);
++ dev->invalidate_domains = 0;
++ dev->flush_domains = 0;
++ }
++
++ return flush_domains;
++}
++
++/**
++ * Pin an object to the GTT and evaluate the relocations landing in it.
++ */
++static int
++i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
++ struct drm_file *file_priv,
++ struct drm_i915_gem_exec_object *entry)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_relocation_entry reloc;
++ struct drm_i915_gem_relocation_entry __user *relocs;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int i, ret;
++ uint32_t last_reloc_offset = -1;
++ void *reloc_page = NULL;
++
++ /* Choose the GTT offset for our buffer and put it there. */
++ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
++ if (ret)
++ return ret;
++
++ entry->offset = obj_priv->gtt_offset;
++
++ relocs = (struct drm_i915_gem_relocation_entry __user *)
++ (uintptr_t) entry->relocs_ptr;
++ /* Apply the relocations, using the GTT aperture to avoid cache
++ * flushing requirements.
++ */
++ for (i = 0; i < entry->relocation_count; i++) {
++ struct drm_gem_object *target_obj;
++ struct drm_i915_gem_object *target_obj_priv;
++ uint32_t reloc_val, reloc_offset, *reloc_entry;
++ int ret;
++
++ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
++ if (ret != 0) {
++ i915_gem_object_unpin(obj);
++ return ret;
++ }
++
++ target_obj = drm_gem_object_lookup(obj->dev, file_priv,
++ reloc.target_handle);
++ if (target_obj == NULL) {
++ i915_gem_object_unpin(obj);
++ return -EBADF;
++ }
++ target_obj_priv = target_obj->driver_private;
++
++ /* The target buffer should have appeared before us in the
++ * exec_object list, so it should have a GTT space bound by now.
++ */
++ if (target_obj_priv->gtt_space == NULL) {
++ DRM_ERROR("No GTT space found for object %d\n",
++ reloc.target_handle);
++ drm_gem_object_unreference(target_obj);
++ i915_gem_object_unpin(obj);
++ return -EINVAL;
++ }
++
++ if (reloc.offset > obj->size - 4) {
++ DRM_ERROR("Relocation beyond object bounds: "
++ "obj %p target %d offset %d size %d.\n",
++ obj, reloc.target_handle,
++ (int) reloc.offset, (int) obj->size);
++ drm_gem_object_unreference(target_obj);
++ i915_gem_object_unpin(obj);
++ return -EINVAL;
++ }
++ if (reloc.offset & 3) {
++ DRM_ERROR("Relocation not 4-byte aligned: "
++ "obj %p target %d offset %d.\n",
++ obj, reloc.target_handle,
++ (int) reloc.offset);
++ drm_gem_object_unreference(target_obj);
++ i915_gem_object_unpin(obj);
++ return -EINVAL;
++ }
++
++ if (reloc.write_domain && target_obj->pending_write_domain &&
++ reloc.write_domain != target_obj->pending_write_domain) {
++ DRM_ERROR("Write domain conflict: "
++ "obj %p target %d offset %d "
++ "new %08x old %08x\n",
++ obj, reloc.target_handle,
++ (int) reloc.offset,
++ reloc.write_domain,
++ target_obj->pending_write_domain);
++ drm_gem_object_unreference(target_obj);
++ i915_gem_object_unpin(obj);
++ return -EINVAL;
++ }
++
++#if WATCH_RELOC
++ DRM_INFO("%s: obj %p offset %08x target %d "
++ "read %08x write %08x gtt %08x "
++ "presumed %08x delta %08x\n",
++ __func__,
++ obj,
++ (int) reloc.offset,
++ (int) reloc.target_handle,
++ (int) reloc.read_domains,
++ (int) reloc.write_domain,
++ (int) target_obj_priv->gtt_offset,
++ (int) reloc.presumed_offset,
++ reloc.delta);
++#endif
++
++ target_obj->pending_read_domains |= reloc.read_domains;
++ target_obj->pending_write_domain |= reloc.write_domain;
++
++ /* If the relocation already has the right value in it, no
++ * more work needs to be done.
++ */
++ if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
++ drm_gem_object_unreference(target_obj);
++ continue;
++ }
++
++ /* Now that we're going to actually write some data in,
++ * make sure that any rendering using this buffer's contents
++ * is completed.
++ */
++ i915_gem_object_wait_rendering(obj);
++
++ /* As we're writing through the gtt, flush
++ * any CPU writes before we write the relocations
++ */
++ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
++ i915_gem_clflush_object(obj);
++ drm_agp_chipset_flush(dev);
++ obj->write_domain = 0;
++ }
++
++ /* Map the page containing the relocation we're going to
++ * perform.
++ */
++ reloc_offset = obj_priv->gtt_offset + reloc.offset;
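++ /* Reuse the previous mapping when this relocation falls in the same GTT page. */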
++ if (reloc_page == NULL ||
++ (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
++ (reloc_offset & ~(PAGE_SIZE - 1))) {
++ if (reloc_page != NULL)
++ iounmap(reloc_page);
++
++ reloc_page = ioremap(dev->agp->base +
++ (reloc_offset & ~(PAGE_SIZE - 1)),
++ PAGE_SIZE);
++ last_reloc_offset = reloc_offset;
++ if (reloc_page == NULL) {
++ drm_gem_object_unreference(target_obj);
++ i915_gem_object_unpin(obj);
++ return -ENOMEM;
++ }
++ }
++
++ reloc_entry = (uint32_t *)((char *)reloc_page +
++ (reloc_offset & (PAGE_SIZE - 1)));
++ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
++
++#if WATCH_BUF
++ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
++ obj, (unsigned int) reloc.offset,
++ readl(reloc_entry), reloc_val);
++#endif
++ writel(reloc_val, reloc_entry);
++
++ /* Write the updated presumed offset for this entry back out
++ * to the user.
++ */
++ reloc.presumed_offset = target_obj_priv->gtt_offset;
++ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
++ if (ret != 0) {
++ drm_gem_object_unreference(target_obj);
++ i915_gem_object_unpin(obj);
++ return ret;
++ }
++
++ drm_gem_object_unreference(target_obj);
++ }
++
++ if (reloc_page != NULL)
++ iounmap(reloc_page);
++
++#if WATCH_BUF
++ if (0)
++ i915_gem_dump_object(obj, 128, __func__, ~0);
++#endif
++ return 0;
++}
++
++/** Dispatch a batchbuffer to the ring
++ */
++static int
++i915_dispatch_gem_execbuffer(struct drm_device *dev,
++ struct drm_i915_gem_execbuffer *exec,
++ uint64_t exec_offset)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
++ (uintptr_t) exec->cliprects_ptr;
++ int nbox = exec->num_cliprects;
++ int i = 0, count;
++ uint32_t exec_start, exec_len;
++ RING_LOCALS;
++
++ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
++ exec_len = (uint32_t) exec->batch_len;
++
++ if ((exec_start | exec_len) & 0x7) {
++ DRM_ERROR("alignment\n");
++ return -EINVAL;
++ }
++
++ if (!exec_start)
++ return -EINVAL;
++
++ count = nbox ? nbox : 1;
++
++ for (i = 0; i < count; i++) {
++ if (i < nbox) {
++ int ret = i915_emit_box(dev, boxes, i,
++ exec->DR1, exec->DR4);
++ if (ret)
++ return ret;
++ }
++
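++ /* 830/845 use the MI_BATCH_BUFFER command with explicit start and end addresses; later chips use MI_BATCH_BUFFER_START. */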
++ if (IS_I830(dev) || IS_845G(dev)) {
++ BEGIN_LP_RING(4);
++ OUT_RING(MI_BATCH_BUFFER);
++ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
++ OUT_RING(exec_start + exec_len - 4);
++ OUT_RING(0);
++ ADVANCE_LP_RING();
++ } else {
++ BEGIN_LP_RING(2);
++ if (IS_I965G(dev)) {
++ OUT_RING(MI_BATCH_BUFFER_START |
++ (2 << 6) |
++ MI_BATCH_NON_SECURE_I965);
++ OUT_RING(exec_start);
++ } else {
++ OUT_RING(MI_BATCH_BUFFER_START |
++ (2 << 6));
++ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
++ }
++ ADVANCE_LP_RING();
++ }
++ }
++
++ /* XXX breadcrumb */
++ return 0;
++}
++
++/* Throttle our rendering by waiting until the ring has completed our requests
++ * emitted over 20 msec ago.
++ *
++ * This should get us reasonable parallelism between CPU and GPU but also
++ * relatively low latency when blocking on a particular request to finish.
++ */
++static int
++i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
++{
++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++ int ret = 0;
++ uint32_t seqno;
++
++ mutex_lock(&dev->struct_mutex);
++ seqno = i915_file_priv->mm.last_gem_throttle_seqno;
++ i915_file_priv->mm.last_gem_throttle_seqno =
++ i915_file_priv->mm.last_gem_seqno;
++ if (seqno)
++ ret = i915_wait_request(dev, seqno);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++
++int
++i915_gem_execbuffer(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++ struct drm_i915_gem_execbuffer *args = data;
++ struct drm_i915_gem_exec_object *exec_list = NULL;
++ struct drm_gem_object **object_list = NULL;
++ struct drm_gem_object *batch_obj;
++ int ret, i, pinned = 0;
++ uint64_t exec_offset;
++ uint32_t seqno, flush_domains;
++
++#if WATCH_EXEC
++ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
++ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
++#endif
++
++ /* Copy in the exec list from userland */
++ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
++ DRM_MEM_DRIVER);
++ object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
++ DRM_MEM_DRIVER);
++ if (exec_list == NULL || object_list == NULL) {
++ DRM_ERROR("Failed to allocate exec or object list "
++ "for %d buffers\n",
++ args->buffer_count);
++ ret = -ENOMEM;
++ goto pre_mutex_err;
++ }
++ ret = copy_from_user(exec_list,
++ (struct drm_i915_relocation_entry __user *)
++ (uintptr_t) args->buffers_ptr,
++ sizeof(*exec_list) * args->buffer_count);
++ if (ret != 0) {
++ DRM_ERROR("copy %d exec entries failed %d\n",
++ args->buffer_count, ret);
++ goto pre_mutex_err;
++ }
++
++ mutex_lock(&dev->struct_mutex);
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ if (dev_priv->mm.wedged) {
++ DRM_ERROR("Execbuf while wedged\n");
++ mutex_unlock(&dev->struct_mutex);
++ return -EIO;
++ }
++
++ if (dev_priv->mm.suspended) {
++ DRM_ERROR("Execbuf while VT-switched.\n");
++ mutex_unlock(&dev->struct_mutex);
++ return -EBUSY;
++ }
++
++ /* Zero the global flush/invalidate flags. These
++ * will be modified as each object is bound to the
++ * GTT.
++ */
++ dev->invalidate_domains = 0;
++ dev->flush_domains = 0;
++
++ /* Look up object handles and perform the relocations */
++ for (i = 0; i < args->buffer_count; i++) {
++ object_list[i] = drm_gem_object_lookup(dev, file_priv,
++ exec_list[i].handle);
++ if (object_list[i] == NULL) {
++ DRM_ERROR("Invalid object handle %d at index %d\n",
++ exec_list[i].handle, i);
++ ret = -EBADF;
++ goto err;
++ }
++
++ object_list[i]->pending_read_domains = 0;
++ object_list[i]->pending_write_domain = 0;
++ ret = i915_gem_object_pin_and_relocate(object_list[i],
++ file_priv,
++ &exec_list[i]);
++ if (ret) {
++ DRM_ERROR("object bind and relocate failed %d\n", ret);
++ goto err;
++ }
++ pinned = i + 1;
++ }
++
++ /* Set the pending read domains for the batch buffer to COMMAND */
++ batch_obj = object_list[args->buffer_count-1];
++ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
++ batch_obj->pending_write_domain = 0;
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ for (i = 0; i < args->buffer_count; i++) {
++ struct drm_gem_object *obj = object_list[i];
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ if (obj_priv->gtt_space == NULL) {
++ /* We evicted the buffer in the process of validating
++ * our set of buffers. We could try to recover by
++ * kicking everything out and trying again from
++ * the start.
++ */
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ /* make sure all previous memory operations have passed */
++ ret = i915_gem_object_set_domain(obj,
++ obj->pending_read_domains,
++ obj->pending_write_domain);
++ if (ret)
++ goto err;
++ }
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ /* Flush/invalidate caches and chipset buffer */
++ flush_domains = i915_gem_dev_set_domain(dev);
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++#if WATCH_COHERENCY
++ for (i = 0; i < args->buffer_count; i++) {
++ i915_gem_object_check_coherency(object_list[i],
++ exec_list[i].handle);
++ }
++#endif
++
++ exec_offset = exec_list[args->buffer_count - 1].offset;
++
++#if WATCH_EXEC
++ i915_gem_dump_object(object_list[args->buffer_count - 1],
++ args->batch_len,
++ __func__,
++ ~0);
++#endif
++
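++ /* Emit a request carrying the flush above so buffers on the flushing list retire once it completes. */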
++ (void)i915_add_request(dev, flush_domains);
++
++ /* Exec the batchbuffer */
++ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
++ if (ret) {
++ DRM_ERROR("dispatch failed %d\n", ret);
++ goto err;
++ }
++
++ /*
++ * Ensure that the commands in the batch buffer are
++ * finished before the interrupt fires
++ */
++ flush_domains = i915_retire_commands(dev);
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ /*
++ * Get a seqno representing the execution of the current buffer,
++ * which we can wait on. We would like to mitigate these interrupts,
++ * likely by only creating seqnos occasionally (so that we have
++ * *some* interrupts representing completion of buffers that we can
++ * wait on when trying to clear up gtt space).
++ */
++ seqno = i915_add_request(dev, flush_domains);
++ BUG_ON(seqno == 0);
++ i915_file_priv->mm.last_gem_seqno = seqno;
++ for (i = 0; i < args->buffer_count; i++) {
++ struct drm_gem_object *obj = object_list[i];
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ i915_gem_object_move_to_active(obj);
++ obj_priv->last_rendering_seqno = seqno;
++#if WATCH_LRU
++ DRM_INFO("%s: move to exec list %p\n", __func__, obj);
++#endif
++ }
++#if WATCH_LRU
++ i915_dump_lru(dev, __func__);
++#endif
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ /* Copy the new buffer offsets back to the user's exec list. */
++ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
++ (uintptr_t) args->buffers_ptr,
++ exec_list,
++ sizeof(*exec_list) * args->buffer_count);
++ if (ret)
++ DRM_ERROR("failed to copy %d exec entries "
++ "back to user (%d)\n",
++ args->buffer_count, ret);
++err:
++ if (object_list != NULL) {
++ for (i = 0; i < pinned; i++)
++ i915_gem_object_unpin(object_list[i]);
++
++ for (i = 0; i < args->buffer_count; i++)
++ drm_gem_object_unreference(object_list[i]);
++ }
++ mutex_unlock(&dev->struct_mutex);
++
++pre_mutex_err:
++ drm_free(object_list, sizeof(*object_list) * args->buffer_count,
++ DRM_MEM_DRIVER);
++ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
++ DRM_MEM_DRIVER);
++
++ return ret;
++}
++
++int
++i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int ret;
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++ if (obj_priv->gtt_space == NULL) {
++ ret = i915_gem_object_bind_to_gtt(obj, alignment);
++ if (ret != 0) {
++ DRM_ERROR("Failure to bind: %d", ret);
++ return ret;
++ }
++ }
++ obj_priv->pin_count++;
++
++ /* If the object is not active and not pending a flush,
++ * remove it from the inactive list
++ */
++ if (obj_priv->pin_count == 1) {
++ atomic_inc(&dev->pin_count);
++ atomic_add(obj->size, &dev->pin_memory);
++ if (!obj_priv->active &&
++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++ I915_GEM_DOMAIN_GTT)) == 0 &&
++ !list_empty(&obj_priv->list))
++ list_del_init(&obj_priv->list);
++ }
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ return 0;
++}
++
++void
++i915_gem_object_unpin(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++ obj_priv->pin_count--;
++ BUG_ON(obj_priv->pin_count < 0);
++ BUG_ON(obj_priv->gtt_space == NULL);
++
++ /* If the object is no longer pinned, and is
++ * neither active nor being flushed, then stick it on
++ * the inactive list
++ */
++ if (obj_priv->pin_count == 0) {
++ if (!obj_priv->active &&
++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++ I915_GEM_DOMAIN_GTT)) == 0)
++ list_move_tail(&obj_priv->list,
++ &dev_priv->mm.inactive_list);
++ atomic_dec(&dev->pin_count);
++ atomic_sub(obj->size, &dev->pin_memory);
++ }
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++}
++
++int
++i915_gem_pin_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_pin *args = data;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ int ret;
++
++ mutex_lock(&dev->struct_mutex);
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL) {
++ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
++ args->handle);
++ mutex_unlock(&dev->struct_mutex);
++ return -EBADF;
++ }
++ obj_priv = obj->driver_private;
++
++ ret = i915_gem_object_pin(obj, args->alignment);
++ if (ret != 0) {
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++ }
++
++ /* XXX - flush the CPU caches for pinned objects
++ * as the X server doesn't manage domains yet
++ */
++ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
++ i915_gem_clflush_object(obj);
++ drm_agp_chipset_flush(dev);
++ obj->write_domain = 0;
++ }
++ args->offset = obj_priv->gtt_offset;
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++int
++i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_pin *args = data;
++ struct drm_gem_object *obj;
++
++ mutex_lock(&dev->struct_mutex);
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL) {
++ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
++ args->handle);
++ mutex_unlock(&dev->struct_mutex);
++ return -EBADF;
++ }
++
++ i915_gem_object_unpin(obj);
++
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return 0;
++}
++
++int
++i915_gem_busy_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_busy *args = data;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++
++ mutex_lock(&dev->struct_mutex);
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL) {
++ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
++ args->handle);
++ mutex_unlock(&dev->struct_mutex);
++ return -EBADF;
++ }
++
++ obj_priv = obj->driver_private;
++ args->busy = obj_priv->active;
++
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return 0;
++}
++
++int
++i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return i915_gem_ring_throttle(dev, file_priv);
++}
++
++int i915_gem_init_object(struct drm_gem_object *obj)
++{
++ struct drm_i915_gem_object *obj_priv;
++
++ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
++ if (obj_priv == NULL)
++ return -ENOMEM;
++
++ /*
++ * We've just allocated pages from the kernel,
++ * so they've just been written by the CPU with
++ * zeros. They'll need to be clflushed before we
++ * use them with the GPU.
++ */
++ obj->write_domain = I915_GEM_DOMAIN_CPU;
++ obj->read_domains = I915_GEM_DOMAIN_CPU;
++
++ obj->driver_private = obj_priv;
++ obj_priv->obj = obj;
++ INIT_LIST_HEAD(&obj_priv->list);
++ return 0;
++}
++
++void i915_gem_free_object(struct drm_gem_object *obj)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ while (obj_priv->pin_count > 0)
++ i915_gem_object_unpin(obj);
++
++ i915_gem_object_unbind(obj);
++
++ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
++ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
++}
++
++static int
++i915_gem_set_domain(struct drm_gem_object *obj,
++ struct drm_file *file_priv,
++ uint32_t read_domains,
++ uint32_t write_domain)
++{
++ struct drm_device *dev = obj->dev;
++ int ret;
++ uint32_t flush_domains;
++
++ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
++
++ ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
++ if (ret)
++ return ret;
++ flush_domains = i915_gem_dev_set_domain(obj->dev);
++
++ if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
++ (void) i915_add_request(dev, flush_domains);
++
++ return 0;
++}
++
++/** Unbinds all objects that are on the given buffer list. */
++static int
++i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
++{
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ int ret;
++
++ while (!list_empty(head)) {
++ obj_priv = list_first_entry(head,
++ struct drm_i915_gem_object,
++ list);
++ obj = obj_priv->obj;
++
++ if (obj_priv->pin_count != 0) {
++ DRM_ERROR("Pinned object in unbind list\n");
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
++ }
++
++ ret = i915_gem_object_unbind(obj);
++ if (ret != 0) {
++ DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
++ ret);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++ }
++ }
++
++
++ return 0;
++}
++
++static int
++i915_gem_idle(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ uint32_t seqno, cur_seqno, last_seqno;
++ int stuck, ret;
++
++ if (dev_priv->mm.suspended)
++ return 0;
++
++ /* Hack! Don't let anybody do execbuf while we don't control the chip.
++ * We need to replace this with a semaphore, or something.
++ */
++ dev_priv->mm.suspended = 1;
++
++ i915_kernel_lost_context(dev);
++
++ /* Flush the GPU along with all non-CPU write domains
++ */
++ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
++ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
++ I915_GEM_DOMAIN_GTT));
++
++ if (seqno == 0) {
++ mutex_unlock(&dev->struct_mutex);
++ return -ENOMEM;
++ }
++
++ dev_priv->mm.waiting_gem_seqno = seqno;
++ last_seqno = 0;
++ stuck = 0;
++ for (;;) {
++ cur_seqno = i915_get_gem_seqno(dev);
++ if (i915_seqno_passed(cur_seqno, seqno))
++ break;
++ if (last_seqno == cur_seqno) {
++ if (stuck++ > 100) {
++ DRM_ERROR("hardware wedged\n");
++ dev_priv->mm.wedged = 1;
++ DRM_WAKEUP(&dev_priv->irq_queue);
++ break;
++ }
++ }
++ msleep(10);
++ last_seqno = cur_seqno;
++ }
++ dev_priv->mm.waiting_gem_seqno = 0;
++
++ i915_gem_retire_requests(dev);
++
++ /* Active and flushing should now be empty as we've
++ * waited for a sequence higher than any pending execbuffer
++ */
++ BUG_ON(!list_empty(&dev_priv->mm.active_list));
++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++
++ /* Request should now be empty as we've also waited
++ * for the last request in the list
++ */
++ BUG_ON(!list_empty(&dev_priv->mm.request_list));
++
++ /* Move all buffers out of the GTT. */
++ ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
++ if (ret)
++ return ret;
++
++ BUG_ON(!list_empty(&dev_priv->mm.active_list));
++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
++ BUG_ON(!list_empty(&dev_priv->mm.request_list));
++ return 0;
++}
++
++static int
++i915_gem_init_hws(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ int ret;
++
++ /* If we need a physical address for the status page, it's already
++ * initialized at driver load time.
++ */
++ if (!I915_NEED_GFX_HWS(dev))
++ return 0;
++
++ obj = drm_gem_object_alloc(dev, 4096);
++ if (obj == NULL) {
++ DRM_ERROR("Failed to allocate status page\n");
++ return -ENOMEM;
++ }
++ obj_priv = obj->driver_private;
++
++ ret = i915_gem_object_pin(obj, 4096);
++ if (ret != 0) {
++ drm_gem_object_unreference(obj);
++ return ret;
++ }
++
++ dev_priv->status_gfx_addr = obj_priv->gtt_offset;
++ dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
++ dev_priv->hws_map.size = 4096;
++ dev_priv->hws_map.type = 0;
++ dev_priv->hws_map.flags = 0;
++ dev_priv->hws_map.mtrr = 0;
++
++ drm_core_ioremap(&dev_priv->hws_map, dev);
++ if (dev_priv->hws_map.handle == NULL) {
++ DRM_ERROR("Failed to map status page.\n");
++ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
++ drm_gem_object_unreference(obj);
++ return -EINVAL;
++ }
++ dev_priv->hws_obj = obj;
++ dev_priv->hw_status_page = dev_priv->hws_map.handle;
++ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
++ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
++
++ return 0;
++}
++
++static int
++i915_gem_init_ringbuffer(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ int ret;
++
++ ret = i915_gem_init_hws(dev);
++ if (ret != 0)
++ return ret;
++
++ obj = drm_gem_object_alloc(dev, 128 * 1024);
++ if (obj == NULL) {
++ DRM_ERROR("Failed to allocate ringbuffer\n");
++ return -ENOMEM;
++ }
++ obj_priv = obj->driver_private;
++
++ ret = i915_gem_object_pin(obj, 4096);
++ if (ret != 0) {
++ drm_gem_object_unreference(obj);
++ return ret;
++ }
++
++ /* Set up the kernel mapping for the ring. */
++ dev_priv->ring.Size = obj->size;
++ dev_priv->ring.tail_mask = obj->size - 1;
++
++ dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
++ dev_priv->ring.map.size = obj->size;
++ dev_priv->ring.map.type = 0;
++ dev_priv->ring.map.flags = 0;
++ dev_priv->ring.map.mtrr = 0;
++
++ drm_core_ioremap(&dev_priv->ring.map, dev);
++ if (dev_priv->ring.map.handle == NULL) {
++ DRM_ERROR("Failed to map ringbuffer.\n");
++ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
++ drm_gem_object_unreference(obj);
++ return -EINVAL;
++ }
++ dev_priv->ring.ring_obj = obj;
++ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++
++ /* Stop the ring if it's running. */
++ I915_WRITE(PRB0_CTL, 0);
++ I915_WRITE(PRB0_HEAD, 0);
++ I915_WRITE(PRB0_TAIL, 0);
++ I915_WRITE(PRB0_START, 0);
++
++ /* Initialize the ring. */
++ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
++ I915_WRITE(PRB0_CTL,
++ ((obj->size - 4096) & RING_NR_PAGES) |
++ RING_NO_REPORT |
++ RING_VALID);
++
++ /* Update our cache of the ring state */
++ i915_kernel_lost_context(dev);
++
++ return 0;
++}
++
++static void
++i915_gem_cleanup_ringbuffer(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ if (dev_priv->ring.ring_obj == NULL)
++ return;
++
++ drm_core_ioremapfree(&dev_priv->ring.map, dev);
++
++ i915_gem_object_unpin(dev_priv->ring.ring_obj);
++ drm_gem_object_unreference(dev_priv->ring.ring_obj);
++ dev_priv->ring.ring_obj = NULL;
++ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
++
++ if (dev_priv->hws_obj != NULL) {
++ i915_gem_object_unpin(dev_priv->hws_obj);
++ drm_gem_object_unreference(dev_priv->hws_obj);
++ dev_priv->hws_obj = NULL;
++ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
++
++ /* Write high address into HWS_PGA when disabling. */
++ I915_WRITE(HWS_PGA, 0x1ffff000);
++ }
++}
++
++int
++i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ int ret;
++
++ if (dev_priv->mm.wedged) {
++ DRM_ERROR("Reenabling wedged hardware, good luck\n");
++ dev_priv->mm.wedged = 0;
++ }
++
++ ret = i915_gem_init_ringbuffer(dev);
++ if (ret != 0)
++ return ret;
++
++ mutex_lock(&dev->struct_mutex);
++ BUG_ON(!list_empty(&dev_priv->mm.active_list));
++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
++ BUG_ON(!list_empty(&dev_priv->mm.request_list));
++ dev_priv->mm.suspended = 0;
++ mutex_unlock(&dev->struct_mutex);
++ return 0;
++}
++
++int
++i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ mutex_lock(&dev->struct_mutex);
++ ret = i915_gem_idle(dev);
++ if (ret == 0)
++ i915_gem_cleanup_ringbuffer(dev);
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++void
++i915_gem_lastclose(struct drm_device *dev)
++{
++ int ret;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ mutex_lock(&dev->struct_mutex);
++
++ if (dev_priv->ring.ring_obj != NULL) {
++ ret = i915_gem_idle(dev);
++ if (ret)
++ DRM_ERROR("failed to idle hardware: %d\n", ret);
++
++ i915_gem_cleanup_ringbuffer(dev);
++ }
++
++ mutex_unlock(&dev->struct_mutex);
++}
++
++void
++i915_gem_load(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ INIT_LIST_HEAD(&dev_priv->mm.active_list);
++ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
++ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
++ INIT_LIST_HEAD(&dev_priv->mm.request_list);
++ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
++ i915_gem_retire_work_handler);
++ dev_priv->mm.next_gem_seqno = 1;
++
++ i915_gem_detect_bit_6_swizzle(dev);
++}
+diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
+new file mode 100644
+index 0000000..131c088
+--- /dev/null
++++ b/drivers/gpu/drm/i915/i915_gem_debug.c
+@@ -0,0 +1,201 @@
++/*
++ * Copyright © 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ * Keith Packard <keithp@keithp.com>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#if WATCH_INACTIVE
++void
++i915_verify_inactive(struct drm_device *dev, char *file, int line)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++
++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++ obj = obj_priv->obj;
++ if (obj_priv->pin_count || obj_priv->active ||
++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++ I915_GEM_DOMAIN_GTT)))
++ DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
++ obj,
++ obj_priv->pin_count, obj_priv->active,
++ obj->write_domain, file, line);
++ }
++}
++#endif /* WATCH_INACTIVE */
++
++
++#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
++static void
++i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
++ uint32_t bias, uint32_t mark)
++{
++ uint32_t *mem = kmap_atomic(page, KM_USER0);
++ int i;
++ for (i = start; i < end; i += 4)
++ DRM_INFO("%08x: %08x%s\n",
++ (int) (bias + i), mem[i / 4],
++ (bias + i == mark) ? " ********" : "");
++ kunmap_atomic(mem, KM_USER0);
++ /* give syslog time to catch up */
++ msleep(1);
++}
++
++void
++i915_gem_dump_object(struct drm_gem_object *obj, int len,
++ const char *where, uint32_t mark)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int page;
++
++ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
++ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
++ int page_len, chunk, chunk_len;
++
++ page_len = len - page * PAGE_SIZE;
++ if (page_len > PAGE_SIZE)
++ page_len = PAGE_SIZE;
++
++ for (chunk = 0; chunk < page_len; chunk += 128) {
++ chunk_len = page_len - chunk;
++ if (chunk_len > 128)
++ chunk_len = 128;
++ i915_gem_dump_page(obj_priv->page_list[page],
++ chunk, chunk + chunk_len,
++ obj_priv->gtt_offset +
++ page * PAGE_SIZE,
++ mark);
++ }
++ }
++}
++#endif
++
++#if WATCH_LRU
++void
++i915_dump_lru(struct drm_device *dev, const char *where)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv;
++
++ DRM_INFO("active list %s {\n", where);
++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
++ list)
++ {
++ DRM_INFO(" %p: %08x\n", obj_priv,
++ obj_priv->last_rendering_seqno);
++ }
++ DRM_INFO("}\n");
++ DRM_INFO("flushing list %s {\n", where);
++ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
++ list)
++ {
++ DRM_INFO(" %p: %08x\n", obj_priv,
++ obj_priv->last_rendering_seqno);
++ }
++ DRM_INFO("}\n");
++ DRM_INFO("inactive %s {\n", where);
++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++ DRM_INFO(" %p: %08x\n", obj_priv,
++ obj_priv->last_rendering_seqno);
++ }
++ DRM_INFO("}\n");
++}
++#endif
++
++
++#if WATCH_COHERENCY
++void
++i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int page;
++ uint32_t *gtt_mapping;
++ uint32_t *backing_map = NULL;
++ int bad_count = 0;
++
++ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
++ __func__, obj, obj_priv->gtt_offset, handle,
++ obj->size / 1024);
++
++ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
++ obj->size);
++ if (gtt_mapping == NULL) {
++ DRM_ERROR("failed to map GTT space\n");
++ return;
++ }
++
++ for (page = 0; page < obj->size / PAGE_SIZE; page++) {
++ int i;
++
++ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
++
++ if (backing_map == NULL) {
++ DRM_ERROR("failed to map backing page\n");
++ goto out;
++ }
++
++ for (i = 0; i < PAGE_SIZE / 4; i++) {
++ uint32_t cpuval = backing_map[i];
++ uint32_t gttval = readl(gtt_mapping +
++ page * 1024 + i);
++
++ if (cpuval != gttval) {
++ DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
++ "0x%08x vs 0x%08x\n",
++ (int)(obj_priv->gtt_offset +
++ page * PAGE_SIZE + i * 4),
++ cpuval, gttval);
++ if (bad_count++ >= 8) {
++ DRM_INFO("...\n");
++ goto out;
++ }
++ }
++ }
++ kunmap_atomic(backing_map, KM_USER0);
++ backing_map = NULL;
++ }
++
++ out:
++ if (backing_map != NULL)
++ kunmap_atomic(backing_map, KM_USER0);
++ iounmap(gtt_mapping);
++
++ /* give syslog time to catch up */
++ msleep(1);
++
++ /* Directly flush the object, since we just loaded values with the CPU
++ * from the backing pages and we don't want to disturb the cache
++ * management that we're trying to observe.
++ */
++
++ i915_gem_clflush_object(obj);
++}
++#endif
+diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
+new file mode 100644
+index 0000000..15d4160
+--- /dev/null
++++ b/drivers/gpu/drm/i915/i915_gem_proc.c
+@@ -0,0 +1,292 @@
++/*
++ * Copyright © 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ * Keith Packard <keithp@keithp.com>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++static int i915_gem_active_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv;
++ int len = 0;
++
++ if (offset > DRM_PROC_LIMIT) {
++ *eof = 1;
++ return 0;
++ }
++
++ *start = &buf[offset];
++ *eof = 0;
++ DRM_PROC_PRINT("Active:\n");
++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
++ list)
++ {
++ struct drm_gem_object *obj = obj_priv->obj;
++ if (obj->name) {
++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
++ obj, obj->name,
++ obj->read_domains, obj->write_domain,
++ obj_priv->last_rendering_seqno);
++ } else {
++ DRM_PROC_PRINT(" %p: %08x %08x %d\n",
++ obj,
++ obj->read_domains, obj->write_domain,
++ obj_priv->last_rendering_seqno);
++ }
++ }
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv;
++ int len = 0;
++
++ if (offset > DRM_PROC_LIMIT) {
++ *eof = 1;
++ return 0;
++ }
++
++ *start = &buf[offset];
++ *eof = 0;
++ DRM_PROC_PRINT("Flushing:\n");
++ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
++ list)
++ {
++ struct drm_gem_object *obj = obj_priv->obj;
++ if (obj->name) {
++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
++ obj, obj->name,
++ obj->read_domains, obj->write_domain,
++ obj_priv->last_rendering_seqno);
++ } else {
++ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
++ obj->read_domains, obj->write_domain,
++ obj_priv->last_rendering_seqno);
++ }
++ }
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv;
++ int len = 0;
++
++ if (offset > DRM_PROC_LIMIT) {
++ *eof = 1;
++ return 0;
++ }
++
++ *start = &buf[offset];
++ *eof = 0;
++ DRM_PROC_PRINT("Inactive:\n");
++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
++ list)
++ {
++ struct drm_gem_object *obj = obj_priv->obj;
++ if (obj->name) {
++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
++ obj, obj->name,
++ obj->read_domains, obj->write_domain,
++ obj_priv->last_rendering_seqno);
++ } else {
++ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
++ obj->read_domains, obj->write_domain,
++ obj_priv->last_rendering_seqno);
++ }
++ }
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static int i915_gem_request_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_request *gem_request;
++ int len = 0;
++
++ if (offset > DRM_PROC_LIMIT) {
++ *eof = 1;
++ return 0;
++ }
++
++ *start = &buf[offset];
++ *eof = 0;
++ DRM_PROC_PRINT("Request:\n");
++ list_for_each_entry(gem_request, &dev_priv->mm.request_list,
++ list)
++ {
++ DRM_PROC_PRINT(" %d @ %d %08x\n",
++ gem_request->seqno,
++ (int) (jiffies - gem_request->emitted_jiffies),
++ gem_request->flush_domains);
++ }
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ int len = 0;
++
++ if (offset > DRM_PROC_LIMIT) {
++ *eof = 1;
++ return 0;
++ }
++
++ *start = &buf[offset];
++ *eof = 0;
++ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
++ DRM_PROC_PRINT("Waiter sequence: %d\n",
++ dev_priv->mm.waiting_gem_seqno);
++ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++
++static int i915_interrupt_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ int len = 0;
++
++ if (offset > DRM_PROC_LIMIT) {
++ *eof = 1;
++ return 0;
++ }
++
++ *start = &buf[offset];
++ *eof = 0;
++ DRM_PROC_PRINT("Interrupt enable: %08x\n",
++ I915_READ(IER));
++ DRM_PROC_PRINT("Interrupt identity: %08x\n",
++ I915_READ(IIR));
++ DRM_PROC_PRINT("Interrupt mask: %08x\n",
++ I915_READ(IMR));
++ DRM_PROC_PRINT("Pipe A stat: %08x\n",
++ I915_READ(PIPEASTAT));
++ DRM_PROC_PRINT("Pipe B stat: %08x\n",
++ I915_READ(PIPEBSTAT));
++ DRM_PROC_PRINT("Interrupts received: %d\n",
++ atomic_read(&dev_priv->irq_received));
++ DRM_PROC_PRINT("Current sequence: %d\n",
++ i915_get_gem_seqno(dev));
++ DRM_PROC_PRINT("Waiter sequence: %d\n",
++ dev_priv->mm.waiting_gem_seqno);
++ DRM_PROC_PRINT("IRQ sequence: %d\n",
++ dev_priv->mm.irq_gem_seqno);
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static struct drm_proc_list {
++ /** file name */
++ const char *name;
++ /** proc callback*/
++ int (*f) (char *, char **, off_t, int, int *, void *);
++} i915_gem_proc_list[] = {
++ {"i915_gem_active", i915_gem_active_info},
++ {"i915_gem_flushing", i915_gem_flushing_info},
++ {"i915_gem_inactive", i915_gem_inactive_info},
++ {"i915_gem_request", i915_gem_request_info},
++ {"i915_gem_seqno", i915_gem_seqno_info},
++ {"i915_gem_interrupt", i915_interrupt_info},
++};
++
++#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
++
++int i915_gem_proc_init(struct drm_minor *minor)
++{
++ struct proc_dir_entry *ent;
++ int i, j;
++
++ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
++ ent = create_proc_entry(i915_gem_proc_list[i].name,
++ S_IFREG | S_IRUGO, minor->dev_root);
++ if (!ent) {
++ DRM_ERROR("Cannot create /proc/dri/.../%s\n",
++ i915_gem_proc_list[i].name);
++ for (j = 0; j < i; j++)
++ remove_proc_entry(i915_gem_proc_list[j].name,
++ minor->dev_root);
++ return -1;
++ }
++ ent->read_proc = i915_gem_proc_list[i].f;
++ ent->data = minor;
++ }
++ return 0;
++}
++
++void i915_gem_proc_cleanup(struct drm_minor *minor)
++{
++ int i;
++
++ if (!minor->dev_root)
++ return;
++
++ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
++ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
++}
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+new file mode 100644
+index 0000000..0c1b3a0
+--- /dev/null
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -0,0 +1,256 @@
++/*
++ * Copyright © 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/** @file i915_gem_tiling.c
++ *
++ * Support for managing tiling state of buffer objects.
++ *
++ * The idea behind tiling is to increase cache hit rates by rearranging
++ * pixel data so that a group of pixel accesses are in the same cacheline.
++ * Performance improvements from doing this on the back/depth buffer are on
++ * the order of 30%.
++ *
++ * Intel architectures make this somewhat more complicated, though, by
++ * adjustments made to addressing of data when the memory is in interleaved
++ * mode (matched pairs of DIMMS) to improve memory bandwidth.
++ * For interleaved memory, the CPU sends every sequential 64 bytes
++ * to an alternate memory channel so it can get the bandwidth from both.
++ *
++ * The GPU also rearranges its accesses for increased bandwidth to interleaved
++ * memory, and it matches what the CPU does for non-tiled. However, when tiled
++ * it does it a little differently, since one walks addresses not just in the
++ * X direction but also Y. So, along with alternating channels when bit
++ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
++ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
++ * are common to both the 915 and 965-class hardware.
++ *
++ * The CPU also sometimes XORs in higher bits as well, to improve
++ * bandwidth doing strided access like we do so frequently in graphics. This
++ * is called "Channel XOR Randomization" in the MCH documentation. The result
++ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
++ * decode.
++ *
++ * All of this bit 6 XORing has an effect on our memory management,
++ * as we need to make sure that the 3d driver can correctly address object
++ * contents.
++ *
++ * If we don't have interleaved memory, all tiling is safe and no swizzling is
++ * required.
++ *
++ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
++ * 17 is not just a page offset, so as we page an object out and back in,
++ * individual pages in it will have different bit 17 addresses, resulting in
++ * each 64 bytes being swapped with its neighbor!
++ *
++ * Otherwise, if interleaved, we have to tell the 3d driver what address
++ * swizzling it needs to do, since it's writing with the CPU to the pages
++ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
++ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
++ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
++ * to match what the GPU expects.
++ */
++
++/**
++ * Detects bit 6 swizzling of address lookup between IGD access and CPU
++ * access through main memory.
++ */
++void
++i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++
++ if (!IS_I9XX(dev)) {
++ /* As far as we know, the 865 doesn't have these bit 6
++ * swizzling issues.
++ */
++ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++ } else if (!IS_I965G(dev) || IS_I965GM(dev)) {
++ uint32_t dcc;
++
++ /* On 915-945 and GM965, channel interleave by the CPU is
++ * determined by DCC. The CPU will alternate based on bit 6
++ * in interleaved mode, and the GPU will then also alternate
++ * on bit 6, 9, and 10 for X, but the CPU may also optionally
++ * alternate based on bit 17 (XOR not disabled and XOR
++ * bit == 17).
++ */
++ dcc = I915_READ(DCC);
++ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
++ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
++ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
++ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++ break;
++ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
++ if (IS_I915G(dev) || IS_I915GM(dev) ||
++ dcc & DCC_CHANNEL_XOR_DISABLE) {
++ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
++ swizzle_y = I915_BIT_6_SWIZZLE_9;
++ } else if (IS_I965GM(dev)) {
++ /* GM965 only does bit 11-based channel
++ * randomization
++ */
++ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
++ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
++ } else {
++ /* Bit 17 or perhaps other swizzling */
++ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++ }
++ break;
++ }
++ if (dcc == 0xffffffff) {
++ DRM_ERROR("Couldn't read from MCHBAR. "
++ "Disabling tiling.\n");
++ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++ }
++ } else {
++ /* The 965, G33, and newer, have a very flexible memory
++ * configuration. It will enable dual-channel mode
++ * (interleaving) on as much memory as it can, and the GPU
++ * will additionally sometimes enable different bit 6
++ * swizzling for tiled objects from the CPU.
++ *
++ * Here's what I found on the G965:
++ * slot fill memory size swizzling
++ * 0A 0B 1A 1B 1-ch 2-ch
++ * 512 0 0 0 512 0 O
++ * 512 0 512 0 16 1008 X
++ * 512 0 0 512 16 1008 X
++ * 0 512 0 512 16 1008 X
++ * 1024 1024 1024 0 2048 1024 O
++ *
++ * We could probably detect this based on either the DRB
++ * matching, which was the case for the swizzling required in
++ * the table above, or from the 1-ch value being less than
++ * the minimum size of a rank.
++ */
++ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
++ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++ } else {
++ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
++ swizzle_y = I915_BIT_6_SWIZZLE_9;
++ }
++ }
++
++ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
++ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
++}
++
++/**
++ * Sets the tiling mode of an object, returning the required swizzling of
++ * bit 6 of addresses in the object.
++ */
++int
++i915_gem_set_tiling(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_set_tiling *args = data;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EINVAL;
++ obj_priv = obj->driver_private;
++
++ mutex_lock(&dev->struct_mutex);
++
++ if (args->tiling_mode == I915_TILING_NONE) {
++ obj_priv->tiling_mode = I915_TILING_NONE;
++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++ } else {
++ if (args->tiling_mode == I915_TILING_X)
++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
++ else
++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
++ /* If we can't handle the swizzling, make it untiled. */
++ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
++ args->tiling_mode = I915_TILING_NONE;
++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++ }
++ }
++ obj_priv->tiling_mode = args->tiling_mode;
++
++ mutex_unlock(&dev->struct_mutex);
++
++ drm_gem_object_unreference(obj);
++
++ return 0;
++}
++
++/**
++ * Returns the current tiling mode and required bit 6 swizzling for the object.
++ */
++int
++i915_gem_get_tiling(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_get_tiling *args = data;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EINVAL;
++ obj_priv = obj->driver_private;
++
++ mutex_lock(&dev->struct_mutex);
++
++ args->tiling_mode = obj_priv->tiling_mode;
++ switch (obj_priv->tiling_mode) {
++ case I915_TILING_X:
++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
++ break;
++ case I915_TILING_Y:
++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
++ break;
++ case I915_TILING_NONE:
++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++ break;
++ default:
++ DRM_ERROR("unknown tiling mode\n");
++ }
++
++ mutex_unlock(&dev->struct_mutex);
++
++ drm_gem_object_unreference(obj);
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index f875959..f295bdf 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -407,15 +407,20 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ I915_WRITE(PIPEBSTAT, pipeb_stats);
+ }
+
+- if (iir & I915_ASLE_INTERRUPT)
+- opregion_asle_intr(dev);
++ I915_WRITE(IIR, iir);
++ if (dev->pdev->msi_enabled)
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ (void) I915_READ(IIR); /* Flush posted writes */
+
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+- if (dev->pdev->msi_enabled)
+- I915_WRITE(IMR, dev_priv->irq_mask_reg);
+- I915_WRITE(IIR, iir);
+- (void) I915_READ(IIR);
++ if (iir & I915_USER_INTERRUPT) {
++ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
++ DRM_WAKEUP(&dev_priv->irq_queue);
++ }
++
++ if (iir & I915_ASLE_INTERRUPT)
++ opregion_asle_intr(dev);
+
+ if (vblank && dev_priv->swaps_pending > 0)
+ drm_locked_tasklet(dev, i915_vblank_tasklet);
+@@ -449,7 +454,7 @@ static int i915_emit_irq(struct drm_device * dev)
+ return dev_priv->counter;
+ }
+
+-static void i915_user_irq_get(struct drm_device *dev)
++void i915_user_irq_get(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 43ad2cb..5c2d9f2 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -25,19 +25,6 @@
+ #ifndef _I915_REG_H_
+ #define _I915_REG_H_
+
+-/* MCH MMIO space */
+-/** 915-945 and GM965 MCH register controlling DRAM channel access */
+-#define DCC 0x200
+-#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+-#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+-#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+-
+-/** 965 MCH register controlling DRAM channel configuration */
+-#define CHDECMISC 0x111
+-#define CHDECMISC_FLEXMEMORY (1 << 1)
+-
+ /*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+@@ -516,6 +503,30 @@
+ #define PALETTE_A 0x0a000
+ #define PALETTE_B 0x0a800
+
++/* MCH MMIO space */
++
++/*
++ * MCHBAR mirror.
++ *
++ * This mirrors the MCHBAR MMIO space whose location is determined by
++ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
++ * every way. It is not accessible from the CP register read instructions.
++ *
++ */
++#define MCHBAR_MIRROR_BASE 0x10000
++
++/** 915-945 and GM965 MCH register controlling DRAM channel access */
++#define DCC 0x10200
++#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
++#define DCC_ADDRESSING_MODE_MASK (3 << 0)
++#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
++
++/** 965 MCH register controlling DRAM channel configuration */
++#define C0DRB3 0x10206
++#define C1DRB3 0x10606
++
+ /*
+ * Overlay regs
+ */
+diff --git a/include/drm/drm.h b/include/drm/drm.h
+index 15e5503..f46ba4b 100644
+--- a/include/drm/drm.h
++++ b/include/drm/drm.h
+@@ -570,6 +570,34 @@ struct drm_set_version {
+ int drm_dd_minor;
+ };
+
++/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
++struct drm_gem_close {
++ /** Handle of the object to be closed. */
++ uint32_t handle;
++ uint32_t pad;
++};
++
++/** DRM_IOCTL_GEM_FLINK ioctl argument type */
++struct drm_gem_flink {
++ /** Handle for the object being named */
++ uint32_t handle;
++
++ /** Returned global name */
++ uint32_t name;
++};
++
++/** DRM_IOCTL_GEM_OPEN ioctl argument type */
++struct drm_gem_open {
++ /** Name of object being opened */
++ uint32_t name;
++
++ /** Returned handle for the object */
++ uint32_t handle;
++
++ /** Returned size of the object */
++ uint64_t size;
++};
++
+ #define DRM_IOCTL_BASE 'd'
+ #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+ #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
+@@ -585,6 +613,9 @@ struct drm_set_version {
+ #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+ #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+ #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
++#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
++#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
++#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
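++
++/*
++ * Usage sketch for sharing a GEM object between two processes; fd_a, fd_b
++ * and local_handle are placeholders for open DRM file descriptors and a
++ * handle the first process already owns:
++ *
++ * struct drm_gem_flink flink = { .handle = local_handle };
++ * ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
++ * // flink.name is now a global name another process can use
++ *
++ * struct drm_gem_open open_arg = { .name = flink.name };
++ * ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg);
++ * // open_arg.handle and open_arg.size describe the shared object
++ */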
+
+ #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+ #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index e79ce07..1469a1b 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -104,6 +104,7 @@ struct drm_device;
+ #define DRIVER_DMA_QUEUE 0x200
+ #define DRIVER_FB_DMA 0x400
+ #define DRIVER_IRQ_VBL2 0x800
++#define DRIVER_GEM 0x1000
+
+ /***********************************************************************/
+ /** \name Begin the DRM... */
+@@ -387,6 +388,10 @@ struct drm_file {
+ struct drm_minor *minor;
+ int remove_auth_on_close;
+ unsigned long lock_count;
++ /** Mapping of mm object handles to object pointers. */
++ struct idr object_idr;
++ /** Lock for synchronization of access to object_idr. */
++ spinlock_t table_lock;
+ struct file *filp;
+ void *driver_priv;
+ };
+@@ -558,6 +563,56 @@ struct drm_ati_pcigart_info {
+ };
+
+ /**
++ * This structure defines the GEM memory object, which will be used by the
++ * DRM for its buffer objects.
++ */
++struct drm_gem_object {
++ /** Reference count of this object */
++ struct kref refcount;
++
++ /** Handle count of this object. Each handle also holds a reference */
++ struct kref handlecount;
++
++ /** Related drm device */
++ struct drm_device *dev;
++
++ /** File representing the shmem storage */
++ struct file *filp;
++
++ /**
++ * Size of the object, in bytes. Immutable over the object's
++ * lifetime.
++ */
++ size_t size;
++
++ /**
++ * Global name for this object, starts at 1. 0 means unnamed.
++ * Access is covered by the object_name_lock in the related drm_device
++ */
++ int name;
++
++ /**
++ * Memory domains. These monitor which caches contain read/write data
++ * related to the object. When transitioning from one set of domains
++ * to another, the driver is called to ensure that caches are suitably
++ * flushed and invalidated
++ */
++ uint32_t read_domains;
++ uint32_t write_domain;
++
++ /**
++ * While validating an exec operation, the
++ * new read/write domain values are computed here.
++ * They will be transferred to the above values
++ * at the point that any cache flushing occurs
++ */
++ uint32_t pending_read_domains;
++ uint32_t pending_write_domain;
++
++ void *driver_private;
++};
++
++/**
+ * DRM driver structure. This structure represent the common code for
+ * a family of cards. There will one drm_device for each card present
+ * in this family
+@@ -657,6 +712,18 @@ struct drm_driver {
+ void (*set_version) (struct drm_device *dev,
+ struct drm_set_version *sv);
+
++ int (*proc_init)(struct drm_minor *minor);
++ void (*proc_cleanup)(struct drm_minor *minor);
++
++ /**
++ * Driver-specific constructor for drm_gem_objects, to set up
++ * obj->driver_private.
++ *
++ * Returns 0 on success.
++ */
++ int (*gem_init_object) (struct drm_gem_object *obj);
++ void (*gem_free_object) (struct drm_gem_object *obj);
++
+ int major;
+ int minor;
+ int patchlevel;
+@@ -830,6 +897,22 @@ struct drm_device {
+ spinlock_t drw_lock;
+ struct idr drw_idr;
+ /*@} */
++
++ /** \name GEM information */
++ /*@{ */
++ spinlock_t object_name_lock;
++ struct idr object_name_idr;
++ atomic_t object_count;
++ atomic_t object_memory;
++ atomic_t pin_count;
++ atomic_t pin_memory;
++ atomic_t gtt_count;
++ atomic_t gtt_memory;
++ uint32_t gtt_total;
++ uint32_t invalidate_domains; /* domains pending invalidation */
++ uint32_t flush_domains; /* domains pending flush */
++ /*@} */
++
+ };
+
+ static __inline__ int drm_core_check_feature(struct drm_device *dev,
+@@ -926,6 +1009,10 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
+ extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
+ extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
+ extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
++extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
++ struct page **pages,
++ unsigned long num_pages,
++ uint32_t gtt_offset);
+ extern int drm_unbind_agp(DRM_AGP_MEM * handle);
+
+ /* Misc. IOCTL support (drm_ioctl.h) */
+@@ -988,6 +1075,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
+ extern int drm_authmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
++/* Cache management (drm_cache.c) */
++void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
++
+ /* Locking IOCTL support (drm_lock.h) */
+ extern int drm_lock(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+@@ -1094,6 +1184,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
+ extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
+ extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
+ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
++extern void drm_agp_chipset_flush(struct drm_device *dev);
+
+ /* Stub support (drm_stub.h) */
+ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+@@ -1156,6 +1247,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+ extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+ extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
+
++/* Graphics Execution Manager library functions (drm_gem.c) */
++int drm_gem_init(struct drm_device *dev);
++void drm_gem_object_free(struct kref *kref);
++struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
++ size_t size);
++void drm_gem_object_handle_free(struct kref *kref);
++
++static inline void
++drm_gem_object_reference(struct drm_gem_object *obj)
++{
++ kref_get(&obj->refcount);
++}
++
++static inline void
++drm_gem_object_unreference(struct drm_gem_object *obj)
++{
++ if (obj == NULL)
++ return;
++
++ kref_put(&obj->refcount, drm_gem_object_free);
++}
++
++int drm_gem_handle_create(struct drm_file *file_priv,
++ struct drm_gem_object *obj,
++ int *handlep);
++
++static inline void
++drm_gem_object_handle_reference(struct drm_gem_object *obj)
++{
++ drm_gem_object_reference(obj);
++ kref_get(&obj->handlecount);
++}
++
++static inline void
++drm_gem_object_handle_unreference(struct drm_gem_object *obj)
++{
++ if (obj == NULL)
++ return;
++
++ /*
++ * Must bump handle count first as this may be the last
++ * ref, in which case the object would disappear before we
++ * checked for a name
++ */
++ kref_put(&obj->handlecount, drm_gem_object_handle_free);
++ drm_gem_object_unreference(obj);
++}
++
++struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
++ struct drm_file *filp,
++ int handle);
++int drm_gem_close_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++int drm_gem_open_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
++void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
++
+ extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
+ extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
+ extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
+diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
+index 05c66cf..59d08fc 100644
+--- a/include/drm/i915_drm.h
++++ b/include/drm/i915_drm.h
+@@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
+ #define DRM_I915_GET_VBLANK_PIPE 0x0e
+ #define DRM_I915_VBLANK_SWAP 0x0f
+ #define DRM_I915_HWS_ADDR 0x11
++#define DRM_I915_GEM_INIT 0x13
++#define DRM_I915_GEM_EXECBUFFER 0x14
++#define DRM_I915_GEM_PIN 0x15
++#define DRM_I915_GEM_UNPIN 0x16
++#define DRM_I915_GEM_BUSY 0x17
++#define DRM_I915_GEM_THROTTLE 0x18
++#define DRM_I915_GEM_ENTERVT 0x19
++#define DRM_I915_GEM_LEAVEVT 0x1a
++#define DRM_I915_GEM_CREATE 0x1b
++#define DRM_I915_GEM_PREAD 0x1c
++#define DRM_I915_GEM_PWRITE 0x1d
++#define DRM_I915_GEM_MMAP 0x1e
++#define DRM_I915_GEM_SET_DOMAIN 0x1f
++#define DRM_I915_GEM_SW_FINISH 0x20
++#define DRM_I915_GEM_SET_TILING 0x21
++#define DRM_I915_GEM_GET_TILING 0x22
+
+ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+ #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+@@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
+ #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+ #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+ #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
++#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
++#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
++#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
++#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
++#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
++#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
++#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
++#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
++#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
++#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
++#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
++#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
++#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
++#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+
+ /* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+@@ -200,6 +230,7 @@ typedef struct drm_i915_irq_wait {
+ #define I915_PARAM_IRQ_ACTIVE 1
+ #define I915_PARAM_ALLOW_BATCHBUFFER 2
+ #define I915_PARAM_LAST_DISPATCH 3
++#define I915_PARAM_HAS_GEM 5
+
+ typedef struct drm_i915_getparam {
+ int param;
+@@ -267,4 +298,305 @@ typedef struct drm_i915_hws_addr {
+ uint64_t addr;
+ } drm_i915_hws_addr_t;
+
++struct drm_i915_gem_init {
++ /**
++ * Beginning offset in the GTT to be managed by the DRM memory
++ * manager.
++ */
++ uint64_t gtt_start;
++ /**
++ * Ending offset in the GTT to be managed by the DRM memory
++ * manager.
++ */
++ uint64_t gtt_end;
++};
++
++struct drm_i915_gem_create {
++ /**
++ * Requested size for the object.
++ *
++ * The (page-aligned) allocated size for the object will be returned.
++ */
++ uint64_t size;
++ /**
++ * Returned handle for the object.
++ *
++ * Object handles are nonzero.
++ */
++ uint32_t handle;
++ uint32_t pad;
++};
++
++struct drm_i915_gem_pread {
++ /** Handle for the object being read. */
++ uint32_t handle;
++ uint32_t pad;
++ /** Offset into the object to read from */
++ uint64_t offset;
++ /** Length of data to read */
++ uint64_t size;
++ /**
++ * Pointer to write the data into.
++ *
++ * This is a fixed-size type for 32/64 compatibility.
++ */
++ uint64_t data_ptr;
++};
++
++struct drm_i915_gem_pwrite {
++ /** Handle for the object being written to. */
++ uint32_t handle;
++ uint32_t pad;
++ /** Offset into the object to write to */
++ uint64_t offset;
++ /** Length of data to write */
++ uint64_t size;
++ /**
++ * Pointer to read the data from.
++ *
++ * This is a fixed-size type for 32/64 compatibility.
++ */
++ uint64_t data_ptr;
++};
++
++struct drm_i915_gem_mmap {
++ /** Handle for the object being mapped. */
++ uint32_t handle;
++ uint32_t pad;
++ /** Offset in the object to map. */
++ uint64_t offset;
++ /**
++ * Length of data to map.
++ *
++ * The value will be page-aligned.
++ */
++ uint64_t size;
++ /**
++ * Returned pointer the data was mapped at.
++ *
++ * This is a fixed-size type for 32/64 compatibility.
++ */
++ uint64_t addr_ptr;
++};
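++
++/*
++ * A minimal userspace sketch of the object lifecycle using the create,
++ * pwrite and mmap ioctls above; fd, data and data_len are placeholders
++ * (an open DRM device node and a byte buffer to upload), error handling
++ * omitted:
++ *
++ * struct drm_i915_gem_create create = { .size = 4096 };
++ * ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
++ *
++ * struct drm_i915_gem_pwrite pwrite = {
++ * .handle = create.handle,
++ * .offset = 0,
++ * .size = data_len,
++ * .data_ptr = (uint64_t)(uintptr_t)data,
++ * };
++ * ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
++ *
++ * struct drm_i915_gem_mmap map = { .handle = create.handle,
++ * .size = create.size };
++ * ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &map);
++ * // the object contents are now visible at (void *)(uintptr_t)map.addr_ptr
++ */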
++
++struct drm_i915_gem_set_domain {
++ /** Handle for the object */
++ uint32_t handle;
++
++ /** New read domains */
++ uint32_t read_domains;
++
++ /** New write domain */
++ uint32_t write_domain;
++};
++
++struct drm_i915_gem_sw_finish {
++ /** Handle for the object */
++ uint32_t handle;
++};
++
++struct drm_i915_gem_relocation_entry {
++ /**
++ * Handle of the buffer being pointed to by this relocation entry.
++ *
++ * It's appealing to make this be an index into the mm_validate_entry
++ * list to refer to the buffer, but this allows the driver to create
++ * a relocation list for state buffers and not re-write it per
++ * exec using the buffer.
++ */
++ uint32_t target_handle;
++
++ /**
++ * Value to be added to the offset of the target buffer to make up
++ * the relocation entry.
++ */
++ uint32_t delta;
++
++ /** Offset in the buffer the relocation entry will be written into */
++ uint64_t offset;
++
++ /**
++ * Offset value of the target buffer that the relocation entry was last
++ * written as.
++ *
++ * If the buffer has the same offset as last time, we can skip syncing
++ * and writing the relocation. This value is written back out by
++ * the execbuffer ioctl when the relocation is written.
++ */
++ uint64_t presumed_offset;
++
++ /**
++ * Target memory domains read by this operation.
++ */
++ uint32_t read_domains;
++
++ /**
++ * Target memory domains written by this operation.
++ *
++ * Note that only one domain may be written by the whole
++ * execbuffer operation, so that where there are conflicts,
++ * the application will get -EINVAL back.
++ */
++ uint32_t write_domain;
++};
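++
++/*
++ * Example of a single relocation entry as userspace would fill it in;
++ * target_handle and reloc_dword are placeholders for a GEM handle and
++ * the dword index in the batchbuffer holding the pointer to be patched:
++ *
++ * struct drm_i915_gem_relocation_entry reloc = {
++ * .target_handle = target_handle,
++ * .delta = 0, // point at the target's start
++ * .offset = reloc_dword * 4, // location inside the batch
++ * .presumed_offset = 0, // no guess where the target sits
++ * .read_domains = I915_GEM_DOMAIN_RENDER,
++ * .write_domain = 0,
++ * };
++ */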
++
++/** @{
++ * Intel memory domains
++ *
++ * Most of these just align with the various caches in
++ * the system and are used to flush and invalidate as
++ * objects end up cached in different domains.
++ */
++/** CPU cache */
++#define I915_GEM_DOMAIN_CPU 0x00000001
++/** Render cache, used by 2D and 3D drawing */
++#define I915_GEM_DOMAIN_RENDER 0x00000002
++/** Sampler cache, used by texture engine */
++#define I915_GEM_DOMAIN_SAMPLER 0x00000004
++/** Command queue, used to load batch buffers */
++#define I915_GEM_DOMAIN_COMMAND 0x00000008
++/** Instruction cache, used by shader programs */
++#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
++/** Vertex address cache */
++#define I915_GEM_DOMAIN_VERTEX 0x00000020
++/** GTT domain - aperture and scanout */
++#define I915_GEM_DOMAIN_GTT 0x00000040
++/** @} */
++
++struct drm_i915_gem_exec_object {
++ /**
++ * User's handle for a buffer to be bound into the GTT for this
++ * operation.
++ */
++ uint32_t handle;
++
++ /** Number of relocations to be performed on this buffer */
++ uint32_t relocation_count;
++ /**
++ * Pointer to array of struct drm_i915_gem_relocation_entry containing
++ * the relocations to be performed in this buffer.
++ */
++ uint64_t relocs_ptr;
++
++ /** Required alignment in graphics aperture */
++ uint64_t alignment;
++
++ /**
++ * Returned value of the updated offset of the object, for future
++ * presumed_offset writes.
++ */
++ uint64_t offset;
++};
++
++struct drm_i915_gem_execbuffer {
++ /**
++ * List of buffers to be validated with their relocations to be
++ * performed on them.
++ *
++ * This is a pointer to an array of struct drm_i915_gem_exec_object.
++ *
++ * These buffers must be listed in an order such that all relocations
++ * a buffer is performing refer to buffers that have already appeared
++ * in the validate list.
++ */
++ uint64_t buffers_ptr;
++ uint32_t buffer_count;
++
++ /** Offset in the batchbuffer to start execution from. */
++ uint32_t batch_start_offset;
++ /** Bytes used in batchbuffer from batch_start_offset */
++ uint32_t batch_len;
++ uint32_t DR1;
++ uint32_t DR4;
++ uint32_t num_cliprects;
++ /** This is a struct drm_clip_rect *cliprects */
++ uint64_t cliprects_ptr;
++};
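++
++/*
++ * Sketch of an execbuffer submission built from the structures above;
++ * batch_handle, target_handle, reloc and batch_bytes are placeholders,
++ * and the filled struct is handed to the kernel through the
++ * DRM_I915_GEM_EXECBUFFER command:
++ *
++ * struct drm_i915_gem_exec_object exec[2] = {
++ * { .handle = target_handle },
++ * { .handle = batch_handle,
++ * .relocation_count = 1,
++ * .relocs_ptr = (uint64_t)(uintptr_t)&reloc },
++ * };
++ * struct drm_i915_gem_execbuffer execbuf = {
++ * .buffers_ptr = (uint64_t)(uintptr_t)exec,
++ * .buffer_count = 2,
++ * .batch_start_offset = 0,
++ * .batch_len = batch_bytes,
++ * };
++ *
++ * The target is listed first so that the relocation in the batch refers to
++ * a buffer that has already appeared in the list, as required above.
++ */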
++
++struct drm_i915_gem_pin {
++ /** Handle of the buffer to be pinned. */
++ uint32_t handle;
++ uint32_t pad;
++
++ /** alignment required within the aperture */
++ uint64_t alignment;
++
++ /** Returned GTT offset of the buffer. */
++ uint64_t offset;
++};
++
++struct drm_i915_gem_unpin {
++ /** Handle of the buffer to be unpinned. */
++ uint32_t handle;
++ uint32_t pad;
++};
++
++struct drm_i915_gem_busy {
++ /** Handle of the buffer to check for busy */
++ uint32_t handle;
++
++ /** Return busy status (1 if busy, 0 if idle) */
++ uint32_t busy;
++};
++
++#define I915_TILING_NONE 0
++#define I915_TILING_X 1
++#define I915_TILING_Y 2
++
++#define I915_BIT_6_SWIZZLE_NONE 0
++#define I915_BIT_6_SWIZZLE_9 1
++#define I915_BIT_6_SWIZZLE_9_10 2
++#define I915_BIT_6_SWIZZLE_9_11 3
++#define I915_BIT_6_SWIZZLE_9_10_11 4
++/* Not seen by userland */
++#define I915_BIT_6_SWIZZLE_UNKNOWN 5
++
++struct drm_i915_gem_set_tiling {
++ /** Handle of the buffer to have its tiling state updated */
++ uint32_t handle;
++
++ /**
++ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
++ * I915_TILING_Y).
++ *
++ * This value is to be set on request, and will be updated by the
++ * kernel on successful return with the actual chosen tiling layout.
++ *
++ * The tiling mode may be demoted to I915_TILING_NONE when the system
++ * has bit 6 swizzling that can't be managed correctly by GEM.
++ *
++ * Buffer contents become undefined when changing tiling_mode.
++ */
++ uint32_t tiling_mode;
++
++ /**
++ * Stride in bytes for the object when in I915_TILING_X or
++ * I915_TILING_Y.
++ */
++ uint32_t stride;
++
++ /**
++ * Returned address bit 6 swizzling required for CPU access through
++ * mmap mapping.
++ */
++ uint32_t swizzle_mode;
++};
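++
++/*
++ * Usage sketch: request X tiling for a buffer and cope with the kernel
++ * demoting it to linear; bo_handle and stride_bytes are placeholders:
++ *
++ * struct drm_i915_gem_set_tiling st = {
++ * .handle = bo_handle,
++ * .tiling_mode = I915_TILING_X,
++ * .stride = stride_bytes,
++ * };
++ * ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
++ * // st.tiling_mode may come back as I915_TILING_NONE, and
++ * // st.swizzle_mode tells the caller how to address the pages by hand
++ */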
++
++struct drm_i915_gem_get_tiling {
++ /** Handle of the buffer to get tiling state for. */
++ uint32_t handle;
++
++ /**
++ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
++ * I915_TILING_Y).
++ */
++ uint32_t tiling_mode;
++
++ /**
++ * Returned address bit 6 swizzling required for CPU access through
++ * mmap mapping.
++ */
++ uint32_t swizzle_mode;
++};
++
+ #endif /* _I915_DRM_H_ */
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0015-i915-Add-chip-set-ID-param.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0015-i915-Add-chip-set-ID-param.patch
new file mode 100644
index 000000000..c3bf8ebd1
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0015-i915-Add-chip-set-ID-param.patch
@@ -0,0 +1,35 @@
+commit 26ead293ddf664f33dc0ba12b726887c40ce3957
+Author: Kristian Høgsberg <krh@redhat.com>
+Date: Wed Aug 20 11:08:52 2008 -0400
+
+ i915: Add chip set ID param.
+
+ Signed-off-by: Kristian Høgsberg <krh@redhat.com>
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 3b5aa74..205d21e 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -689,6 +689,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ case I915_PARAM_LAST_DISPATCH:
+ value = READ_BREADCRUMB(dev_priv);
+ break;
++ case I915_PARAM_CHIPSET_ID:
++ value = dev->pci_device;
++ break;
+ case I915_PARAM_HAS_GEM:
+ value = 1;
+ break;
+diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
+index 59d08fc..eb4b350 100644
+--- a/include/drm/i915_drm.h
++++ b/include/drm/i915_drm.h
+@@ -230,6 +230,7 @@ typedef struct drm_i915_irq_wait {
+ #define I915_PARAM_IRQ_ACTIVE 1
+ #define I915_PARAM_ALLOW_BATCHBUFFER 2
+ #define I915_PARAM_LAST_DISPATCH 3
++#define I915_PARAM_CHIPSET_ID 4
+ #define I915_PARAM_HAS_GEM 5
+
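++/*
++ * Usage sketch for the new parameter, assuming the usual getparam layout
++ * (an int param plus an int *value pointer); fd is an open DRM device node:
++ *
++ * int chip_id = 0;
++ * drm_i915_getparam_t gp = {
++ * .param = I915_PARAM_CHIPSET_ID,
++ * .value = &chip_id,
++ * };
++ * ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
++ * // chip_id now holds the PCI device ID of the GPU
++ */
++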
+ typedef struct drm_i915_getparam {
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch
new file mode 100644
index 000000000..910f37e9c
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch
@@ -0,0 +1,205 @@
+commit 8a524209fce67d3b6d2e831b5dad4eced796ce98
+Author: Eric Anholt <eric@anholt.net>
+Date: Mon Sep 1 16:45:29 2008 -0700
+
+ i915: Use struct_mutex to protect ring in GEM mode.
+
+ In the conversion for GEM, we had stopped using the hardware lock to protect
+ ring usage, since it was all internal to the DRM now. However, some paths
+ weren't converted to using struct_mutex to prevent multiple threads from
+ concurrently working on the ring, in particular between the vblank swap handler
+ and ioctls.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 205d21e..25f59c1 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -588,9 +588,15 @@ static int i915_quiescent(struct drm_device * dev)
+ static int i915_flush_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+- LOCK_TEST_WITH_RETURN(dev, file_priv);
++ int ret;
++
++ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+- return i915_quiescent(dev);
++ mutex_lock(&dev->struct_mutex);
++ ret = i915_quiescent(dev);
++ mutex_unlock(&dev->struct_mutex);
++
++ return ret;
+ }
+
+ static int i915_batchbuffer(struct drm_device *dev, void *data,
+@@ -611,14 +617,16 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
+ DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
+ batch->start, batch->used, batch->num_cliprects);
+
+- LOCK_TEST_WITH_RETURN(dev, file_priv);
++ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+ batch->num_cliprects *
+ sizeof(struct drm_clip_rect)))
+ return -EFAULT;
+
++ mutex_lock(&dev->struct_mutex);
+ ret = i915_dispatch_batchbuffer(dev, batch);
++ mutex_unlock(&dev->struct_mutex);
+
+ sarea_priv->last_dispatch = (int)hw_status[5];
+ return ret;
+@@ -637,7 +645,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+ cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+
+- LOCK_TEST_WITH_RETURN(dev, file_priv);
++ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (cmdbuf->num_cliprects &&
+ DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+@@ -647,7 +655,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ return -EFAULT;
+ }
+
++ mutex_lock(&dev->struct_mutex);
+ ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
++ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+ return ret;
+@@ -660,11 +670,17 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ static int i915_flip_bufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
++ int ret;
++
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+- LOCK_TEST_WITH_RETURN(dev, file_priv);
++ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+- return i915_dispatch_flip(dev);
++ mutex_lock(&dev->struct_mutex);
++ ret = i915_dispatch_flip(dev);
++ mutex_unlock(&dev->struct_mutex);
++
++ return ret;
+ }
+
+ static int i915_getparam(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 87b071a..8547f0a 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -285,6 +285,9 @@ typedef struct drm_i915_private {
+ */
+ struct delayed_work retire_work;
+
++ /** Work task for vblank-related ring access */
++ struct work_struct vblank_work;
++
+ uint32_t next_gem_seqno;
+
+ /**
+@@ -435,6 +438,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
+ void i915_user_irq_get(struct drm_device *dev);
+ void i915_user_irq_put(struct drm_device *dev);
+
++extern void i915_gem_vblank_work_handler(struct work_struct *work);
+ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
+ extern void i915_driver_irq_preinstall(struct drm_device * dev);
+ extern int i915_driver_irq_postinstall(struct drm_device *dev);
+@@ -538,6 +542,17 @@ extern void intel_opregion_free(struct drm_device *dev);
+ extern void opregion_asle_intr(struct drm_device *dev);
+ extern void opregion_enable_asle(struct drm_device *dev);
+
++/**
++ * Lock test for when it's just for synchronization of ring access.
++ *
++ * In that case, we don't need to do it when GEM is initialized as nobody else
++ * has access to the ring.
++ */
++#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
++ if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
++ LOCK_TEST_WITH_RETURN(dev, file_priv); \
++} while (0)
++
+ #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
+ #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+ #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 90ae8a0..bb6e5a3 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2491,6 +2491,8 @@ i915_gem_load(struct drm_device *dev)
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ i915_gem_retire_work_handler);
++ INIT_WORK(&dev_priv->mm.vblank_work,
++ i915_gem_vblank_work_handler);
+ dev_priv->mm.next_gem_seqno = 1;
+
+ i915_gem_detect_bit_6_swizzle(dev);
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index f295bdf..d04c526 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -349,6 +349,21 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+ return count;
+ }
+
++void
++i915_gem_vblank_work_handler(struct work_struct *work)
++{
++ drm_i915_private_t *dev_priv;
++ struct drm_device *dev;
++
++ dev_priv = container_of(work, drm_i915_private_t,
++ mm.vblank_work);
++ dev = dev_priv->dev;
++
++ mutex_lock(&dev->struct_mutex);
++ i915_vblank_tasklet(dev);
++ mutex_unlock(&dev->struct_mutex);
++}
++
+ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ {
+ struct drm_device *dev = (struct drm_device *) arg;
+@@ -422,8 +437,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ if (iir & I915_ASLE_INTERRUPT)
+ opregion_asle_intr(dev);
+
+- if (vblank && dev_priv->swaps_pending > 0)
+- drm_locked_tasklet(dev, i915_vblank_tasklet);
++ if (vblank && dev_priv->swaps_pending > 0) {
++ if (dev_priv->ring.ring_obj == NULL)
++ drm_locked_tasklet(dev, i915_vblank_tasklet);
++ else
++ schedule_work(&dev_priv->mm.vblank_work);
++ }
+
+ return IRQ_HANDLED;
+ }
+@@ -514,14 +533,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
+ drm_i915_irq_emit_t *emit = data;
+ int result;
+
+- LOCK_TEST_WITH_RETURN(dev, file_priv);
++ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+-
++ mutex_lock(&dev->struct_mutex);
+ result = i915_emit_irq(dev);
++ mutex_unlock(&dev->struct_mutex);
+
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0017-i915-Make-use-of-sarea_priv-conditional.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0017-i915-Make-use-of-sarea_priv-conditional.patch
new file mode 100644
index 000000000..542b69dd5
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0017-i915-Make-use-of-sarea_priv-conditional.patch
@@ -0,0 +1,147 @@
+commit 69749cf99189a8a78de201ac24990c91ee111469
+Author: Kristian Høgsberg <krh@redhat.com>
+Date: Wed Aug 20 11:20:13 2008 -0400
+
+ i915: Make use of sarea_priv conditional.
+
+ We fail ioctls that depend on the sarea_priv with EINVAL.
+
+ Signed-off-by: Kristian Høgsberg <krh@redhat.com>
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 25f59c1..dbd3f49 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -55,7 +55,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+ if (ring->space >= n)
+ return 0;
+
+- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
++ if (dev_priv->sarea_priv)
++ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+ if (ring->head != last_head)
+ i = 0;
+@@ -128,7 +129,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
+ if (ring->space < 0)
+ ring->space += ring->Size;
+
+- if (ring->head == ring->tail)
++ if (ring->head == ring->tail && dev_priv->sarea_priv)
+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+ }
+
+@@ -433,10 +434,11 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+- dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
+-
++ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
++ dev_priv->counter = 0;
++ if (dev_priv->sarea_priv)
++ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+@@ -534,6 +536,9 @@ static int i915_dispatch_flip(struct drm_device * dev)
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
++ if (!dev_priv->sarea_priv)
++ return -EINVAL;
++
+ DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
+ __FUNCTION__,
+ dev_priv->current_page,
+@@ -628,7 +633,8 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
+ ret = i915_dispatch_batchbuffer(dev, batch);
+ mutex_unlock(&dev->struct_mutex);
+
+- sarea_priv->last_dispatch = (int)hw_status[5];
++ if (sarea_priv)
++ sarea_priv->last_dispatch = (int)hw_status[5];
+ return ret;
+ }
+
+@@ -663,7 +669,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ return ret;
+ }
+
+- sarea_priv->last_dispatch = (int)hw_status[5];
++ if (sarea_priv)
++ sarea_priv->last_dispatch = (int)hw_status[5];
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index d04c526..ef03a59 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -427,7 +427,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IIR); /* Flush posted writes */
+
+- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++ if (dev_priv->sarea_priv)
++ dev_priv->sarea_priv->last_dispatch =
++ READ_BREADCRUMB(dev_priv);
+
+ if (iir & I915_USER_INTERRUPT) {
+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+@@ -456,10 +458,11 @@ static int i915_emit_irq(struct drm_device * dev)
+
+ DRM_DEBUG("\n");
+
+- dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
+-
++ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
++ dev_priv->counter = 1;
++ if (dev_priv->sarea_priv)
++ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+@@ -503,11 +506,15 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+ READ_BREADCRUMB(dev_priv));
+
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++ if (dev_priv->sarea_priv) {
++ dev_priv->sarea_priv->last_dispatch =
++ READ_BREADCRUMB(dev_priv);
++ }
+ return 0;
+ }
+
+- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
++ if (dev_priv->sarea_priv)
++ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+ i915_user_irq_get(dev);
+ DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+@@ -519,7 +526,9 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ }
+
+- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++ if (dev_priv->sarea_priv)
++ dev_priv->sarea_priv->last_dispatch =
++ READ_BREADCRUMB(dev_priv);
+
+ return ret;
+ }
+@@ -682,7 +691,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct list_head *list;
+ int ret;
+
+- if (!dev_priv) {
++ if (!dev_priv || !dev_priv->sarea_priv) {
+ DRM_ERROR("%s called with no initialization\n", __func__);
+ return -EINVAL;
+ }
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch
new file mode 100644
index 000000000..3593fa582
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch
@@ -0,0 +1,44 @@
+commit 7ad6d5861b04bbb2cdc36d1dcf8989e16f86e659
+Author: Kristian Høgsberg <krh@redhat.com>
+Date: Wed Aug 20 11:04:27 2008 -0400
+
+ i915 gem: install and uninstall irq handler in entervt and leavevt ioctls.
+
+ Signed-off-by: Kristian Høgsberg <krh@redhat.com>
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index bb6e5a3..5fe5034 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2443,6 +2443,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ dev_priv->mm.suspended = 0;
+ mutex_unlock(&dev->struct_mutex);
++
++ drm_irq_install(dev);
++
+ return 0;
+ }
+
+@@ -2458,6 +2461,8 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
+
++ drm_irq_uninstall(dev);
++
+ return 0;
+ }
+
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index 1469a1b..51ee72c 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -1134,6 +1134,7 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
+ extern int drm_control(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
++extern int drm_irq_install(struct drm_device *dev);
+ extern int drm_irq_uninstall(struct drm_device *dev);
+ extern void drm_driver_irq_preinstall(struct drm_device *dev);
+ extern void drm_driver_irq_postinstall(struct drm_device *dev);
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch
new file mode 100644
index 000000000..6de4514e2
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch
@@ -0,0 +1,32 @@
+commit c3de45b0488762a9161e9b9e8bf419f63c100c47
+Author: Eric Anholt <eric@anholt.net>
+Date: Tue Sep 9 11:40:34 2008 -0700
+
+ DRM: Return -EBADF on bad object in flink, and return current name if it exists.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 434155b..ccd1afd 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -251,7 +251,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+- return -EINVAL;
++ return -EBADF;
+
+ again:
+ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
+@@ -259,8 +259,9 @@ again:
+
+ spin_lock(&dev->object_name_lock);
+ if (obj->name) {
++ args->name = obj->name;
+ spin_unlock(&dev->object_name_lock);
+- return -EEXIST;
++ return 0;
+ }
+ ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+ &obj->name);
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch
new file mode 100644
index 000000000..7080907cd
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch
@@ -0,0 +1,23 @@
+commit 880db7a8dbed226d638b3a48aa1a3996f8624911
+Author: Eric Anholt <eric@anholt.net>
+Date: Wed Sep 10 14:22:49 2008 -0700
+
+ drm: Avoid oops in GEM execbuffers with bad arguments.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 5fe5034..29d9d21 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1763,6 +1763,10 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+ #endif
+
++ if (args->buffer_count < 1) {
++ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
++ return -EINVAL;
++ }
+ /* Copy in the exec list from userland */
+ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
+ DRM_MEM_DRIVER);
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch
new file mode 100644
index 000000000..f5481d7d8
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch
@@ -0,0 +1,23 @@
+commit 930469634910fa87c21f0a7423c98b270d35d8c6
+Author: Eric Anholt <eric@anholt.net>
+Date: Mon Sep 15 13:13:34 2008 -0700
+
+ drm: G33-class hardware has a newer 965-style MCH (no DCC register).
+
+ Fixes bad software fallback rendering in Mesa in dual-channel configurations.
+
+ d9a2470012588dc5313a5ac8bb2f03575af00e99
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 0c1b3a0..6b3f1e4 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -96,7 +96,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+- } else if (!IS_I965G(dev) || IS_I965GM(dev)) {
++ } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev)) {
+ uint32_t dcc;
+
+ /* On 915-945 and GM965, channel interleave by the CPU is
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch
new file mode 100644
index 000000000..8e6cbe95a
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch
@@ -0,0 +1,58 @@
+commit d9f2382adde582f8792ad96e9570716bcbea21a0
+Author: Eric Anholt <eric@anholt.net>
+Date: Tue Sep 23 14:50:57 2008 -0700
+
+ drm: Use ioremap_wc in i915_driver instead of ioremap, since we always want WC.
+
+ Fixes failure to map the ringbuffer when PAT tells us we don't get to do
+ uncached on something that's already mapped WC, or something along those lines.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 29d9d21..6ecfd10 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -233,7 +233,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ if (unwritten)
+ #endif /* CONFIG_HIGHMEM */
+ {
+- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++ vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+ #if WATCH_PWRITE
+ DRM_INFO("pwrite slow i %d o %d l %d "
+ "pfn %ld vaddr %p\n",
+@@ -1612,9 +1612,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+- reloc_page = ioremap(dev->agp->base +
+- (reloc_offset & ~(PAGE_SIZE - 1)),
+- PAGE_SIZE);
++ reloc_page = ioremap_wc(dev->agp->base +
++ (reloc_offset &
++ ~(PAGE_SIZE - 1)),
++ PAGE_SIZE);
+ last_reloc_offset = reloc_offset;
+ if (reloc_page == NULL) {
+ drm_gem_object_unreference(target_obj);
+@@ -2318,7 +2319,9 @@ i915_gem_init_hws(struct drm_device *dev)
+ dev_priv->hws_map.flags = 0;
+ dev_priv->hws_map.mtrr = 0;
+
+- drm_core_ioremap(&dev_priv->hws_map, dev);
++ /* Ioremapping here is the wrong thing to do. We want cached access.
++ */
++ drm_core_ioremap_wc(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.handle == NULL) {
+ DRM_ERROR("Failed to map status page.\n");
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+@@ -2369,7 +2372,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+- drm_core_ioremap(&dev_priv->ring.map, dev);
++ drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+ if (dev_priv->ring.map.handle == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0023-drm-clean-up-many-sparse-warnings-in-i915.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0023-drm-clean-up-many-sparse-warnings-in-i915.patch
new file mode 100644
index 000000000..236b16158
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0023-drm-clean-up-many-sparse-warnings-in-i915.patch
@@ -0,0 +1,192 @@
+commit 034994cfffbb2371b720e3f49378031ebc12645e
+Author: Eric Anholt <eric@anholt.net>
+Date: Thu Oct 2 12:24:47 2008 -0700
+
+ drm: Clean up many sparse warnings in i915.
+
+ Signed-off-by: Eric Anholt <eric@anholt.net>
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index dbd3f49..814cc12 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -76,7 +76,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+ * Sets up the hardware status page for devices that need a physical address
+ * in the register.
+ */
+-int i915_init_phys_hws(struct drm_device *dev)
++static int i915_init_phys_hws(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ /* Program Hardware Status Page */
+@@ -101,7 +101,7 @@ int i915_init_phys_hws(struct drm_device *dev)
+ * Frees the hardware status page, whether it's a physical address or a virtual
+ * address set up by the X Server.
+ */
+-void i915_free_hws(struct drm_device *dev)
++static void i915_free_hws(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (dev_priv->status_page_dmah) {
+@@ -145,8 +145,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
+
+ if (dev_priv->ring.virtual_start) {
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+- dev_priv->ring.virtual_start = 0;
+- dev_priv->ring.map.handle = 0;
++ dev_priv->ring.virtual_start = NULL;
++ dev_priv->ring.map.handle = NULL;
+ dev_priv->ring.map.size = 0;
+ }
+
+@@ -827,9 +827,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ base = drm_get_resource_start(dev, mmio_bar);
+ size = drm_get_resource_len(dev, mmio_bar);
+
+- ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+- _DRM_KERNEL | _DRM_DRIVER,
+- &dev_priv->mmio_map);
++ dev_priv->regs = ioremap(base, size);
+
+ i915_gem_load(dev);
+
+@@ -867,8 +865,8 @@ int i915_driver_unload(struct drm_device *dev)
+
+ i915_free_hws(dev);
+
+- if (dev_priv->mmio_map)
+- drm_rmmap(dev, dev_priv->mmio_map);
++ if (dev_priv->regs != NULL)
++ iounmap(dev_priv->regs);
+
+ intel_opregion_free(dev);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 8547f0a..b184d54 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -110,8 +110,8 @@ struct intel_opregion {
+ typedef struct drm_i915_private {
+ struct drm_device *dev;
+
++ void __iomem *regs;
+ drm_local_map_t *sarea;
+- drm_local_map_t *mmio_map;
+
+ drm_i915_sarea_t *sarea_priv;
+ drm_i915_ring_buffer_t ring;
+@@ -553,12 +553,12 @@ extern void opregion_enable_asle(struct drm_device *dev);
+ LOCK_TEST_WITH_RETURN(dev, file_priv); \
+ } while (0)
+
+-#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
+-#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+-#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+-#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+-#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg))
+-#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
++#define I915_READ(reg) readl(dev_priv->regs + (reg))
++#define I915_WRITE(reg,val) writel(val, dev_priv->regs + (reg))
++#define I915_READ16(reg) readw(dev_priv->regs + (reg))
++#define I915_WRITE16(reg,val) writel(val, dev_priv->regs + (reg))
++#define I915_READ8(reg) readb(dev_priv->regs + (reg))
++#define I915_WRITE8(reg,val) writeb(val, dev_priv->regs + (reg))
+
+ #define I915_VERBOSE 0
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 6ecfd10..6a89449 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -176,7 +176,8 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ ssize_t remain;
+ loff_t offset;
+ char __user *user_data;
+- char *vaddr;
++ char __iomem *vaddr;
++ char *vaddr_atomic;
+ int i, o, l;
+ int ret = 0;
+ unsigned long pfn;
+@@ -219,16 +220,20 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ pfn = (dev->agp->base >> PAGE_SHIFT) + i;
+
+ #ifdef CONFIG_HIGHMEM
+- /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
++ /* This is a workaround for the low performance of iounmap
++ * (approximate 10% cpu cost on normal 3D workloads).
++ * kmap_atomic on HIGHMEM kernels happens to let us map card
++ * memory without taking IPIs. When the vmap rework lands
++ * we should be able to dump this hack.
+ */
+- vaddr = kmap_atomic_pfn(pfn, KM_USER0);
++ vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
+ #if WATCH_PWRITE
+ DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
+- i, o, l, pfn, vaddr);
++ i, o, l, pfn, vaddr_atomic);
+ #endif
+- unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
++ unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
+ user_data, l);
+- kunmap_atomic(vaddr, KM_USER0);
++ kunmap_atomic(vaddr_atomic, KM_USER0);
+
+ if (unwritten)
+ #endif /* CONFIG_HIGHMEM */
+@@ -271,7 +276,7 @@ fail:
+ return ret;
+ }
+
+-int
++static int
+ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+@@ -587,7 +592,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+-uint32_t
++static uint32_t
+ i915_retire_commands(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -734,7 +739,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+-int
++static int
+ i915_wait_request(struct drm_device *dev, uint32_t seqno)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -1483,7 +1488,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i, ret;
+ uint32_t last_reloc_offset = -1;
+- void *reloc_page = NULL;
++ void __iomem *reloc_page = NULL;
+
+ /* Choose the GTT offset for our buffer and put it there. */
+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+@@ -1500,8 +1505,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_obj_priv;
+- uint32_t reloc_val, reloc_offset, *reloc_entry;
+- int ret;
++ uint32_t reloc_val, reloc_offset;
++ uint32_t __iomem *reloc_entry;
+
+ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
+ if (ret != 0) {
+@@ -1624,7 +1629,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ }
+ }
+
+- reloc_entry = (uint32_t *)((char *)reloc_page +
++ reloc_entry = (uint32_t __iomem *)(reloc_page +
+ (reloc_offset & (PAGE_SIZE - 1)));
+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0024-fastboot-create-a-asynchronous-initlevel.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0024-fastboot-create-a-asynchronous-initlevel.patch
new file mode 100644
index 000000000..7c19053b0
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0024-fastboot-create-a-asynchronous-initlevel.patch
@@ -0,0 +1,136 @@
+From ac9103dd8e4dc65c110d6cba9a3380c6c617ffa7 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Fri, 18 Jul 2008 15:16:08 -0700
+Subject: [PATCH] fastboot: create an "asynchronous" initlevel
+
+This patch creates an asynchronous initlevel (6a) which is at the same
+level as the normal device initcalls, but with the difference that they
+are run asynchronously from all the other initcalls. The purpose of this
+*selective* level is that we can move long waiting inits that are not
+boot-critical to this level one at a time.
+
+To keep things not totally insane, the asynchronous initcalls are async
+to the other initcalls, but are still ordered to themselves; think of it
+as "bottom-half-not-softirq". This has the benefit that async drivers
+still have stable device ordering between them.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ include/asm-generic/vmlinux.lds.h | 3 +++
+ include/linux/init.h | 6 ++++++
+ init/main.c | 36 +++++++++++++++++++++++++++++++++---
+ 3 files changed, 42 insertions(+), 3 deletions(-)
+
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 729f6b0..39c1afc 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -372,6 +372,9 @@
+ *(.initcall5.init) \
+ *(.initcall5s.init) \
+ *(.initcallrootfs.init) \
++ __async_initcall_start = .; \
++ *(.initcall6a.init) \
++ __async_initcall_end = .; \
+ *(.initcall6.init) \
+ *(.initcall6s.init) \
+ *(.initcall7.init) \
+diff --git a/include/linux/init.h b/include/linux/init.h
+index 21d658c..75db909 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -197,11 +197,13 @@
+ #define fs_initcall_sync(fn) __define_initcall("5s",fn,5s)
+ #define rootfs_initcall(fn) __define_initcall("rootfs",fn,rootfs)
+ #define device_initcall(fn) __define_initcall("6",fn,6)
++#define device_initcall_async(fn) __define_initcall("6a", fn, 6a)
+ #define device_initcall_sync(fn) __define_initcall("6s",fn,6s)
+ #define late_initcall(fn) __define_initcall("7",fn,7)
+ #define late_initcall_sync(fn) __define_initcall("7s",fn,7s)
+
+ #define __initcall(fn) device_initcall(fn)
++#define __initcall_async(fn) device_initcall_async(fn)
+
+ #define __exitcall(fn) \
+ static exitcall_t __exitcall_##fn __exit_call = fn
+@@ -257,6 +259,7 @@
+ * be one per module.
+ */
+ #define module_init(x) __initcall(x);
++#define module_init_async(x) __initcall_async(x);
+
+ /**
+ * module_exit() - driver exit entry point
+@@ -279,10 +282,13 @@
+ #define subsys_initcall(fn) module_init(fn)
+ #define fs_initcall(fn) module_init(fn)
+ #define device_initcall(fn) module_init(fn)
++#define device_initcall_async(fn) module_init(fn)
+ #define late_initcall(fn) module_init(fn)
+
+ #define security_initcall(fn) module_init(fn)
+
++#define module_init_async(fn) module_init(fn)
++
+ /* Each module must use one module_init(). */
+ #define module_init(initfn) \
+ static inline initcall_t __inittest(void) \
+diff --git a/init/main.c b/init/main.c
+index edeace0..6961de2 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -746,18 +746,47 @@
+
+
+ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
++extern initcall_t __async_initcall_start[], __async_initcall_end[];
+
+-static void __init do_initcalls(void)
++static void __init do_async_initcalls(struct work_struct *dummy)
+ {
+ initcall_t *call;
+
+- for (call = __early_initcall_end; call < __initcall_end; call++)
++ for (call = __async_initcall_start; call < __async_initcall_end; call++)
+ do_one_initcall(*call);
++}
++
++static struct workqueue_struct *async_init_wq;
++
++static void __init do_initcalls(void)
++{
++ initcall_t *call;
++ static DECLARE_WORK(async_work, do_async_initcalls);
++ int phase = 0; /* 0 = levels 0 - 6, 1 = level 6a, 2 = after level 6a */
++
++ async_init_wq = create_singlethread_workqueue("kasyncinit");
++
++ for (call = __early_initcall_end; call < __initcall_end; call++) {
++ if (phase == 0 && call >= __async_initcall_start) {
++ phase = 1;
++ queue_work(async_init_wq, &async_work);
++ }
++ if (phase == 1 && call >= __async_initcall_end)
++ phase = 2;
++ if (phase != 1)
++ do_one_initcall(*call);
++ }
+
+- /* Make sure there is no pending stuff from the initcall sequence */
++ /*
++ * Make sure there is no pending stuff from the initcall sequence,
++ * including the async initcalls
++ */
+ flush_scheduled_work();
++ flush_workqueue(async_init_wq);
++ destroy_workqueue(async_init_wq);
+ }
+
++
+ /*
+ * Ok, the machine is now initialized. None of the devices
+ * have been touched yet, but the CPU subsystem is up and
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch
new file mode 100644
index 000000000..75d4151b3
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch
@@ -0,0 +1,62 @@
+From d1a26186ee222329a797bb0b2c8e2b5bc7d94d42 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Fri, 18 Jul 2008 15:16:53 -0700
+Subject: [PATCH] fastboot: turn the USB hostcontroller initcalls into async initcalls
+
+the USB host controller init calls take a long time, mostly due to a
+"minimally 100 msec" delay *per port* during initialization.
+These are prime candidates for going in parallel to everything else.
+
+The USB device ordering is not affected by this due to the
+serialized-within-eachother property of async initcalls.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ drivers/usb/host/ehci-hcd.c | 2 +-
+ drivers/usb/host/ohci-hcd.c | 2 +-
+ drivers/usb/host/uhci-hcd.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 369a8a5..8f84b17 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -1101,7 +1101,7 @@ clean0:
+ #endif
+ return retval;
+ }
+-module_init(ehci_hcd_init);
++module_init_async(ehci_hcd_init);
+
+ static void __exit ehci_hcd_cleanup(void)
+ {
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index a8160d6..e060ed1 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -1165,7 +1165,7 @@ static int __init ohci_hcd_mod_init(void)
+
+ return retval;
+ }
+-module_init(ohci_hcd_mod_init);
++module_init_async(ohci_hcd_mod_init);
+
+ static void __exit ohci_hcd_mod_exit(void)
+ {
+diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
+index 3a7bfe7..f2a05ac 100644
+--- a/drivers/usb/host/uhci-hcd.c
++++ b/drivers/usb/host/uhci-hcd.c
+@@ -999,7 +999,7 @@ static void __exit uhci_hcd_cleanup(void)
+ kfree(errbuf);
+ }
+
+-module_init(uhci_hcd_init);
++module_init_async(uhci_hcd_init);
+ module_exit(uhci_hcd_cleanup);
+
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch
new file mode 100644
index 000000000..efd8ca9c9
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch
@@ -0,0 +1,54 @@
+From 60ddc2e5c44b4b9f5fcb440065469eacbeabf5eb Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Fri, 18 Jul 2008 15:17:35 -0700
+Subject: [PATCH] fastboot: convert a few non-critical ACPI drivers to async initcalls
+
+This patch converts a few non-critical ACPI drivers to async initcalls;
+these initcalls (battery, button and thermal) tend to take quite a bit of
+time (100's of milliseconds) due to the hardware they need to talk to,
+but are otherwise clearly non-essential for the boot process.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ drivers/acpi/battery.c | 2 +-
+ drivers/acpi/button.c | 2 +-
+ drivers/acpi/thermal.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index b1c723f..d5d30ca 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -904,5 +904,5 @@ static void __exit acpi_battery_exit(void)
+ #endif
+ }
+
+-module_init(acpi_battery_init);
++module_init_async(acpi_battery_init);
+ module_exit(acpi_battery_exit);
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index 1dfec41..46b3805 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -545,5 +545,5 @@ static void __exit acpi_button_exit(void)
+ remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
+ }
+
+-module_init(acpi_button_init);
++module_init_async(acpi_button_init);
+ module_exit(acpi_button_exit);
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index 84c795f..1f529af 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -1833,5 +1833,5 @@ static void __exit acpi_thermal_exit(void)
+ return;
+ }
+
+-module_init(acpi_thermal_init);
++module_init_async(acpi_thermal_init);
+ module_exit(acpi_thermal_exit);
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch
new file mode 100644
index 000000000..129823b6c
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch
@@ -0,0 +1,40 @@
+From 3e6558b693dd1e69e3177bc248977f067a769f14 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 08:59:24 -0700
+Subject: [PATCH] fastboot: hold the BKL over the async init call sequence
+
+Regular init calls are called with the BKL held; make sure
+the async init calls are also called with the BKL held.
+While this reduces parallelism a little, it does provide
+lock-for-lock compatibility. The hit to parallelism isn't too
+bad; most of the init calls are done immediately or actually
+block for their delays.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ init/main.c | 6 ++++++
+ 1 files changed, 6 insertions(+), 0 deletions(-)
+
+diff --git a/init/main.c b/init/main.c
+index 6961de2..9e2aee8 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -744,8 +744,14 @@ static void __init do_async_initcalls(struct work_struct *dummy)
+ {
+ initcall_t *call;
+
++ /*
++ * For compatibility with normal init calls... take the BKL
++ * not pretty, not desirable, but compatibility first
++ */
++ lock_kernel();
+ for (call = __async_initcall_start; call < __async_initcall_end; call++)
+ do_one_initcall(*call);
++ unlock_kernel();
+ }
+
+ static struct workqueue_struct *async_init_wq;
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0028-fastboot-sync-the-async-execution-before-late_initc.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0028-fastboot-sync-the-async-execution-before-late_initc.patch
new file mode 100644
index 000000000..0700fb318
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0028-fastboot-sync-the-async-execution-before-late_initc.patch
@@ -0,0 +1,95 @@
+From 660625fb93f2fc0e633da9cb71d13d895b385f64 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 09:00:41 -0700
+Subject: [PATCH] fastboot: sync the async execution before late_initcall and move level 6s (sync) first
+
+Rene Herman points out several cases where it's basically needed to have
+all level 6/6a/6s calls done before the level 7 (late_initcall) code
+runs. This patch adds a sync point in the transition from the 6's to the
+7's.
+
+Second, this patch makes sure that level 6s (sync) happens before the
+async code starts, and puts a user in driver/pci in this category that
+needs to happen before device init.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ drivers/pci/pci.c | 2 +-
+ include/asm-generic/vmlinux.lds.h | 3 ++-
+ init/main.c | 14 +++++++++++++-
+ 3 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 44a46c9..d75295d 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1889,7 +1889,7 @@ static int __devinit pci_setup(char *str)
+ }
+ early_param("pci", pci_setup);
+
+-device_initcall(pci_init);
++device_initcall_sync(pci_init);
+
+ EXPORT_SYMBOL(pci_reenable_device);
+ EXPORT_SYMBOL(pci_enable_device_io);
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 39c1afc..020c641 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -372,11 +372,12 @@
+ *(.initcall5.init) \
+ *(.initcall5s.init) \
+ *(.initcallrootfs.init) \
++ *(.initcall6s.init) \
+ __async_initcall_start = .; \
+ *(.initcall6a.init) \
+ __async_initcall_end = .; \
+ *(.initcall6.init) \
+- *(.initcall6s.init) \
++ __device_initcall_end = .; \
+ *(.initcall7.init) \
+ *(.initcall7s.init)
+
+diff --git a/init/main.c b/init/main.c
+index 9e2aee8..6be1756 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -739,6 +739,7 @@ static void __init do_one_initcall(initcall_t fn)
+
+ extern initcall_t __initcall_start[], __initcall_end[];
+ extern initcall_t __async_initcall_start[], __async_initcall_end[];
++extern initcall_t __device_initcall_end[];
+
+ static void __init do_async_initcalls(struct work_struct *dummy)
+ {
+@@ -762,7 +763,13 @@ static void __init do_initcalls(void)
+ {
+ initcall_t *call;
+ static DECLARE_WORK(async_work, do_async_initcalls);
+- int phase = 0; /* 0 = levels 0 - 6, 1 = level 6a, 2 = after level 6a */
++ /*
++ * 0 = levels 0 - 6,
++ * 1 = level 6a,
++ * 2 = after level 6a,
++ * 3 = after level 6
++ */
++ int phase = 0;
+
+ async_init_wq = create_singlethread_workqueue("kasyncinit");
+
+@@ -773,6 +780,11 @@ static void __init do_initcalls(void)
+ }
+ if (phase == 1 && call >= __async_initcall_end)
+ phase = 2;
++ if (phase == 2 && call >= __device_initcall_end) {
++ phase = 3;
++ /* make sure all async work is done before level 7 */
++ flush_workqueue(async_init_wq);
++ }
+ if (phase != 1)
+ do_one_initcall(*call);
+ }
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0029-fastboot-make-fastboot-a-config-option.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0029-fastboot-make-fastboot-a-config-option.patch
new file mode 100644
index 000000000..faf962209
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0029-fastboot-make-fastboot-a-config-option.patch
@@ -0,0 +1,56 @@
+From 50b6962016b824dfac254b8f36fc6cac301c8a8d Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 10:20:49 -0700
+Subject: [PATCH] fastboot: make fastboot a config option
+
+to mitigate the risks of async bootup, make fastboot a configuration
+option...
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ init/Kconfig | 11 +++++++++++
+ init/main.c | 4 ++++
+ 2 files changed, 15 insertions(+), 0 deletions(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 6199d11..7545c8b 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -524,6 +524,17 @@ config CC_OPTIMIZE_FOR_SIZE
+
+ If unsure, say N.
+
++config FASTBOOT
++ bool "Fast boot support"
++ help
++ The fastboot option will cause the kernel to try to optimize
++ for faster boot.
++
++ This includes doing some of the device initialization asynchronous
++ as well as opportunistically trying to mount the root fs early.
++
++ If unsure, say N.
++
+ config SYSCTL
+ bool
+
+diff --git a/init/main.c b/init/main.c
+index 6be1756..bb97add 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -776,7 +776,11 @@ static void __init do_initcalls(void)
+ for (call = __initcall_start; call < __initcall_end; call++) {
+ if (phase == 0 && call >= __async_initcall_start) {
+ phase = 1;
++#ifdef CONFIG_FASTBOOT
+ queue_work(async_init_wq, &async_work);
++#else
++ do_async_initcalls(NULL);
++#endif
+ }
+ if (phase == 1 && call >= __async_initcall_end)
+ phase = 2;
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch
new file mode 100644
index 000000000..e2c373793
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch
@@ -0,0 +1,67 @@
+From db62cd29f9b9142c19c574ca00916f66ff22ed4a Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 13:01:28 -0700
+Subject: [PATCH] fastboot: retry mounting the root fs if we can't find init
+
+Currently we wait until all device init is done before trying to mount
+the root fs and, consequently, to execute init.
+
+In preparation for relaxing the first delay, this patch adds a retry
+attempt in case /sbin/init is not found. Before retrying, the code
+will wait for all device init to complete.
+
+While this patch by itself doesn't gain boot time yet (it needs follow on
+patches), the alternative already is to panic()...
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ init/main.c | 19 +++++++++++++++++++
+ 1 files changed, 19 insertions(+), 0 deletions(-)
+
+diff --git a/init/main.c b/init/main.c
+index 3575b84..73785a4 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -853,6 +853,7 @@ static void run_init_process(char *init_filename)
+ */
+ static int noinline init_post(void)
+ {
++ int retry_count = 1;
+ free_initmem();
+ unlock_kernel();
+ mark_rodata_ro();
+@@ -873,6 +874,7 @@ static int noinline init_post(void)
+ ramdisk_execute_command);
+ }
+
++retry:
+ /*
+ * We try each of these until one succeeds.
+ *
+@@ -885,6 +887,23 @@ static int noinline init_post(void)
+ "defaults...\n", execute_command);
+ }
+ run_init_process("/sbin/init");
++
++ if (retry_count > 0) {
++ retry_count--;
++ /*
++ * We haven't found init yet... potentially because the device
++ * is still being probed. We need to
++ * - flush keventd and friends
++ * - wait for the known devices to complete their probing
++ * - try to mount the root fs again
++ */
++ flush_scheduled_work();
++ while (driver_probe_done() != 0)
++ msleep(100);
++ prepare_namespace();
++ goto retry;
++ }
++
+ run_init_process("/etc/init");
+ run_init_process("/bin/init");
+ run_init_process("/bin/sh");
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch
new file mode 100644
index 000000000..03b3b8220
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch
@@ -0,0 +1,41 @@
+From b52c36a95ed8026b6925fe8595ebcab6921ae62d Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 13:07:09 -0700
+Subject: [PATCH] fastboot: make the raid autodetect code wait for all devices to init
+
+The raid autodetect code really needs to have all devices probed before
+it can detect raid arrays; not doing so would give rather messy situations
+where arrays would get detected as degraded while they shouldn't be etc.
+
+This is in preparation of removing the "wait for everything to init"
+code that makes everyone pay, not just raid users.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ init/do_mounts_md.c | 7 +++++++
+ 1 files changed, 7 insertions(+), 0 deletions(-)
+
+diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
+index 693d246..c0412a9 100644
+--- a/init/do_mounts_md.c
++++ b/init/do_mounts_md.c
+@@ -267,9 +267,16 @@ __setup("md=", md_setup);
+ void __init md_run_setup(void)
+ {
+ create_dev("/dev/md0", MKDEV(MD_MAJOR, 0));
++
+ if (raid_noautodetect)
+ printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
+ else {
++ /*
++ * Since we don't want to detect and use half a raid array, we need to
++ * wait for the known devices to complete their probing
++ */
++ while (driver_probe_done() != 0)
++ msleep(100);
+ int fd = sys_open("/dev/md0", 0, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch
new file mode 100644
index 000000000..4e9be15e1
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch
@@ -0,0 +1,44 @@
+From 1b5a2bd0602010398cb473d1b821a9f1c1399caf Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 13:12:16 -0700
+Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay
+
+In the non-initrd case, we wait for all devices to finish their
+probing before we try to mount the rootfs.
+In practice, this means that we end up waiting 2 extra seconds for
+the PS/2 mouse probing even though the root holding device has been
+ready for a long time.
+
+The previous two patches in this series made the RAID autodetect code
+do its own "wait for probing to be done" code, and added
+"wait and retry" functionality in case the root device isn't actually
+available.
+
+These two changes should make it safe to remove the delay itself,
+and this patch does this. On my test laptop, this reduces the boot time
+by 2 seconds (kernel time goes from 3.9 to 1.9 seconds).
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ init/do_mounts.c | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index a1de1bf..c984fab 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -364,9 +364,11 @@ void __init prepare_namespace(void)
+ ssleep(root_delay);
+ }
+
++#ifndef CONFIG_FASTBOOT
+ /* wait for the known devices to complete their probing */
+ while (driver_probe_done() != 0)
+ msleep(100);
++#endif
+
+ md_run_setup();
+
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch
new file mode 100644
index 000000000..55c6c1ada
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch
@@ -0,0 +1,32 @@
+From 799d0da9e645258b9d1ae11d4aac73c9474906e3 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 16:30:29 -0700
+Subject: [PATCH] fastboot: make the RAID autostart code print a message just before waiting
+
+As requested/suggested by Neil Brown: make the raid code print that it's
+about to wait for probing to be done as well as give a suggestion on how
+to disable the probing if the user doesn't use raid.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ init/do_mounts_md.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
+index c0412a9..1ec5c41 100644
+--- a/init/do_mounts_md.c
++++ b/init/do_mounts_md.c
+@@ -275,7 +275,9 @@ void __init md_run_setup(void)
+ * Since we don't want to detect and use half a raid array, we need to
+ * wait for the known devices to complete their probing
+ */
+- while (driver_probe_done() != 0)
++ printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
++ printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
++ while (driver_probe_done() < 0)
+ msleep(100);
+ int fd = sys_open("/dev/md0", 0, 0);
+ if (fd >= 0) {
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0034-fastboot-fix-typo-in-init-Kconfig-text.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0034-fastboot-fix-typo-in-init-Kconfig-text.patch
new file mode 100644
index 000000000..742629354
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0034-fastboot-fix-typo-in-init-Kconfig-text.patch
@@ -0,0 +1,29 @@
+From 1a23ed42e1baf0481cc70c2f71d97b0bf0f1be70 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Thu, 31 Jul 2008 12:52:29 +0200
+Subject: [PATCH] fastboot: fix typo in init/Kconfig text
+
+noticed by Randy Dunlap.
+
+Reported-by: Randy Dunlap <randy.dunlap@oracle.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ init/Kconfig | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 4f73780..6b4de4d 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -530,7 +530,7 @@ config FASTBOOT
+ The fastboot option will cause the kernel to try to optimize
+ for faster boot.
+
+- This includes doing some of the device initialization asynchronous
++ This includes doing some of the device initialization asynchronously
+ as well as opportunistically trying to mount the root fs early.
+
+ If unsure, say N.
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch
new file mode 100644
index 000000000..b8af74eaf
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch
@@ -0,0 +1,161 @@
+From 8929dda869d51b953c8f300864da62297db8a74e Mon Sep 17 00:00:00 2001
+From: Li, Shaohua <shaohua.li@intel.com>
+Date: Wed, 13 Aug 2008 17:26:01 +0800
+Subject: [PATCH] fastboot: remove duplicate unpack_to_rootfs()
+
+we check whether the initrd is an initramfs first and then do the real
+unpack. The check isn't required; we can do the unpack directly. If the
+initrd isn't an initramfs, we can remove the garbage. On my laptop,
+this saves 0.1s of boot time. This penalizes the non-initramfs case,
+but initramfs is now widely used.
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+Acked-by: Arjan van de Ven <arjan@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ init/initramfs.c | 71 ++++++++++++++++++++++++++++++++++++++++++-----------
+ 1 files changed, 56 insertions(+), 15 deletions(-)
+
+diff --git a/init/initramfs.c b/init/initramfs.c
+index 644fc01..da8d030 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -5,6 +5,7 @@
+ #include <linux/fcntl.h>
+ #include <linux/delay.h>
+ #include <linux/string.h>
++#include <linux/dirent.h>
+ #include <linux/syscalls.h>
+
+ static __initdata char *message;
+@@ -121,8 +122,6 @@ static __initdata char *victim;
+ static __initdata unsigned count;
+ static __initdata loff_t this_header, next_header;
+
+-static __initdata int dry_run;
+-
+ static inline void __init eat(unsigned n)
+ {
+ victim += n;
+@@ -183,10 +182,6 @@ static int __init do_header(void)
+ parse_header(collected);
+ next_header = this_header + N_ALIGN(name_len) + body_len;
+ next_header = (next_header + 3) & ~3;
+- if (dry_run) {
+- read_into(name_buf, N_ALIGN(name_len), GotName);
+- return 0;
+- }
+ state = SkipIt;
+ if (name_len <= 0 || name_len > PATH_MAX)
+ return 0;
+@@ -257,8 +252,6 @@ static int __init do_name(void)
+ free_hash();
+ return 0;
+ }
+- if (dry_run)
+- return 0;
+ clean_path(collected, mode);
+ if (S_ISREG(mode)) {
+ int ml = maybe_link();
+@@ -423,10 +416,9 @@ static void __init flush_window(void)
+ outcnt = 0;
+ }
+
+-static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
++static char * __init unpack_to_rootfs(char *buf, unsigned len)
+ {
+ int written;
+- dry_run = check_only;
+ header_buf = kmalloc(110, GFP_KERNEL);
+ symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
+ name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
+@@ -520,10 +512,57 @@ skip:
+ initrd_end = 0;
+ }
+
++#define BUF_SIZE 1024
++static void __init clean_rootfs(void)
++{
++ int fd;
++ void *buf;
++ struct linux_dirent64 *dirp;
++ int count;
++
++ fd = sys_open("/", O_RDONLY, 0);
++ WARN_ON(fd < 0);
++ if (fd < 0)
++ return;
++ buf = kzalloc(BUF_SIZE, GFP_KERNEL);
++ WARN_ON(!buf);
++ if (!buf) {
++ sys_close(fd);
++ return;
++ }
++
++ dirp = buf;
++ count = sys_getdents64(fd, dirp, BUF_SIZE);
++ while (count > 0) {
++ while (count > 0) {
++ struct stat st;
++ int ret;
++
++ ret = sys_newlstat(dirp->d_name, &st);
++ WARN_ON_ONCE(ret);
++ if (!ret) {
++ if (S_ISDIR(st.st_mode))
++ sys_rmdir(dirp->d_name);
++ else
++ sys_unlink(dirp->d_name);
++ }
++
++ count -= dirp->d_reclen;
++ dirp = (void *)dirp + dirp->d_reclen;
++ }
++ dirp = buf;
++ memset(buf, 0, BUF_SIZE);
++ count = sys_getdents64(fd, dirp, BUF_SIZE);
++ }
++
++ sys_close(fd);
++ kfree(buf);
++}
++
+ static int __init populate_rootfs(void)
+ {
+ char *err = unpack_to_rootfs(__initramfs_start,
+- __initramfs_end - __initramfs_start, 0);
++ __initramfs_end - __initramfs_start);
+ if (err)
+ panic(err);
+ if (initrd_start) {
+@@ -531,13 +570,15 @@ static int __init populate_rootfs(void)
+ int fd;
+ printk(KERN_INFO "checking if image is initramfs...");
+ err = unpack_to_rootfs((char *)initrd_start,
+- initrd_end - initrd_start, 1);
++ initrd_end - initrd_start);
+ if (!err) {
+ printk(" it is\n");
+- unpack_to_rootfs((char *)initrd_start,
+- initrd_end - initrd_start, 0);
+ free_initrd();
+ return 0;
++ } else {
++ clean_rootfs();
++ unpack_to_rootfs(__initramfs_start,
++ __initramfs_end - __initramfs_start);
+ }
+ printk("it isn't (%s); looks like an initrd\n", err);
+ fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
+@@ -550,7 +591,7 @@ static int __init populate_rootfs(void)
+ #else
+ printk(KERN_INFO "Unpacking initramfs...");
+ err = unpack_to_rootfs((char *)initrd_start,
+- initrd_end - initrd_start, 0);
++ initrd_end - initrd_start);
+ if (err)
+ panic(err);
+ printk(" done\n");
+--
+1.5.4.3
+
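The clean_rootfs() helper introduced above walks "/" with sys_getdents64(), stepping through the returned buffer by d_reclen and removing every entry before the built-in initramfs is unpacked again. The standalone program below is a hypothetical userspace analogue of that directory-sweep pattern, shown only for illustration and not part of the patch; the scratch directory path, the program name and the local struct linux_dirent64 definition are assumptions made for the sketch.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Minimal local definition matching the kernel's linux_dirent64 layout. */
struct linux_dirent64 {
	unsigned long long d_ino;
	long long          d_off;
	unsigned short     d_reclen;
	unsigned char      d_type;
	char               d_name[];
};

#define BUF_SIZE 1024	/* same buffer size the kernel helper uses */

int main(int argc, char **argv)
{
	/* Hypothetical scratch directory; do NOT point this at a real tree. */
	const char *dir = argc > 1 ? argv[1] : "/tmp/scratch";
	char buf[BUF_SIZE];
	int fd, count;

	fd = open(dir, O_RDONLY | O_DIRECTORY);
	if (fd < 0 || chdir(dir) < 0) {
		perror(dir);
		return 1;
	}

	/* Refill the buffer until getdents64() reports no more entries. */
	while ((count = syscall(SYS_getdents64, fd, buf, BUF_SIZE)) > 0) {
		struct linux_dirent64 *d = (struct linux_dirent64 *)buf;

		while (count > 0) {
			struct stat st;

			if (strcmp(d->d_name, ".") && strcmp(d->d_name, "..") &&
			    lstat(d->d_name, &st) == 0) {
				if (S_ISDIR(st.st_mode))
					rmdir(d->d_name);   /* empty dirs only */
				else
					unlink(d->d_name);
			}
			/* Step to the next record, exactly like the kernel loop. */
			count -= d->d_reclen;
			d = (struct linux_dirent64 *)((char *)d + d->d_reclen);
		}
	}

	close(fd);
	return 0;
}

Unlike the kernel helper, the sketch skips "." and ".." explicitly rather than relying on rmdir() failing on them; like the kernel helper, it only clears a single directory level, since rmdir() removes only empty subdirectories. It is a study aid for the buffer-walking pattern, not a reimplementation of clean_rootfs().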
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0036-warning-fix-init-do_mounts_md-c.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0036-warning-fix-init-do_mounts_md-c.patch
new file mode 100644
index 000000000..9ba44a892
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0036-warning-fix-init-do_mounts_md-c.patch
@@ -0,0 +1,82 @@
+From fa3038625d7df2a1244c5b753069e7fdf99af3b5 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Mon, 18 Aug 2008 12:54:00 +0200
+Subject: [PATCH] warning: fix init do_mounts_md c
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf-8
+Content-Transfer-Encoding: 8bit
+
+fix warning:
+
+ init/do_mounts_md.c: In function ‘md_run_setup’:
+ init/do_mounts_md.c:282: warning: ISO C90 forbids mixed declarations and code
+
+also, use the opportunity to put the RAID autodetection code
+into a separate function - this also solves a checkpatch style warning.
+
+No code changed:
+
+md5:
+ aa36a35faef371b05f1974ad583bdbbd do_mounts_md.o.before.asm
+ aa36a35faef371b05f1974ad583bdbbd do_mounts_md.o.after.asm
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ init/do_mounts_md.c | 36 +++++++++++++++++++++---------------
+ 1 files changed, 21 insertions(+), 15 deletions(-)
+
+diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
+index 1ec5c41..c0dfd3c 100644
+--- a/init/do_mounts_md.c
++++ b/init/do_mounts_md.c
+@@ -264,26 +264,32 @@ static int __init raid_setup(char *str)
+ __setup("raid=", raid_setup);
+ __setup("md=", md_setup);
+
++static void autodetect_raid(void)
++{
++ int fd;
++
++ /*
++ * Since we don't want to detect and use half a raid array, we need to
++ * wait for the known devices to complete their probing
++ */
++ printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
++ printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
++ while (driver_probe_done() < 0)
++ msleep(100);
++ fd = sys_open("/dev/md0", 0, 0);
++ if (fd >= 0) {
++ sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
++ sys_close(fd);
++ }
++}
++
+ void __init md_run_setup(void)
+ {
+ create_dev("/dev/md0", MKDEV(MD_MAJOR, 0));
+
+ if (raid_noautodetect)
+ printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
+- else {
+- /*
+- * Since we don't want to detect and use half a raid array, we need to
+- * wait for the known devices to complete their probing
+- */
+- printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
+- printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
+- while (driver_probe_done() < 0)
+- msleep(100);
+- int fd = sys_open("/dev/md0", 0, 0);
+- if (fd >= 0) {
+- sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
+- sys_close(fd);
+- }
+- }
++ else
++ autodetect_raid();
+ md_setup_drive();
+ }
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0037-init-initramfs.c-unused-function-when-compiling-wit.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0037-init-initramfs.c-unused-function-when-compiling-wit.patch
new file mode 100644
index 000000000..159f98867
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0037-init-initramfs.c-unused-function-when-compiling-wit.patch
@@ -0,0 +1,37 @@
+From b4931e6c151acad06b4c12dc7cdb634366d7d27a Mon Sep 17 00:00:00 2001
+From: Steven Noonan <steven@uplinklabs.net>
+Date: Mon, 8 Sep 2008 16:19:10 -0700
+Subject: [PATCH] init/initramfs.c: unused function when compiling without CONFIG_BLK_DEV_RAM
+
+Fix a compiler warning seen when the kernel is built without support
+for RAM block devices (CONFIG_BLK_DEV_RAM).
+
+Signed-off-by: Steven Noonan <steven@uplinklabs.net>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ init/initramfs.c | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/init/initramfs.c b/init/initramfs.c
+index da8d030..2f056e2 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -512,6 +512,7 @@ skip:
+ initrd_end = 0;
+ }
+
++#ifdef CONFIG_BLK_DEV_RAM
+ #define BUF_SIZE 1024
+ static void __init clean_rootfs(void)
+ {
+@@ -558,6 +559,7 @@ static void __init clean_rootfs(void)
+ sys_close(fd);
+ kfree(buf);
+ }
++#endif
+
+ static int __init populate_rootfs(void)
+ {
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch
new file mode 100644
index 000000000..8d1e3f22f
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch
@@ -0,0 +1,38 @@
+From 5e4f25d1f43991324794657655bbbc43983522a2 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@infradead.org>
+Date: Wed, 10 Sep 2008 08:25:34 -0700
+Subject: [PATCH] fastboot: fix blackfin breakage due to vmlinux.lds change
+
+As reported by Mike Frysinger, the vmlinux.lds changes should
+have used VMLINUX_SYMBOL()...
+
+Reported-by: Mike Frysinger <vapier.adi@gmail.com>
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Bryan Wu <cooloney@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+---
+ include/asm-generic/vmlinux.lds.h | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index b9be858..ccabc4e 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -377,11 +377,11 @@
+ *(.initcall5s.init) \
+ *(.initcallrootfs.init) \
+ *(.initcall6s.init) \
+- __async_initcall_start = .; \
++ VMLINUX_SYMBOL(__async_initcall_start) = .; \
+ *(.initcall6a.init) \
+- __async_initcall_end = .; \
++ VMLINUX_SYMBOL(__async_initcall_end) = .; \
+ *(.initcall6.init) \
+- __device_initcall_end = .; \
++ VMLINUX_SYMBOL(__device_initcall_end) = .; \
+ *(.initcall7.init) \
+ *(.initcall7s.init)
+
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch
new file mode 100644
index 000000000..da72d3bb7
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch
@@ -0,0 +1,183 @@
+From 77e9695b9d5c9ce761dedc193045d9cb64b8e245 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sat, 13 Sep 2008 09:36:06 -0700
+Subject: [PATCH] Add a script to visualize the kernel boot process / time
+
+When optimizing the kernel boot time, it's very valuable to visualize
+what is going on at which point in time. In addition, with the fastboot
+asynchronous initcall level, it's equally useful to see which initcall
+gets run where and when.
+
+This patch adds a script to turn a dmesg into an SVG graph (which can be
+shown with tools such as Inkscape, Gimp or Firefox) and a small change
+to the initcall code to print the PID of the thread calling the initcall
+(so that the script can work out the parallelism).
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ init/main.c | 3 +-
+ scripts/bootgraph.pl | 138 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 140 insertions(+), 1 deletions(-)
+ create mode 100644 scripts/bootgraph.pl
+
+diff --git a/init/main.c b/init/main.c
+index a1b95f3..14f2609 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -708,7 +708,8 @@ int do_one_initcall(initcall_t fn)
+ int result;
+
+ if (initcall_debug) {
+- print_fn_descriptor_symbol("calling %s\n", fn);
++ print_fn_descriptor_symbol("calling %s", fn);
++ printk(" @ %i\n", task_pid_nr(current));
+ t0 = ktime_get();
+ }
+
+diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
+new file mode 100644
+index 0000000..d459b8b
+--- /dev/null
++++ b/scripts/bootgraph.pl
+@@ -0,0 +1,138 @@
++#!/usr/bin/perl
++
++# Copyright 2008, Intel Corporation
++#
++# This file is part of the Linux kernel
++#
++# This program file is free software; you can redistribute it and/or modify it
++# under the terms of the GNU General Public License as published by the
++# Free Software Foundation; version 2 of the License.
++#
++# This program is distributed in the hope that it will be useful, but WITHOUT
++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++# for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program in a file named COPYING; if not, write to the
++# Free Software Foundation, Inc.,
++# 51 Franklin Street, Fifth Floor,
++# Boston, MA 02110-1301 USA
++#
++# Authors:
++# Arjan van de Ven <arjan@linux.intel.com>
++
++
++#
++# This script turns a dmesg output into a SVG graphic that shows which
++# functions take how much time. You can view SVG graphics with various
++# programs, including Inkscape, The Gimp and Firefox.
++#
++#
++# For this script to work, the kernel needs to be compiled with the
++# CONFIG_PRINTK_TIME configuration option enabled, and with
++# "initcall_debug" passed on the kernel command line.
++#
++# usage:
++# dmesg | perl scripts/bootgraph.pl > output.svg
++#
++
++my @rows;
++my %start, %end, %row;
++my $done = 0;
++my $rowcount = 0;
++my $maxtime = 0;
++my $count = 0;
++while (<>) {
++ my $line = $_;
++ if ($line =~ /([0-9\.]+)\] calling ([a-zA-Z\_]+)\+/) {
++ my $func = $2;
++ if ($done == 0) {
++ $start{$func} = $1;
++ }
++ $row{$func} = 1;
++ if ($line =~ /\@ ([0-9]+)/) {
++ my $pid = $1;
++ if (!defined($rows[$pid])) {
++ $rowcount = $rowcount + 1;
++ $rows[$pid] = $rowcount;
++ }
++ $row{$func} = $rows[$pid];
++ }
++ $count = $count + 1;
++ }
++
++ if ($line =~ /([0-9\.]+)\] initcall ([a-zA-Z\_]+)\+.*returned/) {
++ if ($done == 0) {
++ $end{$2} = $1;
++ $maxtime = $1;
++ }
++ }
++ if ($line =~ /Write protecting the/) {
++ $done = 1;
++ }
++}
++
++if ($count == 0) {
++ print "No data found in the dmesg. Make sure CONFIG_PRINTK_TIME is enabled and\n";
++ print "that initcall_debug is passed on the kernel command line.\n\n";
++ print "Usage: \n";
++ print " dmesg | perl scripts/bootgraph.pl > output.svg\n\n";
++ exit;
++}
++
++print "<?xml version=\"1.0\" standalone=\"no\"?> \n";
++print "<svg width=\"1000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n";
++
++my @styles;
++
++$styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[1] = "fill:rgb(0,255,0);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[2] = "fill:rgb(255,0,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[3] = "fill:rgb(255,255,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[4] = "fill:rgb(255,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[5] = "fill:rgb(0,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[6] = "fill:rgb(0,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[7] = "fill:rgb(0,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[8] = "fill:rgb(255,0,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[9] = "fill:rgb(255,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[10] = "fill:rgb(255,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++$styles[11] = "fill:rgb(128,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
++
++my $mult = 950.0 / $maxtime;
++my $threshold = 0.0500 / $maxtime;
++my $stylecounter = 0;
++while (($key,$value) = each %start) {
++ my $duration = $end{$key} - $start{$key};
++
++ if ($duration >= $threshold) {
++ my $s, $s2, $e, $y;
++ $s = $value * $mult;
++ $s2 = $s + 6;
++ $e = $end{$key} * $mult;
++ $w = $e - $s;
++
++ $y = $row{$key} * 150;
++ $y2 = $y + 4;
++
++ $style = $styles[$stylecounter];
++ $stylecounter = $stylecounter + 1;
++ if ($stylecounter > 11) {
++ $stylecounter = 0;
++ };
++
++ print "<rect x=\"$s\" width=\"$w\" y=\"$y\" height=\"145\" style=\"$style\"/>\n";
++ print "<text transform=\"translate($s2,$y2) rotate(90)\">$key</text>\n";
++ }
++}
++
++
++# print the time line on top
++my $time = 0.0;
++while ($time < $maxtime) {
++ my $s2 = $time * $mult;
++ print "<text transform=\"translate($s2,89) rotate(90)\">$time</text>\n";
++ $time = $time + 0.1;
++}
++
++print "</svg>\n";
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch
new file mode 100644
index 000000000..0daba9d2c
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch
@@ -0,0 +1,91 @@
+From 5470e09b98074974316bbf98c8b8da01d670c2a4 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 14 Sep 2008 15:30:52 -0700
+Subject: [PATCH] fastboot: fix issues and improve output of bootgraph.pl
+
+David Sanders reported some issues with bootgraph.pl's display
+of his system's bootup; this commit fixes these by scaling the graph
+from the first initcall to the end time rather than from 0 to the end time;
+the minimum display size etc. also now needs to scale with this, as does
+the axis display.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ scripts/bootgraph.pl | 25 +++++++++++++++++--------
+ 1 files changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
+index d459b8b..4e5f4ab 100644
+--- a/scripts/bootgraph.pl
++++ b/scripts/bootgraph.pl
+@@ -42,6 +42,7 @@ my %start, %end, %row;
+ my $done = 0;
+ my $rowcount = 0;
+ my $maxtime = 0;
++my $firsttime = 100;
+ my $count = 0;
+ while (<>) {
+ my $line = $_;
+@@ -49,6 +50,9 @@ while (<>) {
+ my $func = $2;
+ if ($done == 0) {
+ $start{$func} = $1;
++ if ($1 < $firsttime) {
++ $firsttime = $1;
++ }
+ }
+ $row{$func} = 1;
+ if ($line =~ /\@ ([0-9]+)/) {
+@@ -71,6 +75,9 @@ while (<>) {
+ if ($line =~ /Write protecting the/) {
+ $done = 1;
+ }
++ if ($line =~ /Freeing unused kernel memory/) {
++ $done = 1;
++ }
+ }
+
+ if ($count == 0) {
+@@ -99,17 +106,17 @@ $styles[9] = "fill:rgb(255,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0
+ $styles[10] = "fill:rgb(255,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+ $styles[11] = "fill:rgb(128,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
+
+-my $mult = 950.0 / $maxtime;
+-my $threshold = 0.0500 / $maxtime;
++my $mult = 950.0 / ($maxtime - $firsttime);
++my $threshold = ($maxtime - $firsttime) / 60.0;
+ my $stylecounter = 0;
+ while (($key,$value) = each %start) {
+ my $duration = $end{$key} - $start{$key};
+
+ if ($duration >= $threshold) {
+ my $s, $s2, $e, $y;
+- $s = $value * $mult;
++ $s = ($value - $firsttime) * $mult;
+ $s2 = $s + 6;
+- $e = $end{$key} * $mult;
++ $e = ($end{$key} - $firsttime) * $mult;
+ $w = $e - $s;
+
+ $y = $row{$key} * 150;
+@@ -128,11 +135,13 @@ while (($key,$value) = each %start) {
+
+
+ # print the time line on top
+-my $time = 0.0;
++my $time = $firsttime;
++my $step = ($maxtime - $firsttime) / 15;
+ while ($time < $maxtime) {
+- my $s2 = $time * $mult;
+- print "<text transform=\"translate($s2,89) rotate(90)\">$time</text>\n";
+- $time = $time + 0.1;
++ my $s2 = ($time - $firsttime) * $mult;
++ my $tm = int($time * 100) / 100.0;
++ print "<text transform=\"translate($s2,89) rotate(90)\">$tm</text>\n";
++ $time = $time + $step;
+ }
+
+ print "</svg>\n";
+--
+1.5.4.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0041-r8169-8101e.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0041-r8169-8101e.patch
new file mode 100644
index 000000000..781c9a127
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0041-r8169-8101e.patch
@@ -0,0 +1,940 @@
+From 771c0d99c0ab3ca7f1a9bc400e8259171b518d5f Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Thu, 21 Aug 2008 23:20:40 +0200
+Subject: [PATCH] r8169: fix RxMissed register access
+
+- the register location is defined for the 8169 chipset only and
+ there is no 8169 beyond RTL_GIGA_MAC_VER_06
+- only the lower 3 bytes of the register are valid
+
+Fixes:
+1. http://bugzilla.kernel.org/show_bug.cgi?id=10180
+2. http://bugzilla.kernel.org/show_bug.cgi?id=11062 (bits of)
+
+Tested by Hermann Gausterer and Adam Huffman.
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+---
+ drivers/net/r8169.c | 25 ++++++++++++++-----------
+ 1 files changed, 14 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 0f6f974..4190ee7 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2099,8 +2099,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
+
+ RTL_R8(IntrMask);
+
+- RTL_W32(RxMissed, 0);
+-
+ rtl_set_rx_mode(dev);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+@@ -2143,8 +2141,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
+
+ RTL_R8(IntrMask);
+
+- RTL_W32(RxMissed, 0);
+-
+ rtl_set_rx_mode(dev);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+@@ -2922,6 +2918,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
+ return work_done;
+ }
+
++static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
++{
++ struct rtl8169_private *tp = netdev_priv(dev);
++
++ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
++ return;
++
++ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
++ RTL_W32(RxMissed, 0);
++}
++
+ static void rtl8169_down(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+@@ -2939,9 +2946,7 @@ core_down:
+
+ rtl8169_asic_down(ioaddr);
+
+- /* Update the error counts. */
+- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
+- RTL_W32(RxMissed, 0);
++ rtl8169_rx_missed(dev, ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
+@@ -3063,8 +3068,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&tp->lock, flags);
+- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
+- RTL_W32(RxMissed, 0);
++ rtl8169_rx_missed(dev, ioaddr);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+@@ -3089,8 +3093,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
+
+ rtl8169_asic_down(ioaddr);
+
+- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
+- RTL_W32(RxMissed, 0);
++ rtl8169_rx_missed(dev, ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
+--
+1.5.3.3
+
+From 6ee4bc96d446a9c466a18b715c7ab2d662c03ebd Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 26 Jul 2008 14:26:06 +0200
+Subject: [PATCH] r8169: get ethtool settings through the generic mii helper
+
+This avoids reporting unsupported link capabilities with
+the Fast Ethernet-only 8101/8102.
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Tested-by: Martin Capitanio <martin@capitanio.org>
+Fixed-by: Ivan Vecera <ivecera@redhat.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+---
+ drivers/net/r8169.c | 99 +++++++++++++++++++++++---------------------------
+ 1 files changed, 46 insertions(+), 53 deletions(-)
+
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 4190ee7..7e026a6 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -370,8 +370,9 @@ struct ring_info {
+ };
+
+ enum features {
+- RTL_FEATURE_WOL = (1 << 0),
+- RTL_FEATURE_MSI = (1 << 1),
++ RTL_FEATURE_WOL = (1 << 0),
++ RTL_FEATURE_MSI = (1 << 1),
++ RTL_FEATURE_GMII = (1 << 2),
+ };
+
+ struct rtl8169_private {
+@@ -406,13 +407,15 @@ struct rtl8169_private {
+ struct vlan_group *vlgrp;
+ #endif
+ int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
+- void (*get_settings)(struct net_device *, struct ethtool_cmd *);
++ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*phy_reset_enable)(void __iomem *);
+ void (*hw_start)(struct net_device *);
+ unsigned int (*phy_reset_pending)(void __iomem *);
+ unsigned int (*link_ok)(void __iomem *);
+ struct delayed_work task;
+ unsigned features;
++
++ struct mii_if_info mii;
+ };
+
+ MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
+@@ -482,6 +485,23 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
+ return value;
+ }
+
++static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
++ int val)
++{
++ struct rtl8169_private *tp = netdev_priv(dev);
++ void __iomem *ioaddr = tp->mmio_addr;
++
++ mdio_write(ioaddr, location, val);
++}
++
++static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
++{
++ struct rtl8169_private *tp = netdev_priv(dev);
++ void __iomem *ioaddr = tp->mmio_addr;
++
++ return mdio_read(ioaddr, location);
++}
++
+ static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+ {
+ RTL_W16(IntrMask, 0x0000);
+@@ -850,7 +870,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
+
+ #endif
+
+-static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
++static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+@@ -867,65 +887,29 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+
+ cmd->speed = SPEED_1000;
+ cmd->duplex = DUPLEX_FULL; /* Always set */
++
++ return 0;
+ }
+
+-static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
++static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+- void __iomem *ioaddr = tp->mmio_addr;
+- u8 status;
+-
+- cmd->supported = SUPPORTED_10baseT_Half |
+- SUPPORTED_10baseT_Full |
+- SUPPORTED_100baseT_Half |
+- SUPPORTED_100baseT_Full |
+- SUPPORTED_1000baseT_Full |
+- SUPPORTED_Autoneg |
+- SUPPORTED_TP;
+-
+- cmd->autoneg = 1;
+- cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+-
+- if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
+- cmd->advertising |= ADVERTISED_10baseT_Half;
+- if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
+- cmd->advertising |= ADVERTISED_10baseT_Full;
+- if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
+- cmd->advertising |= ADVERTISED_100baseT_Half;
+- if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
+- cmd->advertising |= ADVERTISED_100baseT_Full;
+- if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
+- cmd->advertising |= ADVERTISED_1000baseT_Full;
+-
+- status = RTL_R8(PHYstatus);
+-
+- if (status & _1000bpsF)
+- cmd->speed = SPEED_1000;
+- else if (status & _100bps)
+- cmd->speed = SPEED_100;
+- else if (status & _10bps)
+- cmd->speed = SPEED_10;
+-
+- if (status & TxFlowCtrl)
+- cmd->advertising |= ADVERTISED_Asym_Pause;
+- if (status & RxFlowCtrl)
+- cmd->advertising |= ADVERTISED_Pause;
+-
+- cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
+- DUPLEX_FULL : DUPLEX_HALF;
++
++ return mii_ethtool_gset(&tp->mii, cmd);
+ }
+
+ static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
++ int rc;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+- tp->get_settings(dev, cmd);
++ rc = tp->get_settings(dev, cmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+- return 0;
++ return rc;
+ }
+
+ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+@@ -1513,7 +1497,7 @@ static const struct rtl_cfg_info {
+ unsigned int align;
+ u16 intr_event;
+ u16 napi_event;
+- unsigned msi;
++ unsigned features;
+ } rtl_cfg_infos [] = {
+ [RTL_CFG_0] = {
+ .hw_start = rtl_hw_start_8169,
+@@ -1522,7 +1506,7 @@ static const struct rtl_cfg_info {
+ .intr_event = SYSErr | LinkChg | RxOverflow |
+ RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+ .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+- .msi = 0
++ .features = RTL_FEATURE_GMII
+ },
+ [RTL_CFG_1] = {
+ .hw_start = rtl_hw_start_8168,
+@@ -1531,7 +1515,7 @@ static const struct rtl_cfg_info {
+ .intr_event = SYSErr | LinkChg | RxOverflow |
+ TxErr | TxOK | RxOK | RxErr,
+ .napi_event = TxErr | TxOK | RxOK | RxOverflow,
+- .msi = RTL_FEATURE_MSI
++ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
+ },
+ [RTL_CFG_2] = {
+ .hw_start = rtl_hw_start_8101,
+@@ -1540,7 +1524,7 @@ static const struct rtl_cfg_info {
+ .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
+ RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+ .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+- .msi = RTL_FEATURE_MSI
++ .features = RTL_FEATURE_MSI
+ }
+ };
+
+@@ -1552,7 +1536,7 @@ static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+ u8 cfg2;
+
+ cfg2 = RTL_R8(Config2) & ~MSIEnable;
+- if (cfg->msi) {
++ if (cfg->features & RTL_FEATURE_MSI) {
+ if (pci_enable_msi(pdev)) {
+ dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ } else {
+@@ -1578,6 +1562,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ const unsigned int region = cfg->region;
+ struct rtl8169_private *tp;
++ struct mii_if_info *mii;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ unsigned int i;
+@@ -1602,6 +1587,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ tp->pci_dev = pdev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+
++ mii = &tp->mii;
++ mii->dev = dev;
++ mii->mdio_read = rtl_mdio_read;
++ mii->mdio_write = rtl_mdio_write;
++ mii->phy_id_mask = 0x1f;
++ mii->reg_num_mask = 0x1f;
++ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
++
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+--
+1.5.3.3
+
+From ef60b2a38e223a331e13ef503aee7cd5d4d5c12c Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hugh@veritas.com>
+Date: Mon, 8 Sep 2008 21:49:01 +0100
+Subject: [PATCH] r8169: select MII in Kconfig
+
+drivers/built-in.o: In function `rtl8169_gset_xmii':
+r8169.c:(.text+0x82259): undefined reference to `mii_ethtool_gset'
+suggests that the r8169 driver now needs to select MII.
+
+Signed-off-by: Hugh Dickins <hugh@veritas.com>
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+---
+ drivers/net/Kconfig | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index 4a11296..60a0453 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -2046,6 +2046,7 @@ config R8169
+ tristate "Realtek 8169 gigabit ethernet support"
+ depends on PCI
+ select CRC32
++ select MII
+ ---help---
+ Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
+
+--
+1.5.3.3
+
+From bca31864fca6004c4a4a9bd549e95c93b3c3bb10 Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 2 Aug 2008 15:50:02 +0200
+Subject: [PATCH] r8169: Tx performance tweak helper
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+---
+ drivers/net/r8169.c | 15 ++++++++++-----
+ 1 files changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 7e026a6..eea96fb 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2054,12 +2054,20 @@ static void rtl_hw_start_8169(struct net_device *dev)
+ RTL_W16(IntrMask, tp->intr_event);
+ }
+
++static void rtl_tx_performance_tweak(struct pci_dev *pdev, u8 force)
++{
++ u8 ctl;
++
++ pci_read_config_byte(pdev, 0x69, &ctl);
++ ctl = (ctl & ~0x70) | force;
++ pci_write_config_byte(pdev, 0x69, ctl);
++}
++
+ static void rtl_hw_start_8168(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+- u8 ctl;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+@@ -2073,10 +2081,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+- /* Tx performance tweak. */
+- pci_read_config_byte(pdev, 0x69, &ctl);
+- ctl = (ctl & ~0x70) | 0x50;
+- pci_write_config_byte(pdev, 0x69, ctl);
++ rtl_tx_performance_tweak(pdev, 0x50);
+
+ RTL_W16(IntrMitigate, 0x5151);
+
+--
+1.5.3.3
+
+From 7a929ae7d5a3618f56bf1ccaf8c62df628e820aa Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 5 Jul 2008 00:21:15 +0200
+Subject: [PATCH] r8169: use pci_find_capability for the PCI-E features
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+---
+ drivers/net/r8169.c | 32 ++++++++++++++++++++++++--------
+ 1 files changed, 24 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index eea96fb..5c00522 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -61,6 +61,7 @@ static const int multicast_filter_limit = 32;
+ /* MAC address length */
+ #define MAC_ADDR_LEN 6
+
++#define MAX_READ_REQUEST_SHIFT 12
+ #define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
+ #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+ #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+@@ -412,6 +413,7 @@ struct rtl8169_private {
+ void (*hw_start)(struct net_device *);
+ unsigned int (*phy_reset_pending)(void __iomem *);
+ unsigned int (*link_ok)(void __iomem *);
++ int pcie_cap;
+ struct delayed_work task;
+ unsigned features;
+
+@@ -1663,6 +1665,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ goto err_out_free_res_4;
+ }
+
++ tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++ if (!tp->pcie_cap && netif_msg_probe(tp))
++ dev_info(&pdev->dev, "no PCI Express capability\n");
++
+ /* Unneeded ? Don't mess with Mrs. Murphy. */
+ rtl8169_irq_mask_and_ack(ioaddr);
+
+@@ -2054,13 +2060,19 @@ static void rtl_hw_start_8169(struct net_device *dev)
+ RTL_W16(IntrMask, tp->intr_event);
+ }
+
+-static void rtl_tx_performance_tweak(struct pci_dev *pdev, u8 force)
++static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
+ {
+- u8 ctl;
++ struct net_device *dev = pci_get_drvdata(pdev);
++ struct rtl8169_private *tp = netdev_priv(dev);
++ int cap = tp->pcie_cap;
++
++ if (cap) {
++ u16 ctl;
+
+- pci_read_config_byte(pdev, 0x69, &ctl);
+- ctl = (ctl & ~0x70) | force;
+- pci_write_config_byte(pdev, 0x69, ctl);
++ pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
++ ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
++ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
++ }
+ }
+
+ static void rtl_hw_start_8168(struct net_device *dev)
+@@ -2081,7 +2093,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+- rtl_tx_performance_tweak(pdev, 0x50);
++ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(IntrMitigate, 0x5151);
+
+@@ -2114,8 +2126,12 @@ static void rtl_hw_start_8101(struct net_device *dev)
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
+- pci_write_config_word(pdev, 0x68, 0x00);
+- pci_write_config_word(pdev, 0x69, 0x08);
++ int cap = tp->pcie_cap;
++
++ if (cap) {
++ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
++ PCI_EXP_DEVCTL_NOSNOOP_EN);
++ }
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+--
+1.5.3.3
+
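The conversion above replaces a raw write to config offset 0x69 with a lookup of the PCI Express capability and a read-modify-write of the Device Control register: PCI_EXP_DEVCTL_READRQ covers the Max_Read_Request_Size field in bits 14:12 (hence a shift of 12), and the field encodes 128 << value bytes, so 0x5 selects 4096-byte read requests. The snippet below is a minimal, illustrative sketch of the same update using only the generic PCI helpers; it is not part of the patch, it assumes kernel-module context, and the function name and the pdev argument are hypothetical.

#include <linux/pci.h>

/*
 * Illustrative sketch only (not part of the patch): raise the PCIe
 * Max_Read_Request_Size of a device to 4096 bytes using the generic
 * helpers.  "pdev" is a hypothetical struct pci_dev the caller
 * already holds.
 */
static void sketch_set_readrq_4k(struct pci_dev *pdev)
{
	int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	u16 ctl;

	if (!cap)
		return;			/* no PCI Express capability */

	pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
	ctl &= ~PCI_EXP_DEVCTL_READRQ;	/* clear bits 14:12 */
	ctl |= 0x5 << 12;		/* 128 << 5 = 4096 byte requests */
	pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
}

Current kernels also export a pcie_set_readrq() helper for the same job; the open-coded form above simply mirrors what rtl_tx_performance_tweak() does in the patch.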
+From ba648bdcbca93084360d348eb43dde4b19b2489e Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sun, 1 Jun 2008 22:37:49 +0200
+Subject: [PATCH] r8169: add 8168/8101 registers description
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+---
+ drivers/net/r8169.c | 47 +++++++++++++++++++++++++++++++++++++++++++----
+ 1 files changed, 43 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 5c00522..0b8db03 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -197,9 +197,6 @@ enum rtl_registers {
+ Config5 = 0x56,
+ MultiIntr = 0x5c,
+ PHYAR = 0x60,
+- TBICSR = 0x64,
+- TBI_ANAR = 0x68,
+- TBI_LPAR = 0x6a,
+ PHYstatus = 0x6c,
+ RxMaxSize = 0xda,
+ CPlusCmd = 0xe0,
+@@ -213,6 +210,32 @@ enum rtl_registers {
+ FuncForceEvent = 0xfc,
+ };
+
++enum rtl8110_registers {
++ TBICSR = 0x64,
++ TBI_ANAR = 0x68,
++ TBI_LPAR = 0x6a,
++};
++
++enum rtl8168_8101_registers {
++ CSIDR = 0x64,
++ CSIAR = 0x68,
++#define CSIAR_FLAG 0x80000000
++#define CSIAR_WRITE_CMD 0x80000000
++#define CSIAR_BYTE_ENABLE 0x0f
++#define CSIAR_BYTE_ENABLE_SHIFT 12
++#define CSIAR_ADDR_MASK 0x0fff
++
++ EPHYAR = 0x80,
++#define EPHYAR_FLAG 0x80000000
++#define EPHYAR_WRITE_CMD 0x80000000
++#define EPHYAR_REG_MASK 0x1f
++#define EPHYAR_REG_SHIFT 16
++#define EPHYAR_DATA_MASK 0xffff
++ DBG_REG = 0xd1,
++#define FIX_NAK_1 (1 << 4)
++#define FIX_NAK_2 (1 << 3)
++};
++
+ enum rtl_register_content {
+ /* InterruptStatusBits */
+ SYSErr = 0x8000,
+@@ -266,7 +289,13 @@ enum rtl_register_content {
+ TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+
+ /* Config1 register p.24 */
++ LEDS1 = (1 << 7),
++ LEDS0 = (1 << 6),
+ MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
++ Speed_down = (1 << 4),
++ MEMMAP = (1 << 3),
++ IOMAP = (1 << 2),
++ VPD = (1 << 1),
+ PMEnable = (1 << 0), /* Power Management Enable */
+
+ /* Config2 register p. 25 */
+@@ -276,6 +305,7 @@ enum rtl_register_content {
+ /* Config3 register p.25 */
+ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
+ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
++ Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
+
+ /* Config5 register p.27 */
+ BWF = (1 << 6), /* Accept Broadcast wakeup frame */
+@@ -293,7 +323,16 @@ enum rtl_register_content {
+ TBINwComplete = 0x01000000,
+
+ /* CPlusCmd p.31 */
+- PktCntrDisable = (1 << 7), // 8168
++ EnableBist = (1 << 15), // 8168 8101
++ Mac_dbgo_oe = (1 << 14), // 8168 8101
++ Normal_mode = (1 << 13), // unused
++ Force_half_dup = (1 << 12), // 8168 8101
++ Force_rxflow_en = (1 << 11), // 8168 8101
++ Force_txflow_en = (1 << 10), // 8168 8101
++ Cxpl_dbg_sel = (1 << 9), // 8168 8101
++ ASF = (1 << 8), // 8168 8101
++ PktCntrDisable = (1 << 7), // 8168 8101
++ Mac_dbgo_sel = 0x001c, // 8168
+ RxVlan = (1 << 6),
+ RxChkSum = (1 << 5),
+ PCIDAC = (1 << 4),
+--
+1.5.3.3
+
+From 61650c9e3d637b0990d9f26b1421ac4b55f5c744 Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 2 Aug 2008 20:44:13 +0200
+Subject: [PATCH] r8169: add hw start helpers for the 8168 and the 8101
+
+This commit triggers three 'defined but not used' warnings, but
+I prefer not to tie these helpers to a specific change in
+the hw start sequences of the 8168 or of the 8101.
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+---
+ drivers/net/r8169.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 96 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 0b8db03..52eba5c 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -526,6 +526,11 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
+ return value;
+ }
+
++static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
++{
++ mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
++}
++
+ static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
+ int val)
+ {
+@@ -543,6 +548,72 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
+ return mdio_read(ioaddr, location);
+ }
+
++static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
++{
++ unsigned int i;
++
++ RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
++ (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
++
++ for (i = 0; i < 100; i++) {
++ if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
++ break;
++ udelay(10);
++ }
++}
++
++static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
++{
++ u16 value = 0xffff;
++ unsigned int i;
++
++ RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
++
++ for (i = 0; i < 100; i++) {
++ if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
++ value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
++ break;
++ }
++ udelay(10);
++ }
++
++ return value;
++}
++
++static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
++{
++ unsigned int i;
++
++ RTL_W32(CSIDR, value);
++ RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
++ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
++
++ for (i = 0; i < 100; i++) {
++ if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
++ break;
++ udelay(10);
++ }
++}
++
++static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
++{
++ u32 value = ~0x00;
++ unsigned int i;
++
++ RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
++ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
++
++ for (i = 0; i < 100; i++) {
++ if (RTL_R32(CSIAR) & CSIAR_FLAG) {
++ value = RTL_R32(CSIDR);
++ break;
++ }
++ udelay(10);
++ }
++
++ return value;
++}
++
+ static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+ {
+ RTL_W16(IntrMask, 0x0000);
+@@ -2114,6 +2185,31 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
+ }
+ }
+
++static void rtl_csi_access_enable(void __iomem *ioaddr)
++{
++ u32 csi;
++
++ csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
++ rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
++}
++
++struct ephy_info {
++ unsigned int offset;
++ u16 mask;
++ u16 bits;
++};
++
++static void rtl_ephy_init(void __iomem *ioaddr, struct ephy_info *e, int len)
++{
++ u16 w;
++
++ while (len-- > 0) {
++ w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
++ rtl_ephy_write(ioaddr, e->offset, w);
++ e++;
++ }
++}
++
+ static void rtl_hw_start_8168(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+--
+1.5.3.3
+
+From 81fbfc404f2a13646bee46fa98545c0023e3a67a Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 2 Aug 2008 21:08:49 +0200
+Subject: [PATCH] r8169: additional 8101 and 8102 support
+
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Cc: Edward Hsu <edward_hsu@realtek.com.tw>
+---
+ drivers/net/r8169.c | 124 ++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 files changed, 122 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
+index 52eba5c..f28c202 100644
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -96,6 +96,10 @@ enum mac_version {
+ RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
+ RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
+ RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
++ RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
++ RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
++ RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
++ RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
+ RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
+ RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
+ RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
+@@ -122,6 +126,10 @@ static const struct {
+ _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
+ _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
+ _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
++ _R("RTL8102e", RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
++ _R("RTL8102e", RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
++ _R("RTL8102e", RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
++ _R("RTL8101e", RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
+ _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
+ _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
+ _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
+@@ -837,8 +845,12 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
+ }
+ }
+
+- /* The 8100e/8101e do Fast Ethernet only. */
+- if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
++ /* The 8100e/8101e/8102e do Fast Ethernet only. */
++ if ((tp->mac_version == RTL_GIGA_MAC_VER_07) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_08) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_09) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_10) ||
++ (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
+@@ -1212,8 +1224,17 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
+ { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
+
+ /* 8101 family. */
++ { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
++ { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
++ { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
++ { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
++ { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
++ { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
++ { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
+ { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
++ { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
++ { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
+ { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
+ /* FIXME: where did these entries come from ? -- FR */
+ { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
+@@ -1375,6 +1396,22 @@ static void rtl8168cx_hw_phy_config(void __iomem *ioaddr)
+ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
++static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
++{
++ struct phy_reg phy_reg_init[] = {
++ { 0x1f, 0x0003 },
++ { 0x08, 0x441d },
++ { 0x01, 0x9100 },
++ { 0x1f, 0x0000 }
++ };
++
++ mdio_write(ioaddr, 0x1f, 0x0000);
++ mdio_patch(ioaddr, 0x11, 1 << 12);
++ mdio_patch(ioaddr, 0x19, 1 << 13);
++
++ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
++}
++
+ static void rtl_hw_phy_config(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+@@ -1392,6 +1429,11 @@ static void rtl_hw_phy_config(struct net_device *dev)
+ case RTL_GIGA_MAC_VER_04:
+ rtl8169sb_hw_phy_config(ioaddr);
+ break;
++ case RTL_GIGA_MAC_VER_07:
++ case RTL_GIGA_MAC_VER_08:
++ case RTL_GIGA_MAC_VER_09:
++ rtl8102e_hw_phy_config(ioaddr);
++ break;
+ case RTL_GIGA_MAC_VER_18:
+ rtl8168cp_hw_phy_config(ioaddr);
+ break;
+@@ -2253,6 +2295,70 @@ static void rtl_hw_start_8168(struct net_device *dev)
+ RTL_W16(IntrMask, tp->intr_event);
+ }
+
++#define R810X_CPCMD_QUIRK_MASK (\
++ EnableBist | \
++ Mac_dbgo_oe | \
++ Force_half_dup | \
++ Force_half_dup | \
++ Force_txflow_en | \
++ Cxpl_dbg_sel | \
++ ASF | \
++ PktCntrDisable | \
++ PCIDAC | \
++ PCIMulRW)
++
++static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
++{
++ static struct ephy_info e_info_8102e_1[] = {
++ { 0x01, 0, 0x6e65 },
++ { 0x02, 0, 0x091f },
++ { 0x03, 0, 0xc2f9 },
++ { 0x06, 0, 0xafb5 },
++ { 0x07, 0, 0x0e00 },
++ { 0x19, 0, 0xec80 },
++ { 0x01, 0, 0x2e65 },
++ { 0x01, 0, 0x6e65 }
++ };
++ u8 cfg1;
++
++ rtl_csi_access_enable(ioaddr);
++
++ RTL_W8(DBG_REG, FIX_NAK_1);
++
++ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
++
++ RTL_W8(Config1,
++ LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
++ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
++
++ cfg1 = RTL_R8(Config1);
++ if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
++ RTL_W8(Config1, cfg1 & ~LEDS0);
++
++ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
++
++ rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
++}
++
++static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
++{
++ rtl_csi_access_enable(ioaddr);
++
++ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
++
++ RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
++ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
++
++ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
++}
++
++static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
++{
++ rtl_hw_start_8102e_2(ioaddr, pdev);
++
++ rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
++}
++
+ static void rtl_hw_start_8101(struct net_device *dev)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+@@ -2269,6 +2375,20 @@ static void rtl_hw_start_8101(struct net_device *dev)
+ }
+ }
+
++ switch (tp->mac_version) {
++ case RTL_GIGA_MAC_VER_07:
++ rtl_hw_start_8102e_1(ioaddr, pdev);
++ break;
++
++ case RTL_GIGA_MAC_VER_08:
++ rtl_hw_start_8102e_3(ioaddr, pdev);
++ break;
++
++ case RTL_GIGA_MAC_VER_09:
++ rtl_hw_start_8102e_2(ioaddr, pdev);
++ break;
++ }
++
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ RTL_W8(EarlyTxThres, EarlyTxThld);
+--
+1.5.3.3
+
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/0042-intelfb-945gme.patch b/meta/packages/linux/linux-moblin-2.6.27-rc6/0042-intelfb-945gme.patch
new file mode 100644
index 000000000..15ebe5632
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/0042-intelfb-945gme.patch
@@ -0,0 +1,153 @@
+The following patch adds support for Intel's 945GME graphics chip to
+the intelfb driver. I have assumed that the 945GME is identical to the
+already-supported 945GM apart from its PCI IDs; this is based on a quick
+look at the X driver for these chips, which seems to treat them
+identically.
+
+Signed-off-by: Phil Endecott <spam_from_intelfb@chezphil.org>
+
+---
+
+The 945GME is used in the ASUS Eee 901, and I coded this in the hope that
+I'd be able to use it to get a console at the native 1024x600 resolution,
+which is not known to the BIOS. I realised too late that the intelfb
+driver does not support mode changing on laptops, so it won't be of any
+use to me. But rather than throw it away, I will post it here as
+essentially "untested"; maybe someone who knows more about this driver,
+and with more useful hardware to test on, can pick it up.
+
+diff --git a/Documentation/fb/intelfb.txt b/Documentation/fb/intelfb.txt
+index 27a3160..dd9e944 100644
+--- a/Documentation/fb/intelfb.txt
++++ b/Documentation/fb/intelfb.txt
+@@ -14,6 +14,7 @@ graphics devices. These would include:
+ Intel 915GM
+ Intel 945G
+ Intel 945GM
++ Intel 945GME
+ Intel 965G
+ Intel 965GM
+
+diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
+index 3325fbd..a50bea6 100644
+--- a/drivers/video/intelfb/intelfb.h
++++ b/drivers/video/intelfb/intelfb.h
+@@ -12,9 +12,9 @@
+ #endif
+
+ /*** Version/name ***/
+-#define INTELFB_VERSION "0.9.5"
++#define INTELFB_VERSION "0.9.6"
+ #define INTELFB_MODULE_NAME "intelfb"
+-#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM"
++#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/945GME/965G/965GM"
+
+
+ /*** Debug/feature defines ***/
+@@ -58,6 +58,7 @@
+ #define PCI_DEVICE_ID_INTEL_915GM 0x2592
+ #define PCI_DEVICE_ID_INTEL_945G 0x2772
+ #define PCI_DEVICE_ID_INTEL_945GM 0x27A2
++#define PCI_DEVICE_ID_INTEL_945GME 0x27AE
+ #define PCI_DEVICE_ID_INTEL_965G 0x29A2
+ #define PCI_DEVICE_ID_INTEL_965GM 0x2A02
+
+@@ -160,6 +161,7 @@ enum intel_chips {
+ INTEL_915GM,
+ INTEL_945G,
+ INTEL_945GM,
++ INTEL_945GME,
+ INTEL_965G,
+ INTEL_965GM,
+ };
+@@ -363,6 +365,7 @@ struct intelfb_info {
+ ((dinfo)->chipset == INTEL_915GM) || \
+ ((dinfo)->chipset == INTEL_945G) || \
+ ((dinfo)->chipset == INTEL_945GM) || \
++ ((dinfo)->chipset == INTEL_945GME) || \
+ ((dinfo)->chipset == INTEL_965G) || \
+ ((dinfo)->chipset == INTEL_965GM))
+
+diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c
+index fcf9fad..5d896b8 100644
+--- a/drivers/video/intelfb/intelfb_i2c.c
++++ b/drivers/video/intelfb/intelfb_i2c.c
+@@ -171,6 +171,7 @@ void intelfb_create_i2c_busses(struct intelfb_info *dinfo)
+ /* has some LVDS + tv-out */
+ case INTEL_945G:
+ case INTEL_945GM:
++ case INTEL_945GME:
+ case INTEL_965G:
+ case INTEL_965GM:
+ /* SDVO ports have a single control bus - 2 devices */
+diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
+index e44303f..a09e236 100644
+--- a/drivers/video/intelfb/intelfbdrv.c
++++ b/drivers/video/intelfb/intelfbdrv.c
+@@ -2,7 +2,7 @@
+ * intelfb
+ *
+ * Linux framebuffer driver for Intel(R) 830M/845G/852GM/855GM/865G/915G/915GM/
+- * 945G/945GM/965G/965GM integrated graphics chips.
++ * 945G/945GM/945GME/965G/965GM integrated graphics chips.
+ *
+ * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
+ * 2004 Sylvain Meyer
+@@ -102,6 +102,9 @@
+ *
+ * 04/2008 - Version 0.9.5
+ * Add support for 965G/965GM. (Maik Broemme <mbroemme@plusserver.de>)
++ *
++ * 08/2008 - Version 0.9.6
++ * Add support for 945GME. (Phil Endecott <spam_from_intelfb@chezphil.org>)
+ */
+
+ #include <linux/module.h>
+@@ -183,6 +186,7 @@ static struct pci_device_id intelfb_pci_table[] __devinitdata = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GM },
++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GME, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GME },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965G },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965GM },
+ { 0, }
+@@ -555,6 +559,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
+ (ent->device == PCI_DEVICE_ID_INTEL_915GM) ||
+ (ent->device == PCI_DEVICE_ID_INTEL_945G) ||
+ (ent->device == PCI_DEVICE_ID_INTEL_945GM) ||
++ (ent->device == PCI_DEVICE_ID_INTEL_945GME) ||
+ (ent->device == PCI_DEVICE_ID_INTEL_965G) ||
+ (ent->device == PCI_DEVICE_ID_INTEL_965GM)) {
+
+diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c
+index 8e6d6a4..8b26b27 100644
+--- a/drivers/video/intelfb/intelfbhw.c
++++ b/drivers/video/intelfb/intelfbhw.c
+@@ -143,6 +143,12 @@ int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo)
+ dinfo->mobile = 1;
+ dinfo->pll_index = PLLS_I9xx;
+ return 0;
++ case PCI_DEVICE_ID_INTEL_945GME:
++ dinfo->name = "Intel(R) 945GME";
++ dinfo->chipset = INTEL_945GME;
++ dinfo->mobile = 1;
++ dinfo->pll_index = PLLS_I9xx;
++ return 0;
+ case PCI_DEVICE_ID_INTEL_965G:
+ dinfo->name = "Intel(R) 965G";
+ dinfo->chipset = INTEL_965G;
+@@ -186,6 +192,7 @@ int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size,
+ case PCI_DEVICE_ID_INTEL_915GM:
+ case PCI_DEVICE_ID_INTEL_945G:
+ case PCI_DEVICE_ID_INTEL_945GM:
++ case PCI_DEVICE_ID_INTEL_945GME:
+ case PCI_DEVICE_ID_INTEL_965G:
+ case PCI_DEVICE_ID_INTEL_965GM:
+ /* 915, 945 and 965 chipsets support a 256MB aperture.
+
+
+--
diff --git a/meta/packages/linux/linux-moblin-2.6.27-rc6/defconfig-eee901 b/meta/packages/linux/linux-moblin-2.6.27-rc6/defconfig-eee901
new file mode 100644
index 000000000..02c1a32f8
--- /dev/null
+++ b/meta/packages/linux/linux-moblin-2.6.27-rc6/defconfig-eee901
@@ -0,0 +1,2407 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.27-rc6
+# Thu Oct 9 14:48:08 2008
+#
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+# CONFIG_X86_64 is not set
+CONFIG_X86=y
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+# CONFIG_GENERIC_LOCKBREAK is not set
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_FAST_CMPXCHG_LOCAL=y
+CONFIG_MMU=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+# CONFIG_GENERIC_GPIO is not set
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_GENERIC_TIME_VSYSCALL is not set
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_AOUT=y
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_X86_SMP=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_TRAMPOLINE=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION="-eee901"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_TREE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_CGROUPS is not set
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+CONFIG_USER_NS=y
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_FASTBOOT=y
+CONFIG_SYSCTL=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+# CONFIG_MARKERS is not set
+# CONFIG_OPROFILE is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
+# CONFIG_HAVE_DMA_ATTRS is not set
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+# CONFIG_HAVE_CLK is not set
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_LSF is not set
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+
+#
+# Processor type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SMP=y
+CONFIG_X86_FIND_SMP_CONFIG=y
+CONFIG_X86_MPPARSE=y
+CONFIG_X86_PC=y
+# CONFIG_X86_ELAN is not set
+# CONFIG_X86_VOYAGER is not set
+# CONFIG_X86_GENERICARCH is not set
+# CONFIG_X86_VSMP is not set
+# CONFIG_X86_RDC321X is not set
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+# CONFIG_PARAVIRT_GUEST is not set
+# CONFIG_MEMTEST is not set
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+CONFIG_M686=y
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP2 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MPSC is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_GENERIC_CPU is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CPU=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=5
+CONFIG_X86_XADD=y
+# CONFIG_X86_PPRO_FENCE is not set
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=4
+CONFIG_X86_DEBUGCTLMSR=y
+CONFIG_HPET_TIMER=y
+CONFIG_HPET_EMULATE_RTC=y
+CONFIG_DMI=y
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_NR_CPUS=2
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+CONFIG_X86_MCE=y
+# CONFIG_X86_MCE_NONFATAL is not set
+# CONFIG_X86_MCE_P4THERMAL is not set
+CONFIG_VM86=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+# CONFIG_X86_REBOOTFIXUPS is not set
+CONFIG_MICROCODE=y
+CONFIG_MICROCODE_OLD_INTERFACE=y
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+CONFIG_HIGHMEM4G=y
+# CONFIG_HIGHMEM64G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_NEED_NODE_MEMMAP_SIZE=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_STATIC=y
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+
+#
+# Memory hotplug is currently incompatible with Software Suspend
+#
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_HIGHPTE is not set
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+# CONFIG_MTRR_SANITIZER is not set
+# CONFIG_X86_PAT is not set
+# CONFIG_EFI is not set
+# CONFIG_IRQBALANCE is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+CONFIG_HZ_1000=y
+CONFIG_HZ=1000
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+# CONFIG_KEXEC_JUMP is not set
+CONFIG_PHYSICAL_START=0x400000
+CONFIG_RELOCATABLE=y
+CONFIG_PHYSICAL_ALIGN=0x200000
+CONFIG_HOTPLUG_CPU=y
+CONFIG_COMPAT_VDSO=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_VERBOSE is not set
+CONFIG_CAN_PM_TRACE=y
+CONFIG_PM_TRACE=y
+CONFIG_PM_TRACE_RTC=y
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
+CONFIG_ACPI=y
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_PROCFS=y
+CONFIG_ACPI_PROCFS_POWER=y
+CONFIG_ACPI_SYSFS_POWER=y
+CONFIG_ACPI_PROC_EVENT=y
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=m
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_VIDEO=y
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_DOCK=y
+# CONFIG_ACPI_BAY is not set
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_THERMAL=y
+CONFIG_ACPI_WMI=m
+CONFIG_ACPI_ASUS=y
+# CONFIG_ACPI_TOSHIBA is not set
+# CONFIG_ACPI_CUSTOM_DSDT is not set
+CONFIG_ACPI_BLACKLIST_YEAR=0
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_EC=y
+# CONFIG_ACPI_PCI_SLOT is not set
+CONFIG_ACPI_POWER=y
+CONFIG_ACPI_SYSTEM=y
+CONFIG_X86_PM_TIMER=y
+CONFIG_ACPI_CONTAINER=y
+CONFIG_ACPI_SBS=m
+# CONFIG_APM is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_DEBUG=y
+CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+
+#
+# CPUFreq processor drivers
+#
+CONFIG_X86_ACPI_CPUFREQ=y
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_POWERNOW_K8 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+# CONFIG_X86_LONGHAUL is not set
+# CONFIG_X86_E_POWERSAVER is not set
+
+#
+# shared options
+#
+# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+# CONFIG_PCI_GOOLPC is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_DEBUG is not set
+CONFIG_HT_IRQ=y
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_MCA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_OLPC is not set
+CONFIG_K8_NB=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_IPCOMP=m
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_TCP_CONG_HSTCP is not set
+# CONFIG_TCP_CONG_HYBLA is not set
+# CONFIG_TCP_CONG_VEGAS is not set
+# CONFIG_TCP_CONG_SCALABLE is not set
+# CONFIG_TCP_CONG_LP is not set
+# CONFIG_TCP_CONG_VENO is not set
+# CONFIG_TCP_CONG_YEAH is not set
+# CONFIG_TCP_CONG_ILLINOIS is not set
+# CONFIG_DEFAULT_BIC is not set
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_HTCP is not set
+# CONFIG_DEFAULT_VEGAS is not set
+# CONFIG_DEFAULT_WESTWOOD is not set
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+# CONFIG_IPV6_MROUTE is not set
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CT_ACCT=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CT_PROTO_DCCP is not set
+CONFIG_NF_CT_PROTO_GRE=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV4=y
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_RAW=m
+# CONFIG_IP_NF_SECURITY is not set
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_RAW=m
+# CONFIG_IP6_NF_SECURITY is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+CONFIG_NET_CLS_ROUTE=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+# CONFIG_BT_BNEP_MC_FILTER is not set
+# CONFIG_BT_BNEP_PROTO_FILTER is not set
+# CONFIG_BT_HIDP is not set
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=m
+CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+
+#
+# Wireless
+#
+CONFIG_CFG80211=m
+CONFIG_NL80211=y
+CONFIG_WIRELESS_EXT=y
+# CONFIG_WIRELESS_EXT_SYSFS is not set
+CONFIG_MAC80211=m
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_RC_DEFAULT="pid"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_IEEE80211=m
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_CRYPT_WEP=m
+CONFIG_IEEE80211_CRYPT_CCMP=m
+CONFIG_IEEE80211_CRYPT_TKIP=m
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=m
+CONFIG_RFKILL_LEDS=y
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=262144
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_EEPROM_93CX6=m
+# CONFIG_SGI_IOC4 is not set
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+# CONFIG_ACER_WMI is not set
+# CONFIG_FUJITSU_LAPTOP is not set
+# CONFIG_TC1100_WMI is not set
+# CONFIG_HP_WMI is not set
+# CONFIG_MSI_LAPTOP is not set
+# CONFIG_COMPAL_LAPTOP is not set
+# CONFIG_SONY_LAPTOP is not set
+# CONFIG_THINKPAD_ACPI is not set
+# CONFIG_INTEL_MENLOW is not set
+CONFIG_EEEPC_LAPTOP=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+# CONFIG_CHR_DEV_SG is not set
+CONFIG_CHR_DEV_SCH=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_DH is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_ACPI=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI=y
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_SVW is not set
+CONFIG_ATA_PIIX=y
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_PATA_ACPI is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CS5536 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+CONFIG_PATA_SCH=y
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# Enable only one of the two stacks, unless you know what you are doing
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+CONFIG_MACVLAN=m
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_SB1000 is not set
+# CONFIG_ARCNET is not set
+CONFIG_PHYLIB=m
+
+#
+# MII PHY device drivers
+#
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_HAPPYMEAL=m
+CONFIG_SUNGEM=m
+CONFIG_CASSINI=m
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_VORTEX is not set
+# CONFIG_TYPHOON is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
+CONFIG_ATL1=m
+CONFIG_ATL1E=m
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+CONFIG_WLAN_PRE80211=y
+# CONFIG_STRIP is not set
+CONFIG_WLAN_80211=y
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_AIRO is not set
+# CONFIG_HERMES is not set
+# CONFIG_ATMEL is not set
+# CONFIG_PRISM54 is not set
+CONFIG_USB_ZD1201=m
+CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_RTL8180=m
+CONFIG_RTL8187=m
+# CONFIG_ADM8211 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+CONFIG_ATH5K=m
+# CONFIG_ATH5K_DEBUG is not set
+# CONFIG_ATH9K is not set
+CONFIG_IWLWIFI=m
+CONFIG_IWLCORE=m
+# CONFIG_IWLWIFI_LEDS is not set
+CONFIG_IWLWIFI_RFKILL=y
+# CONFIG_IWLWIFI_DEBUG is not set
+# CONFIG_IWLAGN is not set
+CONFIG_IWL3945=m
+CONFIG_IWL3945_RFKILL=y
+# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set
+# CONFIG_IWL3945_LEDS is not set
+# CONFIG_IWL3945_DEBUG is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_ZD1211RW is not set
+CONFIG_RT2X00=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_RFKILL=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2400PCI=m
+CONFIG_RT2400PCI_RFKILL=y
+CONFIG_RT2400PCI_LEDS=y
+CONFIG_RT2500PCI=m
+CONFIG_RT2500PCI_RFKILL=y
+CONFIG_RT2500PCI_LEDS=y
+CONFIG_RT61PCI=m
+CONFIG_RT61PCI_RFKILL=y
+CONFIG_RT61PCI_LEDS=y
+CONFIG_RT2500USB=m
+CONFIG_RT2500USB_LEDS=y
+CONFIG_RT73USB=m
+CONFIG_RT73USB_LEDS=y
+CONFIG_RT2X00_LIB_DEBUGFS=y
+# CONFIG_RT2X00_DEBUG is not set
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_HSO is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+# CONFIG_PPP_BSDCOMP is not set
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+CONFIG_NET_FC=y
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_INPUT_POLLDEV=m
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=m
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_FUJITSU=m
+CONFIG_TOUCHSCREEN_GUNZE=m
+CONFIG_TOUCHSCREEN_ELO=m
+CONFIG_TOUCHSCREEN_MTOUCH=m
+CONFIG_TOUCHSCREEN_INEXIO=m
+CONFIG_TOUCHSCREEN_MK712=m
+CONFIG_TOUCHSCREEN_PENMOUNT=m
+CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
+CONFIG_TOUCHSCREEN_TOUCHWIN=m
+CONFIG_TOUCHSCREEN_UCB1400=m
+# CONFIG_TOUCHSCREEN_WM97XX is not set
+CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
+CONFIG_TOUCHSCREEN_USB_EGALAX=y
+CONFIG_TOUCHSCREEN_USB_PANJIT=y
+CONFIG_TOUCHSCREEN_USB_3M=y
+CONFIG_TOUCHSCREEN_USB_ITM=y
+CONFIG_TOUCHSCREEN_USB_ETURBO=y
+CONFIG_TOUCHSCREEN_USB_GUNZE=y
+CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
+CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
+CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
+CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
+CONFIG_TOUCHSCREEN_USB_GOTOP=y
+CONFIG_TOUCHSCREEN_TOUCHIT213=m
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+CONFIG_INPUT_ATLAS_BTNS=m
+CONFIG_INPUT_ATI_REMOTE=m
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_UINPUT=m
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+CONFIG_FIX_EARLYCON_MEM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_CS5535_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+CONFIG_HPET=y
+# CONFIG_HPET_MMAP is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_CHARDEV is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Graphics adapter I2C/DDC channel drivers
+#
+# CONFIG_I2C_VOODOO3 is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_SCx200_ACB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_BATTERY_DS2760 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_COMMON=y
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=y
+CONFIG_VIDEO_MEDIA=y
+
+#
+# Multimedia drivers
+#
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=y
+# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=y
+CONFIG_MEDIA_TUNER_TDA8290=y
+CONFIG_MEDIA_TUNER_TDA9887=y
+CONFIG_MEDIA_TUNER_TEA5761=y
+CONFIG_MEDIA_TUNER_TEA5767=y
+CONFIG_MEDIA_TUNER_MT20XX=y
+CONFIG_MEDIA_TUNER_XC2028=y
+CONFIG_MEDIA_TUNER_XC5000=y
+CONFIG_VIDEO_V4L2=y
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_BT848 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_SAA7134 is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_CX88 is not set
+# CONFIG_VIDEO_CX23885 is not set
+# CONFIG_VIDEO_AU0828 is not set
+# CONFIG_VIDEO_CX18 is not set
+# CONFIG_VIDEO_CAFE_CCIC is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+# CONFIG_USB_GSPCA is not set
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_USB_ET61X251 is not set
+# CONFIG_USB_SN9C102 is not set
+# CONFIG_USB_ZC0301 is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_DVB_CAPTURE_DRIVERS is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_DRM=y
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+CONFIG_DRM_I810=y
+# CONFIG_DRM_I830 is not set
+CONFIG_DRM_I915=y
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_DDC=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_VESA is not set
+# CONFIG_FB_EFI is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+CONFIG_FB_INTEL=y
+CONFIG_FB_INTEL_DEBUG=y
+CONFIG_FB_INTEL_I2C=y
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_CYBLA is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_ILI9320 is not set
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CORGI is not set
+# CONFIG_BACKLIGHT_PROGEAR is not set
+CONFIG_BACKLIGHT_MBP_NVIDIA=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_VGACON_SOFT_SCROLLBACK=y
+CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
+CONFIG_VIDEO_SELECT=y
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_SEQ_DUMMY=y
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_SEQUENCER_OSS is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+# CONFIG_SND_DEBUG_VERBOSE is not set
+CONFIG_SND_PCM_XRUN_DEBUG=y
+CONFIG_SND_VMASTER=y
+CONFIG_SND_AC97_CODEC=y
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_PCSP is not set
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
+CONFIG_SND_PCI=y
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5530 is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_ANALOG=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_VIA=y
+CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_HDA_CODEC_CMEDIA=y
+CONFIG_SND_HDA_CODEC_SI3054=y
+CONFIG_SND_HDA_GENERIC=y
+CONFIG_SND_HDA_POWER_SAVE=y
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HIFIER is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_SIS7019 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_USX2Y=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=y
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_USB_HIDINPUT_POWERBOOK=y
+CONFIG_HID_FF=y
+CONFIG_HID_PID=y
+CONFIG_LOGITECH_FF=y
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_PANTHERLORD_FF=y
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_ZEROPLUS_FF=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+# CONFIG_USB_OTG is not set
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_ISP116X_HCD=m
+# CONFIG_USB_ISP1760_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_U132_HCD=m
+CONFIG_USB_SL811_HCD=m
+# CONFIG_USB_R8A66597_HCD is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+# CONFIG_USB_WDM is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_DPCM=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+CONFIG_USB_STORAGE_KARMA=y
+# CONFIG_USB_STORAGE_SIERRA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP2101=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+# CONFIG_USB_RIO500 is not set
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+CONFIG_USB_BERRY_CHARGE=m
+CONFIG_USB_LED=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+CONFIG_USB_PHIDGET=m
+CONFIG_USB_PHIDGETKIT=m
+CONFIG_USB_PHIDGETMOTORCONTROL=m
+CONFIG_USB_PHIDGETSERVO=m
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_FTDI_ELAN=m
+CONFIG_USB_APPLEDISPLAY=m
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_SISUSBVGA_CON=y
+CONFIG_USB_LD=m
+CONFIG_USB_TRANCEVIBRATOR=m
+CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_GADGET is not set
+CONFIG_MMC=m
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD Card Drivers
+#
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=m
+# CONFIG_MMC_SDHCI_PCI is not set
+CONFIG_MMC_WBSD=m
+CONFIG_MMC_TIFM_SD=m
+CONFIG_MEMSTICK=m
+CONFIG_MEMSTICK_DEBUG=y
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
+CONFIG_MSPRO_BLOCK=m
+
+#
+# MemoryStick Host Controller Drivers
+#
+# CONFIG_MEMSTICK_TIFM_MS is not set
+# CONFIG_MEMSTICK_JMICRON_38X is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_CLEVO_MAIL is not set
+# CONFIG_LEDS_PCA955X is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+# CONFIG_DMIID is not set
+# CONFIG_ISCSI_IBFT_FIND is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+CONFIG_GENERIC_ACL=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_CONFIGFS_FS=m
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+# CONFIG_DEBUG_SG is not set
+CONFIG_FRAME_POINTER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_LATENCYTOP=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_HAVE_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_TRACING=y
+# CONFIG_FTRACE is not set
+# CONFIG_IRQSOFF_TRACER is not set
+CONFIG_SYSPROF_TRACER=y
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+CONFIG_X86_PTDUMP=y
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_NX_TEST is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_DOUBLEFAULT=y
+# CONFIG_MMIOTRACE is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+# CONFIG_OPTIMIZE_INLINING is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_FILE_CAPABILITIES=y
+# CONFIG_SECURITY_ROOTPLUG is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
+# CONFIG_SECURITY_SELINUX is not set
+# CONFIG_SECURITY_SMACK is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+CONFIG_CRYPTO_CTR=m
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_AES_586 is not set
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+# CONFIG_CRYPTO_SALSA20_586 is not set
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+# CONFIG_CRYPTO_TWOFISH_586 is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+CONFIG_HAVE_KVM=y
+# CONFIG_VIRTUALIZATION is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=m
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/meta/packages/linux/linux-moblin.inc b/meta/packages/linux/linux-moblin.inc
index 802bde26e..2892a5f7a 100644
--- a/meta/packages/linux/linux-moblin.inc
+++ b/meta/packages/linux/linux-moblin.inc
@@ -1,4 +1,4 @@
-DESCRIPTION = "2.6 Linux Development Kernel for moblin2 platforms"
+DESCRIPTION = "2.6 Linux Development Kernel for moblin Atom based platforms"
SECTION = "kernel"
LICENSE = "GPL"
diff --git a/meta/packages/linux/linux-moblin_2.6.27-rc1.bb b/meta/packages/linux/linux-moblin_2.6.27-rc1.bb
index fc507dea1..7dbea3055 100644
--- a/meta/packages/linux/linux-moblin_2.6.27-rc1.bb
+++ b/meta/packages/linux/linux-moblin_2.6.27-rc1.bb
@@ -2,8 +2,6 @@ require linux-moblin.inc
PR = "r4"
-DEFAULT_PREFERENCE_eee901 = "1"
-
SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.26.tar.bz2 \
${KERNELORG_MIRROR}pub/linux/kernel/v2.6/testing/patch-2.6.27-rc1.bz2;patch=1 \
file://0001_Export_shmem_file_setup_for_DRM-GEM.patch;patch=1 \
diff --git a/meta/packages/linux/linux-moblin_2.6.27-rc6.bb b/meta/packages/linux/linux-moblin_2.6.27-rc6.bb
new file mode 100644
index 000000000..aae5b1502
--- /dev/null
+++ b/meta/packages/linux/linux-moblin_2.6.27-rc6.bb
@@ -0,0 +1,54 @@
+require linux-moblin.inc
+
+PR = "r2"
+
+DEFAULT_PREFERENCE = "-1"
+DEFAULT_PREFERENCE_eee901 = "1"
+
+SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.26.tar.bz2 \
+ ${KERNELORG_MIRROR}pub/linux/kernel/v2.6/testing/patch-2.6.27-rc6.bz2;patch=1 \
+ file://0001-drm-remove-define-for-non-linux-systems.patch;patch=1 \
+ file://0002-i915-remove-settable-use_mi_batchbuffer_start.patch;patch=1 \
+ file://0003-i915-Ignore-X-server-provided-mmio-address.patch;patch=1 \
+ file://0004-i915-Use-more-consistent-names-for-regs-and-store.patch;patch=1 \
+ file://0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch;patch=1 \
+ file://0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch;patch=1 \
+ file://0007-i915-Initialize-hardware-status-page-at-device-load.patch;patch=1 \
+ file://0008-Add-Intel-ACPI-IGD-OpRegion-support.patch;patch=1 \
+ file://0009-drm-fix-sysfs-error-path.patch;patch=1 \
+ file://0010-i915-separate-suspend-resume-functions.patch;patch=1 \
+ file://0011-drm-vblank-rework.patch;patch=1 \
+ file://0012-Export-shmem_file_setup-for-DRM-GEM.patch;patch=1 \
+ file://0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch;patch=1 \
+ file://0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch;patch=1 \
+ file://0015-i915-Add-chip-set-ID-param.patch;patch=1 \
+ file://0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch;patch=1 \
+ file://0017-i915-Make-use-of-sarea_priv-conditional.patch;patch=1 \
+ file://0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch;patch=1 \
+ file://0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch;patch=1 \
+ file://0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch;patch=1 \
+ file://0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch;patch=1 \
+ file://0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch;patch=1 \
+ file://0023-drm-clean-up-many-sparse-warnings-in-i915.patch;patch=1 \
+ file://0024-fastboot-create-a-asynchronous-initlevel.patch;patch=1 \
+ file://0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch;patch=1 \
+ file://0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch;patch=1 \
+ file://0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch;patch=1 \
+ file://0028-fastboot-sync-the-async-execution-before-late_initc.patch;patch=1 \
+ file://0029-fastboot-make-fastboot-a-config-option.patch;patch=1 \
+ file://0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch;patch=1 \
+ file://0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch;patch=1 \
+ file://0032-fastboot-remove-wait-for-all-devices-before-mounti.patch;patch=1 \
+ file://0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch;patch=1 \
+ file://0034-fastboot-fix-typo-in-init-Kconfig-text.patch;patch=1 \
+ file://0035-fastboot-remove-duplicate-unpack_to_rootfs.patch;patch=1 \
+ file://0036-warning-fix-init-do_mounts_md-c.patch;patch=1 \
+ file://0037-init-initramfs.c-unused-function-when-compiling-wit.patch;patch=1 \
+ file://0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch;patch=1 \
+ file://0039-Add-a-script-to-visualize-the-kernel-boot-process.patch;patch=1 \
+ file://0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch;patch=1 \
+ file://0041-r8169-8101e.patch;patch=1 \
+ file://0042-intelfb-945gme.patch;patch=1 \
+ file://defconfig-eee901"
+
+S = "${WORKDIR}/linux-2.6.26"
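
The rc6 recipe above is gated by BitBake's DEFAULT_PREFERENCE mechanism: the global value of -1 keeps the 2.6.27-rc6 kernel from being selected by default on any machine, while the eee901 override (dropped from the rc1 recipe in the hunk above) promotes it for that machine. A minimal sketch of the pattern, using only the values already present in this diff:

    # Sketch only, not part of the commit: how the two preference lines interact.
    # A negative global preference means this recipe version is never chosen by default.
    DEFAULT_PREFERENCE = "-1"
    # The machine override applies when "eee901" is in OVERRIDES (i.e. MACHINE=eee901),
    # so that machine prefers the rc6 kernel over the rc1 recipe.
    DEFAULT_PREFERENCE_eee901 = "1"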